From 1ea51101e36fb71cfa73e95122a5571348c6bbc9 Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Wed, 11 Sep 2024 07:14:19 +0000 Subject: [PATCH] Release 0.0.1-beta7 --- .gitignore | 1 + poetry.lock | 29 +- pyproject.toml | 6 +- reference.md | 5523 ++++-- src/gooey/__init__.py | 542 +- src/gooey/ai_animation_generator/__init__.py | 2 - src/gooey/ai_animation_generator/client.py | 133 - src/gooey/ai_art_qr_code/__init__.py | 2 - src/gooey/ai_art_qr_code/client.py | 133 - src/gooey/ai_background_changer/__init__.py | 2 - src/gooey/ai_background_changer/client.py | 133 - .../__init__.py | 2 - .../client.py | 133 - src/gooey/ai_image_with_a_face/__init__.py | 2 - src/gooey/ai_image_with_a_face/client.py | 133 - src/gooey/bulk_runner/__init__.py | 2 - src/gooey/bulk_runner/client.py | 133 - src/gooey/chyron_plant_bot/__init__.py | 2 - src/gooey/chyron_plant_bot/client.py | 133 - src/gooey/client.py | 15656 +++++++++++----- .../compare_ai_image_generators/__init__.py | 2 - .../compare_ai_image_generators/client.py | 133 - .../compare_ai_image_upscalers/__init__.py | 2 - .../compare_ai_image_upscalers/client.py | 133 - src/gooey/compare_ai_translations/__init__.py | 2 - src/gooey/compare_ai_translations/client.py | 133 - .../compare_ai_voice_generators/__init__.py | 2 - .../compare_ai_voice_generators/client.py | 133 - .../copilot_for_your_enterprise/__init__.py | 40 +- .../copilot_for_your_enterprise/client.py | 575 +- .../types/__init__.py | 40 +- ...c_form_video_bots_request_lipsync_model.py | 5 - ...orm_video_bots_request_openai_tts_model.py | 5 - ...video_bots_request_response_format_type.py | 5 - ...y => video_bots_page_request_asr_model.py} | 2 +- ...video_bots_page_request_citation_style.py} | 2 +- ...ideo_bots_page_request_embedding_model.py} | 2 +- .../video_bots_page_request_lipsync_model.py | 5 + ...ideo_bots_page_request_openai_tts_model.py | 5 + ...eo_bots_page_request_openai_voice_name.py} | 2 +- 
..._bots_page_request_response_format_type.py | 5 + ...video_bots_page_request_selected_model.py} | 2 +- ...deo_bots_page_request_translation_model.py | 5 + .../video_bots_page_request_tts_provider.py} | 2 +- src/gooey/copilot_integrations/__init__.py | 40 +- src/gooey/copilot_integrations/client.py | 482 +- .../copilot_integrations/types/__init__.py | 40 +- .../types/create_stream_request_asr_model.py} | 2 +- .../create_stream_request_citation_style.py} | 2 +- .../create_stream_request_embedding_model.py} | 2 +- .../create_stream_request_lipsync_model.py | 5 + ...create_stream_request_openai_tts_model.py} | 2 +- ...reate_stream_request_openai_voice_name.py} | 2 +- ...te_stream_request_response_format_type.py} | 2 +- .../create_stream_request_selected_model.py} | 2 +- ...create_stream_request_translation_model.py | 5 + .../create_stream_request_tts_provider.py} | 2 +- ...ots_stream_create_request_lipsync_model.py | 5 - ..._stream_create_request_openai_tts_model.py | 5 - .../types/video_bots_stream_response.py | 5 +- src/gooey/core/__init__.py | 2 - src/gooey/core/client_wrapper.py | 7 +- src/gooey/core/file.py | 9 +- src/gooey/core/jsonable_encoder.py | 8 +- src/gooey/core/pydantic_utilities.py | 137 +- src/gooey/core/query_encoder.py | 45 +- src/gooey/core/serialization.py | 5 +- .../__init__.py | 2 - .../client.py | 133 - .../edit_an_image_with_ai_prompt/__init__.py | 2 - .../edit_an_image_with_ai_prompt/client.py | 133 - src/gooey/embeddings/__init__.py | 2 - src/gooey/embeddings/client.py | 133 - src/gooey/errors/__init__.py | 10 +- src/gooey/errors/bad_request_error.py | 9 - src/gooey/errors/internal_server_error.py | 9 - src/gooey/errors/payment_required_error.py | 5 +- src/gooey/evaluator/__init__.py | 4 +- src/gooey/evaluator/client.py | 325 +- src/gooey/evaluator/types/__init__.py | 6 +- ..._eval_page_request_response_format_type.py | 5 + .../bulk_eval_page_request_selected_model.py} | 2 +- src/gooey/functions/client.py | 259 +- .../__init__.py | 2 - 
.../client.py | 133 - .../__init__.py | 2 - .../client.py | 133 - .../large_language_models_gpt3/__init__.py | 2 - .../large_language_models_gpt3/client.py | 133 - src/gooey/letter_writer/__init__.py | 2 - src/gooey/letter_writer/client.py | 133 - src/gooey/lip_syncing/__init__.py | 4 +- src/gooey/lip_syncing/client.py | 297 +- src/gooey/lip_syncing/types/__init__.py | 4 +- ...ync_form_lipsync_request_selected_model.py | 5 - .../lipsync_page_request_selected_model.py | 5 + .../lipsync_video_with_any_text/__init__.py | 2 - .../lipsync_video_with_any_text/client.py | 133 - src/gooey/misc/client.py | 126 +- .../__init__.py | 2 - .../client.py | 133 - .../__init__.py | 2 - .../client.py | 133 - .../__init__.py | 2 - .../client.py | 133 - .../search_your_docs_with_gpt/__init__.py | 2 - src/gooey/search_your_docs_with_gpt/client.py | 133 - src/gooey/smart_gpt/__init__.py | 4 +- src/gooey/smart_gpt/client.py | 309 +- src/gooey/smart_gpt/types/__init__.py | 6 +- ..._smart_gpt_request_response_format_type.py | 5 - ...t_gpt_page_request_response_format_type.py | 5 + .../smart_gpt_page_request_selected_model.py} | 2 +- .../__init__.py | 2 - .../speech_recognition_translation/client.py | 133 - .../summarize_your_docs_with_gpt/__init__.py | 2 - .../summarize_your_docs_with_gpt/client.py | 133 - .../__init__.py | 2 - .../client.py | 133 - .../text_guided_audio_generator/__init__.py | 2 - .../text_guided_audio_generator/client.py | 133 - src/gooey/types/__init__.py | 424 +- src/gooey/types/agg_function.py | 7 +- src/gooey/types/agg_function_result.py | 7 +- .../types/animate_request_selected_model.py | 5 - src/gooey/types/animation_prompt.py | 5 +- src/gooey/types/asr_chunk.py | 7 +- src/gooey/types/asr_output_json.py | 7 +- src/gooey/types/asr_page_output.py | 7 +- .../types/asr_page_output_output_text_item.py | 1 - .../types/asr_page_request_output_format.py | 5 + .../asr_page_request_selected_model.py} | 2 +- .../asr_page_request_translation_model.py | 5 + 
src/gooey/types/asr_page_status_response.py | 9 +- .../types/async_api_response_model_v3.py | 7 +- src/gooey/types/balance_response.py | 7 +- src/gooey/types/bot_broadcast_filters.py | 5 +- src/gooey/types/bulk_eval_page_output.py | 7 +- .../types/bulk_eval_page_status_response.py | 9 +- src/gooey/types/bulk_runner_page_output.py | 5 +- .../types/bulk_runner_page_status_response.py | 9 +- src/gooey/types/button_pressed.py | 7 +- src/gooey/types/called_function_response.py | 9 +- ...hat_completion_content_part_image_param.py | 7 +- ...chat_completion_content_part_text_param.py | 5 +- src/gooey/types/chyron_plant_page_output.py | 7 +- src/gooey/types/chyron_plant_page_request.py | 9 +- .../chyron_plant_page_status_response.py | 9 +- src/gooey/types/compare_llm_page_output.py | 7 +- ..._llm_page_request_response_format_type.py} | 2 +- ..._llm_page_request_selected_models_item.py} | 2 +- .../types/compare_llm_page_status_response.py | 9 +- .../types/compare_text2img_page_output.py | 7 +- ...ompare_text2img_page_request_scheduler.py} | 2 +- ...2img_page_request_selected_models_item.py} | 2 +- .../compare_text2img_page_status_response.py | 9 +- .../types/compare_upscaler_page_output.py | 5 +- ...aler_page_request_selected_models_item.py} | 2 +- .../compare_upscaler_page_status_response.py | 9 +- src/gooey/types/console_logs.py | 7 +- src/gooey/types/conversation_entry.py | 9 +- src/gooey/types/conversation_entry_content.py | 1 - .../types/conversation_entry_content_item.py | 6 +- src/gooey/types/conversation_start.py | 7 +- src/gooey/types/create_stream_response.py | 7 +- src/gooey/types/deforum_sd_page_output.py | 7 +- .../deforum_sd_page_request_selected_model.py | 5 + .../types/deforum_sd_page_status_response.py | 9 +- src/gooey/types/doc_extract_page_output.py | 7 +- ...tract_page_request_response_format_type.py | 5 + ...xtract_page_request_selected_asr_model.py} | 2 +- ...doc_extract_page_request_selected_model.py | 47 + .../types/doc_extract_page_status_response.py 
| 9 +- src/gooey/types/doc_search_page_output.py | 9 +- ...doc_search_page_request_citation_style.py} | 2 +- ...oc_search_page_request_embedding_model.py} | 2 +- ... doc_search_page_request_keyword_query.py} | 2 +- ...earch_page_request_response_format_type.py | 5 + .../doc_search_page_request_selected_model.py | 47 + .../types/doc_search_page_status_response.py | 9 +- src/gooey/types/doc_summary_page_output.py | 9 +- ...mmary_page_request_response_format_type.py | 5 + ...summary_page_request_selected_asr_model.py | 23 + ...doc_summary_page_request_selected_model.py | 47 + .../types/doc_summary_page_status_response.py | 9 +- ...oc_summary_request_response_format_type.py | 5 - .../email_face_inpainting_page_output.py | 7 +- ..._inpainting_page_request_selected_model.py | 7 + ...il_face_inpainting_page_status_response.py | 9 +- src/gooey/types/embeddings_page_output.py | 7 +- ...embeddings_page_request_selected_model.py} | 2 +- .../types/embeddings_page_status_response.py | 9 +- src/gooey/types/eval_prompt.py | 5 +- .../types/face_inpainting_page_output.py | 7 +- ...inpainting_page_request_selected_model.py} | 2 +- .../face_inpainting_page_status_response.py | 9 +- src/gooey/types/failed_reponse_model_v2.py | 21 - src/gooey/types/failed_response_detail.py | 38 - src/gooey/types/final_response.py | 7 +- src/gooey/types/functions_page_output.py | 9 +- .../types/functions_page_status_response.py | 9 +- src/gooey/types/generic_error_response.py | 7 +- .../types/generic_error_response_detail.py | 5 +- src/gooey/types/google_gpt_page_output.py | 11 +- ...google_gpt_page_request_embedding_model.py | 18 + ...e_gpt_page_request_response_format_type.py | 5 + .../google_gpt_page_request_selected_model.py | 47 + .../types/google_gpt_page_status_response.py | 9 +- .../types/google_image_gen_page_output.py | 7 +- ..._image_gen_page_request_selected_model.py} | 2 +- .../google_image_gen_page_status_response.py | 9 +- src/gooey/types/http_validation_error.py | 7 +- 
.../types/image_segmentation_page_output.py | 7 +- ...egmentation_page_request_selected_model.py | 5 + ...image_segmentation_page_status_response.py | 9 +- src/gooey/types/image_url.py | 7 +- src/gooey/types/img2img_page_output.py | 7 +- ...page_request_selected_controlnet_model.py} | 7 +- ...request_selected_controlnet_model_item.py} | 2 +- ...=> img2img_page_request_selected_model.py} | 2 +- .../types/img2img_page_status_response.py | 9 +- src/gooey/types/letter_writer_page_output.py | 9 +- src/gooey/types/letter_writer_page_request.py | 11 +- .../letter_writer_page_status_response.py | 9 +- src/gooey/types/lipsync_page_output.py | 7 +- .../types/lipsync_page_status_response.py | 9 +- src/gooey/types/lipsync_tts_page_output.py | 7 +- ...psync_tts_page_request_openai_tts_model.py | 5 + ...ync_tts_page_request_openai_voice_name.py} | 2 +- ...lipsync_tts_page_request_selected_model.py | 5 + .../lipsync_tts_page_request_tts_provider.py} | 2 +- .../types/lipsync_tts_page_status_response.py | 9 +- .../lipsync_tts_request_openai_tts_model.py | 5 - .../lipsync_tts_request_selected_model.py | 5 - .../types/llm_request_response_format_type.py | 5 - src/gooey/types/message_part.py | 9 +- .../types/object_inpainting_page_output.py | 7 +- ..._inpainting_page_request_selected_model.py | 7 + .../object_inpainting_page_status_response.py | 9 +- ...lize_email_request_response_format_type.py | 5 - ...ersonalize_email_request_selected_model.py | 47 - .../types/portrait_request_selected_model.py | 5 - ...est_image_prompt_controlnet_models_item.py | 20 + ...rt_qr_code_async_form_request_scheduler.py | 23 + ..._request_selected_controlnet_model_item.py | 20 + ..._code_async_form_request_selected_model.py | 22 + ..._v3asr_async_form_request_output_format.py | 5 + ...v3asr_async_form_request_selected_model.py | 23 + ...r_async_form_request_translation_model.py} | 2 +- ...async_form_request_response_format_type.py | 5 + ...eval_async_form_request_selected_model.py} | 2 +- 
...async_form_request_selected_models_item.py | 7 + ...async_form_request_response_format_type.py | 5 + ...async_form_request_selected_models_item.py | 47 + ...e_text2img_async_form_request_scheduler.py | 23 + ...async_form_request_selected_models_item.py | 22 + ...um_sd_async_form_request_selected_model.py | 5 + ...async_form_request_response_format_type.py | 5 + ...t_async_form_request_selected_asr_model.py | 23 + ...tract_async_form_request_selected_model.py | 47 + ...earch_async_form_request_citation_style.py | 25 + ...rch_async_form_request_embedding_model.py} | 2 +- ...search_async_form_request_keyword_query.py | 5 + ...async_form_request_response_format_type.py | 5 + ...earch_async_form_request_selected_model.py | 47 + ...async_form_request_response_format_type.py | 5 + ...y_async_form_request_selected_asr_model.py | 23 + ...mmary_async_form_request_selected_model.py | 47 + ...nting_async_form_request_selected_model.py | 7 + ...ings_async_form_request_selected_model.py} | 2 +- ...nting_async_form_request_selected_model.py | 7 + ..._gpt_async_form_request_embedding_model.py | 18 + ...async_form_request_response_format_type.py | 5 + ...e_gpt_async_form_request_selected_model.py | 47 + ...e_gen_async_form_request_selected_model.py | 21 + ...ation_async_form_request_selected_model.py | 5 + ..._form_request_selected_controlnet_model.py | 21 + ...request_selected_controlnet_model_item.py} | 2 +- ...g2img_async_form_request_selected_model.py | 21 + ...psync_async_form_request_selected_model.py | 5 + ...tts_async_form_request_openai_tts_model.py | 5 + ...ts_async_form_request_openai_voice_name.py | 7 + ...c_tts_async_form_request_selected_model.py | 5 + ...ync_tts_async_form_request_tts_provider.py | 7 + ...nting_async_form_request_selected_model.py | 7 + ...aker_async_form_request_embedding_model.py | 18 + ...async_form_request_response_format_type.py | 7 + ...maker_async_form_request_selected_model.py | 47 + ...r_doc_async_form_request_citation_style.py | 25 + 
..._doc_async_form_request_embedding_model.py | 18 + ...er_doc_async_form_request_keyword_query.py | 5 + ...async_form_request_response_format_type.py | 7 + ...r_doc_async_form_request_selected_model.py | 47 + ...async_form_request_response_format_type.py | 5 + ...mmary_async_form_request_selected_model.py | 47 + ...async_form_request_response_format_type.py | 5 + ...t_gpt_async_form_request_selected_model.py | 47 + ...async_form_request_response_format_type.py | 7 + ...email_async_form_request_selected_model.py | 47 + ...ech_async_form_request_openai_tts_model.py | 5 + ...ch_async_form_request_openai_voice_name.py | 7 + ..._speech_async_form_request_tts_provider.py | 7 + ...late_async_form_request_selected_model.py} | 2 +- ...video_bots_async_form_request_asr_model.py | 23 + ..._bots_async_form_request_citation_style.py | 25 + ...bots_async_form_request_embedding_model.py | 18 + ...o_bots_async_form_request_lipsync_model.py | 5 + ...ots_async_form_request_openai_tts_model.py | 5 + ...ts_async_form_request_openai_voice_name.py | 7 + ...async_form_request_response_format_type.py | 5 + ..._bots_async_form_request_selected_model.py | 47 + ...ts_async_form_request_translation_model.py | 5 + ...eo_bots_async_form_request_tts_provider.py | 7 + .../product_image_request_selected_model.py | 5 - src/gooey/types/prompt_tree_node.py | 11 +- src/gooey/types/prompt_tree_node_prompt.py | 1 - .../types/qr_code_generator_page_output.py | 7 +- ...est_image_prompt_controlnet_models_item.py | 20 + ..._code_generator_page_request_scheduler.py} | 2 +- ...request_selected_controlnet_model_item.py} | 2 +- ..._generator_page_request_selected_model.py} | 2 +- .../qr_code_generator_page_status_response.py | 9 +- .../types/rag_request_response_format_type.py | 5 - src/gooey/types/recipe_function.py | 7 +- .../types/related_doc_search_response.py | 7 +- .../types/related_google_gpt_response.py | 9 +- .../types/related_qn_a_doc_page_output.py | 11 +- 
...d_qn_a_doc_page_request_citation_style.py} | 2 +- ...d_qn_a_doc_page_request_embedding_model.py | 18 + ...ed_qn_a_doc_page_request_keyword_query.py} | 2 +- ..._doc_page_request_response_format_type.py} | 2 +- ...d_qn_a_doc_page_request_selected_model.py} | 2 +- .../related_qn_a_doc_page_status_response.py | 7 +- src/gooey/types/related_qn_a_page_output.py | 11 +- ...lated_qn_a_page_request_embedding_model.py | 18 + ..._qn_a_page_request_response_format_type.py | 5 + ...elated_qn_a_page_request_selected_model.py | 47 + .../related_qn_a_page_status_response.py | 7 +- ...emove_background_request_selected_model.py | 5 - src/gooey/types/reply_button.py | 5 +- src/gooey/types/response_model.py | 11 +- .../types/response_model_final_prompt.py | 1 - src/gooey/types/run_settings.py | 7 +- src/gooey/types/run_start.py | 7 +- src/gooey/types/sad_talker_settings.py | 7 +- src/gooey/types/search_reference.py | 5 +- ...eo_content_request_response_format_type.py | 5 - ...le_also_ask_doc_request_embedding_model.py | 18 - ...so_ask_doc_request_response_format_type.py | 5 - ...ple_also_ask_doc_request_selected_model.py | 47 - ...e_also_ask_request_response_format_type.py | 5 - ..._people_also_ask_request_selected_model.py | 47 - src/gooey/types/seo_summary_page_output.py | 11 +- ...mmary_page_request_response_format_type.py | 5 + ...seo_summary_page_request_selected_model.py | 47 + .../types/seo_summary_page_status_response.py | 7 +- src/gooey/types/smart_gpt_page_output.py | 9 +- .../types/smart_gpt_page_status_response.py | 7 +- .../types/social_lookup_email_page_output.py | 9 +- ...mail_page_request_response_format_type.py} | 2 +- ...okup_email_page_request_selected_model.py} | 2 +- ...ocial_lookup_email_page_status_response.py | 7 +- ...peech_recognition_request_output_format.py | 5 - ...h_recognition_request_translation_model.py | 5 - src/gooey/types/stream_error.py | 7 +- ...thesize_data_request_selected_asr_model.py | 23 - .../synthesize_data_request_selected_model.py | 47 
- src/gooey/types/text2audio_page_output.py | 7 +- .../types/text2audio_page_status_response.py | 7 +- src/gooey/types/text_to_speech_page_output.py | 7 +- ...to_speech_page_request_openai_tts_model.py | 5 + ..._speech_page_request_openai_voice_name.py} | 2 +- ...xt_to_speech_page_request_tts_provider.py} | 2 +- .../text_to_speech_page_status_response.py | 7 +- src/gooey/types/training_data_model.py | 5 +- .../types/translate_request_selected_model.py | 5 - src/gooey/types/translation_page_output.py | 7 +- ...translation_page_request_selected_model.py | 5 + .../types/translation_page_status_response.py | 7 +- src/gooey/types/validation_error.py | 7 +- src/gooey/types/vcard.py | 5 +- src/gooey/types/video_bots_page_output.py | 13 +- .../video_bots_page_output_final_prompt.py | 1 - .../types/video_bots_page_status_response.py | 7 +- .../web_search_llm_request_selected_model.py | 47 - src/gooey/version.py | 1 - src/gooey/web_search_gpt3/__init__.py | 2 - src/gooey/web_search_gpt3/client.py | 133 - tests/custom/test_client.py | 1 + tests/utils/assets/models/__init__.py | 2 +- tests/utils/assets/models/circle.py | 1 + .../assets/models/object_with_defaults.py | 1 + .../models/object_with_optional_field.py | 9 +- tests/utils/assets/models/shape.py | 5 +- tests/utils/assets/models/square.py | 1 + .../assets/models/undiscriminated_shape.py | 1 - tests/utils/test_http_client.py | 14 + tests/utils/test_query_encoding.py | 35 +- tests/utils/test_serialization.py | 4 +- 399 files changed, 19545 insertions(+), 12341 deletions(-) delete mode 100644 src/gooey/ai_animation_generator/__init__.py delete mode 100644 src/gooey/ai_animation_generator/client.py delete mode 100644 src/gooey/ai_art_qr_code/__init__.py delete mode 100644 src/gooey/ai_art_qr_code/client.py delete mode 100644 src/gooey/ai_background_changer/__init__.py delete mode 100644 src/gooey/ai_background_changer/client.py delete mode 100644 src/gooey/ai_generated_photo_from_email_profile_lookup/__init__.py delete mode 
100644 src/gooey/ai_generated_photo_from_email_profile_lookup/client.py delete mode 100644 src/gooey/ai_image_with_a_face/__init__.py delete mode 100644 src/gooey/ai_image_with_a_face/client.py delete mode 100644 src/gooey/bulk_runner/__init__.py delete mode 100644 src/gooey/bulk_runner/client.py delete mode 100644 src/gooey/chyron_plant_bot/__init__.py delete mode 100644 src/gooey/chyron_plant_bot/client.py delete mode 100644 src/gooey/compare_ai_image_generators/__init__.py delete mode 100644 src/gooey/compare_ai_image_generators/client.py delete mode 100644 src/gooey/compare_ai_image_upscalers/__init__.py delete mode 100644 src/gooey/compare_ai_image_upscalers/client.py delete mode 100644 src/gooey/compare_ai_translations/__init__.py delete mode 100644 src/gooey/compare_ai_translations/client.py delete mode 100644 src/gooey/compare_ai_voice_generators/__init__.py delete mode 100644 src/gooey/compare_ai_voice_generators/client.py delete mode 100644 src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_lipsync_model.py delete mode 100644 src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_openai_tts_model.py delete mode 100644 src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_response_format_type.py rename src/gooey/copilot_for_your_enterprise/types/{async_form_video_bots_request_asr_model.py => video_bots_page_request_asr_model.py} (90%) rename src/gooey/copilot_for_your_enterprise/types/{async_form_video_bots_request_citation_style.py => video_bots_page_request_citation_style.py} (89%) rename src/gooey/{types/web_search_llm_request_embedding_model.py => copilot_for_your_enterprise/types/video_bots_page_request_embedding_model.py} (87%) create mode 100644 src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_lipsync_model.py create mode 100644 src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_openai_tts_model.py rename 
src/gooey/{types/lipsync_tts_request_openai_voice_name.py => copilot_for_your_enterprise/types/video_bots_page_request_openai_voice_name.py} (76%) create mode 100644 src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_response_format_type.py rename src/gooey/{types/rag_request_selected_model.py => copilot_for_your_enterprise/types/video_bots_page_request_selected_model.py} (95%) create mode 100644 src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_translation_model.py rename src/gooey/{types/text_to_speech_request_tts_provider.py => copilot_for_your_enterprise/types/video_bots_page_request_tts_provider.py} (79%) rename src/gooey/{types/doc_summary_request_selected_asr_model.py => copilot_integrations/types/create_stream_request_asr_model.py} (90%) rename src/gooey/{types/rag_request_citation_style.py => copilot_integrations/types/create_stream_request_citation_style.py} (90%) rename src/gooey/{types/embed_request_selected_model.py => copilot_integrations/types/create_stream_request_embedding_model.py} (87%) create mode 100644 src/gooey/copilot_integrations/types/create_stream_request_lipsync_model.py rename src/gooey/{types/text_to_speech_request_openai_tts_model.py => copilot_integrations/types/create_stream_request_openai_tts_model.py} (64%) rename src/gooey/{types/text_to_speech_request_openai_voice_name.py => copilot_integrations/types/create_stream_request_openai_voice_name.py} (76%) rename src/gooey/{types/web_search_llm_request_response_format_type.py => copilot_integrations/types/create_stream_request_response_format_type.py} (65%) rename src/gooey/{types/llm_request_selected_models_item.py => copilot_integrations/types/create_stream_request_selected_model.py} (95%) create mode 100644 src/gooey/copilot_integrations/types/create_stream_request_translation_model.py rename src/gooey/{types/lipsync_tts_request_tts_provider.py => copilot_integrations/types/create_stream_request_tts_provider.py} (79%) delete mode 100644 
src/gooey/copilot_integrations/types/video_bots_stream_create_request_lipsync_model.py delete mode 100644 src/gooey/copilot_integrations/types/video_bots_stream_create_request_openai_tts_model.py delete mode 100644 src/gooey/create_a_perfect_seo_optimized_title_paragraph/__init__.py delete mode 100644 src/gooey/create_a_perfect_seo_optimized_title_paragraph/client.py delete mode 100644 src/gooey/edit_an_image_with_ai_prompt/__init__.py delete mode 100644 src/gooey/edit_an_image_with_ai_prompt/client.py delete mode 100644 src/gooey/embeddings/__init__.py delete mode 100644 src/gooey/embeddings/client.py delete mode 100644 src/gooey/errors/bad_request_error.py delete mode 100644 src/gooey/errors/internal_server_error.py create mode 100644 src/gooey/evaluator/types/bulk_eval_page_request_response_format_type.py rename src/gooey/{types/doc_summary_request_selected_model.py => evaluator/types/bulk_eval_page_request_selected_model.py} (95%) delete mode 100644 src/gooey/generate_people_also_ask_seo_content/__init__.py delete mode 100644 src/gooey/generate_people_also_ask_seo_content/client.py delete mode 100644 src/gooey/generate_product_photo_backgrounds/__init__.py delete mode 100644 src/gooey/generate_product_photo_backgrounds/client.py delete mode 100644 src/gooey/large_language_models_gpt3/__init__.py delete mode 100644 src/gooey/large_language_models_gpt3/client.py delete mode 100644 src/gooey/letter_writer/__init__.py delete mode 100644 src/gooey/letter_writer/client.py delete mode 100644 src/gooey/lip_syncing/types/async_form_lipsync_request_selected_model.py create mode 100644 src/gooey/lip_syncing/types/lipsync_page_request_selected_model.py delete mode 100644 src/gooey/lipsync_video_with_any_text/__init__.py delete mode 100644 src/gooey/lipsync_video_with_any_text/client.py delete mode 100644 src/gooey/people_also_ask_answers_from_a_doc/__init__.py delete mode 100644 src/gooey/people_also_ask_answers_from_a_doc/client.py delete mode 100644 
src/gooey/profile_lookup_gpt3for_ai_personalized_emails/__init__.py delete mode 100644 src/gooey/profile_lookup_gpt3for_ai_personalized_emails/client.py delete mode 100644 src/gooey/render_image_search_results_with_ai/__init__.py delete mode 100644 src/gooey/render_image_search_results_with_ai/client.py delete mode 100644 src/gooey/search_your_docs_with_gpt/__init__.py delete mode 100644 src/gooey/search_your_docs_with_gpt/client.py delete mode 100644 src/gooey/smart_gpt/types/async_form_smart_gpt_request_response_format_type.py create mode 100644 src/gooey/smart_gpt/types/smart_gpt_page_request_response_format_type.py rename src/gooey/{types/seo_content_request_selected_model.py => smart_gpt/types/smart_gpt_page_request_selected_model.py} (95%) delete mode 100644 src/gooey/speech_recognition_translation/__init__.py delete mode 100644 src/gooey/speech_recognition_translation/client.py delete mode 100644 src/gooey/summarize_your_docs_with_gpt/__init__.py delete mode 100644 src/gooey/summarize_your_docs_with_gpt/client.py delete mode 100644 src/gooey/synthetic_data_maker_for_videos_pd_fs/__init__.py delete mode 100644 src/gooey/synthetic_data_maker_for_videos_pd_fs/client.py delete mode 100644 src/gooey/text_guided_audio_generator/__init__.py delete mode 100644 src/gooey/text_guided_audio_generator/client.py delete mode 100644 src/gooey/types/animate_request_selected_model.py create mode 100644 src/gooey/types/asr_page_request_output_format.py rename src/gooey/{copilot_integrations/types/video_bots_stream_create_request_asr_model.py => types/asr_page_request_selected_model.py} (89%) create mode 100644 src/gooey/types/asr_page_request_translation_model.py rename src/gooey/types/{synthesize_data_request_response_format_type.py => compare_llm_page_request_response_format_type.py} (65%) rename src/gooey/{copilot_for_your_enterprise/types/async_form_video_bots_request_selected_model.py => types/compare_llm_page_request_selected_models_item.py} (95%) rename 
src/gooey/types/{qr_code_request_scheduler.py => compare_text2img_page_request_scheduler.py} (89%) rename src/gooey/types/{text_to_image_request_selected_models_item.py => compare_text2img_page_request_selected_models_item.py} (87%) rename src/gooey/types/{upscale_request_selected_models_item.py => compare_upscaler_page_request_selected_models_item.py} (74%) create mode 100644 src/gooey/types/deforum_sd_page_request_selected_model.py create mode 100644 src/gooey/types/doc_extract_page_request_response_format_type.py rename src/gooey/types/{speech_recognition_request_selected_model.py => doc_extract_page_request_selected_asr_model.py} (89%) create mode 100644 src/gooey/types/doc_extract_page_request_selected_model.py rename src/gooey/types/{seo_people_also_ask_doc_request_citation_style.py => doc_search_page_request_citation_style.py} (89%) rename src/gooey/types/{rag_request_embedding_model.py => doc_search_page_request_embedding_model.py} (87%) rename src/gooey/types/{rag_request_keyword_query.py => doc_search_page_request_keyword_query.py} (52%) create mode 100644 src/gooey/types/doc_search_page_request_response_format_type.py create mode 100644 src/gooey/types/doc_search_page_request_selected_model.py create mode 100644 src/gooey/types/doc_summary_page_request_response_format_type.py create mode 100644 src/gooey/types/doc_summary_page_request_selected_asr_model.py create mode 100644 src/gooey/types/doc_summary_page_request_selected_model.py delete mode 100644 src/gooey/types/doc_summary_request_response_format_type.py create mode 100644 src/gooey/types/email_face_inpainting_page_request_selected_model.py rename src/gooey/types/{seo_people_also_ask_request_embedding_model.py => embeddings_page_request_selected_model.py} (86%) rename src/gooey/types/{image_from_email_request_selected_model.py => face_inpainting_page_request_selected_model.py} (74%) delete mode 100644 src/gooey/types/failed_reponse_model_v2.py delete mode 100644 
src/gooey/types/failed_response_detail.py create mode 100644 src/gooey/types/google_gpt_page_request_embedding_model.py create mode 100644 src/gooey/types/google_gpt_page_request_response_format_type.py create mode 100644 src/gooey/types/google_gpt_page_request_selected_model.py rename src/gooey/types/{image_from_web_search_request_selected_model.py => google_image_gen_page_request_selected_model.py} (88%) create mode 100644 src/gooey/types/image_segmentation_page_request_selected_model.py rename src/gooey/types/{remix_image_request_selected_controlnet_model.py => img2img_page_request_selected_controlnet_model.py} (71%) rename src/gooey/types/{qr_code_request_image_prompt_controlnet_models_item.py => img2img_page_request_selected_controlnet_model_item.py} (88%) rename src/gooey/types/{remix_image_request_selected_model.py => img2img_page_request_selected_model.py} (89%) create mode 100644 src/gooey/types/lipsync_tts_page_request_openai_tts_model.py rename src/gooey/{copilot_for_your_enterprise/types/async_form_video_bots_request_openai_voice_name.py => types/lipsync_tts_page_request_openai_voice_name.py} (74%) create mode 100644 src/gooey/types/lipsync_tts_page_request_selected_model.py rename src/gooey/{copilot_for_your_enterprise/types/async_form_video_bots_request_tts_provider.py => types/lipsync_tts_page_request_tts_provider.py} (77%) delete mode 100644 src/gooey/types/lipsync_tts_request_openai_tts_model.py delete mode 100644 src/gooey/types/lipsync_tts_request_selected_model.py delete mode 100644 src/gooey/types/llm_request_response_format_type.py create mode 100644 src/gooey/types/object_inpainting_page_request_selected_model.py delete mode 100644 src/gooey/types/personalize_email_request_response_format_type.py delete mode 100644 src/gooey/types/personalize_email_request_selected_model.py delete mode 100644 src/gooey/types/portrait_request_selected_model.py create mode 100644 
src/gooey/types/post_v3art_qr_code_async_form_request_image_prompt_controlnet_models_item.py create mode 100644 src/gooey/types/post_v3art_qr_code_async_form_request_scheduler.py create mode 100644 src/gooey/types/post_v3art_qr_code_async_form_request_selected_controlnet_model_item.py create mode 100644 src/gooey/types/post_v3art_qr_code_async_form_request_selected_model.py create mode 100644 src/gooey/types/post_v3asr_async_form_request_output_format.py create mode 100644 src/gooey/types/post_v3asr_async_form_request_selected_model.py rename src/gooey/{copilot_for_your_enterprise/types/async_form_video_bots_request_translation_model.py => types/post_v3asr_async_form_request_translation_model.py} (65%) create mode 100644 src/gooey/types/post_v3bulk_eval_async_form_request_response_format_type.py rename src/gooey/{smart_gpt/types/async_form_smart_gpt_request_selected_model.py => types/post_v3bulk_eval_async_form_request_selected_model.py} (95%) create mode 100644 src/gooey/types/post_v3compare_ai_upscalers_async_form_request_selected_models_item.py create mode 100644 src/gooey/types/post_v3compare_llm_async_form_request_response_format_type.py create mode 100644 src/gooey/types/post_v3compare_llm_async_form_request_selected_models_item.py create mode 100644 src/gooey/types/post_v3compare_text2img_async_form_request_scheduler.py create mode 100644 src/gooey/types/post_v3compare_text2img_async_form_request_selected_models_item.py create mode 100644 src/gooey/types/post_v3deforum_sd_async_form_request_selected_model.py create mode 100644 src/gooey/types/post_v3doc_extract_async_form_request_response_format_type.py create mode 100644 src/gooey/types/post_v3doc_extract_async_form_request_selected_asr_model.py create mode 100644 src/gooey/types/post_v3doc_extract_async_form_request_selected_model.py create mode 100644 src/gooey/types/post_v3doc_search_async_form_request_citation_style.py rename 
src/gooey/{copilot_integrations/types/video_bots_stream_create_request_embedding_model.py => types/post_v3doc_search_async_form_request_embedding_model.py} (85%) create mode 100644 src/gooey/types/post_v3doc_search_async_form_request_keyword_query.py create mode 100644 src/gooey/types/post_v3doc_search_async_form_request_response_format_type.py create mode 100644 src/gooey/types/post_v3doc_search_async_form_request_selected_model.py create mode 100644 src/gooey/types/post_v3doc_summary_async_form_request_response_format_type.py create mode 100644 src/gooey/types/post_v3doc_summary_async_form_request_selected_asr_model.py create mode 100644 src/gooey/types/post_v3doc_summary_async_form_request_selected_model.py create mode 100644 src/gooey/types/post_v3email_face_inpainting_async_form_request_selected_model.py rename src/gooey/{copilot_for_your_enterprise/types/async_form_video_bots_request_embedding_model.py => types/post_v3embeddings_async_form_request_selected_model.py} (85%) create mode 100644 src/gooey/types/post_v3face_inpainting_async_form_request_selected_model.py create mode 100644 src/gooey/types/post_v3google_gpt_async_form_request_embedding_model.py create mode 100644 src/gooey/types/post_v3google_gpt_async_form_request_response_format_type.py create mode 100644 src/gooey/types/post_v3google_gpt_async_form_request_selected_model.py create mode 100644 src/gooey/types/post_v3google_image_gen_async_form_request_selected_model.py create mode 100644 src/gooey/types/post_v3image_segmentation_async_form_request_selected_model.py create mode 100644 src/gooey/types/post_v3img2img_async_form_request_selected_controlnet_model.py rename src/gooey/types/{remix_image_request_selected_controlnet_model_item.py => post_v3img2img_async_form_request_selected_controlnet_model_item.py} (86%) create mode 100644 src/gooey/types/post_v3img2img_async_form_request_selected_model.py create mode 100644 src/gooey/types/post_v3lipsync_async_form_request_selected_model.py create mode 
100644 src/gooey/types/post_v3lipsync_tts_async_form_request_openai_tts_model.py create mode 100644 src/gooey/types/post_v3lipsync_tts_async_form_request_openai_voice_name.py create mode 100644 src/gooey/types/post_v3lipsync_tts_async_form_request_selected_model.py create mode 100644 src/gooey/types/post_v3lipsync_tts_async_form_request_tts_provider.py create mode 100644 src/gooey/types/post_v3object_inpainting_async_form_request_selected_model.py create mode 100644 src/gooey/types/post_v3related_qna_maker_async_form_request_embedding_model.py create mode 100644 src/gooey/types/post_v3related_qna_maker_async_form_request_response_format_type.py create mode 100644 src/gooey/types/post_v3related_qna_maker_async_form_request_selected_model.py create mode 100644 src/gooey/types/post_v3related_qna_maker_doc_async_form_request_citation_style.py create mode 100644 src/gooey/types/post_v3related_qna_maker_doc_async_form_request_embedding_model.py create mode 100644 src/gooey/types/post_v3related_qna_maker_doc_async_form_request_keyword_query.py create mode 100644 src/gooey/types/post_v3related_qna_maker_doc_async_form_request_response_format_type.py create mode 100644 src/gooey/types/post_v3related_qna_maker_doc_async_form_request_selected_model.py create mode 100644 src/gooey/types/post_v3seo_summary_async_form_request_response_format_type.py create mode 100644 src/gooey/types/post_v3seo_summary_async_form_request_selected_model.py create mode 100644 src/gooey/types/post_v3smart_gpt_async_form_request_response_format_type.py create mode 100644 src/gooey/types/post_v3smart_gpt_async_form_request_selected_model.py create mode 100644 src/gooey/types/post_v3social_lookup_email_async_form_request_response_format_type.py create mode 100644 src/gooey/types/post_v3social_lookup_email_async_form_request_selected_model.py create mode 100644 src/gooey/types/post_v3text_to_speech_async_form_request_openai_tts_model.py create mode 100644 
src/gooey/types/post_v3text_to_speech_async_form_request_openai_voice_name.py create mode 100644 src/gooey/types/post_v3text_to_speech_async_form_request_tts_provider.py rename src/gooey/{copilot_integrations/types/video_bots_stream_create_request_translation_model.py => types/post_v3translate_async_form_request_selected_model.py} (66%) create mode 100644 src/gooey/types/post_v3video_bots_async_form_request_asr_model.py create mode 100644 src/gooey/types/post_v3video_bots_async_form_request_citation_style.py create mode 100644 src/gooey/types/post_v3video_bots_async_form_request_embedding_model.py create mode 100644 src/gooey/types/post_v3video_bots_async_form_request_lipsync_model.py create mode 100644 src/gooey/types/post_v3video_bots_async_form_request_openai_tts_model.py create mode 100644 src/gooey/types/post_v3video_bots_async_form_request_openai_voice_name.py create mode 100644 src/gooey/types/post_v3video_bots_async_form_request_response_format_type.py create mode 100644 src/gooey/types/post_v3video_bots_async_form_request_selected_model.py create mode 100644 src/gooey/types/post_v3video_bots_async_form_request_translation_model.py create mode 100644 src/gooey/types/post_v3video_bots_async_form_request_tts_provider.py delete mode 100644 src/gooey/types/product_image_request_selected_model.py create mode 100644 src/gooey/types/qr_code_generator_page_request_image_prompt_controlnet_models_item.py rename src/gooey/types/{text_to_image_request_scheduler.py => qr_code_generator_page_request_scheduler.py} (89%) rename src/gooey/types/{qr_code_request_selected_controlnet_model_item.py => qr_code_generator_page_request_selected_controlnet_model_item.py} (87%) rename src/gooey/types/{qr_code_request_selected_model.py => qr_code_generator_page_request_selected_model.py} (88%) delete mode 100644 src/gooey/types/rag_request_response_format_type.py rename src/gooey/{copilot_integrations/types/video_bots_stream_create_request_citation_style.py => 
types/related_qn_a_doc_page_request_citation_style.py} (89%) create mode 100644 src/gooey/types/related_qn_a_doc_page_request_embedding_model.py rename src/gooey/types/{seo_people_also_ask_doc_request_keyword_query.py => related_qn_a_doc_page_request_keyword_query.py} (50%) rename src/gooey/{evaluator/types/async_form_bulk_eval_request_response_format_type.py => types/related_qn_a_doc_page_request_response_format_type.py} (66%) rename src/gooey/{evaluator/types/async_form_bulk_eval_request_selected_model.py => types/related_qn_a_doc_page_request_selected_model.py} (95%) create mode 100644 src/gooey/types/related_qn_a_page_request_embedding_model.py create mode 100644 src/gooey/types/related_qn_a_page_request_response_format_type.py create mode 100644 src/gooey/types/related_qn_a_page_request_selected_model.py delete mode 100644 src/gooey/types/remove_background_request_selected_model.py delete mode 100644 src/gooey/types/seo_content_request_response_format_type.py delete mode 100644 src/gooey/types/seo_people_also_ask_doc_request_embedding_model.py delete mode 100644 src/gooey/types/seo_people_also_ask_doc_request_response_format_type.py delete mode 100644 src/gooey/types/seo_people_also_ask_doc_request_selected_model.py delete mode 100644 src/gooey/types/seo_people_also_ask_request_response_format_type.py delete mode 100644 src/gooey/types/seo_people_also_ask_request_selected_model.py create mode 100644 src/gooey/types/seo_summary_page_request_response_format_type.py create mode 100644 src/gooey/types/seo_summary_page_request_selected_model.py rename src/gooey/{copilot_integrations/types/video_bots_stream_create_request_response_format_type.py => types/social_lookup_email_page_request_response_format_type.py} (66%) rename src/gooey/{copilot_integrations/types/video_bots_stream_create_request_selected_model.py => types/social_lookup_email_page_request_selected_model.py} (95%) delete mode 100644 src/gooey/types/speech_recognition_request_output_format.py delete mode 
100644 src/gooey/types/speech_recognition_request_translation_model.py delete mode 100644 src/gooey/types/synthesize_data_request_selected_asr_model.py delete mode 100644 src/gooey/types/synthesize_data_request_selected_model.py create mode 100644 src/gooey/types/text_to_speech_page_request_openai_tts_model.py rename src/gooey/{copilot_integrations/types/video_bots_stream_create_request_openai_voice_name.py => types/text_to_speech_page_request_openai_voice_name.py} (73%) rename src/gooey/{copilot_integrations/types/video_bots_stream_create_request_tts_provider.py => types/text_to_speech_page_request_tts_provider.py} (76%) delete mode 100644 src/gooey/types/translate_request_selected_model.py create mode 100644 src/gooey/types/translation_page_request_selected_model.py delete mode 100644 src/gooey/types/web_search_llm_request_selected_model.py delete mode 100644 src/gooey/web_search_gpt3/__init__.py delete mode 100644 src/gooey/web_search_gpt3/client.py diff --git a/.gitignore b/.gitignore index 42cb863..0da665f 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,4 @@ dist/ .mypy_cache/ __pycache__/ poetry.toml +.ruff_cache/ diff --git a/poetry.lock b/poetry.lock index 46252f8..d8971df 100644 --- a/poetry.lock +++ b/poetry.lock @@ -412,6 +412,33 @@ files = [ [package.dependencies] six = ">=1.5" +[[package]] +name = "ruff" +version = "0.5.7" +description = "An extremely fast Python linter and code formatter, written in Rust." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "ruff-0.5.7-py3-none-linux_armv6l.whl", hash = "sha256:548992d342fc404ee2e15a242cdbea4f8e39a52f2e7752d0e4cbe88d2d2f416a"}, + {file = "ruff-0.5.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00cc8872331055ee017c4f1071a8a31ca0809ccc0657da1d154a1d2abac5c0be"}, + {file = "ruff-0.5.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eaf3d86a1fdac1aec8a3417a63587d93f906c678bb9ed0b796da7b59c1114a1e"}, + {file = "ruff-0.5.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a01c34400097b06cf8a6e61b35d6d456d5bd1ae6961542de18ec81eaf33b4cb8"}, + {file = "ruff-0.5.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcc8054f1a717e2213500edaddcf1dbb0abad40d98e1bd9d0ad364f75c763eea"}, + {file = "ruff-0.5.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f70284e73f36558ef51602254451e50dd6cc479f8b6f8413a95fcb5db4a55fc"}, + {file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:a78ad870ae3c460394fc95437d43deb5c04b5c29297815a2a1de028903f19692"}, + {file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ccd078c66a8e419475174bfe60a69adb36ce04f8d4e91b006f1329d5cd44bcf"}, + {file = "ruff-0.5.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e31c9bad4ebf8fdb77b59cae75814440731060a09a0e0077d559a556453acbb"}, + {file = "ruff-0.5.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d796327eed8e168164346b769dd9a27a70e0298d667b4ecee6877ce8095ec8e"}, + {file = "ruff-0.5.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4a09ea2c3f7778cc635e7f6edf57d566a8ee8f485f3c4454db7771efb692c499"}, + {file = "ruff-0.5.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a36d8dcf55b3a3bc353270d544fb170d75d2dff41eba5df57b4e0b67a95bb64e"}, + {file = "ruff-0.5.7-py3-none-musllinux_1_2_i686.whl", hash = 
"sha256:9369c218f789eefbd1b8d82a8cf25017b523ac47d96b2f531eba73770971c9e5"}, + {file = "ruff-0.5.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b88ca3db7eb377eb24fb7c82840546fb7acef75af4a74bd36e9ceb37a890257e"}, + {file = "ruff-0.5.7-py3-none-win32.whl", hash = "sha256:33d61fc0e902198a3e55719f4be6b375b28f860b09c281e4bdbf783c0566576a"}, + {file = "ruff-0.5.7-py3-none-win_amd64.whl", hash = "sha256:083bbcbe6fadb93cd86709037acc510f86eed5a314203079df174c40bbbca6b3"}, + {file = "ruff-0.5.7-py3-none-win_arm64.whl", hash = "sha256:2dca26154ff9571995107221d0aeaad0e75a77b5a682d6236cf89a58c70b76f4"}, + {file = "ruff-0.5.7.tar.gz", hash = "sha256:8dfc0a458797f5d9fb622dd0efc52d796f23f0a1493a9527f4e49a550ae9a7e5"}, +] + [[package]] name = "six" version = "1.16.0" @@ -470,4 +497,4 @@ files = [ [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "547951903d3bbcefb041f1f3a6ef8d5414ee7c6d96c5131d6197bfc91bc6229a" +content-hash = "6f6c191c1028d17a97fdfa84cedfd3cef94b5d63d98b8c1d333b3398eeea9055" diff --git a/pyproject.toml b/pyproject.toml index a05f7fb..e7789e0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "gooeyai" -version = "0.0.1-beta6" +version = "0.0.1-beta7" description = "" readme = "README.md" authors = [] @@ -43,6 +43,7 @@ pytest = "^7.4.0" pytest-asyncio = "^0.23.5" python-dateutil = "^2.9.0" types-python-dateutil = "^2.9.0.20240316" +ruff = "^0.5.6" [tool.pytest.ini_options] testpaths = [ "tests" ] @@ -51,6 +52,9 @@ asyncio_mode = "auto" [tool.mypy] plugins = ["pydantic.mypy"] +[tool.ruff] +line-length = 120 + [build-system] requires = ["poetry-core"] diff --git a/reference.md b/reference.md index 746713e..f45e317 100644 --- a/reference.md +++ b/reference.md @@ -40,7 +40,7 @@ client.animate(
-**animation_prompts:** `typing.List[AnimationPrompt]` +**animation_prompts:** `typing.Sequence[AnimationPrompt]`
@@ -56,7 +56,7 @@ client.animate(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -64,7 +64,7 @@ client.animate(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -80,7 +80,7 @@ client.animate(
-**selected_model:** `typing.Optional[AnimateRequestSelectedModel]` +**selected_model:** `typing.Optional[DeforumSdPageRequestSelectedModel]`
@@ -232,7 +232,7 @@ client.qr_code(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -240,7 +240,7 @@ client.qr_code(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -304,7 +304,9 @@ client.qr_code(
-**image_prompt_controlnet_models:** `typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]]` +**image_prompt_controlnet_models:** `typing.Optional[ + typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem] +]`
@@ -344,7 +346,7 @@ client.qr_code(
-**selected_model:** `typing.Optional[QrCodeRequestSelectedModel]` +**selected_model:** `typing.Optional[QrCodeGeneratorPageRequestSelectedModel]`
@@ -352,7 +354,9 @@ client.qr_code(
-**selected_controlnet_model:** `typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]]` +**selected_controlnet_model:** `typing.Optional[ + typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem] +]`
@@ -384,7 +388,7 @@ client.qr_code(
-**controlnet_conditioning_scale:** `typing.Optional[typing.List[float]]` +**controlnet_conditioning_scale:** `typing.Optional[typing.Sequence[float]]`
@@ -408,7 +412,7 @@ client.qr_code(
-**scheduler:** `typing.Optional[QrCodeRequestScheduler]` +**scheduler:** `typing.Optional[QrCodeGeneratorPageRequestScheduler]`
@@ -529,7 +533,7 @@ client.seo_people_also_ask(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -537,7 +541,7 @@ client.seo_people_also_ask(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -561,7 +565,7 @@ client.seo_people_also_ask(
-**selected_model:** `typing.Optional[SeoPeopleAlsoAskRequestSelectedModel]` +**selected_model:** `typing.Optional[RelatedQnAPageRequestSelectedModel]`
@@ -601,7 +605,7 @@ client.seo_people_also_ask(
-**embedding_model:** `typing.Optional[SeoPeopleAlsoAskRequestEmbeddingModel]` +**embedding_model:** `typing.Optional[RelatedQnAPageRequestEmbeddingModel]`
@@ -662,7 +666,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**response_format_type:** `typing.Optional[SeoPeopleAlsoAskRequestResponseFormatType]` +**response_format_type:** `typing.Optional[RelatedQnAPageRequestResponseFormatType]`
@@ -678,7 +682,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**scaleserp_locations:** `typing.Optional[typing.List[str]]` — DEPRECATED: use `serp_search_location` instead +**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
@@ -817,7 +821,7 @@ client.seo_content(
-**selected_model:** `typing.Optional[SeoContentRequestSelectedModel]` +**selected_model:** `typing.Optional[SeoSummaryPageRequestSelectedModel]`
@@ -889,7 +893,7 @@ client.seo_content(
-**response_format_type:** `typing.Optional[SeoContentRequestResponseFormatType]` +**response_format_type:** `typing.Optional[SeoSummaryPageRequestResponseFormatType]`
@@ -905,7 +909,7 @@ client.seo_content(
-**scaleserp_locations:** `typing.Optional[typing.List[str]]` — DEPRECATED: use `serp_search_location` instead +**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
@@ -1010,7 +1014,7 @@ client.web_search_llm(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -1018,7 +1022,7 @@ client.web_search_llm(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -1042,7 +1046,7 @@ client.web_search_llm(
-**selected_model:** `typing.Optional[WebSearchLlmRequestSelectedModel]` +**selected_model:** `typing.Optional[GoogleGptPageRequestSelectedModel]`
@@ -1082,7 +1086,7 @@ client.web_search_llm(
-**embedding_model:** `typing.Optional[WebSearchLlmRequestEmbeddingModel]` +**embedding_model:** `typing.Optional[GoogleGptPageRequestEmbeddingModel]`
@@ -1143,7 +1147,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**response_format_type:** `typing.Optional[WebSearchLlmRequestResponseFormatType]` +**response_format_type:** `typing.Optional[GoogleGptPageRequestResponseFormatType]`
@@ -1159,7 +1163,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**scaleserp_locations:** `typing.Optional[typing.List[str]]` — DEPRECATED: use `serp_search_location` instead +**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
@@ -1255,7 +1259,7 @@ client.personalize_email(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -1263,7 +1267,7 @@ client.personalize_email(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -1279,7 +1283,7 @@ client.personalize_email(
-**selected_model:** `typing.Optional[PersonalizeEmailRequestSelectedModel]` +**selected_model:** `typing.Optional[SocialLookupEmailPageRequestSelectedModel]`
@@ -1327,7 +1331,7 @@ client.personalize_email(
-**response_format_type:** `typing.Optional[PersonalizeEmailRequestResponseFormatType]` +**response_format_type:** `typing.Optional[SocialLookupEmailPageRequestResponseFormatType]`
@@ -1394,7 +1398,7 @@ client.bulk_run(
-**documents:** `typing.List[str]` +**documents:** `typing.Sequence[str]` Upload or link to a CSV or google sheet that contains your sample input data. @@ -1408,7 +1412,7 @@ Remember to includes header names in your CSV too.
-**run_urls:** `typing.List[str]` +**run_urls:** `typing.Sequence[str]` Provide one or more Gooey.AI workflow runs. @@ -1453,7 +1457,7 @@ For each output field in the Gooey.AI workflow, specify the column name that you
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -1461,7 +1465,7 @@ For each output field in the Gooey.AI workflow, specify the column name that you
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -1469,7 +1473,7 @@ For each output field in the Gooey.AI workflow, specify the column name that you
-**eval_urls:** `typing.Optional[typing.List[str]]` +**eval_urls:** `typing.Optional[typing.Sequence[str]]` _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. @@ -1537,7 +1541,7 @@ client.synthesize_data(
-**documents:** `typing.List[str]` +**documents:** `typing.Sequence[str]`
@@ -1553,7 +1557,7 @@ client.synthesize_data(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -1561,7 +1565,7 @@ client.synthesize_data(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -1577,7 +1581,7 @@ client.synthesize_data(
-**selected_asr_model:** `typing.Optional[SynthesizeDataRequestSelectedAsrModel]` +**selected_asr_model:** `typing.Optional[DocExtractPageRequestSelectedAsrModel]`
@@ -1612,7 +1616,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-**selected_model:** `typing.Optional[SynthesizeDataRequestSelectedModel]` +**selected_model:** `typing.Optional[DocExtractPageRequestSelectedModel]`
@@ -1660,7 +1664,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-**response_format_type:** `typing.Optional[SynthesizeDataRequestResponseFormatType]` +**response_format_type:** `typing.Optional[DocExtractPageRequestResponseFormatType]`
@@ -1730,7 +1734,7 @@ client.llm()
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -1738,7 +1742,7 @@ client.llm()
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -1754,7 +1758,7 @@ client.llm()
-**selected_models:** `typing.Optional[typing.List[LlmRequestSelectedModelsItem]]` +**selected_models:** `typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]]`
@@ -1802,7 +1806,7 @@ client.llm()
-**response_format_type:** `typing.Optional[LlmRequestResponseFormatType]` +**response_format_type:** `typing.Optional[CompareLlmPageRequestResponseFormatType]`
@@ -1882,7 +1886,7 @@ client.rag(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -1890,7 +1894,7 @@ client.rag(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -1898,7 +1902,7 @@ client.rag(
-**keyword_query:** `typing.Optional[RagRequestKeywordQuery]` +**keyword_query:** `typing.Optional[DocSearchPageRequestKeywordQuery]`
@@ -1906,7 +1910,7 @@ client.rag(
-**documents:** `typing.Optional[typing.List[str]]` +**documents:** `typing.Optional[typing.Sequence[str]]`
@@ -1946,7 +1950,7 @@ client.rag(
-**embedding_model:** `typing.Optional[RagRequestEmbeddingModel]` +**embedding_model:** `typing.Optional[DocSearchPageRequestEmbeddingModel]`
@@ -1983,7 +1987,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**selected_model:** `typing.Optional[RagRequestSelectedModel]` +**selected_model:** `typing.Optional[DocSearchPageRequestSelectedModel]`
@@ -1991,7 +1995,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**citation_style:** `typing.Optional[RagRequestCitationStyle]` +**citation_style:** `typing.Optional[DocSearchPageRequestCitationStyle]`
@@ -2039,7 +2043,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**response_format_type:** `typing.Optional[RagRequestResponseFormatType]` +**response_format_type:** `typing.Optional[DocSearchPageRequestResponseFormatType]`
@@ -2103,7 +2107,7 @@ client.doc_summary(
-**documents:** `typing.List[str]` +**documents:** `typing.Sequence[str]`
@@ -2119,7 +2123,7 @@ client.doc_summary(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -2127,7 +2131,7 @@ client.doc_summary(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -2151,7 +2155,7 @@ client.doc_summary(
-**selected_model:** `typing.Optional[DocSummaryRequestSelectedModel]` +**selected_model:** `typing.Optional[DocSummaryPageRequestSelectedModel]`
@@ -2167,7 +2171,7 @@ client.doc_summary(
-**selected_asr_model:** `typing.Optional[DocSummaryRequestSelectedAsrModel]` +**selected_asr_model:** `typing.Optional[DocSummaryPageRequestSelectedAsrModel]`
@@ -2223,7 +2227,7 @@ client.doc_summary(
-**response_format_type:** `typing.Optional[DocSummaryRequestResponseFormatType]` +**response_format_type:** `typing.Optional[DocSummaryPageRequestResponseFormatType]`
@@ -2303,7 +2307,7 @@ client.lipsync_tts(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -2311,7 +2315,7 @@ client.lipsync_tts(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -2319,7 +2323,7 @@ client.lipsync_tts(
-**tts_provider:** `typing.Optional[LipsyncTtsRequestTtsProvider]` +**tts_provider:** `typing.Optional[LipsyncTtsPageRequestTtsProvider]`
@@ -2447,7 +2451,7 @@ client.lipsync_tts(
-**openai_voice_name:** `typing.Optional[LipsyncTtsRequestOpenaiVoiceName]` +**openai_voice_name:** `typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName]`
@@ -2455,7 +2459,7 @@ client.lipsync_tts(
-**openai_tts_model:** `typing.Optional[LipsyncTtsRequestOpenaiTtsModel]` +**openai_tts_model:** `typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel]`
@@ -2511,7 +2515,7 @@ client.lipsync_tts(
-**selected_model:** `typing.Optional[LipsyncTtsRequestSelectedModel]` +**selected_model:** `typing.Optional[LipsyncTtsPageRequestSelectedModel]`
@@ -2591,7 +2595,7 @@ client.text_to_speech(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -2599,7 +2603,7 @@ client.text_to_speech(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -2607,7 +2611,7 @@ client.text_to_speech(
-**tts_provider:** `typing.Optional[TextToSpeechRequestTtsProvider]` +**tts_provider:** `typing.Optional[TextToSpeechPageRequestTtsProvider]`
@@ -2735,7 +2739,7 @@ client.text_to_speech(
-**openai_voice_name:** `typing.Optional[TextToSpeechRequestOpenaiVoiceName]` +**openai_voice_name:** `typing.Optional[TextToSpeechPageRequestOpenaiVoiceName]`
@@ -2743,7 +2747,7 @@ client.text_to_speech(
-**openai_tts_model:** `typing.Optional[TextToSpeechRequestOpenaiTtsModel]` +**openai_tts_model:** `typing.Optional[TextToSpeechPageRequestOpenaiTtsModel]`
@@ -2807,7 +2811,7 @@ client.speech_recognition(
-**documents:** `typing.List[str]` +**documents:** `typing.Sequence[str]`
@@ -2823,7 +2827,7 @@ client.speech_recognition(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -2831,7 +2835,7 @@ client.speech_recognition(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -2839,7 +2843,7 @@ client.speech_recognition(
-**selected_model:** `typing.Optional[SpeechRecognitionRequestSelectedModel]` +**selected_model:** `typing.Optional[AsrPageRequestSelectedModel]`
@@ -2855,7 +2859,7 @@ client.speech_recognition(
-**translation_model:** `typing.Optional[SpeechRecognitionRequestTranslationModel]` +**translation_model:** `typing.Optional[AsrPageRequestTranslationModel]`
@@ -2863,7 +2867,7 @@ client.speech_recognition(
-**output_format:** `typing.Optional[SpeechRecognitionRequestOutputFormat]` +**output_format:** `typing.Optional[AsrPageRequestOutputFormat]`
@@ -2978,7 +2982,7 @@ client.text_to_music(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -2986,7 +2990,7 @@ client.text_to_music(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -3050,7 +3054,7 @@ client.text_to_music(
-**selected_models:** `typing.Optional[typing.List[typing.Literal["audio_ldm"]]]` +**selected_models:** `typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]`
@@ -3120,7 +3124,7 @@ client.translate()
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -3128,7 +3132,7 @@ client.translate()
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -3136,7 +3140,7 @@ client.translate()
-**texts:** `typing.Optional[typing.List[str]]` +**texts:** `typing.Optional[typing.Sequence[str]]`
@@ -3144,7 +3148,7 @@ client.translate()
-**selected_model:** `typing.Optional[TranslateRequestSelectedModel]` +**selected_model:** `typing.Optional[TranslationPageRequestSelectedModel]`
@@ -3251,7 +3255,7 @@ client.remix_image(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -3259,7 +3263,7 @@ client.remix_image(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -3275,7 +3279,7 @@ client.remix_image(
-**selected_model:** `typing.Optional[RemixImageRequestSelectedModel]` +**selected_model:** `typing.Optional[Img2ImgPageRequestSelectedModel]`
@@ -3283,7 +3287,7 @@ client.remix_image(
-**selected_controlnet_model:** `typing.Optional[RemixImageRequestSelectedControlnetModel]` +**selected_controlnet_model:** `typing.Optional[Img2ImgPageRequestSelectedControlnetModel]`
@@ -3347,7 +3351,7 @@ client.remix_image(
-**controlnet_conditioning_scale:** `typing.Optional[typing.List[float]]` +**controlnet_conditioning_scale:** `typing.Optional[typing.Sequence[float]]`
@@ -3443,7 +3447,7 @@ client.text_to_image(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -3451,7 +3455,7 @@ client.text_to_image(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -3539,7 +3543,7 @@ client.text_to_image(
-**selected_models:** `typing.Optional[typing.List[TextToImageRequestSelectedModelsItem]]` +**selected_models:** `typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]`
@@ -3547,7 +3551,7 @@ client.text_to_image(
-**scheduler:** `typing.Optional[TextToImageRequestScheduler]` +**scheduler:** `typing.Optional[CompareText2ImgPageRequestScheduler]`
@@ -3652,7 +3656,7 @@ client.product_image(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -3660,7 +3664,7 @@ client.product_image(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -3700,7 +3704,7 @@ client.product_image(
-**selected_model:** `typing.Optional[ProductImageRequestSelectedModel]` +**selected_model:** `typing.Optional[ObjectInpaintingPageRequestSelectedModel]`
@@ -3812,7 +3816,7 @@ client = Gooey( ) client.portrait( input_image="input_image", - text_prompt="text_prompt", + text_prompt="tony stark from the iron man", ) ``` @@ -3853,7 +3857,7 @@ client.portrait(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -3861,7 +3865,7 @@ client.portrait(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -3893,7 +3897,7 @@ client.portrait(
-**selected_model:** `typing.Optional[PortraitRequestSelectedModel]` +**selected_model:** `typing.Optional[FaceInpaintingPageRequestSelectedModel]`
@@ -4004,7 +4008,8 @@ client = Gooey( api_key="YOUR_API_KEY", ) client.image_from_email( - text_prompt="text_prompt", + email_address="sean@dara.network", + text_prompt="winter's day in paris", ) ``` @@ -4037,7 +4042,7 @@ client.image_from_email(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -4045,7 +4050,7 @@ client.image_from_email(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -4093,7 +4098,7 @@ client.image_from_email(
-**selected_model:** `typing.Optional[ImageFromEmailRequestSelectedModel]` +**selected_model:** `typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]`
@@ -4310,7 +4315,7 @@ client.image_from_web_search(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -4318,7 +4323,7 @@ client.image_from_web_search(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -4334,7 +4339,7 @@ client.image_from_web_search(
-**scaleserp_locations:** `typing.Optional[typing.List[str]]` — DEPRECATED: use `serp_search_location` instead +**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
@@ -4342,7 +4347,7 @@ client.image_from_web_search(
-**selected_model:** `typing.Optional[ImageFromWebSearchRequestSelectedModel]` +**selected_model:** `typing.Optional[GoogleImageGenPageRequestSelectedModel]`
@@ -4486,7 +4491,7 @@ client.remove_background(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -4494,7 +4499,7 @@ client.remove_background(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -4502,7 +4507,7 @@ client.remove_background(
-**selected_model:** `typing.Optional[RemoveBackgroundRequestSelectedModel]` +**selected_model:** `typing.Optional[ImageSegmentationPageRequestSelectedModel]`
@@ -4630,7 +4635,7 @@ client.upscale(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -4638,7 +4643,7 @@ client.upscale(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -4662,7 +4667,7 @@ client.upscale(
-**selected_models:** `typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]]` +**selected_models:** `typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]]`
@@ -4734,7 +4739,7 @@ client.embed(
-**texts:** `typing.List[str]` +**texts:** `typing.Sequence[str]`
@@ -4750,7 +4755,7 @@ client.embed(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -4758,7 +4763,7 @@ client.embed(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -4766,7 +4771,7 @@ client.embed(
-**selected_model:** `typing.Optional[EmbedRequestSelectedModel]` +**selected_model:** `typing.Optional[EmbeddingsPageRequestSelectedModel]`
@@ -4846,7 +4851,7 @@ client.seo_people_also_ask_doc(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -4854,7 +4859,7 @@ client.seo_people_also_ask_doc(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -4862,7 +4867,7 @@ client.seo_people_also_ask_doc(
-**keyword_query:** `typing.Optional[SeoPeopleAlsoAskDocRequestKeywordQuery]` +**keyword_query:** `typing.Optional[RelatedQnADocPageRequestKeywordQuery]`
@@ -4870,7 +4875,7 @@ client.seo_people_also_ask_doc(
-**documents:** `typing.Optional[typing.List[str]]` +**documents:** `typing.Optional[typing.Sequence[str]]`
@@ -4910,7 +4915,7 @@ client.seo_people_also_ask_doc(
-**embedding_model:** `typing.Optional[SeoPeopleAlsoAskDocRequestEmbeddingModel]` +**embedding_model:** `typing.Optional[RelatedQnADocPageRequestEmbeddingModel]`
@@ -4947,7 +4952,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**selected_model:** `typing.Optional[SeoPeopleAlsoAskDocRequestSelectedModel]` +**selected_model:** `typing.Optional[RelatedQnADocPageRequestSelectedModel]`
@@ -4955,7 +4960,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**citation_style:** `typing.Optional[SeoPeopleAlsoAskDocRequestCitationStyle]` +**citation_style:** `typing.Optional[RelatedQnADocPageRequestCitationStyle]`
@@ -5003,7 +5008,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**response_format_type:** `typing.Optional[SeoPeopleAlsoAskDocRequestResponseFormatType]` +**response_format_type:** `typing.Optional[RelatedQnADocPageRequestResponseFormatType]`
@@ -5019,7 +5024,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**scaleserp_locations:** `typing.Optional[typing.List[str]]` — DEPRECATED: use `serp_search_location` instead +**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
@@ -5109,7 +5114,7 @@ client.health_status_get()
-
client.post_v3chyron_plant_async() +
client.post_v3chyron_plant_async_form(...)
@@ -5127,7 +5132,9 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3chyron_plant_async() +client.post_v3chyron_plant_async_form( + midi_notes="midi_notes", +) ```
@@ -5143,49 +5150,51 @@ client.post_v3chyron_plant_async()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**midi_notes:** `str`
-
-
+
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]` +
- -
client.post_v3compare_llm_async()
-#### 🔌 Usage +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+**midi_notes_prompt:** `typing.Optional[str]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3compare_llm_async() - -``` -
-
+**chyron_prompt:** `typing.Optional[str]` +
-#### ⚙️ Parameters -
+**settings:** `typing.Optional[RunSettings]` + +
+
+
@@ -5201,7 +5210,7 @@ client.post_v3compare_llm_async()
-
client.post_v3compare_text2img_async() +
client.post_v3compare_llm_async_form(...)
@@ -5219,7 +5228,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3compare_text2img_async() +client.post_v3compare_llm_async_form() ```
@@ -5235,49 +5244,91 @@ client.post_v3compare_text2img_async()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +
+
+
+**input_prompt:** `typing.Optional[str]` +
-
-
client.post_v3deforum_sd_async()
-#### 🔌 Usage +**selected_models:** `typing.Optional[typing.List[PostV3CompareLlmAsyncFormRequestSelectedModelsItem]]` + +
+
+**avoid_repetition:** `typing.Optional[bool]` + +
+
+
-```python -from gooey import Gooey +**num_outputs:** `typing.Optional[int]` + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3deforum_sd_async() +
+
-``` +**quality:** `typing.Optional[float]` +
+ +
+
+ +**max_tokens:** `typing.Optional[int]` +
-#### ⚙️ Parameters +
+
+ +**sampling_temperature:** `typing.Optional[float]` + +
+
+ +
+
+ +**response_format_type:** `typing.Optional[PostV3CompareLlmAsyncFormRequestResponseFormatType]` + +
+
+**settings:** `typing.Optional[RunSettings]` + +
+
+
@@ -5293,7 +5344,7 @@ client.post_v3deforum_sd_async()
-
client.post_v3email_face_inpainting_async() +
client.post_v3compare_text2img_async_form(...)
@@ -5311,7 +5362,9 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3email_face_inpainting_async() +client.post_v3compare_text2img_async_form( + text_prompt="text_prompt", +) ```
@@ -5327,95 +5380,149 @@ client.post_v3email_face_inpainting_async()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**text_prompt:** `str`
+ +
+
+ +**functions:** `typing.Optional[typing.List[RecipeFunction]]` +
+
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +
-
-
client.post_v3face_inpainting_async()
-#### 🔌 Usage +**negative_prompt:** `typing.Optional[str]` + +
+
+**output_width:** `typing.Optional[int]` + +
+
+
-```python -from gooey import Gooey +**output_height:** `typing.Optional[int]` + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3face_inpainting_async() +
+
-``` +**num_outputs:** `typing.Optional[int]` +
+ +
+
+ +**quality:** `typing.Optional[int]` +
-#### ⚙️ Parameters -
+**dall_e3quality:** `typing.Optional[str]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**dall_e3style:** `typing.Optional[str]`
+ +
+
+ +**guidance_scale:** `typing.Optional[float]` +
+
+
+**seed:** `typing.Optional[int]` +
-
-
client.post_v3google_image_gen_async()
-#### 🔌 Usage +**sd2upscaling:** `typing.Optional[bool]` + +
+
+**selected_models:** `typing.Optional[ + typing.List[PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem] +]` + +
+
+
-```python -from gooey import Gooey +**scheduler:** `typing.Optional[PostV3CompareText2ImgAsyncFormRequestScheduler]` + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3google_image_gen_async() +
+
-``` +**edit_instruction:** `typing.Optional[str]` +
+ +
+
+ +**image_guidance_scale:** `typing.Optional[float]` +
-#### ⚙️ Parameters -
+**settings:** `typing.Optional[RunSettings]` + +
+
+
@@ -5431,7 +5538,7 @@ client.post_v3google_image_gen_async()
-
client.post_v3image_segmentation_async() +
client.post_v3deforum_sd_async_form(...)
@@ -5444,12 +5551,19 @@ client.post_v3google_image_gen_async()
```python -from gooey import Gooey +from gooey import AnimationPrompt, Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3image_segmentation_async() +client.post_v3deforum_sd_async_form( + animation_prompts=[ + AnimationPrompt( + frame="frame", + prompt="prompt", + ) + ], +) ```
@@ -5465,95 +5579,123 @@ client.post_v3image_segmentation_async()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**animation_prompts:** `typing.List[AnimationPrompt]`
- -
+
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]` +
-
-
client.post_v3img2img_async()
-#### 🔌 Usage +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+**max_frames:** `typing.Optional[int]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3img2img_async() - -``` -
-
+**selected_model:** `typing.Optional[PostV3DeforumSdAsyncFormRequestSelectedModel]` + -#### ⚙️ Parameters -
+**animation_mode:** `typing.Optional[str]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**zoom:** `typing.Optional[str]`
+ +
+
+ +**translation_x:** `typing.Optional[str]` +
+
+
+**translation_y:** `typing.Optional[str]` +
-
-
client.post_v3letter_writer_async()
-#### 🔌 Usage +**rotation3d_x:** `typing.Optional[str]` + +
+
+**rotation3d_y:** `typing.Optional[str]` + +
+
+
-```python -from gooey import Gooey +**rotation3d_z:** `typing.Optional[str]` + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3letter_writer_async() +
+
-``` +**fps:** `typing.Optional[int]` +
+ +
+
+ +**seed:** `typing.Optional[int]` +
-#### ⚙️ Parameters -
+**settings:** `typing.Optional[RunSettings]` + +
+
+
@@ -5569,7 +5711,7 @@ client.post_v3letter_writer_async()
-
client.post_v3lipsync_async() +
client.post_v3email_face_inpainting_async_form(...)
@@ -5587,7 +5729,9 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3lipsync_async() +client.post_v3email_face_inpainting_async_form( + text_prompt="text_prompt", +) ```
@@ -5603,187 +5747,211 @@ client.post_v3lipsync_async()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**text_prompt:** `str`
- -
+
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]` +
-
-
client.post_v3lipsync_tts_async()
-#### 🔌 Usage +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+**email_address:** `typing.Optional[str]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3lipsync_tts_async() - -``` -
-
+**twitter_handle:** `typing.Optional[str]` + -#### ⚙️ Parameters -
+**face_scale:** `typing.Optional[float]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**face_pos_x:** `typing.Optional[float]`
- - +
+
+**face_pos_y:** `typing.Optional[float]` +
-
-
client.post_v3object_inpainting_async()
-#### 🔌 Usage +**selected_model:** `typing.Optional[PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel]` + +
+
+**negative_prompt:** `typing.Optional[str]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3object_inpainting_async() - -``` -
-
+**num_outputs:** `typing.Optional[int]` + -#### ⚙️ Parameters -
+**quality:** `typing.Optional[int]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**upscale_factor:** `typing.Optional[float]`
- - +
+
+**output_width:** `typing.Optional[int]` +
-
-
client.post_v3seo_summary_async()
-#### 🔌 Usage +**output_height:** `typing.Optional[int]` + +
+
+**guidance_scale:** `typing.Optional[float]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3seo_summary_async() - -``` -
-
+**should_send_email:** `typing.Optional[bool]` + -#### ⚙️ Parameters -
+**email_from:** `typing.Optional[str]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**email_cc:** `typing.Optional[str]`
- - +
+
+**email_bcc:** `typing.Optional[str]` +
-
-
client.post_v3smart_gpt_async()
-#### 🔌 Usage +**email_subject:** `typing.Optional[str]` + +
+
+**email_body:** `typing.Optional[str]` + +
+
+
-```python -from gooey import Gooey +**email_body_enable_html:** `typing.Optional[bool]` + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3smart_gpt_async() +
+
-``` +**fallback_email_body:** `typing.Optional[str]` +
+ +
+
+ +**seed:** `typing.Optional[int]` +
-#### ⚙️ Parameters -
+**settings:** `typing.Optional[RunSettings]` + +
+
+
@@ -5799,7 +5967,7 @@ client.post_v3smart_gpt_async()
-
client.post_v3social_lookup_email_async() +
client.post_v3face_inpainting_async_form(...)
@@ -5817,7 +5985,10 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3social_lookup_email_async() +client.post_v3face_inpainting_async_form( + input_image="input_image", + text_prompt="text_prompt", +) ```
@@ -5833,95 +6004,139 @@ client.post_v3social_lookup_email_async()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**input_image:** `str`
+ +
+
+ +**text_prompt:** `str` +
+
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]` +
-
-
client.post_v3text_to_speech_async()
-#### 🔌 Usage +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+**face_scale:** `typing.Optional[float]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3text_to_speech_async() - -``` +**face_pos_x:** `typing.Optional[float]` +
+ +
+
+ +**face_pos_y:** `typing.Optional[float]` +
-#### ⚙️ Parameters -
+**selected_model:** `typing.Optional[PostV3FaceInpaintingAsyncFormRequestSelectedModel]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**negative_prompt:** `typing.Optional[str]`
+ +
+
+ +**num_outputs:** `typing.Optional[int]` +
+
+
+**quality:** `typing.Optional[int]` +
-
-
client.post_v3art_qr_code_async()
-#### 🔌 Usage +**upscale_factor:** `typing.Optional[float]` + +
+
+**output_width:** `typing.Optional[int]` + +
+
+
-```python -from gooey import Gooey +**output_height:** `typing.Optional[int]` + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3art_qr_code_async() +
+
-``` +**guidance_scale:** `typing.Optional[float]` +
+ +
+
+ +**seed:** `typing.Optional[int]` +
-#### ⚙️ Parameters -
+**settings:** `typing.Optional[RunSettings]` + +
+
+
@@ -5937,7 +6152,7 @@ client.post_v3art_qr_code_async()
-
client.post_v3asr_async() +
client.post_v3google_image_gen_async_form(...)
@@ -5955,7 +6170,10 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3asr_async() +client.post_v3google_image_gen_async_form( + search_query="search_query", + text_prompt="text_prompt", +) ```
@@ -5971,95 +6189,131 @@ client.post_v3asr_async()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**search_query:** `str`
- -
+
+
+**text_prompt:** `str` +
-
-
client.post_v3bulk_eval_async()
-#### 🔌 Usage +**functions:** `typing.Optional[typing.List[RecipeFunction]]` + +
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3bulk_eval_async() - -``` +**serp_search_location:** `typing.Optional[SerpSearchLocation]` +
+ +
+
+ +**scaleserp_locations:** `typing.Optional[typing.List[str]]` — DEPRECATED: use `serp_search_location` instead +
-#### ⚙️ Parameters -
+**selected_model:** `typing.Optional[PostV3GoogleImageGenAsyncFormRequestSelectedModel]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**negative_prompt:** `typing.Optional[str]`
+ +
+
+ +**num_outputs:** `typing.Optional[int]` +
+
+
+**quality:** `typing.Optional[int]` +
-
-
client.post_v3bulk_runner_async()
-#### 🔌 Usage +**guidance_scale:** `typing.Optional[float]` + +
+
+**prompt_strength:** `typing.Optional[float]` + +
+
+
-```python -from gooey import Gooey +**sd2upscaling:** `typing.Optional[bool]` + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3bulk_runner_async() +
+
-``` +**seed:** `typing.Optional[int]` +
+ +
+
+ +**image_guidance_scale:** `typing.Optional[float]` +
-#### ⚙️ Parameters -
+**settings:** `typing.Optional[RunSettings]` + +
+
+
@@ -6075,7 +6329,7 @@ client.post_v3bulk_runner_async()
-
client.post_v3compare_ai_upscalers_async() +
client.post_v3image_segmentation_async_form(...)
@@ -6093,7 +6347,9 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3compare_ai_upscalers_async() +client.post_v3image_segmentation_async_form( + input_image="input_image", +) ```
@@ -6109,95 +6365,91 @@ client.post_v3compare_ai_upscalers_async()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**input_image:** `str`
- -
+
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]` +
-
-
client.post_v3doc_extract_async()
-#### 🔌 Usage - -
-
+**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3doc_extract_async() - -``` -
-
+**selected_model:** `typing.Optional[PostV3ImageSegmentationAsyncFormRequestSelectedModel]` +
-#### ⚙️ Parameters -
+**mask_threshold:** `typing.Optional[float]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**rect_persepective_transform:** `typing.Optional[bool]`
- - +
+
+**reflection_opacity:** `typing.Optional[float]` +
-
-
client.post_v3doc_search_async()
-#### 🔌 Usage +**obj_scale:** `typing.Optional[float]` + +
+
+**obj_pos_x:** `typing.Optional[float]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3doc_search_async() - -``` -
-
+**obj_pos_y:** `typing.Optional[float]` + -#### ⚙️ Parameters -
+**settings:** `typing.Optional[RunSettings]` + +
+
+
@@ -6213,7 +6465,7 @@ client.post_v3doc_search_async()
-
client.post_v3doc_summary_async() +
client.post_v3img2img_async_form(...)
@@ -6231,7 +6483,9 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3doc_summary_async() +client.post_v3img2img_async_form( + input_image="input_image", +) ```
@@ -6247,95 +6501,139 @@ client.post_v3doc_summary_async()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**input_image:** `str`
- -
+
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]` +
-
-
client.post_v3embeddings_async()
-#### 🔌 Usage +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+**text_prompt:** `typing.Optional[str]` + +
+
+
-```python -from gooey import Gooey +**selected_model:** `typing.Optional[PostV3Img2ImgAsyncFormRequestSelectedModel]` + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3embeddings_async() +
+
-``` +**selected_controlnet_model:** `typing.Optional[PostV3Img2ImgAsyncFormRequestSelectedControlnetModel]` +
+ +
+
+ +**negative_prompt:** `typing.Optional[str]` +
-#### ⚙️ Parameters -
+**num_outputs:** `typing.Optional[int]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**quality:** `typing.Optional[int]`
+ +
+
+ +**output_width:** `typing.Optional[int]` +
+
+
+**output_height:** `typing.Optional[int]` +
-
-
client.post_v3functions_async()
-#### 🔌 Usage +**guidance_scale:** `typing.Optional[float]` + +
+
+**prompt_strength:** `typing.Optional[float]` + +
+
+
-```python -from gooey import Gooey +**controlnet_conditioning_scale:** `typing.Optional[typing.List[float]]` + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3functions_async() +
+
-``` +**seed:** `typing.Optional[int]` +
+ +
+
+ +**image_guidance_scale:** `typing.Optional[float]` +
-#### ⚙️ Parameters -
+**settings:** `typing.Optional[RunSettings]` + +
+
+
@@ -6351,7 +6649,7 @@ client.post_v3functions_async()
-
client.post_v3google_gpt_async() +
client.post_v3letter_writer_async_form(...)
@@ -6369,7 +6667,9 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3google_gpt_async() +client.post_v3letter_writer_async_form( + action_id="action_id", +) ```
@@ -6385,95 +6685,139 @@ client.post_v3google_gpt_async()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**action_id:** `str`
- -
+
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]` +
-
-
client.post_v3related_qna_maker_doc_async()
-#### 🔌 Usage +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+**prompt_header:** `typing.Optional[str]` + +
+
+
-```python -from gooey import Gooey +**example_letters:** `typing.Optional[typing.List[TrainingDataModel]]` + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3related_qna_maker_doc_async() +
+
-``` +**lm_selected_api:** `typing.Optional[str]` +
+ +
+
+ +**lm_selected_engine:** `typing.Optional[str]` +
-#### ⚙️ Parameters -
+**num_outputs:** `typing.Optional[int]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**quality:** `typing.Optional[float]`
+ +
+
+ +**lm_sampling_temperature:** `typing.Optional[float]` +
+
+
+**api_http_method:** `typing.Optional[str]` +
-
-
client.post_v3related_qna_maker_async()
-#### 🔌 Usage +**api_url:** `typing.Optional[str]` + +
+
+**api_headers:** `typing.Optional[str]` + +
+
+
-```python -from gooey import Gooey +**api_json_body:** `typing.Optional[str]` + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3related_qna_maker_async() +
+
-``` +**input_prompt:** `typing.Optional[str]` +
+ +
+
+ +**strip_html2text:** `typing.Optional[bool]` +
-#### ⚙️ Parameters -
+**settings:** `typing.Optional[RunSettings]` + +
+
+
@@ -6489,7 +6833,7 @@ client.post_v3related_qna_maker_async()
-
client.post_v3text2audio_async() +
client.post_v3lipsync_async_form(...)
@@ -6507,7 +6851,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.post_v3text2audio_async() +client.post_v3lipsync_async_form() ```
@@ -6523,95 +6867,91 @@ client.post_v3text2audio_async()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**functions:** `typing.Optional[typing.List[RecipeFunction]]`
- -
+
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +
-
-
client.post_v3translate_async()
-#### 🔌 Usage - -
-
+**input_face:** `typing.Optional[str]` + +
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3translate_async() - -``` -
-
+**face_padding_top:** `typing.Optional[int]` +
-#### ⚙️ Parameters -
+**face_padding_bottom:** `typing.Optional[int]` + +
+
+
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**face_padding_left:** `typing.Optional[int]`
- - +
+
+**face_padding_right:** `typing.Optional[int]` +
-
-
client.post_v3video_bots_async()
-#### 🔌 Usage +**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` + +
+
+**selected_model:** `typing.Optional[PostV3LipsyncAsyncFormRequestSelectedModel]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.post_v3video_bots_async() - -``` -
-
+**input_audio:** `typing.Optional[str]` + -#### ⚙️ Parameters -
+**settings:** `typing.Optional[RunSettings]` + +
+
+
@@ -6627,8 +6967,7 @@ client.post_v3video_bots_async()
-## CopilotIntegrations -
client.copilot_integrations.video_bots_stream_create(...) +
client.post_v3lipsync_tts_async_form(...)
@@ -6646,8 +6985,8 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.copilot_integrations.video_bots_stream_create( - integration_id="integration_id", +client.post_v3lipsync_tts_async_form( + text_prompt="text_prompt", ) ``` @@ -6664,7 +7003,7 @@ client.copilot_integrations.video_bots_stream_create(
-**integration_id:** `str` — Your Integration ID as shown in the Copilot Integrations tab +**text_prompt:** `str`
@@ -6672,13 +7011,15 @@ client.copilot_integrations.video_bots_stream_create(
-**conversation_id:** `typing.Optional[str]` +**functions:** `typing.Optional[typing.List[RecipeFunction]]` + +
+
-The gooey conversation ID. - -If not provided, a new conversation will be started and a new ID will be returned in the response. Use this to maintain the state of the conversation between requests. +
+
-Note that you may not provide a custom ID here, and must only use the `conversation_id` returned in a previous response. +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -6686,11 +7027,7 @@ Note that you may not provide a custom ID here, and must only use the `conversat
-**user_id:** `typing.Optional[str]` - -Your app's custom user ID. - -If not provided, a random user will be created and a new ID will be returned in the response. If a `conversation_id` is provided, this field is automatically set to the user's id associated with that conversation. +**tts_provider:** `typing.Optional[PostV3LipsyncTtsAsyncFormRequestTtsProvider]`
@@ -6698,11 +7035,7 @@ If not provided, a random user will be created and a new ID will be returned in
-**user_message_id:** `typing.Optional[str]` - -Your app's custom message ID for the user message. - -If not provided, a random ID will be generated and returned in the response. This is useful for tracking messages in the conversation. +**uberduck_voice_name:** `typing.Optional[str]`
@@ -6710,7 +7043,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**button_pressed:** `typing.Optional[ButtonPressed]` — The button that was pressed by the user. +**uberduck_speaking_rate:** `typing.Optional[float]`
@@ -6718,7 +7051,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**google_voice_name:** `typing.Optional[str]`
@@ -6726,7 +7059,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**google_speaking_rate:** `typing.Optional[float]`
@@ -6734,7 +7067,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**input_prompt:** `typing.Optional[str]` +**google_pitch:** `typing.Optional[float]`
@@ -6742,7 +7075,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**input_audio:** `typing.Optional[str]` +**bark_history_prompt:** `typing.Optional[str]`
@@ -6750,7 +7083,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**input_images:** `typing.Optional[typing.List[str]]` +**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
@@ -6758,7 +7091,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**input_documents:** `typing.Optional[typing.List[str]]` +**elevenlabs_api_key:** `typing.Optional[str]`
@@ -6766,7 +7099,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images. +**elevenlabs_voice_id:** `typing.Optional[str]`
@@ -6774,7 +7107,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**messages:** `typing.Optional[typing.List[ConversationEntry]]` +**elevenlabs_model:** `typing.Optional[str]`
@@ -6782,7 +7115,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**bot_script:** `typing.Optional[str]` +**elevenlabs_stability:** `typing.Optional[float]`
@@ -6790,7 +7123,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**selected_model:** `typing.Optional[VideoBotsStreamCreateRequestSelectedModel]` +**elevenlabs_similarity_boost:** `typing.Optional[float]`
@@ -6798,7 +7131,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) +**elevenlabs_style:** `typing.Optional[float]`
@@ -6806,7 +7139,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**task_instructions:** `typing.Optional[str]` +**elevenlabs_speaker_boost:** `typing.Optional[bool]`
@@ -6814,7 +7147,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**query_instructions:** `typing.Optional[str]` +**azure_voice_name:** `typing.Optional[str]`
@@ -6822,7 +7155,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**keyword_instructions:** `typing.Optional[str]` +**openai_voice_name:** `typing.Optional[PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName]`
@@ -6830,7 +7163,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**documents:** `typing.Optional[typing.List[str]]` +**openai_tts_model:** `typing.Optional[PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel]`
@@ -6838,7 +7171,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**max_references:** `typing.Optional[int]` +**input_face:** `typing.Optional[str]`
@@ -6846,7 +7179,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**max_context_words:** `typing.Optional[int]` +**face_padding_top:** `typing.Optional[int]`
@@ -6854,7 +7187,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**scroll_jump:** `typing.Optional[int]` +**face_padding_bottom:** `typing.Optional[int]`
@@ -6862,7 +7195,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**embedding_model:** `typing.Optional[VideoBotsStreamCreateRequestEmbeddingModel]` +**face_padding_left:** `typing.Optional[int]`
@@ -6870,12 +7203,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**dense_weight:** `typing.Optional[float]` - - -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - +**face_padding_right:** `typing.Optional[int]`
@@ -6883,7 +7211,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**citation_style:** `typing.Optional[VideoBotsStreamCreateRequestCitationStyle]` +**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
@@ -6891,7 +7219,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**use_url_shortener:** `typing.Optional[bool]` +**selected_model:** `typing.Optional[PostV3LipsyncTtsAsyncFormRequestSelectedModel]`
@@ -6899,7 +7227,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**asr_model:** `typing.Optional[VideoBotsStreamCreateRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text. +**settings:** `typing.Optional[RunSettings]`
@@ -6907,47 +7235,56 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
-
-**translation_model:** `typing.Optional[VideoBotsStreamCreateRequestTranslationModel]` -
+
+
client.post_v3object_inpainting_async_form(...)
-**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. - -
-
+#### 🔌 Usage
-**input_glossary_document:** `typing.Optional[str]` +
+
+ +```python +from gooey import Gooey +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3object_inpainting_async_form( + input_image="input_image", + text_prompt="text_prompt", +) -Translation Glossary for User Langauge -> LLM Language (English) - - +``` +
+
+#### ⚙️ Parameters +
-**output_glossary_document:** `typing.Optional[str]` - +
+
-Translation Glossary for LLM Language (English) -> User Langauge - +**input_image:** `str`
@@ -6955,7 +7292,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**lipsync_model:** `typing.Optional[VideoBotsStreamCreateRequestLipsyncModel]` +**text_prompt:** `str`
@@ -6963,7 +7300,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**tools:** `typing.Optional[typing.List[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). +**functions:** `typing.Optional[typing.List[RecipeFunction]]`
@@ -6971,7 +7308,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**avoid_repetition:** `typing.Optional[bool]` +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -6979,7 +7316,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**num_outputs:** `typing.Optional[int]` +**obj_scale:** `typing.Optional[float]`
@@ -6987,7 +7324,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**quality:** `typing.Optional[float]` +**obj_pos_x:** `typing.Optional[float]`
@@ -6995,7 +7332,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**max_tokens:** `typing.Optional[int]` +**obj_pos_y:** `typing.Optional[float]`
@@ -7003,7 +7340,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**sampling_temperature:** `typing.Optional[float]` +**mask_threshold:** `typing.Optional[float]`
@@ -7011,7 +7348,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**response_format_type:** `typing.Optional[VideoBotsStreamCreateRequestResponseFormatType]` +**selected_model:** `typing.Optional[PostV3ObjectInpaintingAsyncFormRequestSelectedModel]`
@@ -7019,7 +7356,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**tts_provider:** `typing.Optional[VideoBotsStreamCreateRequestTtsProvider]` +**negative_prompt:** `typing.Optional[str]`
@@ -7027,7 +7364,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**uberduck_voice_name:** `typing.Optional[str]` +**num_outputs:** `typing.Optional[int]`
@@ -7035,7 +7372,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**uberduck_speaking_rate:** `typing.Optional[float]` +**quality:** `typing.Optional[int]`
@@ -7043,7 +7380,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**google_voice_name:** `typing.Optional[str]` +**output_width:** `typing.Optional[int]`
@@ -7051,7 +7388,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**google_speaking_rate:** `typing.Optional[float]` +**output_height:** `typing.Optional[int]`
@@ -7059,7 +7396,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**google_pitch:** `typing.Optional[float]` +**guidance_scale:** `typing.Optional[float]`
@@ -7067,7 +7404,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**bark_history_prompt:** `typing.Optional[str]` +**sd2upscaling:** `typing.Optional[bool]`
@@ -7075,7 +7412,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead +**seed:** `typing.Optional[int]`
@@ -7083,7 +7420,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_api_key:** `typing.Optional[str]` +**settings:** `typing.Optional[RunSettings]`
@@ -7091,23 +7428,58 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_voice_id:** `typing.Optional[str]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+ + + + +
+
client.post_v3seo_summary_async_form(...)
-**elevenlabs_model:** `typing.Optional[str]` - +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3seo_summary_async_form( + search_query="search_query", + keywords="keywords", + title="title", + company_url="company_url", +) + +``` +
+
+#### ⚙️ Parameters +
-**elevenlabs_stability:** `typing.Optional[float]` +
+
+ +**search_query:** `str`
@@ -7115,7 +7487,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_similarity_boost:** `typing.Optional[float]` +**keywords:** `str`
@@ -7123,7 +7495,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_style:** `typing.Optional[float]` +**title:** `str`
@@ -7131,7 +7503,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_speaker_boost:** `typing.Optional[bool]` +**company_url:** `str`
@@ -7139,7 +7511,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**azure_voice_name:** `typing.Optional[str]` +**task_instructions:** `typing.Optional[str]`
@@ -7147,7 +7519,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**openai_voice_name:** `typing.Optional[VideoBotsStreamCreateRequestOpenaiVoiceName]` +**enable_html:** `typing.Optional[bool]`
@@ -7155,7 +7527,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**openai_tts_model:** `typing.Optional[VideoBotsStreamCreateRequestOpenaiTtsModel]` +**selected_model:** `typing.Optional[PostV3SeoSummaryAsyncFormRequestSelectedModel]`
@@ -7163,7 +7535,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**input_face:** `typing.Optional[str]` +**max_search_urls:** `typing.Optional[int]`
@@ -7171,7 +7543,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**face_padding_top:** `typing.Optional[int]` +**enable_crosslinks:** `typing.Optional[bool]`
@@ -7179,7 +7551,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**face_padding_bottom:** `typing.Optional[int]` +**seed:** `typing.Optional[int]`
@@ -7187,7 +7559,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**face_padding_left:** `typing.Optional[int]` +**avoid_repetition:** `typing.Optional[bool]`
@@ -7195,7 +7567,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**face_padding_right:** `typing.Optional[int]` +**num_outputs:** `typing.Optional[int]`
@@ -7203,7 +7575,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` +**quality:** `typing.Optional[float]`
@@ -7211,7 +7583,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**input_text:** `typing.Optional[str]` — Use `input_prompt` instead +**max_tokens:** `typing.Optional[int]`
@@ -7219,55 +7591,55 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**sampling_temperature:** `typing.Optional[float]`
-
-
+
+
+**response_format_type:** `typing.Optional[PostV3SeoSummaryAsyncFormRequestResponseFormatType]` +
-
-
client.copilot_integrations.video_bots_stream(...)
-#### 🔌 Usage +**serp_search_location:** `typing.Optional[SerpSearchLocation]` + +
+
+**scaleserp_locations:** `typing.Optional[typing.List[str]]` — DEPRECATED: use `serp_search_location` instead + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.copilot_integrations.video_bots_stream( - request_id="request_id", -) - -``` -
-
+**serp_search_type:** `typing.Optional[SerpSearchType]` + -#### ⚙️ Parameters -
+**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead + +
+
+
-**request_id:** `str` +**settings:** `typing.Optional[RunSettings]`
@@ -7287,8 +7659,7 @@ client.copilot_integrations.video_bots_stream(
-## CopilotForYourEnterprise -
client.copilot_for_your_enterprise.async_form_video_bots(...) +
client.post_v3smart_gpt_async_form(...)
@@ -7306,7 +7677,9 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.copilot_for_your_enterprise.async_form_video_bots() +client.post_v3smart_gpt_async_form( + input_prompt="input_prompt", +) ```
@@ -7322,7 +7695,7 @@ client.copilot_for_your_enterprise.async_form_video_bots()
-**example_id:** `typing.Optional[str]` +**input_prompt:** `str`
@@ -7338,7 +7711,7 @@ client.copilot_for_your_enterprise.async_form_video_bots()
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -7346,7 +7719,7 @@ client.copilot_for_your_enterprise.async_form_video_bots()
-**input_prompt:** `typing.Optional[str]` +**cot_prompt:** `typing.Optional[str]`
@@ -7354,7 +7727,7 @@ client.copilot_for_your_enterprise.async_form_video_bots()
-**input_audio:** `typing.Optional[str]` +**reflexion_prompt:** `typing.Optional[str]`
@@ -7362,7 +7735,7 @@ client.copilot_for_your_enterprise.async_form_video_bots()
-**input_images:** `typing.Optional[typing.List[str]]` +**dera_prompt:** `typing.Optional[str]`
@@ -7370,7 +7743,7 @@ client.copilot_for_your_enterprise.async_form_video_bots()
-**input_documents:** `typing.Optional[typing.List[str]]` +**selected_model:** `typing.Optional[PostV3SmartGptAsyncFormRequestSelectedModel]`
@@ -7378,7 +7751,7 @@ client.copilot_for_your_enterprise.async_form_video_bots()
-**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images. +**avoid_repetition:** `typing.Optional[bool]`
@@ -7386,7 +7759,7 @@ client.copilot_for_your_enterprise.async_form_video_bots()
-**messages:** `typing.Optional[typing.List[ConversationEntry]]` +**num_outputs:** `typing.Optional[int]`
@@ -7394,7 +7767,7 @@ client.copilot_for_your_enterprise.async_form_video_bots()
-**bot_script:** `typing.Optional[str]` +**quality:** `typing.Optional[float]`
@@ -7402,15 +7775,7 @@ client.copilot_for_your_enterprise.async_form_video_bots()
-**selected_model:** `typing.Optional[AsyncFormVideoBotsRequestSelectedModel]` - -
-
- -
-
- -**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) +**max_tokens:** `typing.Optional[int]`
@@ -7418,7 +7783,7 @@ client.copilot_for_your_enterprise.async_form_video_bots()
-**task_instructions:** `typing.Optional[str]` +**sampling_temperature:** `typing.Optional[float]`
@@ -7426,7 +7791,7 @@ client.copilot_for_your_enterprise.async_form_video_bots()
-**query_instructions:** `typing.Optional[str]` +**response_format_type:** `typing.Optional[PostV3SmartGptAsyncFormRequestResponseFormatType]`
@@ -7434,7 +7799,7 @@ client.copilot_for_your_enterprise.async_form_video_bots()
-**keyword_instructions:** `typing.Optional[str]` +**settings:** `typing.Optional[RunSettings]`
@@ -7442,68 +7807,55 @@ client.copilot_for_your_enterprise.async_form_video_bots()
-**documents:** `typing.Optional[typing.List[str]]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
- -
-
- -**max_references:** `typing.Optional[int]` -
-
-
-**max_context_words:** `typing.Optional[int]` -
+
+
client.post_v3social_lookup_email_async_form(...)
-**scroll_jump:** `typing.Optional[int]` - -
-
+#### 🔌 Usage
-**embedding_model:** `typing.Optional[AsyncFormVideoBotsRequestEmbeddingModel]` - -
-
-
-**dense_weight:** `typing.Optional[float]` +```python +from gooey import Gooey +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3social_lookup_email_async_form( + email_address="email_address", +) -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - +``` +
+
+#### ⚙️ Parameters +
-**citation_style:** `typing.Optional[AsyncFormVideoBotsRequestCitationStyle]` - -
-
-
-**use_url_shortener:** `typing.Optional[bool]` +**email_address:** `str`
@@ -7511,7 +7863,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**asr_model:** `typing.Optional[AsyncFormVideoBotsRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text. +**functions:** `typing.Optional[typing.List[RecipeFunction]]`
@@ -7519,7 +7871,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text. +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -7527,7 +7879,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**translation_model:** `typing.Optional[AsyncFormVideoBotsRequestTranslationModel]` +**input_prompt:** `typing.Optional[str]`
@@ -7535,7 +7887,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. +**selected_model:** `typing.Optional[PostV3SocialLookupEmailAsyncFormRequestSelectedModel]`
@@ -7543,11 +7895,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**input_glossary_document:** `typing.Optional[str]` - - -Translation Glossary for User Langauge -> LLM Language (English) - +**avoid_repetition:** `typing.Optional[bool]`
@@ -7555,11 +7903,7 @@ Translation Glossary for User Langauge -> LLM Language (English)
-**output_glossary_document:** `typing.Optional[str]` - - -Translation Glossary for LLM Language (English) -> User Langauge - +**num_outputs:** `typing.Optional[int]`
@@ -7567,7 +7911,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**lipsync_model:** `typing.Optional[AsyncFormVideoBotsRequestLipsyncModel]` +**quality:** `typing.Optional[float]`
@@ -7575,7 +7919,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**tools:** `typing.Optional[typing.List[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). +**max_tokens:** `typing.Optional[int]`
@@ -7583,7 +7927,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**avoid_repetition:** `typing.Optional[bool]` +**sampling_temperature:** `typing.Optional[float]`
@@ -7591,7 +7935,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**num_outputs:** `typing.Optional[int]` +**response_format_type:** `typing.Optional[PostV3SocialLookupEmailAsyncFormRequestResponseFormatType]`
@@ -7599,7 +7943,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**quality:** `typing.Optional[float]` +**settings:** `typing.Optional[RunSettings]`
@@ -7607,55 +7951,55 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**max_tokens:** `typing.Optional[int]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ + -
-
-**sampling_temperature:** `typing.Optional[float]` -
+
+
client.post_v3text_to_speech_async_form(...)
-**response_format_type:** `typing.Optional[AsyncFormVideoBotsRequestResponseFormatType]` - -
-
+#### 🔌 Usage
-**tts_provider:** `typing.Optional[AsyncFormVideoBotsRequestTtsProvider]` - -
-
-
-**uberduck_voice_name:** `typing.Optional[str]` - +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3text_to_speech_async_form( + text_prompt="text_prompt", +) + +```
+ + + +#### ⚙️ Parameters
-**uberduck_speaking_rate:** `typing.Optional[float]` - -
-
-
-**google_voice_name:** `typing.Optional[str]` +**text_prompt:** `str`
@@ -7663,7 +8007,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**google_speaking_rate:** `typing.Optional[float]` +**functions:** `typing.Optional[typing.List[RecipeFunction]]`
@@ -7671,7 +8015,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**google_pitch:** `typing.Optional[float]` +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -7679,7 +8023,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**bark_history_prompt:** `typing.Optional[str]` +**tts_provider:** `typing.Optional[PostV3TextToSpeechAsyncFormRequestTtsProvider]`
@@ -7687,7 +8031,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead +**uberduck_voice_name:** `typing.Optional[str]`
@@ -7695,7 +8039,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_api_key:** `typing.Optional[str]` +**uberduck_speaking_rate:** `typing.Optional[float]`
@@ -7703,7 +8047,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_voice_id:** `typing.Optional[str]` +**google_voice_name:** `typing.Optional[str]`
@@ -7711,7 +8055,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_model:** `typing.Optional[str]` +**google_speaking_rate:** `typing.Optional[float]`
@@ -7719,7 +8063,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_stability:** `typing.Optional[float]` +**google_pitch:** `typing.Optional[float]`
@@ -7727,7 +8071,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_similarity_boost:** `typing.Optional[float]` +**bark_history_prompt:** `typing.Optional[str]`
@@ -7735,7 +8079,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_style:** `typing.Optional[float]` +**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
@@ -7743,7 +8087,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**elevenlabs_speaker_boost:** `typing.Optional[bool]` +**elevenlabs_api_key:** `typing.Optional[str]`
@@ -7751,7 +8095,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**azure_voice_name:** `typing.Optional[str]` +**elevenlabs_voice_id:** `typing.Optional[str]`
@@ -7759,7 +8103,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**openai_voice_name:** `typing.Optional[AsyncFormVideoBotsRequestOpenaiVoiceName]` +**elevenlabs_model:** `typing.Optional[str]`
@@ -7767,7 +8111,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**openai_tts_model:** `typing.Optional[AsyncFormVideoBotsRequestOpenaiTtsModel]` +**elevenlabs_stability:** `typing.Optional[float]`
@@ -7775,7 +8119,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**input_face:** `typing.Optional[str]` +**elevenlabs_similarity_boost:** `typing.Optional[float]`
@@ -7783,7 +8127,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**face_padding_top:** `typing.Optional[int]` +**elevenlabs_style:** `typing.Optional[float]`
@@ -7791,7 +8135,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**face_padding_bottom:** `typing.Optional[int]` +**elevenlabs_speaker_boost:** `typing.Optional[bool]`
@@ -7799,7 +8143,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**face_padding_left:** `typing.Optional[int]` +**azure_voice_name:** `typing.Optional[str]`
@@ -7807,7 +8151,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**face_padding_right:** `typing.Optional[int]` +**openai_voice_name:** `typing.Optional[PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName]`
@@ -7815,7 +8159,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` +**openai_tts_model:** `typing.Optional[PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel]`
@@ -7843,7 +8187,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-
client.copilot_for_your_enterprise.status_video_bots(...) +
client.post_v3art_qr_code_async_form(...)
@@ -7861,8 +8205,8 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.copilot_for_your_enterprise.status_video_bots( - run_id="run_id", +client.post_v3art_qr_code_async_form( + text_prompt="text_prompt", ) ``` @@ -7879,7 +8223,7 @@ client.copilot_for_your_enterprise.status_video_bots(
-**run_id:** `str` +**text_prompt:** `str`
@@ -7887,56 +8231,55 @@ client.copilot_for_your_enterprise.status_video_bots(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**functions:** `typing.Optional[typing.List[RecipeFunction]]`
-
-
+
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +
-
-## AiAnimationGenerator -
client.ai_animation_generator.status_deforum_sd(...)
-#### 🔌 Usage +**qr_code_data:** `typing.Optional[str]` + +
+
+**qr_code_input_image:** `typing.Optional[str]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.ai_animation_generator.status_deforum_sd( - run_id="run_id", -) - -``` -
-
+**qr_code_vcard:** `typing.Optional[Vcard]` + -#### ⚙️ Parameters -
+**qr_code_file:** `typing.Optional[str]` + +
+
+
-**run_id:** `str` +**use_url_shortener:** `typing.Optional[bool]`
@@ -7944,56 +8287,57 @@ client.ai_animation_generator.status_deforum_sd(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**negative_prompt:** `typing.Optional[str]`
- - +
+
+**image_prompt:** `typing.Optional[str]` +
-
-## AiArtQrCode -
client.ai_art_qr_code.status_art_qr_code(...)
-#### 🔌 Usage +**image_prompt_controlnet_models:** `typing.Optional[ + typing.List[PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem] +]` + +
+
+**image_prompt_strength:** `typing.Optional[float]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.ai_art_qr_code.status_art_qr_code( - run_id="run_id", -) - -``` -
-
+**image_prompt_scale:** `typing.Optional[float]` + -#### ⚙️ Parameters -
+**image_prompt_pos_x:** `typing.Optional[float]` + +
+
+
-**run_id:** `str` +**image_prompt_pos_y:** `typing.Optional[float]`
@@ -8001,56 +8345,57 @@ client.ai_art_qr_code.status_art_qr_code(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**selected_model:** `typing.Optional[PostV3ArtQrCodeAsyncFormRequestSelectedModel]`
- - +
+
+**selected_controlnet_model:** `typing.Optional[ + typing.List[PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem] +]` +
-
-## GeneratePeopleAlsoAskSeoContent -
client.generate_people_also_ask_seo_content.status_related_qna_maker(...)
-#### 🔌 Usage +**output_width:** `typing.Optional[int]` + +
+
+**output_height:** `typing.Optional[int]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.generate_people_also_ask_seo_content.status_related_qna_maker( - run_id="run_id", -) - -``` -
-
+**guidance_scale:** `typing.Optional[float]` + -#### ⚙️ Parameters -
+**controlnet_conditioning_scale:** `typing.Optional[typing.List[float]]` + +
+
+
-**run_id:** `str` +**num_outputs:** `typing.Optional[int]`
@@ -8058,56 +8403,55 @@ client.generate_people_also_ask_seo_content.status_related_qna_maker(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**quality:** `typing.Optional[int]`
- - +
+
+**scheduler:** `typing.Optional[PostV3ArtQrCodeAsyncFormRequestScheduler]` +
-
-## CreateAPerfectSeoOptimizedTitleParagraph -
client.create_a_perfect_seo_optimized_title_paragraph.status_seo_summary(...)
-#### 🔌 Usage +**seed:** `typing.Optional[int]` + +
+
+**obj_scale:** `typing.Optional[float]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.create_a_perfect_seo_optimized_title_paragraph.status_seo_summary( - run_id="run_id", -) - -``` -
-
+**obj_pos_x:** `typing.Optional[float]` + -#### ⚙️ Parameters -
+**obj_pos_y:** `typing.Optional[float]` + +
+
+
-**run_id:** `str` +**settings:** `typing.Optional[RunSettings]`
@@ -8127,8 +8471,7 @@ client.create_a_perfect_seo_optimized_title_paragraph.status_seo_summary(
-## WebSearchGpt3 -
client.web_search_gpt3.status_google_gpt(...) +
client.post_v3asr_async_form(...)
@@ -8146,8 +8489,8 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.web_search_gpt3.status_google_gpt( - run_id="run_id", +client.post_v3asr_async_form( + documents=["documents"], ) ``` @@ -8164,7 +8507,7 @@ client.web_search_gpt3.status_google_gpt(
-**run_id:** `str` +**documents:** `typing.List[str]`
@@ -8172,56 +8515,39 @@ client.web_search_gpt3.status_google_gpt(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**functions:** `typing.Optional[typing.List[RecipeFunction]]`
-
-
+
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +
-
-## ProfileLookupGpt3ForAiPersonalizedEmails -
client.profile_lookup_gpt3for_ai_personalized_emails.status_social_lookup_email(...)
-#### 🔌 Usage - -
-
+**selected_model:** `typing.Optional[PostV3AsrAsyncFormRequestSelectedModel]` + +
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.profile_lookup_gpt3for_ai_personalized_emails.status_social_lookup_email( - run_id="run_id", -) - -``` -
-
+**language:** `typing.Optional[str]` +
-#### ⚙️ Parameters - -
-
-
-**run_id:** `str` +**translation_model:** `typing.Optional[PostV3AsrAsyncFormRequestTranslationModel]`
@@ -8229,56 +8555,50 @@ client.profile_lookup_gpt3for_ai_personalized_emails.status_social_lookup_email(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**output_format:** `typing.Optional[PostV3AsrAsyncFormRequestOutputFormat]`
-
-
+
+
+**google_translate_target:** `typing.Optional[str]` — use `translation_model` & `translation_target` instead. +
-
-## BulkRunner -
client.bulk_runner.status_bulk_runner(...)
-#### 🔌 Usage +**translation_source:** `typing.Optional[str]` + +
+
+**translation_target:** `typing.Optional[str]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.bulk_runner.status_bulk_runner( - run_id="run_id", -) +**glossary_document:** `typing.Optional[str]` -``` -
-
+Provide a glossary to customize translation and improve accuracy of domain-specific terms. +If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). + -#### ⚙️ Parameters - -
-
-
-**run_id:** `str` +**settings:** `typing.Optional[RunSettings]`
@@ -8298,8 +8618,7 @@ client.bulk_runner.status_bulk_runner(
-## Evaluator -
client.evaluator.async_form_bulk_eval(...) +
client.post_v3bulk_eval_async_form(...)
@@ -8317,7 +8636,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.evaluator.async_form_bulk_eval( +client.post_v3bulk_eval_async_form( documents=["documents"], ) @@ -8343,14 +8662,6 @@ For example, for Copilot, this would sample questions or for Art QR Code, would Remember to includes header names in your CSV too. -
-
- -
-
- -**example_id:** `typing.Optional[str]` -
@@ -8365,7 +8676,7 @@ Remember to includes header names in your CSV too.
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -8398,7 +8709,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**selected_model:** `typing.Optional[AsyncFormBulkEvalRequestSelectedModel]` +**selected_model:** `typing.Optional[PostV3BulkEvalAsyncFormRequestSelectedModel]`
@@ -8446,7 +8757,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**response_format_type:** `typing.Optional[AsyncFormBulkEvalRequestResponseFormatType]` +**response_format_type:** `typing.Optional[PostV3BulkEvalAsyncFormRequestResponseFormatType]`
@@ -8474,7 +8785,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-
client.evaluator.status_bulk_eval(...) +
client.post_v3bulk_runner_async_form(...)
@@ -8492,8 +8803,11 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.evaluator.status_bulk_eval( - run_id="run_id", +client.post_v3bulk_runner_async_form( + documents=["documents"], + run_urls=["run_urls"], + input_columns={"key": "value"}, + output_columns={"key": "value"}, ) ``` @@ -8510,7 +8824,13 @@ client.evaluator.status_bulk_eval(
-**run_id:** `str` +**documents:** `typing.List[str]` + + +Upload or link to a CSV or google sheet that contains your sample input data. +For example, for Copilot, this would be sample questions or for Art QR Code, this would be pairs of image descriptions and URLs. +Remember to include header names in your CSV too. +
@@ -8518,56 +8838,72 @@ client.evaluator.status_bulk_eval(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**run_urls:** `typing.List[str]` + + +Provide one or more Gooey.AI workflow runs. +You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. +
+ +
+
+ +**input_columns:** `typing.Dict[str, str]` + + +For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. + +
+
+
+ +**output_columns:** `typing.Dict[str, str]` + +For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. + +
-
-## SyntheticDataMakerForVideosPdFs -
client.synthetic_data_maker_for_videos_pd_fs.status_doc_extract(...)
-#### 🔌 Usage +**functions:** `typing.Optional[typing.List[RecipeFunction]]` + +
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+
-```python -from gooey import Gooey +**eval_urls:** `typing.Optional[typing.List[str]]` -client = Gooey( - api_key="YOUR_API_KEY", -) -client.synthetic_data_maker_for_videos_pd_fs.status_doc_extract( - run_id="run_id", -) -``` -
-
+_(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. + + -#### ⚙️ Parameters - -
-
-
-**run_id:** `str` +**settings:** `typing.Optional[RunSettings]`
@@ -8587,8 +8923,7 @@ client.synthetic_data_maker_for_videos_pd_fs.status_doc_extract(
-## LargeLanguageModelsGpt3 -
client.large_language_models_gpt3.status_compare_llm(...) +
client.post_v3compare_ai_upscalers_async_form(...)
@@ -8606,8 +8941,8 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.large_language_models_gpt3.status_compare_llm( - run_id="run_id", +client.post_v3compare_ai_upscalers_async_form( + scale=1, ) ``` @@ -8624,7 +8959,7 @@ client.large_language_models_gpt3.status_compare_llm(
-**run_id:** `str` +**scale:** `int` — The final upsampling scale of the image
@@ -8632,56 +8967,57 @@ client.large_language_models_gpt3.status_compare_llm(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**functions:** `typing.Optional[typing.List[RecipeFunction]]`
-
-
+
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +
-
-## SearchYourDocsWithGpt -
client.search_your_docs_with_gpt.status_doc_search(...)
-#### 🔌 Usage +**input_image:** `typing.Optional[str]` — Input Image + +
+
+**input_video:** `typing.Optional[str]` — Input Video + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.search_your_docs_with_gpt.status_doc_search( - run_id="run_id", -) - -``` -
-
+**selected_models:** `typing.Optional[ + typing.List[PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem] +]` + -#### ⚙️ Parameters -
+**selected_bg_model:** `typing.Optional[typing.Literal["real_esrgan_x2"]]` + +
+
+
-**run_id:** `str` +**settings:** `typing.Optional[RunSettings]`
@@ -8701,8 +9037,7 @@ client.search_your_docs_with_gpt.status_doc_search(
-## SmartGpt -
client.smart_gpt.async_form_smart_gpt(...) +
client.post_v3doc_extract_async_form(...)
@@ -8720,8 +9055,8 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.smart_gpt.async_form_smart_gpt( - input_prompt="input_prompt", +client.post_v3doc_extract_async_form( + documents=["documents"], ) ``` @@ -8738,7 +9073,7 @@ client.smart_gpt.async_form_smart_gpt(
-**input_prompt:** `str` +**documents:** `typing.List[str]`
@@ -8746,7 +9081,7 @@ client.smart_gpt.async_form_smart_gpt(
-**example_id:** `typing.Optional[str]` +**functions:** `typing.Optional[typing.List[RecipeFunction]]`
@@ -8754,7 +9089,7 @@ client.smart_gpt.async_form_smart_gpt(
-**functions:** `typing.Optional[typing.List[RecipeFunction]]` +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -8762,7 +9097,7 @@ client.smart_gpt.async_form_smart_gpt(
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**sheet_url:** `typing.Optional[str]`
@@ -8770,7 +9105,7 @@ client.smart_gpt.async_form_smart_gpt(
-**cot_prompt:** `typing.Optional[str]` +**selected_asr_model:** `typing.Optional[PostV3DocExtractAsyncFormRequestSelectedAsrModel]`
@@ -8778,7 +9113,7 @@ client.smart_gpt.async_form_smart_gpt(
-**reflexion_prompt:** `typing.Optional[str]` +**google_translate_target:** `typing.Optional[str]`
@@ -8786,7 +9121,18 @@ client.smart_gpt.async_form_smart_gpt(
-**dera_prompt:** `typing.Optional[str]` +**glossary_document:** `typing.Optional[str]` + +Provide a glossary to customize translation and improve accuracy of domain-specific terms. +If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). + +
+
+ +
+
+ +**task_instructions:** `typing.Optional[str]`
@@ -8794,7 +9140,7 @@ client.smart_gpt.async_form_smart_gpt(
-**selected_model:** `typing.Optional[AsyncFormSmartGptRequestSelectedModel]` +**selected_model:** `typing.Optional[PostV3DocExtractAsyncFormRequestSelectedModel]`
@@ -8842,7 +9188,7 @@ client.smart_gpt.async_form_smart_gpt(
-**response_format_type:** `typing.Optional[AsyncFormSmartGptRequestResponseFormatType]` +**response_format_type:** `typing.Optional[PostV3DocExtractAsyncFormRequestResponseFormatType]`
@@ -8870,7 +9216,7 @@ client.smart_gpt.async_form_smart_gpt(
-
client.smart_gpt.status_smart_gpt(...) +
client.post_v3doc_search_async_form(...)
@@ -8888,8 +9234,8 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.smart_gpt.status_smart_gpt( - run_id="run_id", +client.post_v3doc_search_async_form( + search_query="search_query", ) ``` @@ -8906,7 +9252,7 @@ client.smart_gpt.status_smart_gpt(
-**run_id:** `str` +**search_query:** `str`
@@ -8914,56 +9260,39 @@ client.smart_gpt.status_smart_gpt(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**functions:** `typing.Optional[typing.List[RecipeFunction]]`
-
-
+
+
+**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +
-
-## SummarizeYourDocsWithGpt -
client.summarize_your_docs_with_gpt.status_doc_summary(...)
-#### 🔌 Usage - -
-
+**keyword_query:** `typing.Optional[PostV3DocSearchAsyncFormRequestKeywordQuery]` + +
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.summarize_your_docs_with_gpt.status_doc_summary( - run_id="run_id", -) - -``` -
-
+**documents:** `typing.Optional[typing.List[str]]` +
-#### ⚙️ Parameters - -
-
-
-**run_id:** `str` +**max_references:** `typing.Optional[int]`
@@ -8971,54 +9300,52 @@ client.summarize_your_docs_with_gpt.status_doc_summary(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**max_context_words:** `typing.Optional[int]`
-
-
+
+
+**scroll_jump:** `typing.Optional[int]` +
-
-## Functions -
client.functions.async_form_functions(...)
-#### 🔌 Usage +**doc_extract_url:** `typing.Optional[str]` + +
+
+**embedding_model:** `typing.Optional[PostV3DocSearchAsyncFormRequestEmbeddingModel]` + +
+
+
-```python -from gooey import Gooey +**dense_weight:** `typing.Optional[float]` -client = Gooey( - api_key="YOUR_API_KEY", -) -client.functions.async_form_functions() -``` -
-
+Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. +Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + -#### ⚙️ Parameters -
-
-
- -**example_id:** `typing.Optional[str]` +**task_instructions:** `typing.Optional[str]`
@@ -9026,7 +9353,7 @@ client.functions.async_form_functions()
-**code:** `typing.Optional[str]` — The JS code to be executed. +**query_instructions:** `typing.Optional[str]`
@@ -9034,7 +9361,7 @@ client.functions.async_form_functions()
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used in the code +**selected_model:** `typing.Optional[PostV3DocSearchAsyncFormRequestSelectedModel]`
@@ -9042,7 +9369,7 @@ client.functions.async_form_functions()
-**settings:** `typing.Optional[RunSettings]` +**citation_style:** `typing.Optional[PostV3DocSearchAsyncFormRequestCitationStyle]`
@@ -9050,55 +9377,55 @@ client.functions.async_form_functions()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**avoid_repetition:** `typing.Optional[bool]`
-
-
+
+
+**num_outputs:** `typing.Optional[int]` +
-
-
client.functions.status_functions(...)
-#### 🔌 Usage +**quality:** `typing.Optional[float]` + +
+
+**max_tokens:** `typing.Optional[int]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.functions.status_functions( - run_id="run_id", -) - -``` -
-
+**sampling_temperature:** `typing.Optional[float]` + -#### ⚙️ Parameters -
+**response_format_type:** `typing.Optional[PostV3DocSearchAsyncFormRequestResponseFormatType]` + +
+
+
-**run_id:** `str` +**settings:** `typing.Optional[RunSettings]`
@@ -9118,8 +9445,7 @@ client.functions.status_functions(
-## LipSyncing -
client.lip_syncing.async_form_lipsync(...) +
client.post_v3doc_summary_async_form(...)
@@ -9137,7 +9463,9 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.lip_syncing.async_form_lipsync() +client.post_v3doc_summary_async_form( + documents=["documents"], +) ```
@@ -9153,7 +9481,7 @@ client.lip_syncing.async_form_lipsync()
-**example_id:** `typing.Optional[str]` +**documents:** `typing.List[str]`
@@ -9169,7 +9497,7 @@ client.lip_syncing.async_form_lipsync()
-**variables:** `typing.Optional[typing.Dict[str, typing.Any]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -9177,7 +9505,7 @@ client.lip_syncing.async_form_lipsync()
-**input_face:** `typing.Optional[str]` +**task_instructions:** `typing.Optional[str]`
@@ -9185,7 +9513,7 @@ client.lip_syncing.async_form_lipsync()
-**face_padding_top:** `typing.Optional[int]` +**merge_instructions:** `typing.Optional[str]`
@@ -9193,7 +9521,7 @@ client.lip_syncing.async_form_lipsync()
-**face_padding_bottom:** `typing.Optional[int]` +**selected_model:** `typing.Optional[PostV3DocSummaryAsyncFormRequestSelectedModel]`
@@ -9201,7 +9529,7 @@ client.lip_syncing.async_form_lipsync()
-**face_padding_left:** `typing.Optional[int]` +**chain_type:** `typing.Optional[typing.Literal["map_reduce"]]`
@@ -9209,7 +9537,7 @@ client.lip_syncing.async_form_lipsync()
-**face_padding_right:** `typing.Optional[int]` +**selected_asr_model:** `typing.Optional[PostV3DocSummaryAsyncFormRequestSelectedAsrModel]`
@@ -9217,7 +9545,7 @@ client.lip_syncing.async_form_lipsync()
-**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` +**google_translate_target:** `typing.Optional[str]`
@@ -9225,7 +9553,7 @@ client.lip_syncing.async_form_lipsync()
-**selected_model:** `typing.Optional[AsyncFormLipsyncRequestSelectedModel]` +**avoid_repetition:** `typing.Optional[bool]`
@@ -9233,7 +9561,7 @@ client.lip_syncing.async_form_lipsync()
-**input_audio:** `typing.Optional[str]` +**num_outputs:** `typing.Optional[int]`
@@ -9241,7 +9569,7 @@ client.lip_syncing.async_form_lipsync()
-**settings:** `typing.Optional[RunSettings]` +**quality:** `typing.Optional[float]`
@@ -9249,55 +9577,31 @@ client.lip_syncing.async_form_lipsync()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**max_tokens:** `typing.Optional[int]`
- -
- - - - -
-
client.lip_syncing.status_lipsync(...)
-#### 🔌 Usage - -
-
+**sampling_temperature:** `typing.Optional[float]` + +
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.lip_syncing.status_lipsync( - run_id="run_id", -) - -``` -
-
+**response_format_type:** `typing.Optional[PostV3DocSummaryAsyncFormRequestResponseFormatType]` +
-#### ⚙️ Parameters - -
-
-
-**run_id:** `str` +**settings:** `typing.Optional[RunSettings]`
@@ -9317,8 +9621,7 @@ client.lip_syncing.status_lipsync(
-## LipsyncVideoWithAnyText -
client.lipsync_video_with_any_text.status_lipsync_tts(...) +
client.post_v3embeddings_async_form(...)
@@ -9336,8 +9639,8 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.lipsync_video_with_any_text.status_lipsync_tts( - run_id="run_id", +client.post_v3embeddings_async_form( + texts=["texts"], ) ``` @@ -9354,7 +9657,7 @@ client.lipsync_video_with_any_text.status_lipsync_tts(
-**run_id:** `str` +**texts:** `typing.List[str]`
@@ -9362,56 +9665,31 @@ client.lipsync_video_with_any_text.status_lipsync_tts(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**functions:** `typing.Optional[typing.List[RecipeFunction]]`
-
-
- - - -
- -## CompareAiVoiceGenerators -
client.compare_ai_voice_generators.status_text_to_speech(...)
-#### 🔌 Usage - -
-
+**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.compare_ai_voice_generators.status_text_to_speech( - run_id="run_id", -) - -``` -
-
+**selected_model:** `typing.Optional[PostV3EmbeddingsAsyncFormRequestSelectedModel]` +
-#### ⚙️ Parameters - -
-
-
-**run_id:** `str` +**settings:** `typing.Optional[RunSettings]`
@@ -9431,8 +9709,7 @@ client.compare_ai_voice_generators.status_text_to_speech(
-## SpeechRecognitionTranslation -
client.speech_recognition_translation.status_asr(...) +
client.post_v3functions_async_form(...)
@@ -9450,9 +9727,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.speech_recognition_translation.status_asr( - run_id="run_id", -) +client.post_v3functions_async_form() ```
@@ -9468,7 +9743,23 @@ client.speech_recognition_translation.status_asr(
-**run_id:** `str` +**code:** `typing.Optional[str]` — The JS code to be executed. + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used in the code + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]`
@@ -9488,8 +9779,7 @@ client.speech_recognition_translation.status_asr(
-## TextGuidedAudioGenerator -
client.text_guided_audio_generator.status_text2audio(...) +
client.post_v3google_gpt_async_form(...)
@@ -9507,8 +9797,9 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.text_guided_audio_generator.status_text2audio( - run_id="run_id", +client.post_v3google_gpt_async_form( + search_query="search_query", + site_filter="site_filter", ) ``` @@ -9525,7 +9816,7 @@ client.text_guided_audio_generator.status_text2audio(
-**run_id:** `str` +**search_query:** `str`
@@ -9533,56 +9824,79 @@ client.text_guided_audio_generator.status_text2audio(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**site_filter:** `str`
-
-
+
+
+**functions:** `typing.Optional[typing.List[RecipeFunction]]` +
-
-## CompareAiTranslations -
client.compare_ai_translations.status_translate(...)
-#### 🔌 Usage +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
-
+**task_instructions:** `typing.Optional[str]` + +
+
+ +
-```python -from gooey import Gooey +**query_instructions:** `typing.Optional[str]` + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.compare_ai_translations.status_translate( - run_id="run_id", -) +
+
-``` +**selected_model:** `typing.Optional[PostV3GoogleGptAsyncFormRequestSelectedModel]` +
+ +
+
+ +**max_search_urls:** `typing.Optional[int]` +
-#### ⚙️ Parameters +
+
+ +**max_references:** `typing.Optional[int]` + +
+
+**max_context_words:** `typing.Optional[int]` + +
+
+
-**run_id:** `str` +**scroll_jump:** `typing.Optional[int]`
@@ -9590,56 +9904,108 @@ client.compare_ai_translations.status_translate(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**embedding_model:** `typing.Optional[PostV3GoogleGptAsyncFormRequestEmbeddingModel]`
+ +
+
+ +**dense_weight:** `typing.Optional[float]` + + +Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. +Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + +
+
+
+**avoid_repetition:** `typing.Optional[bool]` +
-
-## EditAnImageWithAiPrompt -
client.edit_an_image_with_ai_prompt.status_img2img(...)
-#### 🔌 Usage +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[float]` + +
+
+**max_tokens:** `typing.Optional[int]` + +
+
+
-```python -from gooey import Gooey +**sampling_temperature:** `typing.Optional[float]` + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.edit_an_image_with_ai_prompt.status_img2img( - run_id="run_id", -) +
+
-``` +**response_format_type:** `typing.Optional[PostV3GoogleGptAsyncFormRequestResponseFormatType]` +
+ +
+
+ +**serp_search_location:** `typing.Optional[SerpSearchLocation]` +
-#### ⚙️ Parameters +
+
+ +**scaleserp_locations:** `typing.Optional[typing.List[str]]` — DEPRECATED: use `serp_search_location` instead + +
+
+ +
+
+ +**serp_search_type:** `typing.Optional[SerpSearchType]` + +
+
+**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead + +
+
+
-**run_id:** `str` +**settings:** `typing.Optional[RunSettings]`
@@ -9659,8 +10025,7 @@ client.edit_an_image_with_ai_prompt.status_img2img(
-## CompareAiImageGenerators -
client.compare_ai_image_generators.status_compare_text2img(...) +
client.post_v3related_qna_maker_doc_async_form(...)
@@ -9678,8 +10043,8 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.compare_ai_image_generators.status_compare_text2img( - run_id="run_id", +client.post_v3related_qna_maker_doc_async_form( + search_query="search_query", ) ``` @@ -9696,7 +10061,7 @@ client.compare_ai_image_generators.status_compare_text2img(
-**run_id:** `str` +**search_query:** `str`
@@ -9704,56 +10069,2470 @@ client.compare_ai_image_generators.status_compare_text2img(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**functions:** `typing.Optional[typing.List[RecipeFunction]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
+ +
+
+ +**keyword_query:** `typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery]` +
+
+
+**documents:** `typing.Optional[typing.List[str]]` +
-
-## GenerateProductPhotoBackgrounds -
client.generate_product_photo_backgrounds.status_object_inpainting(...)
-#### 🔌 Usage +**max_references:** `typing.Optional[int]` + +
+
+**max_context_words:** `typing.Optional[int]` + +
+
+
-```python -from gooey import Gooey +**scroll_jump:** `typing.Optional[int]` + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.generate_product_photo_backgrounds.status_object_inpainting( - run_id="run_id", -) +
+
-``` +**doc_extract_url:** `typing.Optional[str]` + +
+
+ +
+
+ +**embedding_model:** `typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel]` + +
+
+ +
+
+ +**dense_weight:** `typing.Optional[float]` + + +Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. +Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + +
+
+ +
+
+ +**task_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**query_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**selected_model:** `typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel]` + +
+
+ +
+
+ +**citation_style:** `typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle]` + +
+
+ +
+
+ +**avoid_repetition:** `typing.Optional[bool]` + +
+
+ +
+
+ +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[float]` + +
+
+ +
+
+ +**max_tokens:** `typing.Optional[int]` + +
+
+ +
+
+ +**sampling_temperature:** `typing.Optional[float]` +
+ +
+
+ +**response_format_type:** `typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType]` + +
+
+ +
+
+ +**serp_search_location:** `typing.Optional[SerpSearchLocation]` + +
+
+ +
+
+ +**scaleserp_locations:** `typing.Optional[typing.List[str]]` — DEPRECATED: use `serp_search_location` instead + +
+
+ +
+
+ +**serp_search_type:** `typing.Optional[SerpSearchType]` + +
+
+ +
+
+ +**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+ + + + + + +
+ +
client.post_v3related_qna_maker_async_form(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3related_qna_maker_async_form( + search_query="search_query", + site_filter="site_filter", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**search_query:** `str` + +
+
+ +
+
+ +**site_filter:** `str` + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.List[RecipeFunction]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**task_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**query_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**selected_model:** `typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestSelectedModel]` + +
+
+ +
+
+ +**max_search_urls:** `typing.Optional[int]` + +
+
+ +
+
+ +**max_references:** `typing.Optional[int]` + +
+
+ +
+
+ +**max_context_words:** `typing.Optional[int]` + +
+
+ +
+
+ +**scroll_jump:** `typing.Optional[int]` + +
+
+ +
+
+ +**embedding_model:** `typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel]` + +
+
+ +
+
+ +**dense_weight:** `typing.Optional[float]` + + +Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. +Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + +
+
+ +
+
+ +**avoid_repetition:** `typing.Optional[bool]` + +
+
+ +
+
+ +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[float]` + +
+
+ +
+
+ +**max_tokens:** `typing.Optional[int]` + +
+
+ +
+
+ +**sampling_temperature:** `typing.Optional[float]` + +
+
+ +
+
+ +**response_format_type:** `typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType]` + +
+
+ +
+
+ +**serp_search_location:** `typing.Optional[SerpSearchLocation]` + +
+
+ +
+
+ +**scaleserp_locations:** `typing.Optional[typing.List[str]]` — DEPRECATED: use `serp_search_location` instead + +
+
+ +
+
+ +**serp_search_type:** `typing.Optional[SerpSearchType]` + +
+
+ +
+
+ +**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.post_v3text2audio_async_form(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3text2audio_async_form( + text_prompt="text_prompt", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**text_prompt:** `str` + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.List[RecipeFunction]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**negative_prompt:** `typing.Optional[str]` + +
+
+ +
+
+ +**duration_sec:** `typing.Optional[float]` + +
+
+ +
+
+ +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[int]` + +
+
+ +
+
+ +**guidance_scale:** `typing.Optional[float]` + +
+
+ +
+
+ +**seed:** `typing.Optional[int]` + +
+
+ +
+
+ +**sd2upscaling:** `typing.Optional[bool]` + +
+
+ +
+
+ +**selected_models:** `typing.Optional[typing.List[typing.Literal["audio_ldm"]]]` + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.post_v3translate_async_form(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3translate_async_form() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.List[RecipeFunction]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**texts:** `typing.Optional[typing.List[str]]` + +
+
+ +
+
+ +**selected_model:** `typing.Optional[PostV3TranslateAsyncFormRequestSelectedModel]` + +
+
+ +
+
+ +**translation_source:** `typing.Optional[str]` + +
+
+ +
+
+ +**translation_target:** `typing.Optional[str]` + +
+
+ +
+
+ +**glossary_document:** `typing.Optional[str]` + +Provide a glossary to customize translation and improve accuracy of domain-specific terms. +If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.post_v3video_bots_async_form(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3video_bots_async_form() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.List[RecipeFunction]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**input_prompt:** `typing.Optional[str]` + +
+
+ +
+
+ +**input_audio:** `typing.Optional[str]` + +
+
+ +
+
+ +**input_images:** `typing.Optional[typing.List[str]]` + +
+
+ +
+
+ +**input_documents:** `typing.Optional[typing.List[str]]` + +
+
+ +
+
+ +**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images. + +
+
+ +
+
+ +**messages:** `typing.Optional[typing.List[ConversationEntry]]` + +
+
+ +
+
+ +**bot_script:** `typing.Optional[str]` + +
+
+ +
+
+ +**selected_model:** `typing.Optional[PostV3VideoBotsAsyncFormRequestSelectedModel]` + +
+
+ +
+
+ +**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) + +
+
+ +
+
+ +**task_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**query_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**keyword_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**documents:** `typing.Optional[typing.List[str]]` + +
+
+ +
+
+ +**max_references:** `typing.Optional[int]` + +
+
+ +
+
+ +**max_context_words:** `typing.Optional[int]` + +
+
+ +
+
+ +**scroll_jump:** `typing.Optional[int]` + +
+
+ +
+
+ +**embedding_model:** `typing.Optional[PostV3VideoBotsAsyncFormRequestEmbeddingModel]` + +
+
+ +
+
+ +**dense_weight:** `typing.Optional[float]` + + +Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. +Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + +
+
+ +
+
+ +**citation_style:** `typing.Optional[PostV3VideoBotsAsyncFormRequestCitationStyle]` + +
+
+ +
+
+ +**use_url_shortener:** `typing.Optional[bool]` + +
+
+ +
+
+ +**asr_model:** `typing.Optional[PostV3VideoBotsAsyncFormRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text. + +
+
+ +
+
+ +**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text. + +
+
+ +
+
+ +**translation_model:** `typing.Optional[PostV3VideoBotsAsyncFormRequestTranslationModel]` + +
+
+ +
+
+ +**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. + +
+
+ +
+
+ +**input_glossary_document:** `typing.Optional[str]` + + +Translation Glossary for User Language -> LLM Language (English) + + +
+
+ +
+
+ +**output_glossary_document:** `typing.Optional[str]` + + +Translation Glossary for LLM Language (English) -> User Language + + +
+
+ +
+
+ +**lipsync_model:** `typing.Optional[PostV3VideoBotsAsyncFormRequestLipsyncModel]` + +
+
+ +
+
+ +**tools:** `typing.Optional[typing.List[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). + +
+
+ +
+
+ +**avoid_repetition:** `typing.Optional[bool]` + +
+
+ +
+
+ +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[float]` + +
+
+ +
+
+ +**max_tokens:** `typing.Optional[int]` + +
+
+ +
+
+ +**sampling_temperature:** `typing.Optional[float]` + +
+
+ +
+
+ +**response_format_type:** `typing.Optional[PostV3VideoBotsAsyncFormRequestResponseFormatType]` + +
+
+ +
+
+ +**tts_provider:** `typing.Optional[PostV3VideoBotsAsyncFormRequestTtsProvider]` + +
+
+ +
+
+ +**uberduck_voice_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**uberduck_speaking_rate:** `typing.Optional[float]` + +
+
+ +
+
+ +**google_voice_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**google_speaking_rate:** `typing.Optional[float]` + +
+
+ +
+
+ +**google_pitch:** `typing.Optional[float]` + +
+
+ +
+
+ +**bark_history_prompt:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead + +
+
+ +
+
+ +**elevenlabs_api_key:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_voice_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_model:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_stability:** `typing.Optional[float]` + +
+
+ +
+
+ +**elevenlabs_similarity_boost:** `typing.Optional[float]` + +
+
+ +
+
+ +**elevenlabs_style:** `typing.Optional[float]` + +
+
+ +
+
+ +**elevenlabs_speaker_boost:** `typing.Optional[bool]` + +
+
+ +
+
+ +**azure_voice_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**openai_voice_name:** `typing.Optional[PostV3VideoBotsAsyncFormRequestOpenaiVoiceName]` + +
+
+ +
+
+ +**openai_tts_model:** `typing.Optional[PostV3VideoBotsAsyncFormRequestOpenaiTtsModel]` + +
+
+ +
+
+ +**input_face:** `typing.Optional[str]` + +
+
+ +
+
+ +**face_padding_top:** `typing.Optional[int]` + +
+
+ +
+
+ +**face_padding_bottom:** `typing.Optional[int]` + +
+
+ +
+
+ +**face_padding_left:** `typing.Optional[int]` + +
+
+ +
+
+ +**face_padding_right:** `typing.Optional[int]` + +
+
+ +
+
+ +**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## CopilotIntegrations +
client.copilot_integrations.video_bots_stream_create(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.copilot_integrations.video_bots_stream_create( + integration_id="integration_id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**integration_id:** `str` — Your Integration ID as shown in the Copilot Integrations tab + +
+
+ +
+
+ +**conversation_id:** `typing.Optional[str]` + +The gooey conversation ID. + +If not provided, a new conversation will be started and a new ID will be returned in the response. Use this to maintain the state of the conversation between requests. + +Note that you may not provide a custom ID here, and must only use the `conversation_id` returned in a previous response. + +
+
+ +
+
+ +**user_id:** `typing.Optional[str]` + +Your app's custom user ID. + +If not provided, a random user will be created and a new ID will be returned in the response. If a `conversation_id` is provided, this field is automatically set to the user's id associated with that conversation. + +
+
+ +
+
+ +**user_message_id:** `typing.Optional[str]` + +Your app's custom message ID for the user message. + +If not provided, a random ID will be generated and returned in the response. This is useful for tracking messages in the conversation. + +
+
+ +
+
+ +**button_pressed:** `typing.Optional[ButtonPressed]` — The button that was pressed by the user. + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**input_prompt:** `typing.Optional[str]` + +
+
+ +
+
+ +**input_audio:** `typing.Optional[str]` + +
+
+ +
+
+ +**input_images:** `typing.Optional[typing.Sequence[str]]` + +
+
+ +
+
+ +**input_documents:** `typing.Optional[typing.Sequence[str]]` + +
+
+ +
+
+ +**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images. + +
+
+ +
+
+ +**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]` + +
+
+ +
+
+ +**bot_script:** `typing.Optional[str]` + +
+
+ +
+
+ +**selected_model:** `typing.Optional[CreateStreamRequestSelectedModel]` + +
+
+ +
+
+ +**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) + +
+
+ +
+
+ +**task_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**query_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**keyword_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**documents:** `typing.Optional[typing.Sequence[str]]` + +
+
+ +
+
+ +**max_references:** `typing.Optional[int]` + +
+
+ +
+
+ +**max_context_words:** `typing.Optional[int]` + +
+
+ +
+
+ +**scroll_jump:** `typing.Optional[int]` + +
+
+ +
+
+ +**embedding_model:** `typing.Optional[CreateStreamRequestEmbeddingModel]` + +
+
+ +
+
+ +**dense_weight:** `typing.Optional[float]` + + +Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. +Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + +
+
+ +
+
+ +**citation_style:** `typing.Optional[CreateStreamRequestCitationStyle]` + +
+
+ +
+
+ +**use_url_shortener:** `typing.Optional[bool]` + +
+
+ +
+
+ +**asr_model:** `typing.Optional[CreateStreamRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text. + +
+
+ +
+
+ +**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text. + +
+
+ +
+
+ +**translation_model:** `typing.Optional[CreateStreamRequestTranslationModel]` + +
+
+ +
+
+ +**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. + +
+
+ +
+
+ +**input_glossary_document:** `typing.Optional[str]` + + +Translation Glossary for User Language -> LLM Language (English) + + +
+
+ +
+
+ +**output_glossary_document:** `typing.Optional[str]` + + +Translation Glossary for LLM Language (English) -> User Language + + +
+
+ +
+
+ +**lipsync_model:** `typing.Optional[CreateStreamRequestLipsyncModel]` + +
+
+ +
+
+ +**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). + +
+
+ +
+
+ +**avoid_repetition:** `typing.Optional[bool]` + +
+
+ +
+
+ +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[float]` + +
+
+ +
+
+ +**max_tokens:** `typing.Optional[int]` + +
+
+ +
+
+ +**sampling_temperature:** `typing.Optional[float]` + +
+
+ +
+
+ +**response_format_type:** `typing.Optional[CreateStreamRequestResponseFormatType]` + +
+
+ +
+
+ +**tts_provider:** `typing.Optional[CreateStreamRequestTtsProvider]` + +
+
+ +
+
+ +**uberduck_voice_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**uberduck_speaking_rate:** `typing.Optional[float]` + +
+
+ +
+
+ +**google_voice_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**google_speaking_rate:** `typing.Optional[float]` + +
+
+ +
+
+ +**google_pitch:** `typing.Optional[float]` + +
+
+ +
+
+ +**bark_history_prompt:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead + +
+
+ +
+
+ +**elevenlabs_api_key:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_voice_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_model:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_stability:** `typing.Optional[float]` + +
+
+ +
+
+ +**elevenlabs_similarity_boost:** `typing.Optional[float]` + +
+
+ +
+
+ +**elevenlabs_style:** `typing.Optional[float]` + +
+
+ +
+
+ +**elevenlabs_speaker_boost:** `typing.Optional[bool]` + +
+
+ +
+
+ +**azure_voice_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**openai_voice_name:** `typing.Optional[CreateStreamRequestOpenaiVoiceName]` + +
+
+ +
+
+ +**openai_tts_model:** `typing.Optional[CreateStreamRequestOpenaiTtsModel]` + +
+
+ +
+
+ +**input_face:** `typing.Optional[str]` + +
+
+ +
+
+ +**face_padding_top:** `typing.Optional[int]` + +
+
+ +
+
+ +**face_padding_bottom:** `typing.Optional[int]` + +
+
+ +
+
+ +**face_padding_left:** `typing.Optional[int]` + +
+
+ +
+
+ +**face_padding_right:** `typing.Optional[int]` + +
+
+ +
+
+ +**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` + +
+
+ +
+
+ +**input_text:** `typing.Optional[str]` — Use `input_prompt` instead + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.copilot_integrations.video_bots_stream(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.copilot_integrations.video_bots_stream( + request_id="request_id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request_id:** `str` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## CopilotForYourEnterprise +
client.copilot_for_your_enterprise.async_video_bots(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.copilot_for_your_enterprise.async_video_bots() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**example_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**input_prompt:** `typing.Optional[str]` + +
+
+ +
+
+ +**input_audio:** `typing.Optional[str]` + +
+
+ +
+
+ +**input_images:** `typing.Optional[typing.Sequence[str]]` + +
+
+ +
+
+ +**input_documents:** `typing.Optional[typing.Sequence[str]]` + +
+
+ +
+
+ +**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images. + +
+
+ +
+
+ +**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]` + +
+
+ +
+
+ +**bot_script:** `typing.Optional[str]` + +
+
+ +
+
+ +**selected_model:** `typing.Optional[VideoBotsPageRequestSelectedModel]` + +
+
+ +
+
+ +**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) + +
+
+ +
+
+ +**task_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**query_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**keyword_instructions:** `typing.Optional[str]` + +
+
+ +
+
+ +**documents:** `typing.Optional[typing.Sequence[str]]` + +
+
+ +
+
+ +**max_references:** `typing.Optional[int]` + +
+
+ +
+
+ +**max_context_words:** `typing.Optional[int]` + +
+
+ +
+
+ +**scroll_jump:** `typing.Optional[int]` + +
+
+ +
+
+ +**embedding_model:** `typing.Optional[VideoBotsPageRequestEmbeddingModel]` + +
+
+ +
+
+ +**dense_weight:** `typing.Optional[float]` + + +Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. +Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + +
+
+ +
+
+ +**citation_style:** `typing.Optional[VideoBotsPageRequestCitationStyle]` + +
+
+ +
+
+ +**use_url_shortener:** `typing.Optional[bool]` + +
+
+ +
+
+ +**asr_model:** `typing.Optional[VideoBotsPageRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text. + +
+
+ +
+
+ +**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text. + +
+
+ +
+
+ +**translation_model:** `typing.Optional[VideoBotsPageRequestTranslationModel]` + +
+
+ +
+
+ +**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. + +
+
+ +
+
+ +**input_glossary_document:** `typing.Optional[str]` + + +Translation Glossary for User Language -> LLM Language (English) + + +
+
+ +
+
+ +**output_glossary_document:** `typing.Optional[str]` + + +Translation Glossary for LLM Language (English) -> User Language + + +
+
+ +
+
+ +**lipsync_model:** `typing.Optional[VideoBotsPageRequestLipsyncModel]` + +
+
+ +
+
+ +**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). + +
+
+ +
+
+ +**avoid_repetition:** `typing.Optional[bool]` + +
+
+ +
+
+ +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[float]` + +
+
+ +
+
+ +**max_tokens:** `typing.Optional[int]` + +
+
+ +
+
+ +**sampling_temperature:** `typing.Optional[float]` + +
+
+ +
+
+ +**response_format_type:** `typing.Optional[VideoBotsPageRequestResponseFormatType]` + +
+
+ +
+
+ +**tts_provider:** `typing.Optional[VideoBotsPageRequestTtsProvider]` + +
+
+ +
+
+ +**uberduck_voice_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**uberduck_speaking_rate:** `typing.Optional[float]` + +
+
+ +
+
+ +**google_voice_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**google_speaking_rate:** `typing.Optional[float]` + +
+
+ +
+
+ +**google_pitch:** `typing.Optional[float]` + +
+
+ +
+
+ +**bark_history_prompt:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead + +
+
+ +
+
+ +**elevenlabs_api_key:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_voice_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_model:** `typing.Optional[str]` + +
+
+ +
+
+ +**elevenlabs_stability:** `typing.Optional[float]` + +
+
+ +
+
+ +**elevenlabs_similarity_boost:** `typing.Optional[float]` + +
+
+ +
+
+ +**elevenlabs_style:** `typing.Optional[float]` + +
+
+ +
+
+ +**elevenlabs_speaker_boost:** `typing.Optional[bool]` + +
+
+ +
+
+ +**azure_voice_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**openai_voice_name:** `typing.Optional[VideoBotsPageRequestOpenaiVoiceName]` + +
+
+ +
+
+ +**openai_tts_model:** `typing.Optional[VideoBotsPageRequestOpenaiTtsModel]` + +
+
+ +
+
+ +**input_face:** `typing.Optional[str]` + +
+
+ +
+
+ +**face_padding_top:** `typing.Optional[int]` + +
+
+ +
+
+ +**face_padding_bottom:** `typing.Optional[int]` + +
+
+ +
+
+ +**face_padding_left:** `typing.Optional[int]` + +
+
+ +
+
+ +**face_padding_right:** `typing.Optional[int]` +
-#### ⚙️ Parameters -
+**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` + +
+
+
-**run_id:** `str` +**settings:** `typing.Optional[RunSettings]`
@@ -9773,8 +12552,8 @@ client.generate_product_photo_backgrounds.status_object_inpainting(
-## AiImageWithAFace -
client.ai_image_with_a_face.status_face_inpainting(...) +## Evaluator +
client.evaluator.async_bulk_eval(...)
@@ -9792,8 +12571,8 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.ai_image_with_a_face.status_face_inpainting( - run_id="run_id", +client.evaluator.async_bulk_eval( + documents=["documents"], ) ``` @@ -9810,7 +12589,13 @@ client.ai_image_with_a_face.status_face_inpainting(
-**run_id:** `str` +**documents:** `typing.Sequence[str]` + + +Upload or link to a CSV or google sheet that contains your sample input data. +For example, for Copilot, this would be sample questions or for Art QR Code, would be pairs of image descriptions and URLs. +Remember to include header names in your CSV too. +
@@ -9818,56 +12603,48 @@ client.ai_image_with_a_face.status_face_inpainting(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**example_id:** `typing.Optional[str]`
-
-
+
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +
-
-## AiGeneratedPhotoFromEmailProfileLookup -
client.ai_generated_photo_from_email_profile_lookup.status_email_face_inpainting(...)
-#### 🔌 Usage - -
-
+**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
-```python -from gooey import Gooey +**eval_prompts:** `typing.Optional[typing.Sequence[EvalPrompt]]` -client = Gooey( - api_key="YOUR_API_KEY", -) -client.ai_generated_photo_from_email_profile_lookup.status_email_face_inpainting( - run_id="run_id", -) -``` -
-
+Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. +_The `columns` dictionary can be used to reference the spreadsheet columns._ + +
-#### ⚙️ Parameters -
-
-
+**agg_functions:** `typing.Optional[typing.Sequence[AggFunction]]` -**run_id:** `str` + +Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). +
@@ -9875,56 +12652,63 @@ client.ai_generated_photo_from_email_profile_lookup.status_email_face_inpainting
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**selected_model:** `typing.Optional[BulkEvalPageRequestSelectedModel]`
-
-
+
+
+**avoid_repetition:** `typing.Optional[bool]` +
-
-## RenderImageSearchResultsWithAi -
client.render_image_search_results_with_ai.status_google_image_gen(...)
-#### 🔌 Usage +**num_outputs:** `typing.Optional[int]` + +
+
+**quality:** `typing.Optional[float]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.render_image_search_results_with_ai.status_google_image_gen( - run_id="run_id", -) - -``` +**max_tokens:** `typing.Optional[int]` +
+ +
+
+ +**sampling_temperature:** `typing.Optional[float]` +
-#### ⚙️ Parameters -
+**response_format_type:** `typing.Optional[BulkEvalPageRequestResponseFormatType]` + +
+
+
-**run_id:** `str` +**settings:** `typing.Optional[RunSettings]`
@@ -9944,8 +12728,8 @@ client.render_image_search_results_with_ai.status_google_image_gen(
-## AiBackgroundChanger -
client.ai_background_changer.status_image_segmentation(...) +## SmartGpt +
client.smart_gpt.async_smart_gpt(...)
@@ -9963,8 +12747,8 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.ai_background_changer.status_image_segmentation( - run_id="run_id", +client.smart_gpt.async_smart_gpt( + input_prompt="input_prompt", ) ``` @@ -9981,7 +12765,7 @@ client.ai_background_changer.status_image_segmentation(
-**run_id:** `str` +**input_prompt:** `str`
@@ -9989,56 +12773,55 @@ client.ai_background_changer.status_image_segmentation(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**example_id:** `typing.Optional[str]`
-
-
+
+
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +
-
-## CompareAiImageUpscalers -
client.compare_ai_image_upscalers.status_compare_ai_upscalers(...)
-#### 🔌 Usage +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+**cot_prompt:** `typing.Optional[str]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.compare_ai_image_upscalers.status_compare_ai_upscalers( - run_id="run_id", -) - -``` -
-
+**reflexion_prompt:** `typing.Optional[str]` + -#### ⚙️ Parameters -
+**dera_prompt:** `typing.Optional[str]` + +
+
+
-**run_id:** `str` +**selected_model:** `typing.Optional[SmartGptPageRequestSelectedModel]`
@@ -10046,56 +12829,55 @@ client.compare_ai_image_upscalers.status_compare_ai_upscalers(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**avoid_repetition:** `typing.Optional[bool]`
- - +
+
+**num_outputs:** `typing.Optional[int]` +
-
-## ChyronPlantBot -
client.chyron_plant_bot.status_chyron_plant(...)
-#### 🔌 Usage +**quality:** `typing.Optional[float]` + +
+
+**max_tokens:** `typing.Optional[int]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.chyron_plant_bot.status_chyron_plant( - run_id="run_id", -) - -``` -
-
+**sampling_temperature:** `typing.Optional[float]` + -#### ⚙️ Parameters -
+**response_format_type:** `typing.Optional[SmartGptPageRequestResponseFormatType]` + +
+
+
-**run_id:** `str` +**settings:** `typing.Optional[RunSettings]`
@@ -10115,8 +12897,8 @@ client.chyron_plant_bot.status_chyron_plant(
-## LetterWriter -
client.letter_writer.status_letter_writer(...) +## Functions +
client.functions.async_functions(...)
@@ -10134,9 +12916,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.letter_writer.status_letter_writer( - run_id="run_id", -) +client.functions.async_functions() ```
@@ -10152,7 +12932,31 @@ client.letter_writer.status_letter_writer(
-**run_id:** `str` +**example_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**code:** `typing.Optional[str]` — The JS code to be executed. + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used in the code + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]`
@@ -10172,8 +12976,8 @@ client.letter_writer.status_letter_writer(
-## Embeddings -
client.embeddings.status_embeddings(...) +## LipSyncing +
client.lip_syncing.async_lipsync(...)
@@ -10191,9 +12995,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.embeddings.status_embeddings( - run_id="run_id", -) +client.lip_syncing.async_lipsync() ```
@@ -10209,7 +13011,7 @@ client.embeddings.status_embeddings(
-**run_id:** `str` +**example_id:** `typing.Optional[str]`
@@ -10217,56 +13019,87 @@ client.embeddings.status_embeddings(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +
+
+
+**input_face:** `typing.Optional[str]` +
-
-## PeopleAlsoAskAnswersFromADoc -
client.people_also_ask_answers_from_a_doc.status_related_qna_maker_doc(...)
-#### 🔌 Usage +**face_padding_top:** `typing.Optional[int]` + +
+
+**face_padding_bottom:** `typing.Optional[int]` + +
+
+
-```python -from gooey import Gooey +**face_padding_left:** `typing.Optional[int]` + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.people_also_ask_answers_from_a_doc.status_related_qna_maker_doc( - run_id="run_id", -) +
+
-``` +**face_padding_right:** `typing.Optional[int]` +
+ +
+
+ +**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` +
-#### ⚙️ Parameters +
+
+ +**selected_model:** `typing.Optional[LipsyncPageRequestSelectedModel]` + +
+
+**input_audio:** `typing.Optional[str]` + +
+
+
-**run_id:** `str` +**settings:** `typing.Optional[RunSettings]`
@@ -10409,7 +13242,7 @@ client.misc.video_bots_broadcast(
-**documents:** `typing.Optional[typing.List[str]]` — Video URL to send to all users +**documents:** `typing.Optional[typing.Sequence[str]]` — Video URL to send to all users
@@ -10417,7 +13250,7 @@ client.misc.video_bots_broadcast(
-**buttons:** `typing.Optional[typing.List[ReplyButton]]` — Buttons to send to all users +**buttons:** `typing.Optional[typing.Sequence[ReplyButton]]` — Buttons to send to all users
diff --git a/src/gooey/__init__.py b/src/gooey/__init__.py index 3dd8833..7eb63d2 100644 --- a/src/gooey/__init__.py +++ b/src/gooey/__init__.py @@ -5,12 +5,14 @@ AggFunctionFunction, AggFunctionResult, AggFunctionResultFunction, - AnimateRequestSelectedModel, AnimationPrompt, AsrChunk, AsrOutputJson, AsrPageOutput, AsrPageOutputOutputTextItem, + AsrPageRequestOutputFormat, + AsrPageRequestSelectedModel, + AsrPageRequestTranslationModel, AsrPageStatusResponse, AsyncApiResponseModelV3, BalanceResponse, @@ -28,10 +30,15 @@ ChyronPlantPageRequest, ChyronPlantPageStatusResponse, CompareLlmPageOutput, + CompareLlmPageRequestResponseFormatType, + CompareLlmPageRequestSelectedModelsItem, CompareLlmPageStatusResponse, CompareText2ImgPageOutput, + CompareText2ImgPageRequestScheduler, + CompareText2ImgPageRequestSelectedModelsItem, CompareText2ImgPageStatusResponse, CompareUpscalerPageOutput, + CompareUpscalerPageRequestSelectedModelsItem, CompareUpscalerPageStatusResponse, ConsoleLogs, ConsoleLogsLevel, @@ -44,43 +51,58 @@ ConversationStart, CreateStreamResponse, DeforumSdPageOutput, + DeforumSdPageRequestSelectedModel, DeforumSdPageStatusResponse, DocExtractPageOutput, + DocExtractPageRequestResponseFormatType, + DocExtractPageRequestSelectedAsrModel, + DocExtractPageRequestSelectedModel, DocExtractPageStatusResponse, DocSearchPageOutput, + DocSearchPageRequestCitationStyle, + DocSearchPageRequestEmbeddingModel, + DocSearchPageRequestKeywordQuery, + DocSearchPageRequestResponseFormatType, + DocSearchPageRequestSelectedModel, DocSearchPageStatusResponse, DocSummaryPageOutput, + DocSummaryPageRequestResponseFormatType, + DocSummaryPageRequestSelectedAsrModel, + DocSummaryPageRequestSelectedModel, DocSummaryPageStatusResponse, - DocSummaryRequestResponseFormatType, - DocSummaryRequestSelectedAsrModel, - DocSummaryRequestSelectedModel, EmailFaceInpaintingPageOutput, + EmailFaceInpaintingPageRequestSelectedModel, EmailFaceInpaintingPageStatusResponse, - 
EmbedRequestSelectedModel, EmbeddingsPageOutput, + EmbeddingsPageRequestSelectedModel, EmbeddingsPageStatusResponse, EvalPrompt, FaceInpaintingPageOutput, + FaceInpaintingPageRequestSelectedModel, FaceInpaintingPageStatusResponse, - FailedReponseModelV2, - FailedResponseDetail, FinalResponse, FunctionsPageOutput, FunctionsPageStatusResponse, GenericErrorResponse, GenericErrorResponseDetail, GoogleGptPageOutput, + GoogleGptPageRequestEmbeddingModel, + GoogleGptPageRequestResponseFormatType, + GoogleGptPageRequestSelectedModel, GoogleGptPageStatusResponse, GoogleImageGenPageOutput, + GoogleImageGenPageRequestSelectedModel, GoogleImageGenPageStatusResponse, HttpValidationError, - ImageFromEmailRequestSelectedModel, - ImageFromWebSearchRequestSelectedModel, ImageSegmentationPageOutput, + ImageSegmentationPageRequestSelectedModel, ImageSegmentationPageStatusResponse, ImageUrl, ImageUrlDetail, Img2ImgPageOutput, + Img2ImgPageRequestSelectedControlnetModel, + Img2ImgPageRequestSelectedControlnetModelItem, + Img2ImgPageRequestSelectedModel, Img2ImgPageStatusResponse, LetterWriterPageOutput, LetterWriterPageRequest, @@ -88,47 +110,112 @@ LipsyncPageOutput, LipsyncPageStatusResponse, LipsyncTtsPageOutput, + LipsyncTtsPageRequestOpenaiTtsModel, + LipsyncTtsPageRequestOpenaiVoiceName, + LipsyncTtsPageRequestSelectedModel, + LipsyncTtsPageRequestTtsProvider, LipsyncTtsPageStatusResponse, - LipsyncTtsRequestOpenaiTtsModel, - LipsyncTtsRequestOpenaiVoiceName, - LipsyncTtsRequestSelectedModel, - LipsyncTtsRequestTtsProvider, - LlmRequestResponseFormatType, - LlmRequestSelectedModelsItem, LlmTools, MessagePart, ObjectInpaintingPageOutput, + ObjectInpaintingPageRequestSelectedModel, ObjectInpaintingPageStatusResponse, - PersonalizeEmailRequestResponseFormatType, - PersonalizeEmailRequestSelectedModel, - PortraitRequestSelectedModel, - ProductImageRequestSelectedModel, + PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem, + PostV3ArtQrCodeAsyncFormRequestScheduler, + 
PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem, + PostV3ArtQrCodeAsyncFormRequestSelectedModel, + PostV3AsrAsyncFormRequestOutputFormat, + PostV3AsrAsyncFormRequestSelectedModel, + PostV3AsrAsyncFormRequestTranslationModel, + PostV3BulkEvalAsyncFormRequestResponseFormatType, + PostV3BulkEvalAsyncFormRequestSelectedModel, + PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem, + PostV3CompareLlmAsyncFormRequestResponseFormatType, + PostV3CompareLlmAsyncFormRequestSelectedModelsItem, + PostV3CompareText2ImgAsyncFormRequestScheduler, + PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem, + PostV3DeforumSdAsyncFormRequestSelectedModel, + PostV3DocExtractAsyncFormRequestResponseFormatType, + PostV3DocExtractAsyncFormRequestSelectedAsrModel, + PostV3DocExtractAsyncFormRequestSelectedModel, + PostV3DocSearchAsyncFormRequestCitationStyle, + PostV3DocSearchAsyncFormRequestEmbeddingModel, + PostV3DocSearchAsyncFormRequestKeywordQuery, + PostV3DocSearchAsyncFormRequestResponseFormatType, + PostV3DocSearchAsyncFormRequestSelectedModel, + PostV3DocSummaryAsyncFormRequestResponseFormatType, + PostV3DocSummaryAsyncFormRequestSelectedAsrModel, + PostV3DocSummaryAsyncFormRequestSelectedModel, + PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel, + PostV3EmbeddingsAsyncFormRequestSelectedModel, + PostV3FaceInpaintingAsyncFormRequestSelectedModel, + PostV3GoogleGptAsyncFormRequestEmbeddingModel, + PostV3GoogleGptAsyncFormRequestResponseFormatType, + PostV3GoogleGptAsyncFormRequestSelectedModel, + PostV3GoogleImageGenAsyncFormRequestSelectedModel, + PostV3ImageSegmentationAsyncFormRequestSelectedModel, + PostV3Img2ImgAsyncFormRequestSelectedControlnetModel, + PostV3Img2ImgAsyncFormRequestSelectedControlnetModelItem, + PostV3Img2ImgAsyncFormRequestSelectedModel, + PostV3LipsyncAsyncFormRequestSelectedModel, + PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel, + PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName, + PostV3LipsyncTtsAsyncFormRequestSelectedModel, + 
PostV3LipsyncTtsAsyncFormRequestTtsProvider, + PostV3ObjectInpaintingAsyncFormRequestSelectedModel, + PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel, + PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType, + PostV3RelatedQnaMakerAsyncFormRequestSelectedModel, + PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle, + PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel, + PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery, + PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType, + PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel, + PostV3SeoSummaryAsyncFormRequestResponseFormatType, + PostV3SeoSummaryAsyncFormRequestSelectedModel, + PostV3SmartGptAsyncFormRequestResponseFormatType, + PostV3SmartGptAsyncFormRequestSelectedModel, + PostV3SocialLookupEmailAsyncFormRequestResponseFormatType, + PostV3SocialLookupEmailAsyncFormRequestSelectedModel, + PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel, + PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName, + PostV3TextToSpeechAsyncFormRequestTtsProvider, + PostV3TranslateAsyncFormRequestSelectedModel, + PostV3VideoBotsAsyncFormRequestAsrModel, + PostV3VideoBotsAsyncFormRequestCitationStyle, + PostV3VideoBotsAsyncFormRequestEmbeddingModel, + PostV3VideoBotsAsyncFormRequestLipsyncModel, + PostV3VideoBotsAsyncFormRequestOpenaiTtsModel, + PostV3VideoBotsAsyncFormRequestOpenaiVoiceName, + PostV3VideoBotsAsyncFormRequestResponseFormatType, + PostV3VideoBotsAsyncFormRequestSelectedModel, + PostV3VideoBotsAsyncFormRequestTranslationModel, + PostV3VideoBotsAsyncFormRequestTtsProvider, PromptTreeNode, PromptTreeNodePrompt, QrCodeGeneratorPageOutput, + QrCodeGeneratorPageRequestImagePromptControlnetModelsItem, + QrCodeGeneratorPageRequestScheduler, + QrCodeGeneratorPageRequestSelectedControlnetModelItem, + QrCodeGeneratorPageRequestSelectedModel, QrCodeGeneratorPageStatusResponse, - QrCodeRequestImagePromptControlnetModelsItem, - QrCodeRequestScheduler, - QrCodeRequestSelectedControlnetModelItem, - 
QrCodeRequestSelectedModel, - RagRequestCitationStyle, - RagRequestEmbeddingModel, - RagRequestKeywordQuery, - RagRequestResponseFormatType, - RagRequestSelectedModel, RecipeFunction, RecipeFunctionTrigger, RecipeRunState, RelatedDocSearchResponse, RelatedGoogleGptResponse, RelatedQnADocPageOutput, + RelatedQnADocPageRequestCitationStyle, + RelatedQnADocPageRequestEmbeddingModel, + RelatedQnADocPageRequestKeywordQuery, + RelatedQnADocPageRequestResponseFormatType, + RelatedQnADocPageRequestSelectedModel, RelatedQnADocPageStatusResponse, RelatedQnAPageOutput, + RelatedQnAPageRequestEmbeddingModel, + RelatedQnAPageRequestResponseFormatType, + RelatedQnAPageRequestSelectedModel, RelatedQnAPageStatusResponse, - RemixImageRequestSelectedControlnetModel, - RemixImageRequestSelectedControlnetModelItem, - RemixImageRequestSelectedModel, - RemoveBackgroundRequestSelectedModel, ReplyButton, ResponseModel, ResponseModelFinalKeywordQuery, @@ -139,45 +226,30 @@ SadTalkerSettings, SadTalkerSettingsPreprocess, SearchReference, - SeoContentRequestResponseFormatType, - SeoContentRequestSelectedModel, - SeoPeopleAlsoAskDocRequestCitationStyle, - SeoPeopleAlsoAskDocRequestEmbeddingModel, - SeoPeopleAlsoAskDocRequestKeywordQuery, - SeoPeopleAlsoAskDocRequestResponseFormatType, - SeoPeopleAlsoAskDocRequestSelectedModel, - SeoPeopleAlsoAskRequestEmbeddingModel, - SeoPeopleAlsoAskRequestResponseFormatType, - SeoPeopleAlsoAskRequestSelectedModel, SeoSummaryPageOutput, + SeoSummaryPageRequestResponseFormatType, + SeoSummaryPageRequestSelectedModel, SeoSummaryPageStatusResponse, SerpSearchLocation, SerpSearchType, SmartGptPageOutput, SmartGptPageStatusResponse, SocialLookupEmailPageOutput, + SocialLookupEmailPageRequestResponseFormatType, + SocialLookupEmailPageRequestSelectedModel, SocialLookupEmailPageStatusResponse, - SpeechRecognitionRequestOutputFormat, - SpeechRecognitionRequestSelectedModel, - SpeechRecognitionRequestTranslationModel, StreamError, - 
SynthesizeDataRequestResponseFormatType, - SynthesizeDataRequestSelectedAsrModel, - SynthesizeDataRequestSelectedModel, Text2AudioPageOutput, Text2AudioPageStatusResponse, - TextToImageRequestScheduler, - TextToImageRequestSelectedModelsItem, TextToSpeechPageOutput, + TextToSpeechPageRequestOpenaiTtsModel, + TextToSpeechPageRequestOpenaiVoiceName, + TextToSpeechPageRequestTtsProvider, TextToSpeechPageStatusResponse, - TextToSpeechRequestOpenaiTtsModel, - TextToSpeechRequestOpenaiVoiceName, - TextToSpeechRequestTtsProvider, TrainingDataModel, - TranslateRequestSelectedModel, TranslationPageOutput, + TranslationPageRequestSelectedModel, TranslationPageStatusResponse, - UpscaleRequestSelectedModelsItem, ValidationError, ValidationErrorLocItem, Vcard, @@ -185,84 +257,39 @@ VideoBotsPageOutputFinalKeywordQuery, VideoBotsPageOutputFinalPrompt, VideoBotsPageStatusResponse, - WebSearchLlmRequestEmbeddingModel, - WebSearchLlmRequestResponseFormatType, - WebSearchLlmRequestSelectedModel, -) -from .errors import ( - BadRequestError, - InternalServerError, - PaymentRequiredError, - TooManyRequestsError, - UnprocessableEntityError, -) -from . 
import ( - ai_animation_generator, - ai_art_qr_code, - ai_background_changer, - ai_generated_photo_from_email_profile_lookup, - ai_image_with_a_face, - bulk_runner, - chyron_plant_bot, - compare_ai_image_generators, - compare_ai_image_upscalers, - compare_ai_translations, - compare_ai_voice_generators, - copilot_for_your_enterprise, - copilot_integrations, - create_a_perfect_seo_optimized_title_paragraph, - edit_an_image_with_ai_prompt, - embeddings, - evaluator, - functions, - generate_people_also_ask_seo_content, - generate_product_photo_backgrounds, - large_language_models_gpt3, - letter_writer, - lip_syncing, - lipsync_video_with_any_text, - misc, - people_also_ask_answers_from_a_doc, - profile_lookup_gpt3for_ai_personalized_emails, - render_image_search_results_with_ai, - search_your_docs_with_gpt, - smart_gpt, - speech_recognition_translation, - summarize_your_docs_with_gpt, - synthetic_data_maker_for_videos_pd_fs, - text_guided_audio_generator, - web_search_gpt3, ) +from .errors import PaymentRequiredError, TooManyRequestsError, UnprocessableEntityError +from . 
import copilot_for_your_enterprise, copilot_integrations, evaluator, functions, lip_syncing, misc, smart_gpt from .client import AsyncGooey, Gooey from .copilot_for_your_enterprise import ( - AsyncFormVideoBotsRequestAsrModel, - AsyncFormVideoBotsRequestCitationStyle, - AsyncFormVideoBotsRequestEmbeddingModel, - AsyncFormVideoBotsRequestLipsyncModel, - AsyncFormVideoBotsRequestOpenaiTtsModel, - AsyncFormVideoBotsRequestOpenaiVoiceName, - AsyncFormVideoBotsRequestResponseFormatType, - AsyncFormVideoBotsRequestSelectedModel, - AsyncFormVideoBotsRequestTranslationModel, - AsyncFormVideoBotsRequestTtsProvider, + VideoBotsPageRequestAsrModel, + VideoBotsPageRequestCitationStyle, + VideoBotsPageRequestEmbeddingModel, + VideoBotsPageRequestLipsyncModel, + VideoBotsPageRequestOpenaiTtsModel, + VideoBotsPageRequestOpenaiVoiceName, + VideoBotsPageRequestResponseFormatType, + VideoBotsPageRequestSelectedModel, + VideoBotsPageRequestTranslationModel, + VideoBotsPageRequestTtsProvider, ) from .copilot_integrations import ( - VideoBotsStreamCreateRequestAsrModel, - VideoBotsStreamCreateRequestCitationStyle, - VideoBotsStreamCreateRequestEmbeddingModel, - VideoBotsStreamCreateRequestLipsyncModel, - VideoBotsStreamCreateRequestOpenaiTtsModel, - VideoBotsStreamCreateRequestOpenaiVoiceName, - VideoBotsStreamCreateRequestResponseFormatType, - VideoBotsStreamCreateRequestSelectedModel, - VideoBotsStreamCreateRequestTranslationModel, - VideoBotsStreamCreateRequestTtsProvider, + CreateStreamRequestAsrModel, + CreateStreamRequestCitationStyle, + CreateStreamRequestEmbeddingModel, + CreateStreamRequestLipsyncModel, + CreateStreamRequestOpenaiTtsModel, + CreateStreamRequestOpenaiVoiceName, + CreateStreamRequestResponseFormatType, + CreateStreamRequestSelectedModel, + CreateStreamRequestTranslationModel, + CreateStreamRequestTtsProvider, VideoBotsStreamResponse, ) from .environment import GooeyEnvironment -from .evaluator import AsyncFormBulkEvalRequestResponseFormatType, 
AsyncFormBulkEvalRequestSelectedModel -from .lip_syncing import AsyncFormLipsyncRequestSelectedModel -from .smart_gpt import AsyncFormSmartGptRequestResponseFormatType, AsyncFormSmartGptRequestSelectedModel +from .evaluator import BulkEvalPageRequestResponseFormatType, BulkEvalPageRequestSelectedModel +from .lip_syncing import LipsyncPageRequestSelectedModel +from .smart_gpt import SmartGptPageRequestResponseFormatType, SmartGptPageRequestSelectedModel from .version import __version__ __all__ = [ @@ -270,34 +297,22 @@ "AggFunctionFunction", "AggFunctionResult", "AggFunctionResultFunction", - "AnimateRequestSelectedModel", "AnimationPrompt", "AsrChunk", "AsrOutputJson", "AsrPageOutput", "AsrPageOutputOutputTextItem", + "AsrPageRequestOutputFormat", + "AsrPageRequestSelectedModel", + "AsrPageRequestTranslationModel", "AsrPageStatusResponse", "AsyncApiResponseModelV3", - "AsyncFormBulkEvalRequestResponseFormatType", - "AsyncFormBulkEvalRequestSelectedModel", - "AsyncFormLipsyncRequestSelectedModel", - "AsyncFormSmartGptRequestResponseFormatType", - "AsyncFormSmartGptRequestSelectedModel", - "AsyncFormVideoBotsRequestAsrModel", - "AsyncFormVideoBotsRequestCitationStyle", - "AsyncFormVideoBotsRequestEmbeddingModel", - "AsyncFormVideoBotsRequestLipsyncModel", - "AsyncFormVideoBotsRequestOpenaiTtsModel", - "AsyncFormVideoBotsRequestOpenaiVoiceName", - "AsyncFormVideoBotsRequestResponseFormatType", - "AsyncFormVideoBotsRequestSelectedModel", - "AsyncFormVideoBotsRequestTranslationModel", - "AsyncFormVideoBotsRequestTtsProvider", "AsyncGooey", - "BadRequestError", "BalanceResponse", "BotBroadcastFilters", "BulkEvalPageOutput", + "BulkEvalPageRequestResponseFormatType", + "BulkEvalPageRequestSelectedModel", "BulkEvalPageStatusResponse", "BulkRunnerPageOutput", "BulkRunnerPageStatusResponse", @@ -310,10 +325,15 @@ "ChyronPlantPageRequest", "ChyronPlantPageStatusResponse", "CompareLlmPageOutput", + "CompareLlmPageRequestResponseFormatType", + 
"CompareLlmPageRequestSelectedModelsItem", "CompareLlmPageStatusResponse", "CompareText2ImgPageOutput", + "CompareText2ImgPageRequestScheduler", + "CompareText2ImgPageRequestSelectedModelsItem", "CompareText2ImgPageStatusResponse", "CompareUpscalerPageOutput", + "CompareUpscalerPageRequestSelectedModelsItem", "CompareUpscalerPageStatusResponse", "ConsoleLogs", "ConsoleLogsLevel", @@ -324,28 +344,47 @@ "ConversationEntryContentItem_Text", "ConversationEntryRole", "ConversationStart", + "CreateStreamRequestAsrModel", + "CreateStreamRequestCitationStyle", + "CreateStreamRequestEmbeddingModel", + "CreateStreamRequestLipsyncModel", + "CreateStreamRequestOpenaiTtsModel", + "CreateStreamRequestOpenaiVoiceName", + "CreateStreamRequestResponseFormatType", + "CreateStreamRequestSelectedModel", + "CreateStreamRequestTranslationModel", + "CreateStreamRequestTtsProvider", "CreateStreamResponse", "DeforumSdPageOutput", + "DeforumSdPageRequestSelectedModel", "DeforumSdPageStatusResponse", "DocExtractPageOutput", + "DocExtractPageRequestResponseFormatType", + "DocExtractPageRequestSelectedAsrModel", + "DocExtractPageRequestSelectedModel", "DocExtractPageStatusResponse", "DocSearchPageOutput", + "DocSearchPageRequestCitationStyle", + "DocSearchPageRequestEmbeddingModel", + "DocSearchPageRequestKeywordQuery", + "DocSearchPageRequestResponseFormatType", + "DocSearchPageRequestSelectedModel", "DocSearchPageStatusResponse", "DocSummaryPageOutput", + "DocSummaryPageRequestResponseFormatType", + "DocSummaryPageRequestSelectedAsrModel", + "DocSummaryPageRequestSelectedModel", "DocSummaryPageStatusResponse", - "DocSummaryRequestResponseFormatType", - "DocSummaryRequestSelectedAsrModel", - "DocSummaryRequestSelectedModel", "EmailFaceInpaintingPageOutput", + "EmailFaceInpaintingPageRequestSelectedModel", "EmailFaceInpaintingPageStatusResponse", - "EmbedRequestSelectedModel", "EmbeddingsPageOutput", + "EmbeddingsPageRequestSelectedModel", "EmbeddingsPageStatusResponse", "EvalPrompt", 
"FaceInpaintingPageOutput", + "FaceInpaintingPageRequestSelectedModel", "FaceInpaintingPageStatusResponse", - "FailedReponseModelV2", - "FailedResponseDetail", "FinalResponse", "FunctionsPageOutput", "FunctionsPageStatusResponse", @@ -354,67 +393,138 @@ "Gooey", "GooeyEnvironment", "GoogleGptPageOutput", + "GoogleGptPageRequestEmbeddingModel", + "GoogleGptPageRequestResponseFormatType", + "GoogleGptPageRequestSelectedModel", "GoogleGptPageStatusResponse", "GoogleImageGenPageOutput", + "GoogleImageGenPageRequestSelectedModel", "GoogleImageGenPageStatusResponse", "HttpValidationError", - "ImageFromEmailRequestSelectedModel", - "ImageFromWebSearchRequestSelectedModel", "ImageSegmentationPageOutput", + "ImageSegmentationPageRequestSelectedModel", "ImageSegmentationPageStatusResponse", "ImageUrl", "ImageUrlDetail", "Img2ImgPageOutput", + "Img2ImgPageRequestSelectedControlnetModel", + "Img2ImgPageRequestSelectedControlnetModelItem", + "Img2ImgPageRequestSelectedModel", "Img2ImgPageStatusResponse", - "InternalServerError", "LetterWriterPageOutput", "LetterWriterPageRequest", "LetterWriterPageStatusResponse", "LipsyncPageOutput", + "LipsyncPageRequestSelectedModel", "LipsyncPageStatusResponse", "LipsyncTtsPageOutput", + "LipsyncTtsPageRequestOpenaiTtsModel", + "LipsyncTtsPageRequestOpenaiVoiceName", + "LipsyncTtsPageRequestSelectedModel", + "LipsyncTtsPageRequestTtsProvider", "LipsyncTtsPageStatusResponse", - "LipsyncTtsRequestOpenaiTtsModel", - "LipsyncTtsRequestOpenaiVoiceName", - "LipsyncTtsRequestSelectedModel", - "LipsyncTtsRequestTtsProvider", - "LlmRequestResponseFormatType", - "LlmRequestSelectedModelsItem", "LlmTools", "MessagePart", "ObjectInpaintingPageOutput", + "ObjectInpaintingPageRequestSelectedModel", "ObjectInpaintingPageStatusResponse", "PaymentRequiredError", - "PersonalizeEmailRequestResponseFormatType", - "PersonalizeEmailRequestSelectedModel", - "PortraitRequestSelectedModel", - "ProductImageRequestSelectedModel", + 
"PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem", + "PostV3ArtQrCodeAsyncFormRequestScheduler", + "PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem", + "PostV3ArtQrCodeAsyncFormRequestSelectedModel", + "PostV3AsrAsyncFormRequestOutputFormat", + "PostV3AsrAsyncFormRequestSelectedModel", + "PostV3AsrAsyncFormRequestTranslationModel", + "PostV3BulkEvalAsyncFormRequestResponseFormatType", + "PostV3BulkEvalAsyncFormRequestSelectedModel", + "PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem", + "PostV3CompareLlmAsyncFormRequestResponseFormatType", + "PostV3CompareLlmAsyncFormRequestSelectedModelsItem", + "PostV3CompareText2ImgAsyncFormRequestScheduler", + "PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem", + "PostV3DeforumSdAsyncFormRequestSelectedModel", + "PostV3DocExtractAsyncFormRequestResponseFormatType", + "PostV3DocExtractAsyncFormRequestSelectedAsrModel", + "PostV3DocExtractAsyncFormRequestSelectedModel", + "PostV3DocSearchAsyncFormRequestCitationStyle", + "PostV3DocSearchAsyncFormRequestEmbeddingModel", + "PostV3DocSearchAsyncFormRequestKeywordQuery", + "PostV3DocSearchAsyncFormRequestResponseFormatType", + "PostV3DocSearchAsyncFormRequestSelectedModel", + "PostV3DocSummaryAsyncFormRequestResponseFormatType", + "PostV3DocSummaryAsyncFormRequestSelectedAsrModel", + "PostV3DocSummaryAsyncFormRequestSelectedModel", + "PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel", + "PostV3EmbeddingsAsyncFormRequestSelectedModel", + "PostV3FaceInpaintingAsyncFormRequestSelectedModel", + "PostV3GoogleGptAsyncFormRequestEmbeddingModel", + "PostV3GoogleGptAsyncFormRequestResponseFormatType", + "PostV3GoogleGptAsyncFormRequestSelectedModel", + "PostV3GoogleImageGenAsyncFormRequestSelectedModel", + "PostV3ImageSegmentationAsyncFormRequestSelectedModel", + "PostV3Img2ImgAsyncFormRequestSelectedControlnetModel", + "PostV3Img2ImgAsyncFormRequestSelectedControlnetModelItem", + "PostV3Img2ImgAsyncFormRequestSelectedModel", + 
"PostV3LipsyncAsyncFormRequestSelectedModel", + "PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel", + "PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName", + "PostV3LipsyncTtsAsyncFormRequestSelectedModel", + "PostV3LipsyncTtsAsyncFormRequestTtsProvider", + "PostV3ObjectInpaintingAsyncFormRequestSelectedModel", + "PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel", + "PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType", + "PostV3RelatedQnaMakerAsyncFormRequestSelectedModel", + "PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle", + "PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel", + "PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery", + "PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType", + "PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel", + "PostV3SeoSummaryAsyncFormRequestResponseFormatType", + "PostV3SeoSummaryAsyncFormRequestSelectedModel", + "PostV3SmartGptAsyncFormRequestResponseFormatType", + "PostV3SmartGptAsyncFormRequestSelectedModel", + "PostV3SocialLookupEmailAsyncFormRequestResponseFormatType", + "PostV3SocialLookupEmailAsyncFormRequestSelectedModel", + "PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel", + "PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName", + "PostV3TextToSpeechAsyncFormRequestTtsProvider", + "PostV3TranslateAsyncFormRequestSelectedModel", + "PostV3VideoBotsAsyncFormRequestAsrModel", + "PostV3VideoBotsAsyncFormRequestCitationStyle", + "PostV3VideoBotsAsyncFormRequestEmbeddingModel", + "PostV3VideoBotsAsyncFormRequestLipsyncModel", + "PostV3VideoBotsAsyncFormRequestOpenaiTtsModel", + "PostV3VideoBotsAsyncFormRequestOpenaiVoiceName", + "PostV3VideoBotsAsyncFormRequestResponseFormatType", + "PostV3VideoBotsAsyncFormRequestSelectedModel", + "PostV3VideoBotsAsyncFormRequestTranslationModel", + "PostV3VideoBotsAsyncFormRequestTtsProvider", "PromptTreeNode", "PromptTreeNodePrompt", "QrCodeGeneratorPageOutput", + "QrCodeGeneratorPageRequestImagePromptControlnetModelsItem", + 
"QrCodeGeneratorPageRequestScheduler", + "QrCodeGeneratorPageRequestSelectedControlnetModelItem", + "QrCodeGeneratorPageRequestSelectedModel", "QrCodeGeneratorPageStatusResponse", - "QrCodeRequestImagePromptControlnetModelsItem", - "QrCodeRequestScheduler", - "QrCodeRequestSelectedControlnetModelItem", - "QrCodeRequestSelectedModel", - "RagRequestCitationStyle", - "RagRequestEmbeddingModel", - "RagRequestKeywordQuery", - "RagRequestResponseFormatType", - "RagRequestSelectedModel", "RecipeFunction", "RecipeFunctionTrigger", "RecipeRunState", "RelatedDocSearchResponse", "RelatedGoogleGptResponse", "RelatedQnADocPageOutput", + "RelatedQnADocPageRequestCitationStyle", + "RelatedQnADocPageRequestEmbeddingModel", + "RelatedQnADocPageRequestKeywordQuery", + "RelatedQnADocPageRequestResponseFormatType", + "RelatedQnADocPageRequestSelectedModel", "RelatedQnADocPageStatusResponse", "RelatedQnAPageOutput", + "RelatedQnAPageRequestEmbeddingModel", + "RelatedQnAPageRequestResponseFormatType", + "RelatedQnAPageRequestSelectedModel", "RelatedQnAPageStatusResponse", - "RemixImageRequestSelectedControlnetModel", - "RemixImageRequestSelectedControlnetModelItem", - "RemixImageRequestSelectedModel", - "RemoveBackgroundRequestSelectedModel", "ReplyButton", "ResponseModel", "ResponseModelFinalKeywordQuery", @@ -425,102 +535,58 @@ "SadTalkerSettings", "SadTalkerSettingsPreprocess", "SearchReference", - "SeoContentRequestResponseFormatType", - "SeoContentRequestSelectedModel", - "SeoPeopleAlsoAskDocRequestCitationStyle", - "SeoPeopleAlsoAskDocRequestEmbeddingModel", - "SeoPeopleAlsoAskDocRequestKeywordQuery", - "SeoPeopleAlsoAskDocRequestResponseFormatType", - "SeoPeopleAlsoAskDocRequestSelectedModel", - "SeoPeopleAlsoAskRequestEmbeddingModel", - "SeoPeopleAlsoAskRequestResponseFormatType", - "SeoPeopleAlsoAskRequestSelectedModel", "SeoSummaryPageOutput", + "SeoSummaryPageRequestResponseFormatType", + "SeoSummaryPageRequestSelectedModel", "SeoSummaryPageStatusResponse", 
"SerpSearchLocation", "SerpSearchType", "SmartGptPageOutput", + "SmartGptPageRequestResponseFormatType", + "SmartGptPageRequestSelectedModel", "SmartGptPageStatusResponse", "SocialLookupEmailPageOutput", + "SocialLookupEmailPageRequestResponseFormatType", + "SocialLookupEmailPageRequestSelectedModel", "SocialLookupEmailPageStatusResponse", - "SpeechRecognitionRequestOutputFormat", - "SpeechRecognitionRequestSelectedModel", - "SpeechRecognitionRequestTranslationModel", "StreamError", - "SynthesizeDataRequestResponseFormatType", - "SynthesizeDataRequestSelectedAsrModel", - "SynthesizeDataRequestSelectedModel", "Text2AudioPageOutput", "Text2AudioPageStatusResponse", - "TextToImageRequestScheduler", - "TextToImageRequestSelectedModelsItem", "TextToSpeechPageOutput", + "TextToSpeechPageRequestOpenaiTtsModel", + "TextToSpeechPageRequestOpenaiVoiceName", + "TextToSpeechPageRequestTtsProvider", "TextToSpeechPageStatusResponse", - "TextToSpeechRequestOpenaiTtsModel", - "TextToSpeechRequestOpenaiVoiceName", - "TextToSpeechRequestTtsProvider", "TooManyRequestsError", "TrainingDataModel", - "TranslateRequestSelectedModel", "TranslationPageOutput", + "TranslationPageRequestSelectedModel", "TranslationPageStatusResponse", "UnprocessableEntityError", - "UpscaleRequestSelectedModelsItem", "ValidationError", "ValidationErrorLocItem", "Vcard", "VideoBotsPageOutput", "VideoBotsPageOutputFinalKeywordQuery", "VideoBotsPageOutputFinalPrompt", + "VideoBotsPageRequestAsrModel", + "VideoBotsPageRequestCitationStyle", + "VideoBotsPageRequestEmbeddingModel", + "VideoBotsPageRequestLipsyncModel", + "VideoBotsPageRequestOpenaiTtsModel", + "VideoBotsPageRequestOpenaiVoiceName", + "VideoBotsPageRequestResponseFormatType", + "VideoBotsPageRequestSelectedModel", + "VideoBotsPageRequestTranslationModel", + "VideoBotsPageRequestTtsProvider", "VideoBotsPageStatusResponse", - "VideoBotsStreamCreateRequestAsrModel", - "VideoBotsStreamCreateRequestCitationStyle", - 
"VideoBotsStreamCreateRequestEmbeddingModel", - "VideoBotsStreamCreateRequestLipsyncModel", - "VideoBotsStreamCreateRequestOpenaiTtsModel", - "VideoBotsStreamCreateRequestOpenaiVoiceName", - "VideoBotsStreamCreateRequestResponseFormatType", - "VideoBotsStreamCreateRequestSelectedModel", - "VideoBotsStreamCreateRequestTranslationModel", - "VideoBotsStreamCreateRequestTtsProvider", "VideoBotsStreamResponse", - "WebSearchLlmRequestEmbeddingModel", - "WebSearchLlmRequestResponseFormatType", - "WebSearchLlmRequestSelectedModel", "__version__", - "ai_animation_generator", - "ai_art_qr_code", - "ai_background_changer", - "ai_generated_photo_from_email_profile_lookup", - "ai_image_with_a_face", - "bulk_runner", - "chyron_plant_bot", - "compare_ai_image_generators", - "compare_ai_image_upscalers", - "compare_ai_translations", - "compare_ai_voice_generators", "copilot_for_your_enterprise", "copilot_integrations", - "create_a_perfect_seo_optimized_title_paragraph", - "edit_an_image_with_ai_prompt", - "embeddings", "evaluator", "functions", - "generate_people_also_ask_seo_content", - "generate_product_photo_backgrounds", - "large_language_models_gpt3", - "letter_writer", "lip_syncing", - "lipsync_video_with_any_text", "misc", - "people_also_ask_answers_from_a_doc", - "profile_lookup_gpt3for_ai_personalized_emails", - "render_image_search_results_with_ai", - "search_your_docs_with_gpt", "smart_gpt", - "speech_recognition_translation", - "summarize_your_docs_with_gpt", - "synthetic_data_maker_for_videos_pd_fs", - "text_guided_audio_generator", - "web_search_gpt3", ] diff --git a/src/gooey/ai_animation_generator/__init__.py b/src/gooey/ai_animation_generator/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/ai_animation_generator/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/ai_animation_generator/client.py b/src/gooey/ai_animation_generator/client.py deleted file mode 100644 index b510152..0000000 --- a/src/gooey/ai_animation_generator/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.deforum_sd_page_status_response import DeforumSdPageStatusResponse -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError - - -class AiAnimationGeneratorClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_deforum_sd( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> DeforumSdPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - DeforumSdPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.ai_animation_generator.status_deforum_sd( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/DeforumSD/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DeforumSdPageStatusResponse, parse_obj_as(type_=DeforumSdPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncAiAnimationGeneratorClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_deforum_sd( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> DeforumSdPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - DeforumSdPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.ai_animation_generator.status_deforum_sd( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/DeforumSD/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DeforumSdPageStatusResponse, parse_obj_as(type_=DeforumSdPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/ai_art_qr_code/__init__.py b/src/gooey/ai_art_qr_code/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/ai_art_qr_code/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/ai_art_qr_code/client.py b/src/gooey/ai_art_qr_code/client.py deleted file mode 100644 index ca94e4e..0000000 --- a/src/gooey/ai_art_qr_code/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.qr_code_generator_page_status_response import QrCodeGeneratorPageStatusResponse - - -class AiArtQrCodeClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_art_qr_code( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> QrCodeGeneratorPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - QrCodeGeneratorPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.ai_art_qr_code.status_art_qr_code( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/art-qr-code/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(QrCodeGeneratorPageStatusResponse, parse_obj_as(type_=QrCodeGeneratorPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncAiArtQrCodeClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_art_qr_code( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> QrCodeGeneratorPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - QrCodeGeneratorPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.ai_art_qr_code.status_art_qr_code( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/art-qr-code/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(QrCodeGeneratorPageStatusResponse, parse_obj_as(type_=QrCodeGeneratorPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/ai_background_changer/__init__.py b/src/gooey/ai_background_changer/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/ai_background_changer/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/ai_background_changer/client.py b/src/gooey/ai_background_changer/client.py deleted file mode 100644 index 0c430f5..0000000 --- a/src/gooey/ai_background_changer/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.image_segmentation_page_status_response import ImageSegmentationPageStatusResponse - - -class AiBackgroundChangerClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_image_segmentation( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> ImageSegmentationPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - ImageSegmentationPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.ai_background_changer.status_image_segmentation( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/ImageSegmentation/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(ImageSegmentationPageStatusResponse, parse_obj_as(type_=ImageSegmentationPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncAiBackgroundChangerClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_image_segmentation( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> ImageSegmentationPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - ImageSegmentationPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.ai_background_changer.status_image_segmentation( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/ImageSegmentation/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(ImageSegmentationPageStatusResponse, parse_obj_as(type_=ImageSegmentationPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/ai_generated_photo_from_email_profile_lookup/__init__.py b/src/gooey/ai_generated_photo_from_email_profile_lookup/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/ai_generated_photo_from_email_profile_lookup/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/ai_generated_photo_from_email_profile_lookup/client.py b/src/gooey/ai_generated_photo_from_email_profile_lookup/client.py deleted file mode 100644 index 1b29a5a..0000000 --- a/src/gooey/ai_generated_photo_from_email_profile_lookup/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.email_face_inpainting_page_status_response import EmailFaceInpaintingPageStatusResponse -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError - - -class AiGeneratedPhotoFromEmailProfileLookupClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_email_face_inpainting( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> EmailFaceInpaintingPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - EmailFaceInpaintingPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.ai_generated_photo_from_email_profile_lookup.status_email_face_inpainting( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/EmailFaceInpainting/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(EmailFaceInpaintingPageStatusResponse, parse_obj_as(type_=EmailFaceInpaintingPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncAiGeneratedPhotoFromEmailProfileLookupClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_email_face_inpainting( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> EmailFaceInpaintingPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - EmailFaceInpaintingPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.ai_generated_photo_from_email_profile_lookup.status_email_face_inpainting( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/EmailFaceInpainting/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(EmailFaceInpaintingPageStatusResponse, parse_obj_as(type_=EmailFaceInpaintingPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/ai_image_with_a_face/__init__.py b/src/gooey/ai_image_with_a_face/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/ai_image_with_a_face/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/ai_image_with_a_face/client.py b/src/gooey/ai_image_with_a_face/client.py deleted file mode 100644 index 9866b9a..0000000 --- a/src/gooey/ai_image_with_a_face/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.face_inpainting_page_status_response import FaceInpaintingPageStatusResponse -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError - - -class AiImageWithAFaceClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_face_inpainting( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> FaceInpaintingPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - FaceInpaintingPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.ai_image_with_a_face.status_face_inpainting( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/FaceInpainting/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(FaceInpaintingPageStatusResponse, parse_obj_as(type_=FaceInpaintingPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncAiImageWithAFaceClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_face_inpainting( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> FaceInpaintingPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - FaceInpaintingPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.ai_image_with_a_face.status_face_inpainting( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/FaceInpainting/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(FaceInpaintingPageStatusResponse, parse_obj_as(type_=FaceInpaintingPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/bulk_runner/__init__.py b/src/gooey/bulk_runner/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/bulk_runner/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/bulk_runner/client.py b/src/gooey/bulk_runner/client.py deleted file mode 100644 index a1d42ae..0000000 --- a/src/gooey/bulk_runner/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.bulk_runner_page_status_response import BulkRunnerPageStatusResponse -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError - - -class BulkRunnerClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_bulk_runner( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> BulkRunnerPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - BulkRunnerPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.bulk_runner.status_bulk_runner( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/bulk-runner/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(BulkRunnerPageStatusResponse, parse_obj_as(type_=BulkRunnerPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncBulkRunnerClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_bulk_runner( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> BulkRunnerPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - BulkRunnerPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.bulk_runner.status_bulk_runner( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/bulk-runner/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(BulkRunnerPageStatusResponse, parse_obj_as(type_=BulkRunnerPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/chyron_plant_bot/__init__.py b/src/gooey/chyron_plant_bot/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/chyron_plant_bot/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/chyron_plant_bot/client.py b/src/gooey/chyron_plant_bot/client.py deleted file mode 100644 index 4ba9907..0000000 --- a/src/gooey/chyron_plant_bot/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.chyron_plant_page_status_response import ChyronPlantPageStatusResponse -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError - - -class ChyronPlantBotClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_chyron_plant( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> ChyronPlantPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - ChyronPlantPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.chyron_plant_bot.status_chyron_plant( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/ChyronPlant/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(ChyronPlantPageStatusResponse, parse_obj_as(type_=ChyronPlantPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncChyronPlantBotClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_chyron_plant( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> ChyronPlantPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - ChyronPlantPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.chyron_plant_bot.status_chyron_plant( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/ChyronPlant/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(ChyronPlantPageStatusResponse, parse_obj_as(type_=ChyronPlantPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/client.py b/src/gooey/client.py index d0bf440..3758806 100644 --- a/src/gooey/client.py +++ b/src/gooey/client.py @@ -1,174 +1,306 @@ # This file was auto-generated by Fern from our API Definition. 
-import os import typing -from json.decoder import JSONDecodeError - +from .environment import GooeyEnvironment +import os import httpx - -from .ai_animation_generator.client import AiAnimationGeneratorClient, AsyncAiAnimationGeneratorClient -from .ai_art_qr_code.client import AiArtQrCodeClient, AsyncAiArtQrCodeClient -from .ai_background_changer.client import AiBackgroundChangerClient, AsyncAiBackgroundChangerClient -from .ai_generated_photo_from_email_profile_lookup.client import ( - AiGeneratedPhotoFromEmailProfileLookupClient, - AsyncAiGeneratedPhotoFromEmailProfileLookupClient, -) -from .ai_image_with_a_face.client import AiImageWithAFaceClient, AsyncAiImageWithAFaceClient -from .bulk_runner.client import AsyncBulkRunnerClient, BulkRunnerClient -from .chyron_plant_bot.client import AsyncChyronPlantBotClient, ChyronPlantBotClient -from .compare_ai_image_generators.client import AsyncCompareAiImageGeneratorsClient, CompareAiImageGeneratorsClient -from .compare_ai_image_upscalers.client import AsyncCompareAiImageUpscalersClient, CompareAiImageUpscalersClient -from .compare_ai_translations.client import AsyncCompareAiTranslationsClient, CompareAiTranslationsClient -from .compare_ai_voice_generators.client import AsyncCompareAiVoiceGeneratorsClient, CompareAiVoiceGeneratorsClient -from .copilot_for_your_enterprise.client import AsyncCopilotForYourEnterpriseClient, CopilotForYourEnterpriseClient -from .copilot_integrations.client import AsyncCopilotIntegrationsClient, CopilotIntegrationsClient from .core.api_error import ApiError -from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from .core.pydantic_utilities import parse_obj_as +from .core.client_wrapper import SyncClientWrapper +from .copilot_integrations.client import CopilotIntegrationsClient +from .copilot_for_your_enterprise.client import CopilotForYourEnterpriseClient +from .evaluator.client import EvaluatorClient +from .smart_gpt.client import SmartGptClient +from .functions.client 
import FunctionsClient +from .lip_syncing.client import LipSyncingClient +from .misc.client import MiscClient +from .types.animation_prompt import AnimationPrompt +from .types.recipe_function import RecipeFunction +from .types.deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel +from .types.run_settings import RunSettings from .core.request_options import RequestOptions -from .create_a_perfect_seo_optimized_title_paragraph.client import ( - AsyncCreateAPerfectSeoOptimizedTitleParagraphClient, - CreateAPerfectSeoOptimizedTitleParagraphClient, -) -from .edit_an_image_with_ai_prompt.client import AsyncEditAnImageWithAiPromptClient, EditAnImageWithAiPromptClient -from .embeddings.client import AsyncEmbeddingsClient, EmbeddingsClient -from .environment import GooeyEnvironment -from .errors.bad_request_error import BadRequestError -from .errors.internal_server_error import InternalServerError +from .types.deforum_sd_page_output import DeforumSdPageOutput +from .types.deforum_sd_page_status_response import DeforumSdPageStatusResponse +from .core.pydantic_utilities import parse_obj_as from .errors.payment_required_error import PaymentRequiredError -from .errors.too_many_requests_error import TooManyRequestsError from .errors.unprocessable_entity_error import UnprocessableEntityError -from .evaluator.client import AsyncEvaluatorClient, EvaluatorClient -from .functions.client import AsyncFunctionsClient, FunctionsClient -from .generate_people_also_ask_seo_content.client import ( - AsyncGeneratePeopleAlsoAskSeoContentClient, - GeneratePeopleAlsoAskSeoContentClient, -) -from .generate_product_photo_backgrounds.client import ( - AsyncGenerateProductPhotoBackgroundsClient, - GenerateProductPhotoBackgroundsClient, -) -from .large_language_models_gpt3.client import AsyncLargeLanguageModelsGpt3Client, LargeLanguageModelsGpt3Client -from .letter_writer.client import AsyncLetterWriterClient, LetterWriterClient -from .lip_syncing.client import 
AsyncLipSyncingClient, LipSyncingClient -from .lipsync_video_with_any_text.client import AsyncLipsyncVideoWithAnyTextClient, LipsyncVideoWithAnyTextClient -from .misc.client import AsyncMiscClient, MiscClient -from .people_also_ask_answers_from_a_doc.client import ( - AsyncPeopleAlsoAskAnswersFromADocClient, - PeopleAlsoAskAnswersFromADocClient, -) -from .profile_lookup_gpt3for_ai_personalized_emails.client import ( - AsyncProfileLookupGpt3ForAiPersonalizedEmailsClient, - ProfileLookupGpt3ForAiPersonalizedEmailsClient, -) -from .render_image_search_results_with_ai.client import ( - AsyncRenderImageSearchResultsWithAiClient, - RenderImageSearchResultsWithAiClient, -) -from .search_your_docs_with_gpt.client import AsyncSearchYourDocsWithGptClient, SearchYourDocsWithGptClient -from .smart_gpt.client import AsyncSmartGptClient, SmartGptClient -from .speech_recognition_translation.client import ( - AsyncSpeechRecognitionTranslationClient, - SpeechRecognitionTranslationClient, +from .types.http_validation_error import HttpValidationError +from .errors.too_many_requests_error import TooManyRequestsError +from .types.generic_error_response import GenericErrorResponse +from json.decoder import JSONDecodeError +from .types.vcard import Vcard +from .types.qr_code_generator_page_request_image_prompt_controlnet_models_item import ( + QrCodeGeneratorPageRequestImagePromptControlnetModelsItem, ) -from .summarize_your_docs_with_gpt.client import AsyncSummarizeYourDocsWithGptClient, SummarizeYourDocsWithGptClient -from .synthetic_data_maker_for_videos_pd_fs.client import ( - AsyncSyntheticDataMakerForVideosPdFsClient, - SyntheticDataMakerForVideosPdFsClient, +from .types.qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel +from .types.qr_code_generator_page_request_selected_controlnet_model_item import ( + QrCodeGeneratorPageRequestSelectedControlnetModelItem, ) -from .text_guided_audio_generator.client import 
AsyncTextGuidedAudioGeneratorClient, TextGuidedAudioGeneratorClient -from .types.animate_request_selected_model import AnimateRequestSelectedModel -from .types.animation_prompt import AnimationPrompt -from .types.asr_page_status_response import AsrPageStatusResponse +from .types.qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler +from .types.qr_code_generator_page_output import QrCodeGeneratorPageOutput +from .types.qr_code_generator_page_status_response import QrCodeGeneratorPageStatusResponse +from .types.related_qn_a_page_request_selected_model import RelatedQnAPageRequestSelectedModel +from .types.related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel +from .types.related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType +from .types.serp_search_location import SerpSearchLocation +from .types.serp_search_type import SerpSearchType +from .types.related_qn_a_page_output import RelatedQnAPageOutput +from .types.related_qn_a_page_status_response import RelatedQnAPageStatusResponse +from .types.seo_summary_page_request_selected_model import SeoSummaryPageRequestSelectedModel +from .types.seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType +from .types.seo_summary_page_output import SeoSummaryPageOutput +from .types.seo_summary_page_status_response import SeoSummaryPageStatusResponse +from .types.google_gpt_page_request_selected_model import GoogleGptPageRequestSelectedModel +from .types.google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel +from .types.google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType +from .types.google_gpt_page_output import GoogleGptPageOutput +from .types.google_gpt_page_status_response import GoogleGptPageStatusResponse +from .types.social_lookup_email_page_request_selected_model import SocialLookupEmailPageRequestSelectedModel +from 
.types.social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType +from .types.social_lookup_email_page_output import SocialLookupEmailPageOutput +from .types.social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse +from .types.bulk_runner_page_output import BulkRunnerPageOutput from .types.bulk_runner_page_status_response import BulkRunnerPageStatusResponse -from .types.compare_llm_page_status_response import CompareLlmPageStatusResponse -from .types.compare_text2img_page_status_response import CompareText2ImgPageStatusResponse -from .types.compare_upscaler_page_status_response import CompareUpscalerPageStatusResponse -from .types.deforum_sd_page_status_response import DeforumSdPageStatusResponse +from .types.doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel +from .types.doc_extract_page_request_selected_model import DocExtractPageRequestSelectedModel +from .types.doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType +from .types.doc_extract_page_output import DocExtractPageOutput from .types.doc_extract_page_status_response import DocExtractPageStatusResponse +from .types.compare_llm_page_request_selected_models_item import CompareLlmPageRequestSelectedModelsItem +from .types.compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType +from .types.compare_llm_page_output import CompareLlmPageOutput +from .types.compare_llm_page_status_response import CompareLlmPageStatusResponse +from .types.doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery +from .types.doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel +from .types.doc_search_page_request_selected_model import DocSearchPageRequestSelectedModel +from .types.doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle +from 
.types.doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType +from .types.doc_search_page_output import DocSearchPageOutput from .types.doc_search_page_status_response import DocSearchPageStatusResponse +from .types.doc_summary_page_request_selected_model import DocSummaryPageRequestSelectedModel +from .types.doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel +from .types.doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType +from .types.doc_summary_page_output import DocSummaryPageOutput from .types.doc_summary_page_status_response import DocSummaryPageStatusResponse -from .types.doc_summary_request_response_format_type import DocSummaryRequestResponseFormatType -from .types.doc_summary_request_selected_asr_model import DocSummaryRequestSelectedAsrModel -from .types.doc_summary_request_selected_model import DocSummaryRequestSelectedModel -from .types.email_face_inpainting_page_status_response import EmailFaceInpaintingPageStatusResponse -from .types.embed_request_selected_model import EmbedRequestSelectedModel -from .types.embeddings_page_status_response import EmbeddingsPageStatusResponse +from .types.lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider +from .types.lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName +from .types.lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel +from .types.sad_talker_settings import SadTalkerSettings +from .types.lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel +from .types.lipsync_tts_page_output import LipsyncTtsPageOutput +from .types.lipsync_tts_page_status_response import LipsyncTtsPageStatusResponse +from .types.text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider +from .types.text_to_speech_page_request_openai_voice_name import 
TextToSpeechPageRequestOpenaiVoiceName +from .types.text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel +from .types.text_to_speech_page_output import TextToSpeechPageOutput +from .types.text_to_speech_page_status_response import TextToSpeechPageStatusResponse +from .types.asr_page_request_selected_model import AsrPageRequestSelectedModel +from .types.asr_page_request_translation_model import AsrPageRequestTranslationModel +from .types.asr_page_request_output_format import AsrPageRequestOutputFormat +from .types.asr_page_output import AsrPageOutput +from .types.asr_page_status_response import AsrPageStatusResponse +from .types.text2audio_page_output import Text2AudioPageOutput +from .types.text2audio_page_status_response import Text2AudioPageStatusResponse +from .types.translation_page_request_selected_model import TranslationPageRequestSelectedModel +from .types.translation_page_output import TranslationPageOutput +from .types.translation_page_status_response import TranslationPageStatusResponse +from .types.img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel +from .types.img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel +from .types.img2img_page_output import Img2ImgPageOutput +from .types.img2img_page_status_response import Img2ImgPageStatusResponse +from .types.compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem +from .types.compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler +from .types.compare_text2img_page_output import CompareText2ImgPageOutput +from .types.compare_text2img_page_status_response import CompareText2ImgPageStatusResponse +from .types.object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel +from .types.object_inpainting_page_output import ObjectInpaintingPageOutput +from .types.object_inpainting_page_status_response 
import ObjectInpaintingPageStatusResponse +from .types.face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel +from .types.face_inpainting_page_output import FaceInpaintingPageOutput from .types.face_inpainting_page_status_response import FaceInpaintingPageStatusResponse -from .types.failed_reponse_model_v2 import FailedReponseModelV2 -from .types.generic_error_response import GenericErrorResponse -from .types.google_gpt_page_status_response import GoogleGptPageStatusResponse +from .types.email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel +from .types.email_face_inpainting_page_output import EmailFaceInpaintingPageOutput +from .types.email_face_inpainting_page_status_response import EmailFaceInpaintingPageStatusResponse +from .types.google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel +from .types.google_image_gen_page_output import GoogleImageGenPageOutput from .types.google_image_gen_page_status_response import GoogleImageGenPageStatusResponse -from .types.http_validation_error import HttpValidationError -from .types.image_from_email_request_selected_model import ImageFromEmailRequestSelectedModel -from .types.image_from_web_search_request_selected_model import ImageFromWebSearchRequestSelectedModel +from .types.image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel +from .types.image_segmentation_page_output import ImageSegmentationPageOutput from .types.image_segmentation_page_status_response import ImageSegmentationPageStatusResponse -from .types.img2img_page_status_response import Img2ImgPageStatusResponse -from .types.lipsync_tts_page_status_response import LipsyncTtsPageStatusResponse -from .types.lipsync_tts_request_openai_tts_model import LipsyncTtsRequestOpenaiTtsModel -from .types.lipsync_tts_request_openai_voice_name import LipsyncTtsRequestOpenaiVoiceName -from 
.types.lipsync_tts_request_selected_model import LipsyncTtsRequestSelectedModel -from .types.lipsync_tts_request_tts_provider import LipsyncTtsRequestTtsProvider -from .types.llm_request_response_format_type import LlmRequestResponseFormatType -from .types.llm_request_selected_models_item import LlmRequestSelectedModelsItem -from .types.object_inpainting_page_status_response import ObjectInpaintingPageStatusResponse -from .types.personalize_email_request_response_format_type import PersonalizeEmailRequestResponseFormatType -from .types.personalize_email_request_selected_model import PersonalizeEmailRequestSelectedModel -from .types.portrait_request_selected_model import PortraitRequestSelectedModel -from .types.product_image_request_selected_model import ProductImageRequestSelectedModel -from .types.qr_code_generator_page_status_response import QrCodeGeneratorPageStatusResponse -from .types.qr_code_request_image_prompt_controlnet_models_item import QrCodeRequestImagePromptControlnetModelsItem -from .types.qr_code_request_scheduler import QrCodeRequestScheduler -from .types.qr_code_request_selected_controlnet_model_item import QrCodeRequestSelectedControlnetModelItem -from .types.qr_code_request_selected_model import QrCodeRequestSelectedModel -from .types.rag_request_citation_style import RagRequestCitationStyle -from .types.rag_request_embedding_model import RagRequestEmbeddingModel -from .types.rag_request_keyword_query import RagRequestKeywordQuery -from .types.rag_request_response_format_type import RagRequestResponseFormatType -from .types.rag_request_selected_model import RagRequestSelectedModel -from .types.recipe_function import RecipeFunction +from .types.compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem +from .types.compare_upscaler_page_output import CompareUpscalerPageOutput +from .types.compare_upscaler_page_status_response import CompareUpscalerPageStatusResponse +from 
.types.embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel +from .types.embeddings_page_output import EmbeddingsPageOutput +from .types.embeddings_page_status_response import EmbeddingsPageStatusResponse +from .types.related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery +from .types.related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel +from .types.related_qn_a_doc_page_request_selected_model import RelatedQnADocPageRequestSelectedModel +from .types.related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle +from .types.related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType +from .types.related_qn_a_doc_page_output import RelatedQnADocPageOutput from .types.related_qn_a_doc_page_status_response import RelatedQnADocPageStatusResponse -from .types.related_qn_a_page_status_response import RelatedQnAPageStatusResponse -from .types.remix_image_request_selected_controlnet_model import RemixImageRequestSelectedControlnetModel -from .types.remix_image_request_selected_model import RemixImageRequestSelectedModel -from .types.remove_background_request_selected_model import RemoveBackgroundRequestSelectedModel -from .types.run_settings import RunSettings -from .types.sad_talker_settings import SadTalkerSettings -from .types.seo_content_request_response_format_type import SeoContentRequestResponseFormatType -from .types.seo_content_request_selected_model import SeoContentRequestSelectedModel -from .types.seo_people_also_ask_doc_request_citation_style import SeoPeopleAlsoAskDocRequestCitationStyle -from .types.seo_people_also_ask_doc_request_embedding_model import SeoPeopleAlsoAskDocRequestEmbeddingModel -from .types.seo_people_also_ask_doc_request_keyword_query import SeoPeopleAlsoAskDocRequestKeywordQuery -from .types.seo_people_also_ask_doc_request_response_format_type import 
SeoPeopleAlsoAskDocRequestResponseFormatType -from .types.seo_people_also_ask_doc_request_selected_model import SeoPeopleAlsoAskDocRequestSelectedModel -from .types.seo_people_also_ask_request_embedding_model import SeoPeopleAlsoAskRequestEmbeddingModel -from .types.seo_people_also_ask_request_response_format_type import SeoPeopleAlsoAskRequestResponseFormatType -from .types.seo_people_also_ask_request_selected_model import SeoPeopleAlsoAskRequestSelectedModel -from .types.seo_summary_page_status_response import SeoSummaryPageStatusResponse -from .types.serp_search_location import SerpSearchLocation -from .types.serp_search_type import SerpSearchType -from .types.social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse -from .types.speech_recognition_request_output_format import SpeechRecognitionRequestOutputFormat -from .types.speech_recognition_request_selected_model import SpeechRecognitionRequestSelectedModel -from .types.speech_recognition_request_translation_model import SpeechRecognitionRequestTranslationModel -from .types.synthesize_data_request_response_format_type import SynthesizeDataRequestResponseFormatType -from .types.synthesize_data_request_selected_asr_model import SynthesizeDataRequestSelectedAsrModel -from .types.synthesize_data_request_selected_model import SynthesizeDataRequestSelectedModel -from .types.text2audio_page_status_response import Text2AudioPageStatusResponse -from .types.text_to_image_request_scheduler import TextToImageRequestScheduler -from .types.text_to_image_request_selected_models_item import TextToImageRequestSelectedModelsItem -from .types.text_to_speech_page_status_response import TextToSpeechPageStatusResponse -from .types.text_to_speech_request_openai_tts_model import TextToSpeechRequestOpenaiTtsModel -from .types.text_to_speech_request_openai_voice_name import TextToSpeechRequestOpenaiVoiceName -from .types.text_to_speech_request_tts_provider import TextToSpeechRequestTtsProvider -from 
.types.translate_request_selected_model import TranslateRequestSelectedModel -from .types.translation_page_status_response import TranslationPageStatusResponse -from .types.upscale_request_selected_models_item import UpscaleRequestSelectedModelsItem -from .types.vcard import Vcard -from .types.web_search_llm_request_embedding_model import WebSearchLlmRequestEmbeddingModel -from .types.web_search_llm_request_response_format_type import WebSearchLlmRequestResponseFormatType -from .types.web_search_llm_request_selected_model import WebSearchLlmRequestSelectedModel -from .web_search_gpt3.client import AsyncWebSearchGpt3Client, WebSearchGpt3Client +from .types.chyron_plant_page_status_response import ChyronPlantPageStatusResponse +from .types.post_v3compare_llm_async_form_request_selected_models_item import ( + PostV3CompareLlmAsyncFormRequestSelectedModelsItem, +) +from .types.post_v3compare_llm_async_form_request_response_format_type import ( + PostV3CompareLlmAsyncFormRequestResponseFormatType, +) +from .types.post_v3compare_text2img_async_form_request_selected_models_item import ( + PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem, +) +from .types.post_v3compare_text2img_async_form_request_scheduler import PostV3CompareText2ImgAsyncFormRequestScheduler +from .types.post_v3deforum_sd_async_form_request_selected_model import PostV3DeforumSdAsyncFormRequestSelectedModel +from .types.post_v3email_face_inpainting_async_form_request_selected_model import ( + PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel, +) +from .types.post_v3face_inpainting_async_form_request_selected_model import ( + PostV3FaceInpaintingAsyncFormRequestSelectedModel, +) +from .types.post_v3google_image_gen_async_form_request_selected_model import ( + PostV3GoogleImageGenAsyncFormRequestSelectedModel, +) +from .types.post_v3image_segmentation_async_form_request_selected_model import ( + PostV3ImageSegmentationAsyncFormRequestSelectedModel, +) +from 
.types.post_v3img2img_async_form_request_selected_model import PostV3Img2ImgAsyncFormRequestSelectedModel +from .types.post_v3img2img_async_form_request_selected_controlnet_model import ( + PostV3Img2ImgAsyncFormRequestSelectedControlnetModel, +) +from .types.training_data_model import TrainingDataModel +from .types.letter_writer_page_status_response import LetterWriterPageStatusResponse +from .types.post_v3lipsync_async_form_request_selected_model import PostV3LipsyncAsyncFormRequestSelectedModel +from .types.lipsync_page_status_response import LipsyncPageStatusResponse +from .types.post_v3lipsync_tts_async_form_request_tts_provider import PostV3LipsyncTtsAsyncFormRequestTtsProvider +from .types.post_v3lipsync_tts_async_form_request_openai_voice_name import ( + PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName, +) +from .types.post_v3lipsync_tts_async_form_request_openai_tts_model import PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel +from .types.post_v3lipsync_tts_async_form_request_selected_model import PostV3LipsyncTtsAsyncFormRequestSelectedModel +from .types.post_v3object_inpainting_async_form_request_selected_model import ( + PostV3ObjectInpaintingAsyncFormRequestSelectedModel, +) +from .types.post_v3seo_summary_async_form_request_selected_model import PostV3SeoSummaryAsyncFormRequestSelectedModel +from .types.post_v3seo_summary_async_form_request_response_format_type import ( + PostV3SeoSummaryAsyncFormRequestResponseFormatType, +) +from .types.post_v3smart_gpt_async_form_request_selected_model import PostV3SmartGptAsyncFormRequestSelectedModel +from .types.post_v3smart_gpt_async_form_request_response_format_type import ( + PostV3SmartGptAsyncFormRequestResponseFormatType, +) +from .types.smart_gpt_page_status_response import SmartGptPageStatusResponse +from .types.post_v3social_lookup_email_async_form_request_selected_model import ( + PostV3SocialLookupEmailAsyncFormRequestSelectedModel, +) +from 
.types.post_v3social_lookup_email_async_form_request_response_format_type import ( + PostV3SocialLookupEmailAsyncFormRequestResponseFormatType, +) +from .types.post_v3text_to_speech_async_form_request_tts_provider import PostV3TextToSpeechAsyncFormRequestTtsProvider +from .types.post_v3text_to_speech_async_form_request_openai_voice_name import ( + PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName, +) +from .types.post_v3text_to_speech_async_form_request_openai_tts_model import ( + PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel, +) +from .types.post_v3art_qr_code_async_form_request_image_prompt_controlnet_models_item import ( + PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem, +) +from .types.post_v3art_qr_code_async_form_request_selected_model import PostV3ArtQrCodeAsyncFormRequestSelectedModel +from .types.post_v3art_qr_code_async_form_request_selected_controlnet_model_item import ( + PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem, +) +from .types.post_v3art_qr_code_async_form_request_scheduler import PostV3ArtQrCodeAsyncFormRequestScheduler +from .types.post_v3asr_async_form_request_selected_model import PostV3AsrAsyncFormRequestSelectedModel +from .types.post_v3asr_async_form_request_translation_model import PostV3AsrAsyncFormRequestTranslationModel +from .types.post_v3asr_async_form_request_output_format import PostV3AsrAsyncFormRequestOutputFormat +from .types.eval_prompt import EvalPrompt +from .types.agg_function import AggFunction +from .types.post_v3bulk_eval_async_form_request_selected_model import PostV3BulkEvalAsyncFormRequestSelectedModel +from .types.post_v3bulk_eval_async_form_request_response_format_type import ( + PostV3BulkEvalAsyncFormRequestResponseFormatType, +) +from .types.bulk_eval_page_status_response import BulkEvalPageStatusResponse +from .types.post_v3compare_ai_upscalers_async_form_request_selected_models_item import ( + PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem, +) +from 
.types.post_v3doc_extract_async_form_request_selected_asr_model import ( + PostV3DocExtractAsyncFormRequestSelectedAsrModel, +) +from .types.post_v3doc_extract_async_form_request_selected_model import PostV3DocExtractAsyncFormRequestSelectedModel +from .types.post_v3doc_extract_async_form_request_response_format_type import ( + PostV3DocExtractAsyncFormRequestResponseFormatType, +) +from .types.post_v3doc_search_async_form_request_keyword_query import PostV3DocSearchAsyncFormRequestKeywordQuery +from .types.post_v3doc_search_async_form_request_embedding_model import PostV3DocSearchAsyncFormRequestEmbeddingModel +from .types.post_v3doc_search_async_form_request_selected_model import PostV3DocSearchAsyncFormRequestSelectedModel +from .types.post_v3doc_search_async_form_request_citation_style import PostV3DocSearchAsyncFormRequestCitationStyle +from .types.post_v3doc_search_async_form_request_response_format_type import ( + PostV3DocSearchAsyncFormRequestResponseFormatType, +) +from .types.post_v3doc_summary_async_form_request_selected_model import PostV3DocSummaryAsyncFormRequestSelectedModel +from .types.post_v3doc_summary_async_form_request_selected_asr_model import ( + PostV3DocSummaryAsyncFormRequestSelectedAsrModel, +) +from .types.post_v3doc_summary_async_form_request_response_format_type import ( + PostV3DocSummaryAsyncFormRequestResponseFormatType, +) +from .types.post_v3embeddings_async_form_request_selected_model import PostV3EmbeddingsAsyncFormRequestSelectedModel +from .types.functions_page_status_response import FunctionsPageStatusResponse +from .types.post_v3google_gpt_async_form_request_selected_model import PostV3GoogleGptAsyncFormRequestSelectedModel +from .types.post_v3google_gpt_async_form_request_embedding_model import PostV3GoogleGptAsyncFormRequestEmbeddingModel +from .types.post_v3google_gpt_async_form_request_response_format_type import ( + PostV3GoogleGptAsyncFormRequestResponseFormatType, +) +from 
.types.post_v3related_qna_maker_doc_async_form_request_keyword_query import ( + PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery, +) +from .types.post_v3related_qna_maker_doc_async_form_request_embedding_model import ( + PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel, +) +from .types.post_v3related_qna_maker_doc_async_form_request_selected_model import ( + PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel, +) +from .types.post_v3related_qna_maker_doc_async_form_request_citation_style import ( + PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle, +) +from .types.post_v3related_qna_maker_doc_async_form_request_response_format_type import ( + PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType, +) +from .types.post_v3related_qna_maker_async_form_request_selected_model import ( + PostV3RelatedQnaMakerAsyncFormRequestSelectedModel, +) +from .types.post_v3related_qna_maker_async_form_request_embedding_model import ( + PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel, +) +from .types.post_v3related_qna_maker_async_form_request_response_format_type import ( + PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType, +) +from .types.post_v3translate_async_form_request_selected_model import PostV3TranslateAsyncFormRequestSelectedModel +from .types.conversation_entry import ConversationEntry +from .types.post_v3video_bots_async_form_request_selected_model import PostV3VideoBotsAsyncFormRequestSelectedModel +from .types.post_v3video_bots_async_form_request_embedding_model import PostV3VideoBotsAsyncFormRequestEmbeddingModel +from .types.post_v3video_bots_async_form_request_citation_style import PostV3VideoBotsAsyncFormRequestCitationStyle +from .types.post_v3video_bots_async_form_request_asr_model import PostV3VideoBotsAsyncFormRequestAsrModel +from .types.post_v3video_bots_async_form_request_translation_model import ( + PostV3VideoBotsAsyncFormRequestTranslationModel, +) +from .types.post_v3video_bots_async_form_request_lipsync_model import 
PostV3VideoBotsAsyncFormRequestLipsyncModel +from .types.llm_tools import LlmTools +from .types.post_v3video_bots_async_form_request_response_format_type import ( + PostV3VideoBotsAsyncFormRequestResponseFormatType, +) +from .types.post_v3video_bots_async_form_request_tts_provider import PostV3VideoBotsAsyncFormRequestTtsProvider +from .types.post_v3video_bots_async_form_request_openai_voice_name import PostV3VideoBotsAsyncFormRequestOpenaiVoiceName +from .types.post_v3video_bots_async_form_request_openai_tts_model import PostV3VideoBotsAsyncFormRequestOpenaiTtsModel +from .types.video_bots_page_status_response import VideoBotsPageStatusResponse +from .core.client_wrapper import AsyncClientWrapper +from .copilot_integrations.client import AsyncCopilotIntegrationsClient +from .copilot_for_your_enterprise.client import AsyncCopilotForYourEnterpriseClient +from .evaluator.client import AsyncEvaluatorClient +from .smart_gpt.client import AsyncSmartGptClient +from .functions.client import AsyncFunctionsClient +from .lip_syncing.client import AsyncLipSyncingClient +from .misc.client import AsyncMiscClient # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
@@ -219,7 +351,7 @@ def __init__( api_key: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = os.getenv("GOOEY_API_KEY"), timeout: typing.Optional[float] = None, follow_redirects: typing.Optional[bool] = True, - httpx_client: typing.Optional[httpx.Client] = None + httpx_client: typing.Optional[httpx.Client] = None, ): _defaulted_timeout = timeout if timeout is not None else 60 if httpx_client is None else None if api_key is None: @@ -236,92 +368,48 @@ def __init__( ) self.copilot_integrations = CopilotIntegrationsClient(client_wrapper=self._client_wrapper) self.copilot_for_your_enterprise = CopilotForYourEnterpriseClient(client_wrapper=self._client_wrapper) - self.ai_animation_generator = AiAnimationGeneratorClient(client_wrapper=self._client_wrapper) - self.ai_art_qr_code = AiArtQrCodeClient(client_wrapper=self._client_wrapper) - self.generate_people_also_ask_seo_content = GeneratePeopleAlsoAskSeoContentClient( - client_wrapper=self._client_wrapper - ) - self.create_a_perfect_seo_optimized_title_paragraph = CreateAPerfectSeoOptimizedTitleParagraphClient( - client_wrapper=self._client_wrapper - ) - self.web_search_gpt3 = WebSearchGpt3Client(client_wrapper=self._client_wrapper) - self.profile_lookup_gpt3for_ai_personalized_emails = ProfileLookupGpt3ForAiPersonalizedEmailsClient( - client_wrapper=self._client_wrapper - ) - self.bulk_runner = BulkRunnerClient(client_wrapper=self._client_wrapper) self.evaluator = EvaluatorClient(client_wrapper=self._client_wrapper) - self.synthetic_data_maker_for_videos_pd_fs = SyntheticDataMakerForVideosPdFsClient( - client_wrapper=self._client_wrapper - ) - self.large_language_models_gpt3 = LargeLanguageModelsGpt3Client(client_wrapper=self._client_wrapper) - self.search_your_docs_with_gpt = SearchYourDocsWithGptClient(client_wrapper=self._client_wrapper) self.smart_gpt = SmartGptClient(client_wrapper=self._client_wrapper) - self.summarize_your_docs_with_gpt = 
SummarizeYourDocsWithGptClient(client_wrapper=self._client_wrapper) self.functions = FunctionsClient(client_wrapper=self._client_wrapper) self.lip_syncing = LipSyncingClient(client_wrapper=self._client_wrapper) - self.lipsync_video_with_any_text = LipsyncVideoWithAnyTextClient(client_wrapper=self._client_wrapper) - self.compare_ai_voice_generators = CompareAiVoiceGeneratorsClient(client_wrapper=self._client_wrapper) - self.speech_recognition_translation = SpeechRecognitionTranslationClient(client_wrapper=self._client_wrapper) - self.text_guided_audio_generator = TextGuidedAudioGeneratorClient(client_wrapper=self._client_wrapper) - self.compare_ai_translations = CompareAiTranslationsClient(client_wrapper=self._client_wrapper) - self.edit_an_image_with_ai_prompt = EditAnImageWithAiPromptClient(client_wrapper=self._client_wrapper) - self.compare_ai_image_generators = CompareAiImageGeneratorsClient(client_wrapper=self._client_wrapper) - self.generate_product_photo_backgrounds = GenerateProductPhotoBackgroundsClient( - client_wrapper=self._client_wrapper - ) - self.ai_image_with_a_face = AiImageWithAFaceClient(client_wrapper=self._client_wrapper) - self.ai_generated_photo_from_email_profile_lookup = AiGeneratedPhotoFromEmailProfileLookupClient( - client_wrapper=self._client_wrapper - ) - self.render_image_search_results_with_ai = RenderImageSearchResultsWithAiClient( - client_wrapper=self._client_wrapper - ) - self.ai_background_changer = AiBackgroundChangerClient(client_wrapper=self._client_wrapper) - self.compare_ai_image_upscalers = CompareAiImageUpscalersClient(client_wrapper=self._client_wrapper) - self.chyron_plant_bot = ChyronPlantBotClient(client_wrapper=self._client_wrapper) - self.letter_writer = LetterWriterClient(client_wrapper=self._client_wrapper) - self.embeddings = EmbeddingsClient(client_wrapper=self._client_wrapper) - self.people_also_ask_answers_from_a_doc = PeopleAlsoAskAnswersFromADocClient( - client_wrapper=self._client_wrapper - ) self.misc = 
MiscClient(client_wrapper=self._client_wrapper) def animate( self, *, - animation_prompts: typing.List[AnimationPrompt], + animation_prompts: typing.Sequence[AnimationPrompt], example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - max_frames: typing.Optional[int] = None, - selected_model: typing.Optional[AnimateRequestSelectedModel] = None, - animation_mode: typing.Optional[str] = None, - zoom: typing.Optional[str] = None, - translation_x: typing.Optional[str] = None, - translation_y: typing.Optional[str] = None, - rotation3d_x: typing.Optional[str] = None, - rotation3d_y: typing.Optional[str] = None, - rotation3d_z: typing.Optional[str] = None, - fps: typing.Optional[int] = None, - seed: typing.Optional[int] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> DeforumSdPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + max_frames: typing.Optional[int] = OMIT, + selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT, + animation_mode: typing.Optional[str] = OMIT, + zoom: typing.Optional[str] = OMIT, + translation_x: typing.Optional[str] = OMIT, + translation_y: typing.Optional[str] = OMIT, + rotation3d_x: typing.Optional[str] = OMIT, + rotation3d_y: typing.Optional[str] = OMIT, + rotation3d_z: typing.Optional[str] = OMIT, + fps: typing.Optional[int] = OMIT, + seed: typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[DeforumSdPageOutput]: """ Parameters ---------- - animation_prompts : typing.List[AnimationPrompt] + animation_prompts : typing.Sequence[AnimationPrompt] example_id : typing.Optional[str] - functions : 
typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments max_frames : typing.Optional[int] - selected_model : typing.Optional[AnimateRequestSelectedModel] + selected_model : typing.Optional[DeforumSdPageRequestSelectedModel] animation_mode : typing.Optional[str] @@ -348,7 +436,7 @@ def animate( Returns ------- - DeforumSdPageStatusResponse + typing.Optional[DeforumSdPageOutput] Successful Response Examples @@ -368,10 +456,12 @@ def animate( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/DeforumSD/async/form", + "v3/DeforumSD/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "animation_prompts": animation_prompts, @@ -388,32 +478,48 @@ def animate( "seed": seed, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(DeforumSdPageStatusResponse, parse_obj_as(type_=DeforumSdPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + DeforumSdPageStatusResponse, + parse_obj_as( + type_=DeforumSdPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: 
ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -425,38 +531,40 @@ def qr_code( *, text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - qr_code_data: typing.Optional[str] = None, - qr_code_input_image: typing.Optional[str] = None, - qr_code_vcard: typing.Optional[Vcard] = None, - qr_code_file: typing.Optional[str] = None, - use_url_shortener: typing.Optional[bool] = None, - negative_prompt: typing.Optional[str] = None, - image_prompt: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + qr_code_data: typing.Optional[str] = OMIT, + qr_code_input_image: typing.Optional[str] = OMIT, + qr_code_vcard: typing.Optional[Vcard] = OMIT, + qr_code_file: typing.Optional[str] = OMIT, + use_url_shortener: typing.Optional[bool] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + image_prompt: typing.Optional[str] = OMIT, 
image_prompt_controlnet_models: typing.Optional[ - typing.List[QrCodeRequestImagePromptControlnetModelsItem] - ] = None, - image_prompt_strength: typing.Optional[float] = None, - image_prompt_scale: typing.Optional[float] = None, - image_prompt_pos_x: typing.Optional[float] = None, - image_prompt_pos_y: typing.Optional[float] = None, - selected_model: typing.Optional[QrCodeRequestSelectedModel] = None, - selected_controlnet_model: typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] = None, - output_width: typing.Optional[int] = None, - output_height: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - scheduler: typing.Optional[QrCodeRequestScheduler] = None, - seed: typing.Optional[int] = None, - obj_scale: typing.Optional[float] = None, - obj_pos_x: typing.Optional[float] = None, - obj_pos_y: typing.Optional[float] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> QrCodeGeneratorPageStatusResponse: + typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem] + ] = OMIT, + image_prompt_strength: typing.Optional[float] = OMIT, + image_prompt_scale: typing.Optional[float] = OMIT, + image_prompt_pos_x: typing.Optional[float] = OMIT, + image_prompt_pos_y: typing.Optional[float] = OMIT, + selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT, + selected_controlnet_model: typing.Optional[ + typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem] + ] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: 
typing.Optional[int] = OMIT, + scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = OMIT, + seed: typing.Optional[int] = OMIT, + obj_scale: typing.Optional[float] = OMIT, + obj_pos_x: typing.Optional[float] = OMIT, + obj_pos_y: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[QrCodeGeneratorPageOutput]: """ Parameters ---------- @@ -464,9 +572,9 @@ def qr_code( example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments qr_code_data : typing.Optional[str] @@ -483,7 +591,7 @@ def qr_code( image_prompt : typing.Optional[str] - image_prompt_controlnet_models : typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]] + image_prompt_controlnet_models : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]] image_prompt_strength : typing.Optional[float] @@ -493,9 +601,9 @@ def qr_code( image_prompt_pos_y : typing.Optional[float] - selected_model : typing.Optional[QrCodeRequestSelectedModel] + selected_model : typing.Optional[QrCodeGeneratorPageRequestSelectedModel] - selected_controlnet_model : typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] + selected_controlnet_model : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]] output_width : typing.Optional[int] @@ -503,13 +611,13 @@ def qr_code( guidance_scale : typing.Optional[float] - controlnet_conditioning_scale : typing.Optional[typing.List[float]] + controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]] num_outputs : typing.Optional[int] quality : 
typing.Optional[int] - scheduler : typing.Optional[QrCodeRequestScheduler] + scheduler : typing.Optional[QrCodeGeneratorPageRequestScheduler] seed : typing.Optional[int] @@ -526,7 +634,7 @@ def qr_code( Returns ------- - QrCodeGeneratorPageStatusResponse + typing.Optional[QrCodeGeneratorPageOutput] Successful Response Examples @@ -541,10 +649,12 @@ def qr_code( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/art-qr-code/async/form", + "v3/art-qr-code/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "qr_code_data": qr_code_data, @@ -575,32 +685,48 @@ def qr_code( "obj_pos_y": obj_pos_y, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(QrCodeGeneratorPageStatusResponse, parse_obj_as(type_=QrCodeGeneratorPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + QrCodeGeneratorPageStatusResponse, + parse_obj_as( + type_=QrCodeGeneratorPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: 
ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -613,30 +739,30 @@ def seo_people_also_ask( search_query: str, site_filter: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - selected_model: typing.Optional[SeoPeopleAlsoAskRequestSelectedModel] = None, - max_search_urls: typing.Optional[int] = None, - max_references: typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - embedding_model: typing.Optional[SeoPeopleAlsoAskRequestEmbeddingModel] = None, - dense_weight: typing.Optional[float] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[SeoPeopleAlsoAskRequestResponseFormatType] = None, - serp_search_location: typing.Optional[SerpSearchLocation] = None, - scaleserp_locations: typing.Optional[typing.List[str]] = None, - serp_search_type: typing.Optional[SerpSearchType] = None, - scaleserp_search_field: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, - 
request_options: typing.Optional[RequestOptions] = None - ) -> RelatedQnAPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT, + max_search_urls: typing.Optional[int] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[RelatedQnAPageOutput]: """ Parameters ---------- @@ -646,16 +772,16 @@ def seo_people_also_ask( example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments task_instructions : typing.Optional[str] query_instructions : typing.Optional[str] - 
selected_model : typing.Optional[SeoPeopleAlsoAskRequestSelectedModel] + selected_model : typing.Optional[RelatedQnAPageRequestSelectedModel] max_search_urls : typing.Optional[int] @@ -665,7 +791,7 @@ def seo_people_also_ask( scroll_jump : typing.Optional[int] - embedding_model : typing.Optional[SeoPeopleAlsoAskRequestEmbeddingModel] + embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel] dense_weight : typing.Optional[float] @@ -683,11 +809,11 @@ def seo_people_also_ask( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[SeoPeopleAlsoAskRequestResponseFormatType] + response_format_type : typing.Optional[RelatedQnAPageRequestResponseFormatType] serp_search_location : typing.Optional[SerpSearchLocation] - scaleserp_locations : typing.Optional[typing.List[str]] + scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead serp_search_type : typing.Optional[SerpSearchType] @@ -702,7 +828,7 @@ def seo_people_also_ask( Returns ------- - RelatedQnAPageStatusResponse + typing.Optional[RelatedQnAPageOutput] Successful Response Examples @@ -718,10 +844,12 @@ def seo_people_also_ask( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/related-qna-maker/async/form", + "v3/related-qna-maker/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "search_query": search_query, @@ -747,32 +875,48 @@ def seo_people_also_ask( "scaleserp_search_field": scaleserp_search_field, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(RelatedQnAPageStatusResponse, parse_obj_as(type_=RelatedQnAPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, 
parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + RelatedQnAPageStatusResponse, + parse_obj_as( + type_=RelatedQnAPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -787,25 +931,25 @@ def seo_content( title: str, company_url: str, example_id: typing.Optional[str] = None, - task_instructions: typing.Optional[str] = None, - enable_html: typing.Optional[bool] = None, - selected_model: typing.Optional[SeoContentRequestSelectedModel] = None, - max_search_urls: typing.Optional[int] = None, - enable_crosslinks: typing.Optional[bool] = None, - seed: typing.Optional[int] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - 
quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[SeoContentRequestResponseFormatType] = None, - serp_search_location: typing.Optional[SerpSearchLocation] = None, - scaleserp_locations: typing.Optional[typing.List[str]] = None, - serp_search_type: typing.Optional[SerpSearchType] = None, - scaleserp_search_field: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> SeoSummaryPageStatusResponse: + task_instructions: typing.Optional[str] = OMIT, + enable_html: typing.Optional[bool] = OMIT, + selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT, + max_search_urls: typing.Optional[int] = OMIT, + enable_crosslinks: typing.Optional[bool] = OMIT, + seed: typing.Optional[int] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[SeoSummaryPageOutput]: """ Parameters ---------- @@ -823,7 +967,7 @@ def seo_content( enable_html : typing.Optional[bool] - selected_model : typing.Optional[SeoContentRequestSelectedModel] + selected_model : typing.Optional[SeoSummaryPageRequestSelectedModel] max_search_urls : typing.Optional[int] @@ -841,11 +985,11 @@ def seo_content( sampling_temperature : 
typing.Optional[float] - response_format_type : typing.Optional[SeoContentRequestResponseFormatType] + response_format_type : typing.Optional[SeoSummaryPageRequestResponseFormatType] serp_search_location : typing.Optional[SerpSearchLocation] - scaleserp_locations : typing.Optional[typing.List[str]] + scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead serp_search_type : typing.Optional[SerpSearchType] @@ -860,7 +1004,7 @@ def seo_content( Returns ------- - SeoSummaryPageStatusResponse + typing.Optional[SeoSummaryPageOutput] Successful Response Examples @@ -878,10 +1022,12 @@ def seo_content( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/SEOSummary/async/form", + "v3/SEOSummary/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "search_query": search_query, "keywords": keywords, "title": title, @@ -904,32 +1050,48 @@ def seo_content( "scaleserp_search_field": scaleserp_search_field, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(SeoSummaryPageStatusResponse, parse_obj_as(type_=SeoSummaryPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + SeoSummaryPageStatusResponse, + parse_obj_as( + type_=SeoSummaryPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + 
object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -942,30 +1104,30 @@ def web_search_llm( search_query: str, site_filter: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - selected_model: typing.Optional[WebSearchLlmRequestSelectedModel] = None, - max_search_urls: typing.Optional[int] = None, - max_references: typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - embedding_model: typing.Optional[WebSearchLlmRequestEmbeddingModel] = None, - dense_weight: typing.Optional[float] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[WebSearchLlmRequestResponseFormatType] = None, - 
serp_search_location: typing.Optional[SerpSearchLocation] = None, - scaleserp_locations: typing.Optional[typing.List[str]] = None, - serp_search_type: typing.Optional[SerpSearchType] = None, - scaleserp_search_field: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> GoogleGptPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT, + max_search_urls: typing.Optional[int] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[GoogleGptPageOutput]: """ Parameters ---------- @@ -975,16 +1137,16 @@ def web_search_llm( example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables 
: typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments task_instructions : typing.Optional[str] query_instructions : typing.Optional[str] - selected_model : typing.Optional[WebSearchLlmRequestSelectedModel] + selected_model : typing.Optional[GoogleGptPageRequestSelectedModel] max_search_urls : typing.Optional[int] @@ -994,7 +1156,7 @@ def web_search_llm( scroll_jump : typing.Optional[int] - embedding_model : typing.Optional[WebSearchLlmRequestEmbeddingModel] + embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel] dense_weight : typing.Optional[float] @@ -1012,11 +1174,11 @@ def web_search_llm( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[WebSearchLlmRequestResponseFormatType] + response_format_type : typing.Optional[GoogleGptPageRequestResponseFormatType] serp_search_location : typing.Optional[SerpSearchLocation] - scaleserp_locations : typing.Optional[typing.List[str]] + scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead serp_search_type : typing.Optional[SerpSearchType] @@ -1031,7 +1193,7 @@ def web_search_llm( Returns ------- - GoogleGptPageStatusResponse + typing.Optional[GoogleGptPageOutput] Successful Response Examples @@ -1047,10 +1209,12 @@ def web_search_llm( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/google-gpt/async/form", + "v3/google-gpt/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "search_query": search_query, @@ -1076,32 +1240,48 @@ def web_search_llm( "scaleserp_search_field": scaleserp_search_field, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return 
typing.cast(GoogleGptPageStatusResponse, parse_obj_as(type_=GoogleGptPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + GoogleGptPageStatusResponse, + parse_obj_as( + type_=GoogleGptPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -1113,19 +1293,19 @@ def personalize_email( *, email_address: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - input_prompt: typing.Optional[str] = 
None, - selected_model: typing.Optional[PersonalizeEmailRequestSelectedModel] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PersonalizeEmailRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> SocialLookupEmailPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + input_prompt: typing.Optional[str] = OMIT, + selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[SocialLookupEmailPageOutput]: """ Parameters ---------- @@ -1133,14 +1313,14 @@ def personalize_email( example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments input_prompt : typing.Optional[str] - selected_model : typing.Optional[PersonalizeEmailRequestSelectedModel] + selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel] avoid_repetition : 
typing.Optional[bool] @@ -1152,7 +1332,7 @@ def personalize_email( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[PersonalizeEmailRequestResponseFormatType] + response_format_type : typing.Optional[SocialLookupEmailPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -1161,7 +1341,7 @@ def personalize_email( Returns ------- - SocialLookupEmailPageStatusResponse + typing.Optional[SocialLookupEmailPageOutput] Successful Response Examples @@ -1176,10 +1356,12 @@ def personalize_email( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/SocialLookupEmail/async/form", + "v3/SocialLookupEmail/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "email_address": email_address, @@ -1193,32 +1375,48 @@ def personalize_email( "response_format_type": response_format_type, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(SocialLookupEmailPageStatusResponse, parse_obj_as(type_=SocialLookupEmailPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + SocialLookupEmailPageStatusResponse, + parse_obj_as( + type_=SocialLookupEmailPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: 
raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -1228,28 +1426,28 @@ def personalize_email( def bulk_run( self, *, - documents: typing.List[str], - run_urls: typing.List[str], + documents: typing.Sequence[str], + run_urls: typing.Sequence[str], input_columns: typing.Dict[str, str], output_columns: typing.Dict[str, str], example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - eval_urls: typing.Optional[typing.List[str]] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> BulkRunnerPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + eval_urls: typing.Optional[typing.Sequence[str]] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[BulkRunnerPageOutput]: """ Parameters ---------- - documents : typing.List[str] + documents : typing.Sequence[str] Upload or link to a CSV or google sheet 
that contains your sample input data. For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. Remember to includes header names in your CSV too. - run_urls : typing.List[str] + run_urls : typing.Sequence[str] Provide one or more Gooey.AI workflow runs. You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. @@ -1267,12 +1465,12 @@ def bulk_run( example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - eval_urls : typing.Optional[typing.List[str]] + eval_urls : typing.Optional[typing.Sequence[str]] _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. 
@@ -1284,7 +1482,7 @@ def bulk_run( Returns ------- - BulkRunnerPageStatusResponse + typing.Optional[BulkRunnerPageOutput] Successful Response Examples @@ -1302,10 +1500,12 @@ def bulk_run( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/bulk-runner/async/form", + "v3/bulk-runner/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "documents": documents, @@ -1315,32 +1515,48 @@ def bulk_run( "eval_urls": eval_urls, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(BulkRunnerPageStatusResponse, parse_obj_as(type_=BulkRunnerPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + BulkRunnerPageStatusResponse, + parse_obj_as( + type_=BulkRunnerPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, 
object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -1350,40 +1566,40 @@ def bulk_run( def synthesize_data( self, *, - documents: typing.List[str], + documents: typing.Sequence[str], example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - sheet_url: typing.Optional[str] = None, - selected_asr_model: typing.Optional[SynthesizeDataRequestSelectedAsrModel] = None, - google_translate_target: typing.Optional[str] = None, - glossary_document: typing.Optional[str] = None, - task_instructions: typing.Optional[str] = None, - selected_model: typing.Optional[SynthesizeDataRequestSelectedModel] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[SynthesizeDataRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> DocExtractPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + sheet_url: typing.Optional[str] = OMIT, + selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT, + google_translate_target: typing.Optional[str] = OMIT, + glossary_document: typing.Optional[str] = OMIT, + task_instructions: typing.Optional[str] = OMIT, 
+ selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[DocExtractPageOutput]: """ Parameters ---------- - documents : typing.List[str] + documents : typing.Sequence[str] example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments sheet_url : typing.Optional[str] - selected_asr_model : typing.Optional[SynthesizeDataRequestSelectedAsrModel] + selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel] google_translate_target : typing.Optional[str] @@ -1393,7 +1609,7 @@ def synthesize_data( task_instructions : typing.Optional[str] - selected_model : typing.Optional[SynthesizeDataRequestSelectedModel] + selected_model : typing.Optional[DocExtractPageRequestSelectedModel] avoid_repetition : typing.Optional[bool] @@ -1405,7 +1621,7 @@ def synthesize_data( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[SynthesizeDataRequestResponseFormatType] + response_format_type : typing.Optional[DocExtractPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -1414,7 +1630,7 @@ def synthesize_data( Returns ------- - DocExtractPageStatusResponse + typing.Optional[DocExtractPageOutput] Successful Response Examples @@ -1429,10 +1645,12 @@ def 
synthesize_data( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/doc-extract/async/form", + "v3/doc-extract/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "documents": documents, @@ -1450,32 +1668,48 @@ def synthesize_data( "response_format_type": response_format_type, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(DocExtractPageStatusResponse, parse_obj_as(type_=DocExtractPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + DocExtractPageStatusResponse, + parse_obj_as( + type_=DocExtractPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, 
parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -1486,32 +1720,32 @@ def llm( self, *, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - input_prompt: typing.Optional[str] = None, - selected_models: typing.Optional[typing.List[LlmRequestSelectedModelsItem]] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[LlmRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> CompareLlmPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + input_prompt: typing.Optional[str] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[CompareLlmPageOutput]: """ Parameters ---------- example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : 
typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments input_prompt : typing.Optional[str] - selected_models : typing.Optional[typing.List[LlmRequestSelectedModelsItem]] + selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] avoid_repetition : typing.Optional[bool] @@ -1523,7 +1757,7 @@ def llm( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[LlmRequestResponseFormatType] + response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -1532,7 +1766,7 @@ def llm( Returns ------- - CompareLlmPageStatusResponse + typing.Optional[CompareLlmPageOutput] Successful Response Examples @@ -1545,10 +1779,12 @@ def llm( client.llm() """ _response = self._client_wrapper.httpx_client.request( - "v3/CompareLLM/async/form", + "v3/CompareLLM/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "input_prompt": input_prompt, @@ -1561,32 +1797,48 @@ def llm( "response_format_type": response_format_type, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(CompareLlmPageStatusResponse, parse_obj_as(type_=CompareLlmPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + CompareLlmPageStatusResponse, + parse_obj_as( + type_=CompareLlmPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return 
_parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -1598,29 +1850,29 @@ def rag( *, search_query: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - keyword_query: typing.Optional[RagRequestKeywordQuery] = None, - documents: typing.Optional[typing.List[str]] = None, - max_references: typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - doc_extract_url: typing.Optional[str] = None, - embedding_model: typing.Optional[RagRequestEmbeddingModel] = None, - dense_weight: typing.Optional[float] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - selected_model: 
typing.Optional[RagRequestSelectedModel] = None, - citation_style: typing.Optional[RagRequestCitationStyle] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[RagRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> DocSearchPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT, + documents: typing.Optional[typing.Sequence[str]] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + doc_extract_url: typing.Optional[str] = OMIT, + embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT, + citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[DocSearchPageOutput]: """ Parameters ---------- @@ -1628,14 +1880,14 @@ def rag( example_id : typing.Optional[str] - 
functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - keyword_query : typing.Optional[RagRequestKeywordQuery] + keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery] - documents : typing.Optional[typing.List[str]] + documents : typing.Optional[typing.Sequence[str]] max_references : typing.Optional[int] @@ -1645,7 +1897,7 @@ def rag( doc_extract_url : typing.Optional[str] - embedding_model : typing.Optional[RagRequestEmbeddingModel] + embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel] dense_weight : typing.Optional[float] @@ -1657,9 +1909,9 @@ def rag( query_instructions : typing.Optional[str] - selected_model : typing.Optional[RagRequestSelectedModel] + selected_model : typing.Optional[DocSearchPageRequestSelectedModel] - citation_style : typing.Optional[RagRequestCitationStyle] + citation_style : typing.Optional[DocSearchPageRequestCitationStyle] avoid_repetition : typing.Optional[bool] @@ -1671,7 +1923,7 @@ def rag( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[RagRequestResponseFormatType] + response_format_type : typing.Optional[DocSearchPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -1680,7 +1932,7 @@ def rag( Returns ------- - DocSearchPageStatusResponse + typing.Optional[DocSearchPageOutput] Successful Response Examples @@ -1695,10 +1947,12 @@ def rag( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/doc-search/async/form", + "v3/doc-search/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "search_query": search_query, @@ -1722,32 +1976,48 @@ def 
rag( "response_format_type": response_format_type, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(DocSearchPageStatusResponse, parse_obj_as(type_=DocSearchPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + DocSearchPageStatusResponse, + parse_obj_as( + type_=DocSearchPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -1757,46 +2027,46 @@ def rag( def doc_summary( self, *, - documents: typing.List[str], + 
documents: typing.Sequence[str], example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - task_instructions: typing.Optional[str] = None, - merge_instructions: typing.Optional[str] = None, - selected_model: typing.Optional[DocSummaryRequestSelectedModel] = None, - chain_type: typing.Optional[typing.Literal["map_reduce"]] = None, - selected_asr_model: typing.Optional[DocSummaryRequestSelectedAsrModel] = None, - google_translate_target: typing.Optional[str] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[DocSummaryRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> DocSummaryPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + merge_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = OMIT, + chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT, + selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT, + google_translate_target: typing.Optional[str] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: 
typing.Optional[RequestOptions] = None, + ) -> typing.Optional[DocSummaryPageOutput]: """ Parameters ---------- - documents : typing.List[str] + documents : typing.Sequence[str] example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments task_instructions : typing.Optional[str] merge_instructions : typing.Optional[str] - selected_model : typing.Optional[DocSummaryRequestSelectedModel] + selected_model : typing.Optional[DocSummaryPageRequestSelectedModel] chain_type : typing.Optional[typing.Literal["map_reduce"]] - selected_asr_model : typing.Optional[DocSummaryRequestSelectedAsrModel] + selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel] google_translate_target : typing.Optional[str] @@ -1810,7 +2080,7 @@ def doc_summary( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[DocSummaryRequestResponseFormatType] + response_format_type : typing.Optional[DocSummaryPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -1819,7 +2089,7 @@ def doc_summary( Returns ------- - DocSummaryPageStatusResponse + typing.Optional[DocSummaryPageOutput] Successful Response Examples @@ -1834,10 +2104,12 @@ def doc_summary( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/doc-summary/async/form", + "v3/doc-summary/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "documents": documents, @@ -1855,32 +2127,48 @@ def doc_summary( "response_format_type": response_format_type, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= 
_response.status_code < 300: - return typing.cast(DocSummaryPageStatusResponse, parse_obj_as(type_=DocSummaryPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + DocSummaryPageStatusResponse, + parse_obj_as( + type_=DocSummaryPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -1892,36 +2180,36 @@ def lipsync_tts( *, text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - 
tts_provider: typing.Optional[LipsyncTtsRequestTtsProvider] = None, - uberduck_voice_name: typing.Optional[str] = None, - uberduck_speaking_rate: typing.Optional[float] = None, - google_voice_name: typing.Optional[str] = None, - google_speaking_rate: typing.Optional[float] = None, - google_pitch: typing.Optional[float] = None, - bark_history_prompt: typing.Optional[str] = None, - elevenlabs_voice_name: typing.Optional[str] = None, - elevenlabs_api_key: typing.Optional[str] = None, - elevenlabs_voice_id: typing.Optional[str] = None, - elevenlabs_model: typing.Optional[str] = None, - elevenlabs_stability: typing.Optional[float] = None, - elevenlabs_similarity_boost: typing.Optional[float] = None, - elevenlabs_style: typing.Optional[float] = None, - elevenlabs_speaker_boost: typing.Optional[bool] = None, - azure_voice_name: typing.Optional[str] = None, - openai_voice_name: typing.Optional[LipsyncTtsRequestOpenaiVoiceName] = None, - openai_tts_model: typing.Optional[LipsyncTtsRequestOpenaiTtsModel] = None, - input_face: typing.Optional[str] = None, - face_padding_top: typing.Optional[int] = None, - face_padding_bottom: typing.Optional[int] = None, - face_padding_left: typing.Optional[int] = None, - face_padding_right: typing.Optional[int] = None, - sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - selected_model: typing.Optional[LipsyncTtsRequestSelectedModel] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> LipsyncTtsPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT, + uberduck_voice_name: typing.Optional[str] = OMIT, + uberduck_speaking_rate: typing.Optional[float] = OMIT, + google_voice_name: typing.Optional[str] = OMIT, + google_speaking_rate: typing.Optional[float] = OMIT, + 
google_pitch: typing.Optional[float] = OMIT, + bark_history_prompt: typing.Optional[str] = OMIT, + elevenlabs_voice_name: typing.Optional[str] = OMIT, + elevenlabs_api_key: typing.Optional[str] = OMIT, + elevenlabs_voice_id: typing.Optional[str] = OMIT, + elevenlabs_model: typing.Optional[str] = OMIT, + elevenlabs_stability: typing.Optional[float] = OMIT, + elevenlabs_similarity_boost: typing.Optional[float] = OMIT, + elevenlabs_style: typing.Optional[float] = OMIT, + elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, + azure_voice_name: typing.Optional[str] = OMIT, + openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT, + openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT, + input_face: typing.Optional[str] = OMIT, + face_padding_top: typing.Optional[int] = OMIT, + face_padding_bottom: typing.Optional[int] = OMIT, + face_padding_left: typing.Optional[int] = OMIT, + face_padding_right: typing.Optional[int] = OMIT, + sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, + selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[LipsyncTtsPageOutput]: """ Parameters ---------- @@ -1929,12 +2217,12 @@ def lipsync_tts( example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - tts_provider : typing.Optional[LipsyncTtsRequestTtsProvider] + tts_provider : typing.Optional[LipsyncTtsPageRequestTtsProvider] uberduck_voice_name : typing.Optional[str] @@ -1967,9 +2255,9 @@ def lipsync_tts( azure_voice_name : typing.Optional[str] - openai_voice_name : 
typing.Optional[LipsyncTtsRequestOpenaiVoiceName] + openai_voice_name : typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] - openai_tts_model : typing.Optional[LipsyncTtsRequestOpenaiTtsModel] + openai_tts_model : typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] input_face : typing.Optional[str] @@ -1983,7 +2271,7 @@ def lipsync_tts( sadtalker_settings : typing.Optional[SadTalkerSettings] - selected_model : typing.Optional[LipsyncTtsRequestSelectedModel] + selected_model : typing.Optional[LipsyncTtsPageRequestSelectedModel] settings : typing.Optional[RunSettings] @@ -1992,7 +2280,7 @@ def lipsync_tts( Returns ------- - LipsyncTtsPageStatusResponse + typing.Optional[LipsyncTtsPageOutput] Successful Response Examples @@ -2007,10 +2295,12 @@ def lipsync_tts( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/LipsyncTTS/async/form", + "v3/LipsyncTTS/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "text_prompt": text_prompt, @@ -2041,32 +2331,48 @@ def lipsync_tts( "selected_model": selected_model, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(LipsyncTtsPageStatusResponse, parse_obj_as(type_=LipsyncTtsPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + LipsyncTtsPageStatusResponse, + parse_obj_as( + type_=LipsyncTtsPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + 
typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -2078,29 +2384,29 @@ def text_to_speech( *, text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - tts_provider: typing.Optional[TextToSpeechRequestTtsProvider] = None, - uberduck_voice_name: typing.Optional[str] = None, - uberduck_speaking_rate: typing.Optional[float] = None, - google_voice_name: typing.Optional[str] = None, - google_speaking_rate: typing.Optional[float] = None, - google_pitch: typing.Optional[float] = None, - bark_history_prompt: typing.Optional[str] = None, - elevenlabs_voice_name: typing.Optional[str] = None, - elevenlabs_api_key: typing.Optional[str] = None, - elevenlabs_voice_id: typing.Optional[str] = None, - elevenlabs_model: typing.Optional[str] = None, - elevenlabs_stability: typing.Optional[float] = None, - elevenlabs_similarity_boost: typing.Optional[float] = None, - elevenlabs_style: 
typing.Optional[float] = None, - elevenlabs_speaker_boost: typing.Optional[bool] = None, - azure_voice_name: typing.Optional[str] = None, - openai_voice_name: typing.Optional[TextToSpeechRequestOpenaiVoiceName] = None, - openai_tts_model: typing.Optional[TextToSpeechRequestOpenaiTtsModel] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> TextToSpeechPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT, + uberduck_voice_name: typing.Optional[str] = OMIT, + uberduck_speaking_rate: typing.Optional[float] = OMIT, + google_voice_name: typing.Optional[str] = OMIT, + google_speaking_rate: typing.Optional[float] = OMIT, + google_pitch: typing.Optional[float] = OMIT, + bark_history_prompt: typing.Optional[str] = OMIT, + elevenlabs_voice_name: typing.Optional[str] = OMIT, + elevenlabs_api_key: typing.Optional[str] = OMIT, + elevenlabs_voice_id: typing.Optional[str] = OMIT, + elevenlabs_model: typing.Optional[str] = OMIT, + elevenlabs_stability: typing.Optional[float] = OMIT, + elevenlabs_similarity_boost: typing.Optional[float] = OMIT, + elevenlabs_style: typing.Optional[float] = OMIT, + elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, + azure_voice_name: typing.Optional[str] = OMIT, + openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT, + openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[TextToSpeechPageOutput]: """ Parameters ---------- @@ -2108,12 +2414,12 @@ def text_to_speech( example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : 
typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - tts_provider : typing.Optional[TextToSpeechRequestTtsProvider] + tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider] uberduck_voice_name : typing.Optional[str] @@ -2146,9 +2452,9 @@ def text_to_speech( azure_voice_name : typing.Optional[str] - openai_voice_name : typing.Optional[TextToSpeechRequestOpenaiVoiceName] + openai_voice_name : typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] - openai_tts_model : typing.Optional[TextToSpeechRequestOpenaiTtsModel] + openai_tts_model : typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] settings : typing.Optional[RunSettings] @@ -2157,7 +2463,7 @@ def text_to_speech( Returns ------- - TextToSpeechPageStatusResponse + typing.Optional[TextToSpeechPageOutput] Successful Response Examples @@ -2172,10 +2478,12 @@ def text_to_speech( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/TextToSpeech/async/form", + "v3/TextToSpeech/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "text_prompt": text_prompt, @@ -2199,32 +2507,48 @@ def text_to_speech( "openai_tts_model": openai_tts_model, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(TextToSpeechPageStatusResponse, parse_obj_as(type_=TextToSpeechPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + TextToSpeechPageStatusResponse, + parse_obj_as( 
+ type_=TextToSpeechPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -2234,40 +2558,40 @@ def text_to_speech( def speech_recognition( self, *, - documents: typing.List[str], + documents: typing.Sequence[str], example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - selected_model: typing.Optional[SpeechRecognitionRequestSelectedModel] = None, - language: typing.Optional[str] = None, - translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = None, - output_format: typing.Optional[SpeechRecognitionRequestOutputFormat] = None, - google_translate_target: typing.Optional[str] = None, - 
translation_source: typing.Optional[str] = None, - translation_target: typing.Optional[str] = None, - glossary_document: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> AsrPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT, + language: typing.Optional[str] = OMIT, + translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT, + output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT, + google_translate_target: typing.Optional[str] = OMIT, + translation_source: typing.Optional[str] = OMIT, + translation_target: typing.Optional[str] = OMIT, + glossary_document: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[AsrPageOutput]: """ Parameters ---------- - documents : typing.List[str] + documents : typing.Sequence[str] example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[SpeechRecognitionRequestSelectedModel] + selected_model : typing.Optional[AsrPageRequestSelectedModel] language : typing.Optional[str] - translation_model : typing.Optional[SpeechRecognitionRequestTranslationModel] + translation_model : typing.Optional[AsrPageRequestTranslationModel] - output_format : typing.Optional[SpeechRecognitionRequestOutputFormat] + output_format : typing.Optional[AsrPageRequestOutputFormat] google_translate_target : 
typing.Optional[str] use `translation_model` & `translation_target` instead. @@ -2287,7 +2611,7 @@ def speech_recognition( Returns ------- - AsrPageStatusResponse + typing.Optional[AsrPageOutput] Successful Response Examples @@ -2302,10 +2626,12 @@ def speech_recognition( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/asr/async/form", + "v3/asr/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "documents": documents, @@ -2319,32 +2645,48 @@ def speech_recognition( "glossary_document": glossary_document, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(AsrPageStatusResponse, parse_obj_as(type_=AsrPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + AsrPageStatusResponse, + parse_obj_as( + type_=AsrPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - 
typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -2356,19 +2698,19 @@ def text_to_music( *, text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - negative_prompt: typing.Optional[str] = None, - duration_sec: typing.Optional[float] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - seed: typing.Optional[int] = None, - sd2upscaling: typing.Optional[bool] = None, - selected_models: typing.Optional[typing.List[typing.Literal["audio_ldm"]]] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> Text2AudioPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + duration_sec: typing.Optional[float] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + seed: typing.Optional[int] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[Text2AudioPageOutput]: """ 
Parameters ---------- @@ -2376,9 +2718,9 @@ def text_to_music( example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments negative_prompt : typing.Optional[str] @@ -2395,7 +2737,7 @@ def text_to_music( sd2upscaling : typing.Optional[bool] - selected_models : typing.Optional[typing.List[typing.Literal["audio_ldm"]]] + selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] settings : typing.Optional[RunSettings] @@ -2404,7 +2746,7 @@ def text_to_music( Returns ------- - Text2AudioPageStatusResponse + typing.Optional[Text2AudioPageOutput] Successful Response Examples @@ -2419,10 +2761,12 @@ def text_to_music( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/text2audio/async/form", + "v3/text2audio/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "text_prompt": text_prompt, @@ -2436,32 +2780,48 @@ def text_to_music( "selected_models": selected_models, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(Text2AudioPageStatusResponse, parse_obj_as(type_=Text2AudioPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + Text2AudioPageStatusResponse, + parse_obj_as( + type_=Text2AudioPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if 
_response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -2472,29 +2832,29 @@ def translate( self, *, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - texts: typing.Optional[typing.List[str]] = None, - selected_model: typing.Optional[TranslateRequestSelectedModel] = None, - translation_source: typing.Optional[str] = None, - translation_target: typing.Optional[str] = None, - glossary_document: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> TranslationPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + texts: 
typing.Optional[typing.Sequence[str]] = OMIT, + selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT, + translation_source: typing.Optional[str] = OMIT, + translation_target: typing.Optional[str] = OMIT, + glossary_document: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[TranslationPageOutput]: """ Parameters ---------- example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - texts : typing.Optional[typing.List[str]] + texts : typing.Optional[typing.Sequence[str]] - selected_model : typing.Optional[TranslateRequestSelectedModel] + selected_model : typing.Optional[TranslationPageRequestSelectedModel] translation_source : typing.Optional[str] @@ -2511,7 +2871,7 @@ def translate( Returns ------- - TranslationPageStatusResponse + typing.Optional[TranslationPageOutput] Successful Response Examples @@ -2524,10 +2884,12 @@ def translate( client.translate() """ _response = self._client_wrapper.httpx_client.request( - "v3/translate/async/form", + "v3/translate/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "texts": texts, @@ -2537,32 +2899,48 @@ def translate( "glossary_document": glossary_document, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(TranslationPageStatusResponse, parse_obj_as(type_=TranslationPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise 
BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + TranslationPageStatusResponse, + parse_obj_as( + type_=TranslationPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -2574,24 +2952,24 @@ def remix_image( *, input_image: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - text_prompt: typing.Optional[str] = None, - selected_model: typing.Optional[RemixImageRequestSelectedModel] = None, - selected_controlnet_model: typing.Optional[RemixImageRequestSelectedControlnetModel] = None, - 
negative_prompt: typing.Optional[str] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - output_width: typing.Optional[int] = None, - output_height: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - prompt_strength: typing.Optional[float] = None, - controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None, - seed: typing.Optional[int] = None, - image_guidance_scale: typing.Optional[float] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> Img2ImgPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + text_prompt: typing.Optional[str] = OMIT, + selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT, + selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + prompt_strength: typing.Optional[float] = OMIT, + controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, + seed: typing.Optional[int] = OMIT, + image_guidance_scale: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[Img2ImgPageOutput]: """ Parameters ---------- @@ -2599,16 +2977,16 @@ def remix_image( example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments text_prompt : typing.Optional[str] - selected_model : typing.Optional[RemixImageRequestSelectedModel] + selected_model : typing.Optional[Img2ImgPageRequestSelectedModel] - selected_controlnet_model : typing.Optional[RemixImageRequestSelectedControlnetModel] + selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel] negative_prompt : typing.Optional[str] @@ -2624,7 +3002,7 @@ def remix_image( prompt_strength : typing.Optional[float] - controlnet_conditioning_scale : typing.Optional[typing.List[float]] + controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]] seed : typing.Optional[int] @@ -2637,7 +3015,7 @@ def remix_image( Returns ------- - Img2ImgPageStatusResponse + typing.Optional[Img2ImgPageOutput] Successful Response Examples @@ -2652,10 +3030,12 @@ def remix_image( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/Img2Img/async/form", + "v3/Img2Img/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "input_image": input_image, @@ -2674,32 +3054,48 @@ def remix_image( "image_guidance_scale": image_guidance_scale, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(Img2ImgPageStatusResponse, parse_obj_as(type_=Img2ImgPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + Img2ImgPageStatusResponse, + parse_obj_as( + type_=Img2ImgPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: 
raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -2711,25 +3107,25 @@ def text_to_image( *, text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - negative_prompt: typing.Optional[str] = None, - output_width: typing.Optional[int] = None, - output_height: typing.Optional[int] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - dall_e3quality: typing.Optional[str] = None, - dall_e3style: typing.Optional[str] = None, - guidance_scale: typing.Optional[float] = None, - seed: typing.Optional[int] = None, - sd2upscaling: typing.Optional[bool] = None, - selected_models: typing.Optional[typing.List[TextToImageRequestSelectedModelsItem]] = None, - scheduler: typing.Optional[TextToImageRequestScheduler] 
= None, - edit_instruction: typing.Optional[str] = None, - image_guidance_scale: typing.Optional[float] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> CompareText2ImgPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + dall_e3quality: typing.Optional[str] = OMIT, + dall_e3style: typing.Optional[str] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + seed: typing.Optional[int] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT, + scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT, + edit_instruction: typing.Optional[str] = OMIT, + image_guidance_scale: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[CompareText2ImgPageOutput]: """ Parameters ---------- @@ -2737,9 +3133,9 @@ def text_to_image( example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments negative_prompt : typing.Optional[str] @@ -2762,9 +3158,9 @@ def text_to_image( sd2upscaling : typing.Optional[bool] - selected_models : typing.Optional[typing.List[TextToImageRequestSelectedModelsItem]] + selected_models : 
typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] - scheduler : typing.Optional[TextToImageRequestScheduler] + scheduler : typing.Optional[CompareText2ImgPageRequestScheduler] edit_instruction : typing.Optional[str] @@ -2777,7 +3173,7 @@ def text_to_image( Returns ------- - CompareText2ImgPageStatusResponse + typing.Optional[CompareText2ImgPageOutput] Successful Response Examples @@ -2792,10 +3188,12 @@ def text_to_image( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/CompareText2Img/async/form", + "v3/CompareText2Img/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "text_prompt": text_prompt, @@ -2815,32 +3213,48 @@ def text_to_image( "image_guidance_scale": image_guidance_scale, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(CompareText2ImgPageStatusResponse, parse_obj_as(type_=CompareText2ImgPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + CompareText2ImgPageStatusResponse, + parse_obj_as( + type_=CompareText2ImgPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, 
object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -2853,24 +3267,24 @@ def product_image( input_image: str, text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - obj_scale: typing.Optional[float] = None, - obj_pos_x: typing.Optional[float] = None, - obj_pos_y: typing.Optional[float] = None, - mask_threshold: typing.Optional[float] = None, - selected_model: typing.Optional[ProductImageRequestSelectedModel] = None, - negative_prompt: typing.Optional[str] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - output_width: typing.Optional[int] = None, - output_height: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - sd2upscaling: typing.Optional[bool] = None, - seed: typing.Optional[int] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> ObjectInpaintingPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + obj_scale: typing.Optional[float] = OMIT, + obj_pos_x: typing.Optional[float] = OMIT, 
+ obj_pos_y: typing.Optional[float] = OMIT, + mask_threshold: typing.Optional[float] = OMIT, + selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + seed: typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[ObjectInpaintingPageOutput]: """ Parameters ---------- @@ -2880,9 +3294,9 @@ def product_image( example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments obj_scale : typing.Optional[float] @@ -2893,7 +3307,7 @@ def product_image( mask_threshold : typing.Optional[float] - selected_model : typing.Optional[ProductImageRequestSelectedModel] + selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel] negative_prompt : typing.Optional[str] @@ -2918,7 +3332,7 @@ def product_image( Returns ------- - ObjectInpaintingPageStatusResponse + typing.Optional[ObjectInpaintingPageOutput] Successful Response Examples @@ -2934,10 +3348,12 @@ def product_image( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/ObjectInpainting/async/form", + "v3/ObjectInpainting/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "input_image": input_image, @@ -2957,32 +3373,48 @@ def 
product_image( "seed": seed, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(ObjectInpaintingPageStatusResponse, parse_obj_as(type_=ObjectInpaintingPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + ObjectInpaintingPageStatusResponse, + parse_obj_as( + type_=ObjectInpaintingPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -2995,23 +3427,23 @@ def portrait( input_image: str, text_prompt: str, example_id: 
typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - face_scale: typing.Optional[float] = None, - face_pos_x: typing.Optional[float] = None, - face_pos_y: typing.Optional[float] = None, - selected_model: typing.Optional[PortraitRequestSelectedModel] = None, - negative_prompt: typing.Optional[str] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - upscale_factor: typing.Optional[float] = None, - output_width: typing.Optional[int] = None, - output_height: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - seed: typing.Optional[int] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> FaceInpaintingPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + face_scale: typing.Optional[float] = OMIT, + face_pos_x: typing.Optional[float] = OMIT, + face_pos_y: typing.Optional[float] = OMIT, + selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + upscale_factor: typing.Optional[float] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + seed: typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[FaceInpaintingPageOutput]: """ Parameters ---------- @@ -3021,9 +3453,9 @@ def portrait( example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : 
typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments face_scale : typing.Optional[float] @@ -3032,7 +3464,7 @@ def portrait( face_pos_y : typing.Optional[float] - selected_model : typing.Optional[PortraitRequestSelectedModel] + selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel] negative_prompt : typing.Optional[str] @@ -3057,7 +3489,7 @@ def portrait( Returns ------- - FaceInpaintingPageStatusResponse + typing.Optional[FaceInpaintingPageOutput] Successful Response Examples @@ -3069,14 +3501,16 @@ def portrait( ) client.portrait( input_image="input_image", - text_prompt="text_prompt", + text_prompt="tony stark from the iron man", ) """ _response = self._client_wrapper.httpx_client.request( - "v3/FaceInpainting/async/form", + "v3/FaceInpainting/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "input_image": input_image, @@ -3095,32 +3529,48 @@ def portrait( "seed": seed, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(FaceInpaintingPageStatusResponse, parse_obj_as(type_=FaceInpaintingPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + FaceInpaintingPageStatusResponse, + parse_obj_as( + type_=FaceInpaintingPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: 
ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -3132,33 +3582,33 @@ def image_from_email( *, text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - email_address: typing.Optional[str] = None, - twitter_handle: typing.Optional[str] = None, - face_scale: typing.Optional[float] = None, - face_pos_x: typing.Optional[float] = None, - face_pos_y: typing.Optional[float] = None, - selected_model: typing.Optional[ImageFromEmailRequestSelectedModel] = None, - negative_prompt: typing.Optional[str] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - upscale_factor: typing.Optional[float] = None, - output_width: typing.Optional[int] = None, - output_height: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - should_send_email: typing.Optional[bool] = None, - email_from: typing.Optional[str] = 
None, - email_cc: typing.Optional[str] = None, - email_bcc: typing.Optional[str] = None, - email_subject: typing.Optional[str] = None, - email_body: typing.Optional[str] = None, - email_body_enable_html: typing.Optional[bool] = None, - fallback_email_body: typing.Optional[str] = None, - seed: typing.Optional[int] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> EmailFaceInpaintingPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + email_address: typing.Optional[str] = OMIT, + twitter_handle: typing.Optional[str] = OMIT, + face_scale: typing.Optional[float] = OMIT, + face_pos_x: typing.Optional[float] = OMIT, + face_pos_y: typing.Optional[float] = OMIT, + selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + upscale_factor: typing.Optional[float] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + should_send_email: typing.Optional[bool] = OMIT, + email_from: typing.Optional[str] = OMIT, + email_cc: typing.Optional[str] = OMIT, + email_bcc: typing.Optional[str] = OMIT, + email_subject: typing.Optional[str] = OMIT, + email_body: typing.Optional[str] = OMIT, + email_body_enable_html: typing.Optional[bool] = OMIT, + fallback_email_body: typing.Optional[str] = OMIT, + seed: typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[EmailFaceInpaintingPageOutput]: """ Parameters ---------- @@ -3166,9 +3616,9 @@ def image_from_email( example_id : typing.Optional[str] - functions : 
typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments email_address : typing.Optional[str] @@ -3181,7 +3631,7 @@ def image_from_email( face_pos_y : typing.Optional[float] - selected_model : typing.Optional[ImageFromEmailRequestSelectedModel] + selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] negative_prompt : typing.Optional[str] @@ -3222,7 +3672,7 @@ def image_from_email( Returns ------- - EmailFaceInpaintingPageStatusResponse + typing.Optional[EmailFaceInpaintingPageOutput] Successful Response Examples @@ -3233,14 +3683,17 @@ def image_from_email( api_key="YOUR_API_KEY", ) client.image_from_email( - text_prompt="text_prompt", + email_address="sean@dara.network", + text_prompt="winter's day in paris", ) """ _response = self._client_wrapper.httpx_client.request( - "v3/EmailFaceInpainting/async/form", + "v3/EmailFaceInpainting/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "email_address": email_address, @@ -3268,32 +3721,48 @@ def image_from_email( "seed": seed, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(EmailFaceInpaintingPageStatusResponse, parse_obj_as(type_=EmailFaceInpaintingPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + EmailFaceInpaintingPageStatusResponse, + parse_obj_as( + 
type_=EmailFaceInpaintingPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -3306,22 +3775,22 @@ def image_from_web_search( search_query: str, text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - serp_search_location: typing.Optional[SerpSearchLocation] = None, - scaleserp_locations: typing.Optional[typing.List[str]] = None, - selected_model: typing.Optional[ImageFromWebSearchRequestSelectedModel] = None, - negative_prompt: typing.Optional[str] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - 
prompt_strength: typing.Optional[float] = None, - sd2upscaling: typing.Optional[bool] = None, - seed: typing.Optional[int] = None, - image_guidance_scale: typing.Optional[float] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> GoogleImageGenPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + prompt_strength: typing.Optional[float] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + seed: typing.Optional[int] = OMIT, + image_guidance_scale: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[GoogleImageGenPageOutput]: """ Parameters ---------- @@ -3331,17 +3800,17 @@ def image_from_web_search( example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments serp_search_location : typing.Optional[SerpSearchLocation] - scaleserp_locations : typing.Optional[typing.List[str]] + scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead - selected_model : typing.Optional[ImageFromWebSearchRequestSelectedModel] + selected_model 
: typing.Optional[GoogleImageGenPageRequestSelectedModel] negative_prompt : typing.Optional[str] @@ -3366,7 +3835,7 @@ def image_from_web_search( Returns ------- - GoogleImageGenPageStatusResponse + typing.Optional[GoogleImageGenPageOutput] Successful Response Examples @@ -3382,10 +3851,12 @@ def image_from_web_search( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/GoogleImageGen/async/form", + "v3/GoogleImageGen/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "serp_search_location": serp_search_location, @@ -3403,32 +3874,48 @@ def image_from_web_search( "image_guidance_scale": image_guidance_scale, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(GoogleImageGenPageStatusResponse, parse_obj_as(type_=GoogleImageGenPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + GoogleImageGenPageStatusResponse, + parse_obj_as( + type_=GoogleImageGenPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + 
type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -3440,18 +3927,18 @@ def remove_background( *, input_image: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - selected_model: typing.Optional[RemoveBackgroundRequestSelectedModel] = None, - mask_threshold: typing.Optional[float] = None, - rect_persepective_transform: typing.Optional[bool] = None, - reflection_opacity: typing.Optional[float] = None, - obj_scale: typing.Optional[float] = None, - obj_pos_x: typing.Optional[float] = None, - obj_pos_y: typing.Optional[float] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> ImageSegmentationPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT, + mask_threshold: typing.Optional[float] = OMIT, + rect_persepective_transform: typing.Optional[bool] = OMIT, + reflection_opacity: typing.Optional[float] = OMIT, + obj_scale: typing.Optional[float] = OMIT, + obj_pos_x: typing.Optional[float] = OMIT, + obj_pos_y: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + 
request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[ImageSegmentationPageOutput]: """ Parameters ---------- @@ -3459,12 +3946,12 @@ def remove_background( example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[RemoveBackgroundRequestSelectedModel] + selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel] mask_threshold : typing.Optional[float] @@ -3485,7 +3972,7 @@ def remove_background( Returns ------- - ImageSegmentationPageStatusResponse + typing.Optional[ImageSegmentationPageOutput] Successful Response Examples @@ -3500,10 +3987,12 @@ def remove_background( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/ImageSegmentation/async/form", + "v3/ImageSegmentation/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "input_image": input_image, @@ -3516,32 +4005,48 @@ def remove_background( "obj_pos_y": obj_pos_y, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(ImageSegmentationPageStatusResponse, parse_obj_as(type_=ImageSegmentationPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + ImageSegmentationPageStatusResponse, + parse_obj_as( + type_=ImageSegmentationPageStatusResponse, # type: ignore + object_=_response.json(), 
+ ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -3553,15 +4058,15 @@ def upscale( *, scale: int, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - input_image: typing.Optional[str] = None, - input_video: typing.Optional[str] = None, - selected_models: typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] = None, - selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> CompareUpscalerPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] = OMIT, + input_image: typing.Optional[str] = OMIT, + input_video: typing.Optional[str] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT, + selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[CompareUpscalerPageOutput]: """ Parameters ---------- @@ -3570,9 +4075,9 @@ def upscale( example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments input_image : typing.Optional[str] @@ -3581,7 +4086,7 @@ def upscale( input_video : typing.Optional[str] Input Video - selected_models : typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] + selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] @@ -3592,7 +4097,7 @@ def upscale( Returns ------- - CompareUpscalerPageStatusResponse + typing.Optional[CompareUpscalerPageOutput] Successful Response Examples @@ -3607,10 +4112,12 @@ def upscale( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/compare-ai-upscalers/async/form", + "v3/compare-ai-upscalers/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "input_image": input_image, @@ -3620,32 +4127,48 @@ def upscale( "selected_bg_model": selected_bg_model, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - 
return typing.cast(CompareUpscalerPageStatusResponse, parse_obj_as(type_=CompareUpscalerPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + CompareUpscalerPageStatusResponse, + parse_obj_as( + type_=CompareUpscalerPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -3655,27 +4178,27 @@ def upscale( def embed( self, *, - texts: typing.List[str], + texts: typing.Sequence[str], example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: 
typing.Optional[typing.Dict[str, typing.Any]] = None, - selected_model: typing.Optional[EmbedRequestSelectedModel] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> EmbeddingsPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[EmbeddingsPageOutput]: """ Parameters ---------- - texts : typing.List[str] + texts : typing.Sequence[str] example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[EmbedRequestSelectedModel] + selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel] settings : typing.Optional[RunSettings] @@ -3684,7 +4207,7 @@ def embed( Returns ------- - EmbeddingsPageStatusResponse + typing.Optional[EmbeddingsPageOutput] Successful Response Examples @@ -3699,42 +4222,60 @@ def embed( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/embeddings/async/form", + "v3/embeddings/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "texts": texts, "selected_model": selected_model, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(EmbeddingsPageStatusResponse, 
parse_obj_as(type_=EmbeddingsPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + EmbeddingsPageStatusResponse, + parse_obj_as( + type_=EmbeddingsPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -3746,33 +4287,33 @@ def seo_people_also_ask_doc( *, search_query: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - keyword_query: typing.Optional[SeoPeopleAlsoAskDocRequestKeywordQuery] = 
None, - documents: typing.Optional[typing.List[str]] = None, - max_references: typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - doc_extract_url: typing.Optional[str] = None, - embedding_model: typing.Optional[SeoPeopleAlsoAskDocRequestEmbeddingModel] = None, - dense_weight: typing.Optional[float] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - selected_model: typing.Optional[SeoPeopleAlsoAskDocRequestSelectedModel] = None, - citation_style: typing.Optional[SeoPeopleAlsoAskDocRequestCitationStyle] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[SeoPeopleAlsoAskDocRequestResponseFormatType] = None, - serp_search_location: typing.Optional[SerpSearchLocation] = None, - scaleserp_locations: typing.Optional[typing.List[str]] = None, - serp_search_type: typing.Optional[SerpSearchType] = None, - scaleserp_search_field: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> RelatedQnADocPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT, + documents: typing.Optional[typing.Sequence[str]] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + doc_extract_url: typing.Optional[str] = OMIT, + embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + 
task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT, + citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[RelatedQnADocPageOutput]: """ Parameters ---------- @@ -3780,14 +4321,14 @@ def seo_people_also_ask_doc( example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - keyword_query : typing.Optional[SeoPeopleAlsoAskDocRequestKeywordQuery] + keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery] - documents : typing.Optional[typing.List[str]] + documents : typing.Optional[typing.Sequence[str]] max_references : typing.Optional[int] @@ -3797,7 +4338,7 @@ def seo_people_also_ask_doc( doc_extract_url : typing.Optional[str] - embedding_model : typing.Optional[SeoPeopleAlsoAskDocRequestEmbeddingModel] + embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel] dense_weight : 
typing.Optional[float] @@ -3809,9 +4350,9 @@ def seo_people_also_ask_doc( query_instructions : typing.Optional[str] - selected_model : typing.Optional[SeoPeopleAlsoAskDocRequestSelectedModel] + selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel] - citation_style : typing.Optional[SeoPeopleAlsoAskDocRequestCitationStyle] + citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle] avoid_repetition : typing.Optional[bool] @@ -3823,11 +4364,11 @@ def seo_people_also_ask_doc( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[SeoPeopleAlsoAskDocRequestResponseFormatType] + response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType] serp_search_location : typing.Optional[SerpSearchLocation] - scaleserp_locations : typing.Optional[typing.List[str]] + scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead serp_search_type : typing.Optional[SerpSearchType] @@ -3842,7 +4383,7 @@ def seo_people_also_ask_doc( Returns ------- - RelatedQnADocPageStatusResponse + typing.Optional[RelatedQnADocPageOutput] Successful Response Examples @@ -3857,10 +4398,12 @@ def seo_people_also_ask_doc( ) """ _response = self._client_wrapper.httpx_client.request( - "v3/related-qna-maker-doc/async/form", + "v3/related-qna-maker-doc/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "search_query": search_query, @@ -3888,39 +4431,57 @@ def seo_people_also_ask_doc( "scaleserp_search_field": scaleserp_search_field, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(RelatedQnADocPageStatusResponse, parse_obj_as(type_=RelatedQnADocPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( 
- typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + RelatedQnADocPageStatusResponse, + parse_obj_as( + type_=RelatedQnADocPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def health_status_get(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + def health_status_get( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: """ Parameters ---------- @@ -3929,7 +4490,7 @@ def health_status_get(self, *, 
request_options: typing.Optional[RequestOptions] Returns ------- - typing.Any + typing.Optional[typing.Any] Successful Response Examples @@ -3941,25 +4502,58 @@ def health_status_get(self, *, request_options: typing.Optional[RequestOptions] ) client.health_status_get() """ - _response = self._client_wrapper.httpx_client.request("status", method="GET", request_options=request_options) + _response = self._client_wrapper.httpx_client.request( + "status", + method="GET", + request_options=request_options, + ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3chyron_plant_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + def post_v3chyron_plant_async_form( + self, + *, + midi_notes: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + midi_notes_prompt: typing.Optional[str] = None, + chyron_prompt: typing.Optional[str] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> ChyronPlantPageStatusResponse: """ Parameters ---------- + midi_notes : str + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + midi_notes_prompt : typing.Optional[str] + + chyron_prompt : typing.Optional[str] + + settings : typing.Optional[RunSettings] + 
request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Any + ChyronPlantPageStatusResponse Successful Response Examples @@ -3969,29 +4563,87 @@ def post_v3chyron_plant_async(self, *, request_options: typing.Optional[RequestO client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3chyron_plant_async() + client.post_v3chyron_plant_async_form( + midi_notes="midi_notes", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/ChyronPlant/async", method="POST", request_options=request_options + "v3/ChyronPlant/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "midi_notes": midi_notes, + "midi_notes_prompt": midi_notes_prompt, + "chyron_prompt": chyron_prompt, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + ChyronPlantPageStatusResponse, + parse_obj_as( + type_=ChyronPlantPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3compare_llm_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + def post_v3compare_llm_async_form( + self, + *, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + input_prompt: typing.Optional[str] = None, + selected_models: typing.Optional[typing.List[PostV3CompareLlmAsyncFormRequestSelectedModelsItem]] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: 
typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[PostV3CompareLlmAsyncFormRequestResponseFormatType] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> CompareLlmPageStatusResponse: """ Parameters ---------- + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_prompt : typing.Optional[str] + + selected_models : typing.Optional[typing.List[PostV3CompareLlmAsyncFormRequestSelectedModelsItem]] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[PostV3CompareLlmAsyncFormRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + CompareLlmPageStatusResponse Successful Response Examples @@ -4001,29 +4653,111 @@ def post_v3compare_llm_async(self, *, request_options: typing.Optional[RequestOp client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3compare_llm_async() + client.post_v3compare_llm_async_form() """ _response = self._client_wrapper.httpx_client.request( - "v3/CompareLLM/async", method="POST", request_options=request_options + "v3/CompareLLM/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "input_prompt": input_prompt, + "selected_models": selected_models, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + CompareLlmPageStatusResponse, + parse_obj_as( + type_=CompareLlmPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3compare_text2img_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + def post_v3compare_text2img_async_form( + self, + *, + text_prompt: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + negative_prompt: typing.Optional[str] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = 
None, + dall_e3quality: typing.Optional[str] = None, + dall_e3style: typing.Optional[str] = None, + guidance_scale: typing.Optional[float] = None, + seed: typing.Optional[int] = None, + sd2upscaling: typing.Optional[bool] = None, + selected_models: typing.Optional[typing.List[PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem]] = None, + scheduler: typing.Optional[PostV3CompareText2ImgAsyncFormRequestScheduler] = None, + edit_instruction: typing.Optional[str] = None, + image_guidance_scale: typing.Optional[float] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> CompareText2ImgPageStatusResponse: """ Parameters ---------- + text_prompt : str + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + negative_prompt : typing.Optional[str] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + dall_e3quality : typing.Optional[str] + + dall_e3style : typing.Optional[str] + + guidance_scale : typing.Optional[float] + + seed : typing.Optional[int] + + sd2upscaling : typing.Optional[bool] + + selected_models : typing.Optional[typing.List[PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem]] + + scheduler : typing.Optional[PostV3CompareText2ImgAsyncFormRequestScheduler] + + edit_instruction : typing.Optional[str] + + image_guidance_scale : typing.Optional[float] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + CompareText2ImgPageStatusResponse Successful Response Examples @@ -4033,63 +4767,260 @@ def post_v3compare_text2img_async(self, *, request_options: typing.Optional[Requ client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3compare_text2img_async() + client.post_v3compare_text2img_async_form( + text_prompt="text_prompt", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/CompareText2Img/async", method="POST", request_options=request_options + "v3/CompareText2Img/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "output_width": output_width, + "output_height": output_height, + "num_outputs": num_outputs, + "quality": quality, + "dall_e_3_quality": dall_e3quality, + "dall_e_3_style": dall_e3style, + "guidance_scale": guidance_scale, + "seed": seed, + "sd_2_upscaling": sd2upscaling, + "selected_models": selected_models, + "scheduler": scheduler, + "edit_instruction": edit_instruction, + "image_guidance_scale": image_guidance_scale, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + CompareText2ImgPageStatusResponse, + parse_obj_as( + type_=CompareText2ImgPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3deforum_sd_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + def post_v3deforum_sd_async_form( + self, + *, + animation_prompts: typing.List[AnimationPrompt], + functions: typing.Optional[typing.List[RecipeFunction]] = None, + 
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + max_frames: typing.Optional[int] = None, + selected_model: typing.Optional[PostV3DeforumSdAsyncFormRequestSelectedModel] = None, + animation_mode: typing.Optional[str] = None, + zoom: typing.Optional[str] = None, + translation_x: typing.Optional[str] = None, + translation_y: typing.Optional[str] = None, + rotation3d_x: typing.Optional[str] = None, + rotation3d_y: typing.Optional[str] = None, + rotation3d_z: typing.Optional[str] = None, + fps: typing.Optional[int] = None, + seed: typing.Optional[int] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> DeforumSdPageStatusResponse: """ Parameters ---------- + animation_prompts : typing.List[AnimationPrompt] + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + max_frames : typing.Optional[int] + + selected_model : typing.Optional[PostV3DeforumSdAsyncFormRequestSelectedModel] + + animation_mode : typing.Optional[str] + + zoom : typing.Optional[str] + + translation_x : typing.Optional[str] + + translation_y : typing.Optional[str] + + rotation3d_x : typing.Optional[str] + + rotation3d_y : typing.Optional[str] + + rotation3d_z : typing.Optional[str] + + fps : typing.Optional[int] + + seed : typing.Optional[int] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + DeforumSdPageStatusResponse Successful Response Examples -------- - from gooey import Gooey + from gooey import AnimationPrompt, Gooey client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3deforum_sd_async() + client.post_v3deforum_sd_async_form( + animation_prompts=[ + AnimationPrompt( + frame="frame", + prompt="prompt", + ) + ], + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/DeforumSD/async", method="POST", request_options=request_options + "v3/DeforumSD/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "animation_prompts": animation_prompts, + "max_frames": max_frames, + "selected_model": selected_model, + "animation_mode": animation_mode, + "zoom": zoom, + "translation_x": translation_x, + "translation_y": translation_y, + "rotation_3d_x": rotation3d_x, + "rotation_3d_y": rotation3d_y, + "rotation_3d_z": rotation3d_z, + "fps": fps, + "seed": seed, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + DeforumSdPageStatusResponse, + parse_obj_as( + type_=DeforumSdPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3email_face_inpainting_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Any: - """ - Parameters + def post_v3email_face_inpainting_async_form( + self, + *, + text_prompt: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + email_address: typing.Optional[str] = None, + 
twitter_handle: typing.Optional[str] = None, + face_scale: typing.Optional[float] = None, + face_pos_x: typing.Optional[float] = None, + face_pos_y: typing.Optional[float] = None, + selected_model: typing.Optional[PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel] = None, + negative_prompt: typing.Optional[str] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + upscale_factor: typing.Optional[float] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + should_send_email: typing.Optional[bool] = None, + email_from: typing.Optional[str] = None, + email_cc: typing.Optional[str] = None, + email_bcc: typing.Optional[str] = None, + email_subject: typing.Optional[str] = None, + email_body: typing.Optional[str] = None, + email_body_enable_html: typing.Optional[bool] = None, + fallback_email_body: typing.Optional[str] = None, + seed: typing.Optional[int] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> EmailFaceInpaintingPageStatusResponse: + """ + Parameters ---------- + text_prompt : str + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + email_address : typing.Optional[str] + + twitter_handle : typing.Optional[str] + + face_scale : typing.Optional[float] + + face_pos_x : typing.Optional[float] + + face_pos_y : typing.Optional[float] + + selected_model : typing.Optional[PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + upscale_factor : typing.Optional[float] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : 
typing.Optional[float] + + should_send_email : typing.Optional[bool] + + email_from : typing.Optional[str] + + email_cc : typing.Optional[str] + + email_bcc : typing.Optional[str] + + email_subject : typing.Optional[str] + + email_body : typing.Optional[str] + + email_body_enable_html : typing.Optional[bool] + + fallback_email_body : typing.Optional[str] + + seed : typing.Optional[int] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Any + EmailFaceInpaintingPageStatusResponse Successful Response Examples @@ -4099,29 +5030,125 @@ def post_v3email_face_inpainting_async( client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3email_face_inpainting_async() + client.post_v3email_face_inpainting_async_form( + text_prompt="text_prompt", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/EmailFaceInpainting/async", method="POST", request_options=request_options + "v3/EmailFaceInpainting/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "email_address": email_address, + "twitter_handle": twitter_handle, + "text_prompt": text_prompt, + "face_scale": face_scale, + "face_pos_x": face_pos_x, + "face_pos_y": face_pos_y, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "upscale_factor": upscale_factor, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "should_send_email": should_send_email, + "email_from": email_from, + "email_cc": email_cc, + "email_bcc": email_bcc, + "email_subject": email_subject, + "email_body": email_body, + "email_body_enable_html": email_body_enable_html, + "fallback_email_body": fallback_email_body, + "seed": seed, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return 
typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + EmailFaceInpaintingPageStatusResponse, + parse_obj_as( + type_=EmailFaceInpaintingPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3face_inpainting_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + def post_v3face_inpainting_async_form( + self, + *, + input_image: str, + text_prompt: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + face_scale: typing.Optional[float] = None, + face_pos_x: typing.Optional[float] = None, + face_pos_y: typing.Optional[float] = None, + selected_model: typing.Optional[PostV3FaceInpaintingAsyncFormRequestSelectedModel] = None, + negative_prompt: typing.Optional[str] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + upscale_factor: typing.Optional[float] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + seed: typing.Optional[int] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> FaceInpaintingPageStatusResponse: """ Parameters ---------- + input_image : str + + text_prompt : str + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + face_scale : typing.Optional[float] + + face_pos_x : typing.Optional[float] + + face_pos_y : typing.Optional[float] + + selected_model : 
typing.Optional[PostV3FaceInpaintingAsyncFormRequestSelectedModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + upscale_factor : typing.Optional[float] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + seed : typing.Optional[int] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Any + FaceInpaintingPageStatusResponse Successful Response Examples @@ -4131,29 +5158,115 @@ def post_v3face_inpainting_async(self, *, request_options: typing.Optional[Reque client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3face_inpainting_async() + client.post_v3face_inpainting_async_form( + input_image="input_image", + text_prompt="text_prompt", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/FaceInpainting/async", method="POST", request_options=request_options + "v3/FaceInpainting/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "input_image": input_image, + "text_prompt": text_prompt, + "face_scale": face_scale, + "face_pos_x": face_pos_x, + "face_pos_y": face_pos_y, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "upscale_factor": upscale_factor, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "seed": seed, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + FaceInpaintingPageStatusResponse, + parse_obj_as( + type_=FaceInpaintingPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = 
_response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3google_image_gen_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + def post_v3google_image_gen_async_form( + self, + *, + search_query: str, + text_prompt: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + serp_search_location: typing.Optional[SerpSearchLocation] = None, + scaleserp_locations: typing.Optional[typing.List[str]] = None, + selected_model: typing.Optional[PostV3GoogleImageGenAsyncFormRequestSelectedModel] = None, + negative_prompt: typing.Optional[str] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + prompt_strength: typing.Optional[float] = None, + sd2upscaling: typing.Optional[bool] = None, + seed: typing.Optional[int] = None, + image_guidance_scale: typing.Optional[float] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> GoogleImageGenPageStatusResponse: """ Parameters ---------- + search_query : str + + text_prompt : str + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.List[str]] + DEPRECATED: use `serp_search_location` instead + + selected_model : typing.Optional[PostV3GoogleImageGenAsyncFormRequestSelectedModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + guidance_scale : typing.Optional[float] 
+ + prompt_strength : typing.Optional[float] + + sd2upscaling : typing.Optional[bool] + + seed : typing.Optional[int] + + image_guidance_scale : typing.Optional[float] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Any + GoogleImageGenPageStatusResponse Successful Response Examples @@ -4163,29 +5276,98 @@ def post_v3google_image_gen_async(self, *, request_options: typing.Optional[Requ client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3google_image_gen_async() + client.post_v3google_image_gen_async_form( + search_query="search_query", + text_prompt="text_prompt", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/GoogleImageGen/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) + "v3/GoogleImageGen/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "search_query": search_query, + "text_prompt": text_prompt, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "guidance_scale": guidance_scale, + "prompt_strength": prompt_strength, + "sd_2_upscaling": sd2upscaling, + "seed": seed, + "image_guidance_scale": image_guidance_scale, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GoogleImageGenPageStatusResponse, + parse_obj_as( + type_=GoogleImageGenPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + 
_response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3image_segmentation_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + def post_v3image_segmentation_async_form( + self, + *, + input_image: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + selected_model: typing.Optional[PostV3ImageSegmentationAsyncFormRequestSelectedModel] = None, + mask_threshold: typing.Optional[float] = None, + rect_persepective_transform: typing.Optional[bool] = None, + reflection_opacity: typing.Optional[float] = None, + obj_scale: typing.Optional[float] = None, + obj_pos_x: typing.Optional[float] = None, + obj_pos_y: typing.Optional[float] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> ImageSegmentationPageStatusResponse: """ Parameters ---------- + input_image : str + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + selected_model : typing.Optional[PostV3ImageSegmentationAsyncFormRequestSelectedModel] + + mask_threshold : typing.Optional[float] + + rect_persepective_transform : typing.Optional[bool] + + reflection_opacity : typing.Optional[float] + + obj_scale : typing.Optional[float] + + obj_pos_x : typing.Optional[float] + + obj_pos_y : typing.Optional[float] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + ImageSegmentationPageStatusResponse Successful Response Examples @@ -4195,29 +5377,110 @@ def post_v3image_segmentation_async(self, *, request_options: typing.Optional[Re client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3image_segmentation_async() + client.post_v3image_segmentation_async_form( + input_image="input_image", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/ImageSegmentation/async", method="POST", request_options=request_options + "v3/ImageSegmentation/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "input_image": input_image, + "selected_model": selected_model, + "mask_threshold": mask_threshold, + "rect_persepective_transform": rect_persepective_transform, + "reflection_opacity": reflection_opacity, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + ImageSegmentationPageStatusResponse, + parse_obj_as( + type_=ImageSegmentationPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3img2img_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + def post_v3img2img_async_form( + self, + *, + input_image: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + text_prompt: typing.Optional[str] = None, + selected_model: typing.Optional[PostV3Img2ImgAsyncFormRequestSelectedModel] = None, + 
selected_controlnet_model: typing.Optional[PostV3Img2ImgAsyncFormRequestSelectedControlnetModel] = None, + negative_prompt: typing.Optional[str] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + prompt_strength: typing.Optional[float] = None, + controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None, + seed: typing.Optional[int] = None, + image_guidance_scale: typing.Optional[float] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> Img2ImgPageStatusResponse: """ Parameters ---------- + input_image : str + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + text_prompt : typing.Optional[str] + + selected_model : typing.Optional[PostV3Img2ImgAsyncFormRequestSelectedModel] + + selected_controlnet_model : typing.Optional[PostV3Img2ImgAsyncFormRequestSelectedControlnetModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + prompt_strength : typing.Optional[float] + + controlnet_conditioning_scale : typing.Optional[typing.List[float]] + + seed : typing.Optional[int] + + image_guidance_scale : typing.Optional[float] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + Img2ImgPageStatusResponse Successful Response Examples @@ -4227,29 +5490,116 @@ def post_v3img2img_async(self, *, request_options: typing.Optional[RequestOption client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3img2img_async() + client.post_v3img2img_async_form( + input_image="input_image", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/Img2Img/async", method="POST", request_options=request_options + "v3/Img2Img/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "input_image": input_image, + "text_prompt": text_prompt, + "selected_model": selected_model, + "selected_controlnet_model": selected_controlnet_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "prompt_strength": prompt_strength, + "controlnet_conditioning_scale": controlnet_conditioning_scale, + "seed": seed, + "image_guidance_scale": image_guidance_scale, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + Img2ImgPageStatusResponse, + parse_obj_as( + type_=Img2ImgPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3letter_writer_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + def post_v3letter_writer_async_form( + self, + *, + action_id: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] = None, + prompt_header: typing.Optional[str] = None, + example_letters: typing.Optional[typing.List[TrainingDataModel]] = None, + lm_selected_api: typing.Optional[str] = None, + lm_selected_engine: typing.Optional[str] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + lm_sampling_temperature: typing.Optional[float] = None, + api_http_method: typing.Optional[str] = None, + api_url: typing.Optional[str] = None, + api_headers: typing.Optional[str] = None, + api_json_body: typing.Optional[str] = None, + input_prompt: typing.Optional[str] = None, + strip_html2text: typing.Optional[bool] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> LetterWriterPageStatusResponse: """ Parameters ---------- + action_id : str + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + prompt_header : typing.Optional[str] + + example_letters : typing.Optional[typing.List[TrainingDataModel]] + + lm_selected_api : typing.Optional[str] + + lm_selected_engine : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + lm_sampling_temperature : typing.Optional[float] + + api_http_method : typing.Optional[str] + + api_url : typing.Optional[str] + + api_headers : typing.Optional[str] + + api_json_body : typing.Optional[str] + + input_prompt : typing.Optional[str] + + strip_html2text : typing.Optional[bool] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + LetterWriterPageStatusResponse Successful Response Examples @@ -4259,29 +5609,98 @@ def post_v3letter_writer_async(self, *, request_options: typing.Optional[Request client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3letter_writer_async() + client.post_v3letter_writer_async_form( + action_id="action_id", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/LetterWriter/async", method="POST", request_options=request_options + "v3/LetterWriter/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "action_id": action_id, + "prompt_header": prompt_header, + "example_letters": example_letters, + "lm_selected_api": lm_selected_api, + "lm_selected_engine": lm_selected_engine, + "num_outputs": num_outputs, + "quality": quality, + "lm_sampling_temperature": lm_sampling_temperature, + "api_http_method": api_http_method, + "api_url": api_url, + "api_headers": api_headers, + "api_json_body": api_json_body, + "input_prompt": input_prompt, + "strip_html_2_text": strip_html2text, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + LetterWriterPageStatusResponse, + parse_obj_as( + type_=LetterWriterPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3lipsync_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + def post_v3lipsync_async_form( + self, + *, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + input_face: 
typing.Optional[str] = None, + face_padding_top: typing.Optional[int] = None, + face_padding_bottom: typing.Optional[int] = None, + face_padding_left: typing.Optional[int] = None, + face_padding_right: typing.Optional[int] = None, + sadtalker_settings: typing.Optional[SadTalkerSettings] = None, + selected_model: typing.Optional[PostV3LipsyncAsyncFormRequestSelectedModel] = None, + input_audio: typing.Optional[str] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> LipsyncPageStatusResponse: """ Parameters ---------- + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_face : typing.Optional[str] + + face_padding_top : typing.Optional[int] + + face_padding_bottom : typing.Optional[int] + + face_padding_left : typing.Optional[int] + + face_padding_right : typing.Optional[int] + + sadtalker_settings : typing.Optional[SadTalkerSettings] + + selected_model : typing.Optional[PostV3LipsyncAsyncFormRequestSelectedModel] + + input_audio : typing.Optional[str] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + LipsyncPageStatusResponse Successful Response Examples @@ -4291,29 +5710,145 @@ def post_v3lipsync_async(self, *, request_options: typing.Optional[RequestOption client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3lipsync_async() + client.post_v3lipsync_async_form() """ _response = self._client_wrapper.httpx_client.request( - "v3/Lipsync/async", method="POST", request_options=request_options + "v3/Lipsync/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "input_face": input_face, + "face_padding_top": face_padding_top, + "face_padding_bottom": face_padding_bottom, + "face_padding_left": face_padding_left, + "face_padding_right": face_padding_right, + "sadtalker_settings": sadtalker_settings, + "selected_model": selected_model, + "input_audio": input_audio, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + LipsyncPageStatusResponse, + parse_obj_as( + type_=LipsyncPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3lipsync_tts_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: - """ - Parameters - ---------- + def post_v3lipsync_tts_async_form( + self, + *, + text_prompt: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + tts_provider: typing.Optional[PostV3LipsyncTtsAsyncFormRequestTtsProvider] = None, + uberduck_voice_name: typing.Optional[str] = None, + uberduck_speaking_rate: typing.Optional[float] = None, 
+ google_voice_name: typing.Optional[str] = None, + google_speaking_rate: typing.Optional[float] = None, + google_pitch: typing.Optional[float] = None, + bark_history_prompt: typing.Optional[str] = None, + elevenlabs_voice_name: typing.Optional[str] = None, + elevenlabs_api_key: typing.Optional[str] = None, + elevenlabs_voice_id: typing.Optional[str] = None, + elevenlabs_model: typing.Optional[str] = None, + elevenlabs_stability: typing.Optional[float] = None, + elevenlabs_similarity_boost: typing.Optional[float] = None, + elevenlabs_style: typing.Optional[float] = None, + elevenlabs_speaker_boost: typing.Optional[bool] = None, + azure_voice_name: typing.Optional[str] = None, + openai_voice_name: typing.Optional[PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName] = None, + openai_tts_model: typing.Optional[PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel] = None, + input_face: typing.Optional[str] = None, + face_padding_top: typing.Optional[int] = None, + face_padding_bottom: typing.Optional[int] = None, + face_padding_left: typing.Optional[int] = None, + face_padding_right: typing.Optional[int] = None, + sadtalker_settings: typing.Optional[SadTalkerSettings] = None, + selected_model: typing.Optional[PostV3LipsyncTtsAsyncFormRequestSelectedModel] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> LipsyncTtsPageStatusResponse: + """ + Parameters + ---------- + text_prompt : str + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + tts_provider : typing.Optional[PostV3LipsyncTtsAsyncFormRequestTtsProvider] + + uberduck_voice_name : typing.Optional[str] + + uberduck_speaking_rate : typing.Optional[float] + + google_voice_name : typing.Optional[str] + + google_speaking_rate : typing.Optional[float] + + google_pitch : 
typing.Optional[float] + + bark_history_prompt : typing.Optional[str] + + elevenlabs_voice_name : typing.Optional[str] + Use `elevenlabs_voice_id` instead + + elevenlabs_api_key : typing.Optional[str] + + elevenlabs_voice_id : typing.Optional[str] + + elevenlabs_model : typing.Optional[str] + + elevenlabs_stability : typing.Optional[float] + + elevenlabs_similarity_boost : typing.Optional[float] + + elevenlabs_style : typing.Optional[float] + + elevenlabs_speaker_boost : typing.Optional[bool] + + azure_voice_name : typing.Optional[str] + + openai_voice_name : typing.Optional[PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName] + + openai_tts_model : typing.Optional[PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel] + + input_face : typing.Optional[str] + + face_padding_top : typing.Optional[int] + + face_padding_bottom : typing.Optional[int] + + face_padding_left : typing.Optional[int] + + face_padding_right : typing.Optional[int] + + sadtalker_settings : typing.Optional[SadTalkerSettings] + + selected_model : typing.Optional[PostV3LipsyncTtsAsyncFormRequestSelectedModel] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + LipsyncTtsPageStatusResponse Successful Response Examples @@ -4323,29 +5858,131 @@ def post_v3lipsync_tts_async(self, *, request_options: typing.Optional[RequestOp client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3lipsync_tts_async() + client.post_v3lipsync_tts_async_form( + text_prompt="text_prompt", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/LipsyncTTS/async", method="POST", request_options=request_options + "v3/LipsyncTTS/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "tts_provider": tts_provider, + "uberduck_voice_name": uberduck_voice_name, + "uberduck_speaking_rate": uberduck_speaking_rate, + "google_voice_name": google_voice_name, + "google_speaking_rate": google_speaking_rate, + "google_pitch": google_pitch, + "bark_history_prompt": bark_history_prompt, + "elevenlabs_voice_name": elevenlabs_voice_name, + "elevenlabs_api_key": elevenlabs_api_key, + "elevenlabs_voice_id": elevenlabs_voice_id, + "elevenlabs_model": elevenlabs_model, + "elevenlabs_stability": elevenlabs_stability, + "elevenlabs_similarity_boost": elevenlabs_similarity_boost, + "elevenlabs_style": elevenlabs_style, + "elevenlabs_speaker_boost": elevenlabs_speaker_boost, + "azure_voice_name": azure_voice_name, + "openai_voice_name": openai_voice_name, + "openai_tts_model": openai_tts_model, + "input_face": input_face, + "face_padding_top": face_padding_top, + "face_padding_bottom": face_padding_bottom, + "face_padding_left": face_padding_left, + "face_padding_right": face_padding_right, + "sadtalker_settings": sadtalker_settings, + "selected_model": selected_model, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + LipsyncTtsPageStatusResponse, + 
parse_obj_as( + type_=LipsyncTtsPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3object_inpainting_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + def post_v3object_inpainting_async_form( + self, + *, + input_image: str, + text_prompt: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + obj_scale: typing.Optional[float] = None, + obj_pos_x: typing.Optional[float] = None, + obj_pos_y: typing.Optional[float] = None, + mask_threshold: typing.Optional[float] = None, + selected_model: typing.Optional[PostV3ObjectInpaintingAsyncFormRequestSelectedModel] = None, + negative_prompt: typing.Optional[str] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + sd2upscaling: typing.Optional[bool] = None, + seed: typing.Optional[int] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> ObjectInpaintingPageStatusResponse: """ Parameters ---------- + input_image : str + + text_prompt : str + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + obj_scale : typing.Optional[float] + + obj_pos_x : typing.Optional[float] + + obj_pos_y : typing.Optional[float] + + mask_threshold : typing.Optional[float] + + selected_model : typing.Optional[PostV3ObjectInpaintingAsyncFormRequestSelectedModel] + + 
negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + sd2upscaling : typing.Optional[bool] + + seed : typing.Optional[int] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Any + ObjectInpaintingPageStatusResponse Successful Response Examples @@ -4355,835 +5992,5417 @@ def post_v3object_inpainting_async(self, *, request_options: typing.Optional[Req client = Gooey( api_key="YOUR_API_KEY", ) - client.post_v3object_inpainting_async() + client.post_v3object_inpainting_async_form( + input_image="input_image", + text_prompt="text_prompt", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/ObjectInpainting/async", method="POST", request_options=request_options + "v3/ObjectInpainting/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "input_image": input_image, + "text_prompt": text_prompt, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, + "mask_threshold": mask_threshold, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "sd_2_upscaling": sd2upscaling, + "seed": seed, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + ObjectInpaintingPageStatusResponse, + parse_obj_as( + type_=ObjectInpaintingPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except 
JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def post_v3seo_summary_async_form( + self, + *, + search_query: str, + keywords: str, + title: str, + company_url: str, + task_instructions: typing.Optional[str] = None, + enable_html: typing.Optional[bool] = None, + selected_model: typing.Optional[PostV3SeoSummaryAsyncFormRequestSelectedModel] = None, + max_search_urls: typing.Optional[int] = None, + enable_crosslinks: typing.Optional[bool] = None, + seed: typing.Optional[int] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[PostV3SeoSummaryAsyncFormRequestResponseFormatType] = None, + serp_search_location: typing.Optional[SerpSearchLocation] = None, + scaleserp_locations: typing.Optional[typing.List[str]] = None, + serp_search_type: typing.Optional[SerpSearchType] = None, + scaleserp_search_field: typing.Optional[str] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> SeoSummaryPageStatusResponse: + """ + Parameters + ---------- + search_query : str + + keywords : str + + title : str + + company_url : str + + task_instructions : typing.Optional[str] + + enable_html : typing.Optional[bool] + + selected_model : typing.Optional[PostV3SeoSummaryAsyncFormRequestSelectedModel] + + max_search_urls : typing.Optional[int] + + enable_crosslinks : typing.Optional[bool] + + seed : typing.Optional[int] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : 
typing.Optional[PostV3SeoSummaryAsyncFormRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.List[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + SeoSummaryPageStatusResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3seo_summary_async_form( + search_query="search_query", + keywords="keywords", + title="title", + company_url="company_url", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/SEOSummary/async/form", + method="POST", + data={ + "search_query": search_query, + "keywords": keywords, + "title": title, + "company_url": company_url, + "task_instructions": task_instructions, + "enable_html": enable_html, + "selected_model": selected_model, + "max_search_urls": max_search_urls, + "enable_crosslinks": enable_crosslinks, + "seed": seed, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SeoSummaryPageStatusResponse, + parse_obj_as( + type_=SeoSummaryPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = 
_response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def post_v3smart_gpt_async_form( + self, + *, + input_prompt: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + cot_prompt: typing.Optional[str] = None, + reflexion_prompt: typing.Optional[str] = None, + dera_prompt: typing.Optional[str] = None, + selected_model: typing.Optional[PostV3SmartGptAsyncFormRequestSelectedModel] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[PostV3SmartGptAsyncFormRequestResponseFormatType] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> SmartGptPageStatusResponse: + """ + Parameters + ---------- + input_prompt : str + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + cot_prompt : typing.Optional[str] + + reflexion_prompt : typing.Optional[str] + + dera_prompt : typing.Optional[str] + + selected_model : typing.Optional[PostV3SmartGptAsyncFormRequestSelectedModel] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[PostV3SmartGptAsyncFormRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + 
Request-specific configuration. + + Returns + ------- + SmartGptPageStatusResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3smart_gpt_async_form( + input_prompt="input_prompt", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/SmartGPT/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "input_prompt": input_prompt, + "cot_prompt": cot_prompt, + "reflexion_prompt": reflexion_prompt, + "dera_prompt": dera_prompt, + "selected_model": selected_model, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SmartGptPageStatusResponse, + parse_obj_as( + type_=SmartGptPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def post_v3social_lookup_email_async_form( + self, + *, + email_address: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + input_prompt: typing.Optional[str] = None, + selected_model: typing.Optional[PostV3SocialLookupEmailAsyncFormRequestSelectedModel] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: 
typing.Optional[PostV3SocialLookupEmailAsyncFormRequestResponseFormatType] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> SocialLookupEmailPageStatusResponse: + """ + Parameters + ---------- + email_address : str + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_prompt : typing.Optional[str] + + selected_model : typing.Optional[PostV3SocialLookupEmailAsyncFormRequestSelectedModel] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[PostV3SocialLookupEmailAsyncFormRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + SocialLookupEmailPageStatusResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3social_lookup_email_async_form( + email_address="email_address", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/SocialLookupEmail/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "email_address": email_address, + "input_prompt": input_prompt, + "selected_model": selected_model, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SocialLookupEmailPageStatusResponse, + parse_obj_as( + type_=SocialLookupEmailPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def post_v3text_to_speech_async_form( + self, + *, + text_prompt: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + tts_provider: typing.Optional[PostV3TextToSpeechAsyncFormRequestTtsProvider] = None, + uberduck_voice_name: typing.Optional[str] = None, + uberduck_speaking_rate: typing.Optional[float] = None, + google_voice_name: typing.Optional[str] = None, + google_speaking_rate: typing.Optional[float] = None, + google_pitch: typing.Optional[float] = None, + bark_history_prompt: typing.Optional[str] = None, + elevenlabs_voice_name: typing.Optional[str] = None, + elevenlabs_api_key: 
typing.Optional[str] = None, + elevenlabs_voice_id: typing.Optional[str] = None, + elevenlabs_model: typing.Optional[str] = None, + elevenlabs_stability: typing.Optional[float] = None, + elevenlabs_similarity_boost: typing.Optional[float] = None, + elevenlabs_style: typing.Optional[float] = None, + elevenlabs_speaker_boost: typing.Optional[bool] = None, + azure_voice_name: typing.Optional[str] = None, + openai_voice_name: typing.Optional[PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName] = None, + openai_tts_model: typing.Optional[PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> TextToSpeechPageStatusResponse: + """ + Parameters + ---------- + text_prompt : str + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + tts_provider : typing.Optional[PostV3TextToSpeechAsyncFormRequestTtsProvider] + + uberduck_voice_name : typing.Optional[str] + + uberduck_speaking_rate : typing.Optional[float] + + google_voice_name : typing.Optional[str] + + google_speaking_rate : typing.Optional[float] + + google_pitch : typing.Optional[float] + + bark_history_prompt : typing.Optional[str] + + elevenlabs_voice_name : typing.Optional[str] + Use `elevenlabs_voice_id` instead + + elevenlabs_api_key : typing.Optional[str] + + elevenlabs_voice_id : typing.Optional[str] + + elevenlabs_model : typing.Optional[str] + + elevenlabs_stability : typing.Optional[float] + + elevenlabs_similarity_boost : typing.Optional[float] + + elevenlabs_style : typing.Optional[float] + + elevenlabs_speaker_boost : typing.Optional[bool] + + azure_voice_name : typing.Optional[str] + + openai_voice_name : typing.Optional[PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName] + + openai_tts_model : 
typing.Optional[PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + TextToSpeechPageStatusResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3text_to_speech_async_form( + text_prompt="text_prompt", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/TextToSpeech/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "tts_provider": tts_provider, + "uberduck_voice_name": uberduck_voice_name, + "uberduck_speaking_rate": uberduck_speaking_rate, + "google_voice_name": google_voice_name, + "google_speaking_rate": google_speaking_rate, + "google_pitch": google_pitch, + "bark_history_prompt": bark_history_prompt, + "elevenlabs_voice_name": elevenlabs_voice_name, + "elevenlabs_api_key": elevenlabs_api_key, + "elevenlabs_voice_id": elevenlabs_voice_id, + "elevenlabs_model": elevenlabs_model, + "elevenlabs_stability": elevenlabs_stability, + "elevenlabs_similarity_boost": elevenlabs_similarity_boost, + "elevenlabs_style": elevenlabs_style, + "elevenlabs_speaker_boost": elevenlabs_speaker_boost, + "azure_voice_name": azure_voice_name, + "openai_voice_name": openai_voice_name, + "openai_tts_model": openai_tts_model, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + TextToSpeechPageStatusResponse, + parse_obj_as( + type_=TextToSpeechPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def 
post_v3art_qr_code_async_form( + self, + *, + text_prompt: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + qr_code_data: typing.Optional[str] = None, + qr_code_input_image: typing.Optional[str] = None, + qr_code_vcard: typing.Optional[Vcard] = None, + qr_code_file: typing.Optional[str] = None, + use_url_shortener: typing.Optional[bool] = None, + negative_prompt: typing.Optional[str] = None, + image_prompt: typing.Optional[str] = None, + image_prompt_controlnet_models: typing.Optional[ + typing.List[PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem] + ] = None, + image_prompt_strength: typing.Optional[float] = None, + image_prompt_scale: typing.Optional[float] = None, + image_prompt_pos_x: typing.Optional[float] = None, + image_prompt_pos_y: typing.Optional[float] = None, + selected_model: typing.Optional[PostV3ArtQrCodeAsyncFormRequestSelectedModel] = None, + selected_controlnet_model: typing.Optional[ + typing.List[PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem] + ] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + scheduler: typing.Optional[PostV3ArtQrCodeAsyncFormRequestScheduler] = None, + seed: typing.Optional[int] = None, + obj_scale: typing.Optional[float] = None, + obj_pos_x: typing.Optional[float] = None, + obj_pos_y: typing.Optional[float] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> QrCodeGeneratorPageStatusResponse: + """ + Parameters + ---------- + text_prompt : str + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + qr_code_data : typing.Optional[str] + + qr_code_input_image : typing.Optional[str] + + qr_code_vcard : typing.Optional[Vcard] + + qr_code_file : typing.Optional[str] + + use_url_shortener : typing.Optional[bool] + + negative_prompt : typing.Optional[str] + + image_prompt : typing.Optional[str] + + image_prompt_controlnet_models : typing.Optional[typing.List[PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem]] + + image_prompt_strength : typing.Optional[float] + + image_prompt_scale : typing.Optional[float] + + image_prompt_pos_x : typing.Optional[float] + + image_prompt_pos_y : typing.Optional[float] + + selected_model : typing.Optional[PostV3ArtQrCodeAsyncFormRequestSelectedModel] + + selected_controlnet_model : typing.Optional[typing.List[PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem]] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + controlnet_conditioning_scale : typing.Optional[typing.List[float]] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + scheduler : typing.Optional[PostV3ArtQrCodeAsyncFormRequestScheduler] + + seed : typing.Optional[int] + + obj_scale : typing.Optional[float] + + obj_pos_x : typing.Optional[float] + + obj_pos_y : typing.Optional[float] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + QrCodeGeneratorPageStatusResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3art_qr_code_async_form( + text_prompt="text_prompt", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/art-qr-code/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "qr_code_data": qr_code_data, + "qr_code_input_image": qr_code_input_image, + "qr_code_vcard": qr_code_vcard, + "qr_code_file": qr_code_file, + "use_url_shortener": use_url_shortener, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "image_prompt": image_prompt, + "image_prompt_controlnet_models": image_prompt_controlnet_models, + "image_prompt_strength": image_prompt_strength, + "image_prompt_scale": image_prompt_scale, + "image_prompt_pos_x": image_prompt_pos_x, + "image_prompt_pos_y": image_prompt_pos_y, + "selected_model": selected_model, + "selected_controlnet_model": selected_controlnet_model, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "controlnet_conditioning_scale": controlnet_conditioning_scale, + "num_outputs": num_outputs, + "quality": quality, + "scheduler": scheduler, + "seed": seed, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + QrCodeGeneratorPageStatusResponse, + parse_obj_as( + type_=QrCodeGeneratorPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def post_v3asr_async_form( + self, + *, + documents: typing.List[str], + 
functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + selected_model: typing.Optional[PostV3AsrAsyncFormRequestSelectedModel] = None, + language: typing.Optional[str] = None, + translation_model: typing.Optional[PostV3AsrAsyncFormRequestTranslationModel] = None, + output_format: typing.Optional[PostV3AsrAsyncFormRequestOutputFormat] = None, + google_translate_target: typing.Optional[str] = None, + translation_source: typing.Optional[str] = None, + translation_target: typing.Optional[str] = None, + glossary_document: typing.Optional[str] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsrPageStatusResponse: + """ + Parameters + ---------- + documents : typing.List[str] + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + selected_model : typing.Optional[PostV3AsrAsyncFormRequestSelectedModel] + + language : typing.Optional[str] + + translation_model : typing.Optional[PostV3AsrAsyncFormRequestTranslationModel] + + output_format : typing.Optional[PostV3AsrAsyncFormRequestOutputFormat] + + google_translate_target : typing.Optional[str] + use `translation_model` & `translation_target` instead. + + translation_source : typing.Optional[str] + + translation_target : typing.Optional[str] + + glossary_document : typing.Optional[str] + Provide a glossary to customize translation and improve accuracy of domain-specific terms. + If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). 
+ + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsrPageStatusResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3asr_async_form( + documents=["documents"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/asr/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "documents": documents, + "selected_model": selected_model, + "language": language, + "translation_model": translation_model, + "output_format": output_format, + "google_translate_target": google_translate_target, + "translation_source": translation_source, + "translation_target": translation_target, + "glossary_document": glossary_document, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AsrPageStatusResponse, + parse_obj_as( + type_=AsrPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def post_v3bulk_eval_async_form( + self, + *, + documents: typing.List[str], + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + eval_prompts: typing.Optional[typing.List[EvalPrompt]] = None, + agg_functions: typing.Optional[typing.List[AggFunction]] = None, + selected_model: typing.Optional[PostV3BulkEvalAsyncFormRequestSelectedModel] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + 
sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[PostV3BulkEvalAsyncFormRequestResponseFormatType] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> BulkEvalPageStatusResponse: + """ + Parameters + ---------- + documents : typing.List[str] + + Upload or link to a CSV or google sheet that contains your sample input data. + For example, for Copilot, this would be sample questions, or for Art QR Code, it would be pairs of image descriptions and URLs. + Remember to include header names in your CSV too. + + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + eval_prompts : typing.Optional[typing.List[EvalPrompt]] + + Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. + _The `columns` dictionary can be used to reference the spreadsheet columns._ + + + agg_functions : typing.Optional[typing.List[AggFunction]] + + Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). + + + selected_model : typing.Optional[PostV3BulkEvalAsyncFormRequestSelectedModel] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[PostV3BulkEvalAsyncFormRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + BulkEvalPageStatusResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3bulk_eval_async_form( + documents=["documents"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/bulk-eval/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "documents": documents, + "eval_prompts": eval_prompts, + "agg_functions": agg_functions, + "selected_model": selected_model, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + BulkEvalPageStatusResponse, + parse_obj_as( + type_=BulkEvalPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def post_v3bulk_runner_async_form( + self, + *, + documents: typing.List[str], + run_urls: typing.List[str], + input_columns: typing.Dict[str, str], + output_columns: typing.Dict[str, str], + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + eval_urls: typing.Optional[typing.List[str]] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> BulkRunnerPageStatusResponse: + """ + Parameters + ---------- + documents : typing.List[str] + + Upload or link to a CSV or google sheet that contains your sample input data. 
+ For example, for Copilot, this would be sample questions, or for Art QR Code, it would be pairs of image descriptions and URLs. + Remember to include header names in your CSV too. + + + run_urls : typing.List[str] + + Provide one or more Gooey.AI workflow runs. + You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. + + + input_columns : typing.Dict[str, str] + + For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. + + + output_columns : typing.Dict[str, str] + + For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. + + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + eval_urls : typing.Optional[typing.List[str]] + + _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. + + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + BulkRunnerPageStatusResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3bulk_runner_async_form( + documents=["documents"], + run_urls=["run_urls"], + input_columns={"key": "value"}, + output_columns={"key": "value"}, + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/bulk-runner/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "documents": documents, + "run_urls": run_urls, + "input_columns": input_columns, + "output_columns": output_columns, + "eval_urls": eval_urls, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + BulkRunnerPageStatusResponse, + parse_obj_as( + type_=BulkRunnerPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def post_v3compare_ai_upscalers_async_form( + self, + *, + scale: int, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + input_image: typing.Optional[str] = None, + input_video: typing.Optional[str] = None, + selected_models: typing.Optional[ + typing.List[PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem] + ] = None, + selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> CompareUpscalerPageStatusResponse: + """ + Parameters + ---------- + scale : int + The final upsampling scale of the image + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : 
typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_image : typing.Optional[str] + Input Image + + input_video : typing.Optional[str] + Input Video + + selected_models : typing.Optional[typing.List[PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem]] + + selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + CompareUpscalerPageStatusResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3compare_ai_upscalers_async_form( + scale=1, + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/compare-ai-upscalers/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "input_image": input_image, + "input_video": input_video, + "scale": scale, + "selected_models": selected_models, + "selected_bg_model": selected_bg_model, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CompareUpscalerPageStatusResponse, + parse_obj_as( + type_=CompareUpscalerPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def post_v3doc_extract_async_form( + self, + *, + documents: typing.List[str], + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + sheet_url: typing.Optional[str] = None, + selected_asr_model: 
typing.Optional[PostV3DocExtractAsyncFormRequestSelectedAsrModel] = None, + google_translate_target: typing.Optional[str] = None, + glossary_document: typing.Optional[str] = None, + task_instructions: typing.Optional[str] = None, + selected_model: typing.Optional[PostV3DocExtractAsyncFormRequestSelectedModel] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[PostV3DocExtractAsyncFormRequestResponseFormatType] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> DocExtractPageStatusResponse: + """ + Parameters + ---------- + documents : typing.List[str] + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + sheet_url : typing.Optional[str] + + selected_asr_model : typing.Optional[PostV3DocExtractAsyncFormRequestSelectedAsrModel] + + google_translate_target : typing.Optional[str] + + glossary_document : typing.Optional[str] + Provide a glossary to customize translation and improve accuracy of domain-specific terms. + If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). 
+ + task_instructions : typing.Optional[str] + + selected_model : typing.Optional[PostV3DocExtractAsyncFormRequestSelectedModel] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[PostV3DocExtractAsyncFormRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DocExtractPageStatusResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3doc_extract_async_form( + documents=["documents"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/doc-extract/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "documents": documents, + "sheet_url": sheet_url, + "selected_asr_model": selected_asr_model, + "google_translate_target": google_translate_target, + "glossary_document": glossary_document, + "task_instructions": task_instructions, + "selected_model": selected_model, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + DocExtractPageStatusResponse, + parse_obj_as( + type_=DocExtractPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def 
post_v3doc_search_async_form( + self, + *, + search_query: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + keyword_query: typing.Optional[PostV3DocSearchAsyncFormRequestKeywordQuery] = None, + documents: typing.Optional[typing.List[str]] = None, + max_references: typing.Optional[int] = None, + max_context_words: typing.Optional[int] = None, + scroll_jump: typing.Optional[int] = None, + doc_extract_url: typing.Optional[str] = None, + embedding_model: typing.Optional[PostV3DocSearchAsyncFormRequestEmbeddingModel] = None, + dense_weight: typing.Optional[float] = None, + task_instructions: typing.Optional[str] = None, + query_instructions: typing.Optional[str] = None, + selected_model: typing.Optional[PostV3DocSearchAsyncFormRequestSelectedModel] = None, + citation_style: typing.Optional[PostV3DocSearchAsyncFormRequestCitationStyle] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[PostV3DocSearchAsyncFormRequestResponseFormatType] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> DocSearchPageStatusResponse: + """ + Parameters + ---------- + search_query : str + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + keyword_query : typing.Optional[PostV3DocSearchAsyncFormRequestKeywordQuery] + + documents : typing.Optional[typing.List[str]] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + doc_extract_url : 
typing.Optional[str] + + embedding_model : typing.Optional[PostV3DocSearchAsyncFormRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[PostV3DocSearchAsyncFormRequestSelectedModel] + + citation_style : typing.Optional[PostV3DocSearchAsyncFormRequestCitationStyle] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[PostV3DocSearchAsyncFormRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + DocSearchPageStatusResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3doc_search_async_form( + search_query="search_query", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/doc-search/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "keyword_query": keyword_query, + "documents": documents, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "doc_extract_url": doc_extract_url, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "citation_style": citation_style, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + DocSearchPageStatusResponse, + parse_obj_as( + type_=DocSearchPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def post_v3doc_summary_async_form( + self, + *, + documents: typing.List[str], + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + task_instructions: typing.Optional[str] = None, + merge_instructions: typing.Optional[str] = None, + selected_model: 
typing.Optional[PostV3DocSummaryAsyncFormRequestSelectedModel] = None, + chain_type: typing.Optional[typing.Literal["map_reduce"]] = None, + selected_asr_model: typing.Optional[PostV3DocSummaryAsyncFormRequestSelectedAsrModel] = None, + google_translate_target: typing.Optional[str] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[PostV3DocSummaryAsyncFormRequestResponseFormatType] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> DocSummaryPageStatusResponse: + """ + Parameters + ---------- + documents : typing.List[str] + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + task_instructions : typing.Optional[str] + + merge_instructions : typing.Optional[str] + + selected_model : typing.Optional[PostV3DocSummaryAsyncFormRequestSelectedModel] + + chain_type : typing.Optional[typing.Literal["map_reduce"]] + + selected_asr_model : typing.Optional[PostV3DocSummaryAsyncFormRequestSelectedAsrModel] + + google_translate_target : typing.Optional[str] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[PostV3DocSummaryAsyncFormRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + DocSummaryPageStatusResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3doc_summary_async_form( + documents=["documents"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/doc-summary/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "documents": documents, + "task_instructions": task_instructions, + "merge_instructions": merge_instructions, + "selected_model": selected_model, + "chain_type": chain_type, + "selected_asr_model": selected_asr_model, + "google_translate_target": google_translate_target, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + DocSummaryPageStatusResponse, + parse_obj_as( + type_=DocSummaryPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def post_v3embeddings_async_form( + self, + *, + texts: typing.List[str], + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + selected_model: typing.Optional[PostV3EmbeddingsAsyncFormRequestSelectedModel] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> EmbeddingsPageStatusResponse: + """ + Parameters + ---------- + texts : typing.List[str] + + functions : typing.Optional[typing.List[RecipeFunction]] 
+ + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + selected_model : typing.Optional[PostV3EmbeddingsAsyncFormRequestSelectedModel] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EmbeddingsPageStatusResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3embeddings_async_form( + texts=["texts"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/embeddings/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "texts": texts, + "selected_model": selected_model, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EmbeddingsPageStatusResponse, + parse_obj_as( + type_=EmbeddingsPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def post_v3functions_async_form( + self, + *, + code: typing.Optional[str] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> FunctionsPageStatusResponse: + """ + Parameters + ---------- + code : typing.Optional[str] + The JS code to be executed. + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used in the code + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + FunctionsPageStatusResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3functions_async_form() + """ + _response = self._client_wrapper.httpx_client.request( + "v3/functions/async/form", + method="POST", + data={ + "code": code, + "variables": variables, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + FunctionsPageStatusResponse, + parse_obj_as( + type_=FunctionsPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def post_v3google_gpt_async_form( + self, + *, + search_query: str, + site_filter: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + task_instructions: typing.Optional[str] = None, + query_instructions: typing.Optional[str] = None, + selected_model: typing.Optional[PostV3GoogleGptAsyncFormRequestSelectedModel] = None, + max_search_urls: typing.Optional[int] = None, + max_references: typing.Optional[int] = None, + max_context_words: typing.Optional[int] = None, + scroll_jump: typing.Optional[int] = None, + embedding_model: typing.Optional[PostV3GoogleGptAsyncFormRequestEmbeddingModel] = None, + dense_weight: typing.Optional[float] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[PostV3GoogleGptAsyncFormRequestResponseFormatType] = None, + serp_search_location: 
typing.Optional[SerpSearchLocation] = None, + scaleserp_locations: typing.Optional[typing.List[str]] = None, + serp_search_type: typing.Optional[SerpSearchType] = None, + scaleserp_search_field: typing.Optional[str] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> GoogleGptPageStatusResponse: + """ + Parameters + ---------- + search_query : str + + site_filter : str + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[PostV3GoogleGptAsyncFormRequestSelectedModel] + + max_search_urls : typing.Optional[int] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + embedding_model : typing.Optional[PostV3GoogleGptAsyncFormRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
+ + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[PostV3GoogleGptAsyncFormRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.List[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + GoogleGptPageStatusResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3google_gpt_async_form( + search_query="search_query", + site_filter="site_filter", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/google-gpt/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "site_filter": site_filter, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "max_search_urls": max_search_urls, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": 
scaleserp_search_field, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GoogleGptPageStatusResponse, + parse_obj_as( + type_=GoogleGptPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def post_v3related_qna_maker_doc_async_form( + self, + *, + search_query: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + keyword_query: typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery] = None, + documents: typing.Optional[typing.List[str]] = None, + max_references: typing.Optional[int] = None, + max_context_words: typing.Optional[int] = None, + scroll_jump: typing.Optional[int] = None, + doc_extract_url: typing.Optional[str] = None, + embedding_model: typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel] = None, + dense_weight: typing.Optional[float] = None, + task_instructions: typing.Optional[str] = None, + query_instructions: typing.Optional[str] = None, + selected_model: typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel] = None, + citation_style: typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType] = None, + serp_search_location: typing.Optional[SerpSearchLocation] = None, + scaleserp_locations: 
typing.Optional[typing.List[str]] = None, + serp_search_type: typing.Optional[SerpSearchType] = None, + scaleserp_search_field: typing.Optional[str] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> RelatedQnADocPageStatusResponse: + """ + Parameters + ---------- + search_query : str + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + keyword_query : typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery] + + documents : typing.Optional[typing.List[str]] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + doc_extract_url : typing.Optional[str] + + embedding_model : typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
+ + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel] + + citation_style : typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.List[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + RelatedQnADocPageStatusResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3related_qna_maker_doc_async_form( + search_query="search_query", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/related-qna-maker-doc/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "keyword_query": keyword_query, + "documents": documents, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "doc_extract_url": doc_extract_url, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "citation_style": citation_style, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + RelatedQnADocPageStatusResponse, + parse_obj_as( + type_=RelatedQnADocPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def post_v3related_qna_maker_async_form( + self, + *, + search_query: str, + site_filter: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + 
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + task_instructions: typing.Optional[str] = None, + query_instructions: typing.Optional[str] = None, + selected_model: typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestSelectedModel] = None, + max_search_urls: typing.Optional[int] = None, + max_references: typing.Optional[int] = None, + max_context_words: typing.Optional[int] = None, + scroll_jump: typing.Optional[int] = None, + embedding_model: typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel] = None, + dense_weight: typing.Optional[float] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType] = None, + serp_search_location: typing.Optional[SerpSearchLocation] = None, + scaleserp_locations: typing.Optional[typing.List[str]] = None, + serp_search_type: typing.Optional[SerpSearchType] = None, + scaleserp_search_field: typing.Optional[str] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> RelatedQnAPageStatusResponse: + """ + Parameters + ---------- + search_query : str + + site_filter : str + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestSelectedModel] + + max_search_urls : typing.Optional[int] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + 
embedding_model : typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.List[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + RelatedQnAPageStatusResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3related_qna_maker_async_form( + search_query="search_query", + site_filter="site_filter", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/related-qna-maker/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "site_filter": site_filter, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "max_search_urls": max_search_urls, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + RelatedQnAPageStatusResponse, + parse_obj_as( + type_=RelatedQnAPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def post_v3text2audio_async_form( + self, + *, + text_prompt: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + 
negative_prompt: typing.Optional[str] = None, + duration_sec: typing.Optional[float] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + seed: typing.Optional[int] = None, + sd2upscaling: typing.Optional[bool] = None, + selected_models: typing.Optional[typing.List[typing.Literal["audio_ldm"]]] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> Text2AudioPageStatusResponse: + """ + Parameters + ---------- + text_prompt : str + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + negative_prompt : typing.Optional[str] + + duration_sec : typing.Optional[float] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + seed : typing.Optional[int] + + sd2upscaling : typing.Optional[bool] + + selected_models : typing.Optional[typing.List[typing.Literal["audio_ldm"]]] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + Text2AudioPageStatusResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3text2audio_async_form( + text_prompt="text_prompt", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/text2audio/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "duration_sec": duration_sec, + "num_outputs": num_outputs, + "quality": quality, + "guidance_scale": guidance_scale, + "seed": seed, + "sd_2_upscaling": sd2upscaling, + "selected_models": selected_models, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + Text2AudioPageStatusResponse, + parse_obj_as( + type_=Text2AudioPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def post_v3translate_async_form( + self, + *, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + texts: typing.Optional[typing.List[str]] = None, + selected_model: typing.Optional[PostV3TranslateAsyncFormRequestSelectedModel] = None, + translation_source: typing.Optional[str] = None, + translation_target: typing.Optional[str] = None, + glossary_document: typing.Optional[str] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> TranslationPageStatusResponse: + """ + Parameters + ---------- + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + texts : typing.Optional[typing.List[str]] + + selected_model : typing.Optional[PostV3TranslateAsyncFormRequestSelectedModel] + + translation_source : typing.Optional[str] + + translation_target : typing.Optional[str] + + glossary_document : typing.Optional[str] + Provide a glossary to customize translation and improve accuracy of domain-specific terms. + If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + TranslationPageStatusResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3translate_async_form() + """ + _response = self._client_wrapper.httpx_client.request( + "v3/translate/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "texts": texts, + "selected_model": selected_model, + "translation_source": translation_source, + "translation_target": translation_target, + "glossary_document": glossary_document, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + TranslationPageStatusResponse, + parse_obj_as( + type_=TranslationPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def post_v3video_bots_async_form( + self, + *, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + 
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + input_prompt: typing.Optional[str] = None, + input_audio: typing.Optional[str] = None, + input_images: typing.Optional[typing.List[str]] = None, + input_documents: typing.Optional[typing.List[str]] = None, + doc_extract_url: typing.Optional[str] = None, + messages: typing.Optional[typing.List[ConversationEntry]] = None, + bot_script: typing.Optional[str] = None, + selected_model: typing.Optional[PostV3VideoBotsAsyncFormRequestSelectedModel] = None, + document_model: typing.Optional[str] = None, + task_instructions: typing.Optional[str] = None, + query_instructions: typing.Optional[str] = None, + keyword_instructions: typing.Optional[str] = None, + documents: typing.Optional[typing.List[str]] = None, + max_references: typing.Optional[int] = None, + max_context_words: typing.Optional[int] = None, + scroll_jump: typing.Optional[int] = None, + embedding_model: typing.Optional[PostV3VideoBotsAsyncFormRequestEmbeddingModel] = None, + dense_weight: typing.Optional[float] = None, + citation_style: typing.Optional[PostV3VideoBotsAsyncFormRequestCitationStyle] = None, + use_url_shortener: typing.Optional[bool] = None, + asr_model: typing.Optional[PostV3VideoBotsAsyncFormRequestAsrModel] = None, + asr_language: typing.Optional[str] = None, + translation_model: typing.Optional[PostV3VideoBotsAsyncFormRequestTranslationModel] = None, + user_language: typing.Optional[str] = None, + input_glossary_document: typing.Optional[str] = None, + output_glossary_document: typing.Optional[str] = None, + lipsync_model: typing.Optional[PostV3VideoBotsAsyncFormRequestLipsyncModel] = None, + tools: typing.Optional[typing.List[LlmTools]] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: 
typing.Optional[PostV3VideoBotsAsyncFormRequestResponseFormatType] = None, + tts_provider: typing.Optional[PostV3VideoBotsAsyncFormRequestTtsProvider] = None, + uberduck_voice_name: typing.Optional[str] = None, + uberduck_speaking_rate: typing.Optional[float] = None, + google_voice_name: typing.Optional[str] = None, + google_speaking_rate: typing.Optional[float] = None, + google_pitch: typing.Optional[float] = None, + bark_history_prompt: typing.Optional[str] = None, + elevenlabs_voice_name: typing.Optional[str] = None, + elevenlabs_api_key: typing.Optional[str] = None, + elevenlabs_voice_id: typing.Optional[str] = None, + elevenlabs_model: typing.Optional[str] = None, + elevenlabs_stability: typing.Optional[float] = None, + elevenlabs_similarity_boost: typing.Optional[float] = None, + elevenlabs_style: typing.Optional[float] = None, + elevenlabs_speaker_boost: typing.Optional[bool] = None, + azure_voice_name: typing.Optional[str] = None, + openai_voice_name: typing.Optional[PostV3VideoBotsAsyncFormRequestOpenaiVoiceName] = None, + openai_tts_model: typing.Optional[PostV3VideoBotsAsyncFormRequestOpenaiTtsModel] = None, + input_face: typing.Optional[str] = None, + face_padding_top: typing.Optional[int] = None, + face_padding_bottom: typing.Optional[int] = None, + face_padding_left: typing.Optional[int] = None, + face_padding_right: typing.Optional[int] = None, + sadtalker_settings: typing.Optional[SadTalkerSettings] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> VideoBotsPageStatusResponse: + """ + Parameters + ---------- + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_prompt : typing.Optional[str] + + input_audio : typing.Optional[str] + + input_images : typing.Optional[typing.List[str]] + + input_documents : 
typing.Optional[typing.List[str]] + + doc_extract_url : typing.Optional[str] + Select a workflow to extract text from documents and images. + + messages : typing.Optional[typing.List[ConversationEntry]] + + bot_script : typing.Optional[str] + + selected_model : typing.Optional[PostV3VideoBotsAsyncFormRequestSelectedModel] + + document_model : typing.Optional[str] + When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + keyword_instructions : typing.Optional[str] + + documents : typing.Optional[typing.List[str]] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + embedding_model : typing.Optional[PostV3VideoBotsAsyncFormRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + + citation_style : typing.Optional[PostV3VideoBotsAsyncFormRequestCitationStyle] + + use_url_shortener : typing.Optional[bool] + + asr_model : typing.Optional[PostV3VideoBotsAsyncFormRequestAsrModel] + Choose a model to transcribe incoming audio messages to text. + + asr_language : typing.Optional[str] + Choose a language to transcribe incoming audio messages to text. + + translation_model : typing.Optional[PostV3VideoBotsAsyncFormRequestTranslationModel] + + user_language : typing.Optional[str] + Choose a language to translate incoming text & audio messages to English and responses back to your selected language. 
Useful for low-resource languages. + + input_glossary_document : typing.Optional[str] + + Translation Glossary for User Language -> LLM Language (English) + + + output_glossary_document : typing.Optional[str] + + Translation Glossary for LLM Language (English) -> User Language + + + lipsync_model : typing.Optional[PostV3VideoBotsAsyncFormRequestLipsyncModel] + + tools : typing.Optional[typing.List[LlmTools]] + Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[PostV3VideoBotsAsyncFormRequestResponseFormatType] + + tts_provider : typing.Optional[PostV3VideoBotsAsyncFormRequestTtsProvider] + + uberduck_voice_name : typing.Optional[str] + + uberduck_speaking_rate : typing.Optional[float] + + google_voice_name : typing.Optional[str] + + google_speaking_rate : typing.Optional[float] + + google_pitch : typing.Optional[float] + + bark_history_prompt : typing.Optional[str] + + elevenlabs_voice_name : typing.Optional[str] + Use `elevenlabs_voice_id` instead + + elevenlabs_api_key : typing.Optional[str] + + elevenlabs_voice_id : typing.Optional[str] + + elevenlabs_model : typing.Optional[str] + + elevenlabs_stability : typing.Optional[float] + + elevenlabs_similarity_boost : typing.Optional[float] + + elevenlabs_style : typing.Optional[float] + + elevenlabs_speaker_boost : typing.Optional[bool] + + azure_voice_name : typing.Optional[str] + + openai_voice_name : typing.Optional[PostV3VideoBotsAsyncFormRequestOpenaiVoiceName] + + openai_tts_model : typing.Optional[PostV3VideoBotsAsyncFormRequestOpenaiTtsModel] + + input_face : typing.Optional[str] + + face_padding_top : typing.Optional[int] + + face_padding_bottom : 
typing.Optional[int] + + face_padding_left : typing.Optional[int] + + face_padding_right : typing.Optional[int] + + sadtalker_settings : typing.Optional[SadTalkerSettings] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + VideoBotsPageStatusResponse + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3video_bots_async_form() + """ + _response = self._client_wrapper.httpx_client.request( + "v3/video-bots/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "input_prompt": input_prompt, + "input_audio": input_audio, + "input_images": input_images, + "input_documents": input_documents, + "doc_extract_url": doc_extract_url, + "messages": messages, + "bot_script": bot_script, + "selected_model": selected_model, + "document_model": document_model, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "keyword_instructions": keyword_instructions, + "documents": documents, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "citation_style": citation_style, + "use_url_shortener": use_url_shortener, + "asr_model": asr_model, + "asr_language": asr_language, + "translation_model": translation_model, + "user_language": user_language, + "input_glossary_document": input_glossary_document, + "output_glossary_document": output_glossary_document, + "lipsync_model": lipsync_model, + "tools": tools, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "tts_provider": tts_provider, + "uberduck_voice_name": uberduck_voice_name, + 
"uberduck_speaking_rate": uberduck_speaking_rate, + "google_voice_name": google_voice_name, + "google_speaking_rate": google_speaking_rate, + "google_pitch": google_pitch, + "bark_history_prompt": bark_history_prompt, + "elevenlabs_voice_name": elevenlabs_voice_name, + "elevenlabs_api_key": elevenlabs_api_key, + "elevenlabs_voice_id": elevenlabs_voice_id, + "elevenlabs_model": elevenlabs_model, + "elevenlabs_stability": elevenlabs_stability, + "elevenlabs_similarity_boost": elevenlabs_similarity_boost, + "elevenlabs_style": elevenlabs_style, + "elevenlabs_speaker_boost": elevenlabs_speaker_boost, + "azure_voice_name": azure_voice_name, + "openai_voice_name": openai_voice_name, + "openai_tts_model": openai_tts_model, + "input_face": input_face, + "face_padding_top": face_padding_top, + "face_padding_bottom": face_padding_bottom, + "face_padding_left": face_padding_left, + "face_padding_right": face_padding_right, + "sadtalker_settings": sadtalker_settings, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + VideoBotsPageStatusResponse, + parse_obj_as( + type_=VideoBotsPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncGooey: + """ + Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configuration that will propagate to these functions. + + Parameters + ---------- + base_url : typing.Optional[str] + The base url to use for requests from the client. + + environment : GooeyEnvironment + The environment to use for requests from the client. 
from .environment import GooeyEnvironment + + + + Defaults to GooeyEnvironment.DEFAULT + + + + api_key : typing.Optional[typing.Union[str, typing.Callable[[], str]]] + timeout : typing.Optional[float] + The timeout to be used, in seconds, for requests. By default the timeout is 60 seconds, unless a custom httpx client is used, in which case this default is not enforced. + + follow_redirects : typing.Optional[bool] + Whether the default httpx client follows redirects or not, this is irrelevant if a custom httpx client is passed in. + + httpx_client : typing.Optional[httpx.AsyncClient] + The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration. + + Examples + -------- + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + """ + + def __init__( + self, + *, + base_url: typing.Optional[str] = None, + environment: GooeyEnvironment = GooeyEnvironment.DEFAULT, + api_key: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = os.getenv("GOOEY_API_KEY"), + timeout: typing.Optional[float] = None, + follow_redirects: typing.Optional[bool] = True, + httpx_client: typing.Optional[httpx.AsyncClient] = None, + ): + _defaulted_timeout = timeout if timeout is not None else 60 if httpx_client is None else None + if api_key is None: + raise ApiError(body="The client must be instantiated be either passing in api_key or setting GOOEY_API_KEY") + self._client_wrapper = AsyncClientWrapper( + base_url=_get_base_url(base_url=base_url, environment=environment), + api_key=api_key, + httpx_client=httpx_client + if httpx_client is not None + else httpx.AsyncClient(timeout=_defaulted_timeout, follow_redirects=follow_redirects) + if follow_redirects is not None + else httpx.AsyncClient(timeout=_defaulted_timeout), + timeout=_defaulted_timeout, + ) + self.copilot_integrations = AsyncCopilotIntegrationsClient(client_wrapper=self._client_wrapper) + 
    async def animate(
        self,
        *,
        animation_prompts: typing.Sequence[AnimationPrompt],
        example_id: typing.Optional[str] = None,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
        max_frames: typing.Optional[int] = OMIT,
        selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT,
        animation_mode: typing.Optional[str] = OMIT,
        zoom: typing.Optional[str] = OMIT,
        translation_x: typing.Optional[str] = OMIT,
        translation_y: typing.Optional[str] = OMIT,
        rotation3d_x: typing.Optional[str] = OMIT,
        rotation3d_y: typing.Optional[str] = OMIT,
        rotation3d_z: typing.Optional[str] = OMIT,
        fps: typing.Optional[int] = OMIT,
        seed: typing.Optional[int] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[DeforumSdPageOutput]:
        """
        Run the Deforum SD animation recipe (``POST v3/DeforumSD/async``)
        and return the run's output.

        Parameters
        ----------
        animation_prompts : typing.Sequence[AnimationPrompt]

        variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Other keyword arguments are optional recipe settings forwarded verbatim
        in the request body (types per the annotations above).

        Returns
        -------
        typing.Optional[DeforumSdPageOutput]
            Successful Response

        Examples
        --------
        import asyncio

        from gooey import AnimationPrompt, AsyncGooey

        client = AsyncGooey(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.animate(
                animation_prompts=[
                    AnimationPrompt(
                        frame="frame",
                        prompt="prompt",
                    )
                ],
            )


        asyncio.run(main())
        """
        _response = await self._client_wrapper.httpx_client.request(
            "v3/DeforumSD/async",
            method="POST",
            params={
                "example_id": example_id,
            },
            json={
                "functions": functions,
                "variables": variables,
                "animation_prompts": animation_prompts,
                "max_frames": max_frames,
                "selected_model": selected_model,
                "animation_mode": animation_mode,
                "zoom": zoom,
                "translation_x": translation_x,
                "translation_y": translation_y,
                # NOTE: the wire field names carry an underscore ("rotation_3d_x")
                # while the keyword arguments do not ("rotation3d_x").
                "rotation_3d_x": rotation3d_x,
                "rotation_3d_y": rotation3d_y,
                "rotation_3d_z": rotation3d_z,
                "fps": fps,
                "seed": seed,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,  # arguments left as OMIT are dropped from the payload
        )
        try:
            if 200 <= _response.status_code < 300:
                _parsed_response = typing.cast(
                    DeforumSdPageStatusResponse,
                    parse_obj_as(
                        type_=DeforumSdPageStatusResponse,  # type: ignore
                        object_=_response.json(),
                    ),
                )
                return _parsed_response.output
            if _response.status_code == 402:
                raise PaymentRequiredError(
                    typing.cast(
                        typing.Optional[typing.Any],
                        parse_obj_as(
                            type_=typing.Optional[typing.Any],  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    typing.cast(
                        HttpValidationError,
                        parse_obj_as(
                            type_=HttpValidationError,  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    typing.cast(
                        GenericErrorResponse,
                        parse_obj_as(
                            type_=GenericErrorResponse,  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Non-JSON body (e.g. an HTML error page): surface the raw text.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
    async def qr_code(
        self,
        *,
        text_prompt: str,
        example_id: typing.Optional[str] = None,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
        qr_code_data: typing.Optional[str] = OMIT,
        qr_code_input_image: typing.Optional[str] = OMIT,
        qr_code_vcard: typing.Optional[Vcard] = OMIT,
        qr_code_file: typing.Optional[str] = OMIT,
        use_url_shortener: typing.Optional[bool] = OMIT,
        negative_prompt: typing.Optional[str] = OMIT,
        image_prompt: typing.Optional[str] = OMIT,
        image_prompt_controlnet_models: typing.Optional[
            typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
        ] = OMIT,
        image_prompt_strength: typing.Optional[float] = OMIT,
        image_prompt_scale: typing.Optional[float] = OMIT,
        image_prompt_pos_x: typing.Optional[float] = OMIT,
        image_prompt_pos_y: typing.Optional[float] = OMIT,
        selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT,
        selected_controlnet_model: typing.Optional[
            typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]
        ] = OMIT,
        output_width: typing.Optional[int] = OMIT,
        output_height: typing.Optional[int] = OMIT,
        guidance_scale: typing.Optional[float] = OMIT,
        controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[int] = OMIT,
        scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = OMIT,
        seed: typing.Optional[int] = OMIT,
        obj_scale: typing.Optional[float] = OMIT,
        obj_pos_x: typing.Optional[float] = OMIT,
        obj_pos_y: typing.Optional[float] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[QrCodeGeneratorPageOutput]:
        """
        Run the AI art QR-code recipe (``POST v3/art-qr-code/async``)
        and return the run's output.

        Parameters
        ----------
        text_prompt : str

        variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Other keyword arguments are optional recipe settings forwarded verbatim
        in the request body (types per the annotations above).

        Returns
        -------
        typing.Optional[QrCodeGeneratorPageOutput]
            Successful Response

        Examples
        --------
        import asyncio

        from gooey import AsyncGooey

        client = AsyncGooey(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.qr_code(
                text_prompt="text_prompt",
            )


        asyncio.run(main())
        """
        _response = await self._client_wrapper.httpx_client.request(
            "v3/art-qr-code/async",
            method="POST",
            params={
                "example_id": example_id,
            },
            json={
                "functions": functions,
                "variables": variables,
                "qr_code_data": qr_code_data,
                "qr_code_input_image": qr_code_input_image,
                "qr_code_vcard": qr_code_vcard,
                "qr_code_file": qr_code_file,
                "use_url_shortener": use_url_shortener,
                "text_prompt": text_prompt,
                "negative_prompt": negative_prompt,
                "image_prompt": image_prompt,
                "image_prompt_controlnet_models": image_prompt_controlnet_models,
                "image_prompt_strength": image_prompt_strength,
                "image_prompt_scale": image_prompt_scale,
                "image_prompt_pos_x": image_prompt_pos_x,
                "image_prompt_pos_y": image_prompt_pos_y,
                "selected_model": selected_model,
                "selected_controlnet_model": selected_controlnet_model,
                "output_width": output_width,
                "output_height": output_height,
                "guidance_scale": guidance_scale,
                "controlnet_conditioning_scale": controlnet_conditioning_scale,
                "num_outputs": num_outputs,
                "quality": quality,
                "scheduler": scheduler,
                "seed": seed,
                "obj_scale": obj_scale,
                "obj_pos_x": obj_pos_x,
                "obj_pos_y": obj_pos_y,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,  # arguments left as OMIT are dropped from the payload
        )
        try:
            if 200 <= _response.status_code < 300:
                _parsed_response = typing.cast(
                    QrCodeGeneratorPageStatusResponse,
                    parse_obj_as(
                        type_=QrCodeGeneratorPageStatusResponse,  # type: ignore
                        object_=_response.json(),
                    ),
                )
                return _parsed_response.output
            if _response.status_code == 402:
                raise PaymentRequiredError(
                    typing.cast(
                        typing.Optional[typing.Any],
                        parse_obj_as(
                            type_=typing.Optional[typing.Any],  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    typing.cast(
                        HttpValidationError,
                        parse_obj_as(
                            type_=HttpValidationError,  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    typing.cast(
                        GenericErrorResponse,
                        parse_obj_as(
                            type_=GenericErrorResponse,  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Non-JSON body (e.g. an HTML error page): surface the raw text.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
    async def seo_people_also_ask(
        self,
        *,
        search_query: str,
        site_filter: str,
        example_id: typing.Optional[str] = None,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
        task_instructions: typing.Optional[str] = OMIT,
        query_instructions: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT,
        max_search_urls: typing.Optional[int] = OMIT,
        max_references: typing.Optional[int] = OMIT,
        max_context_words: typing.Optional[int] = OMIT,
        scroll_jump: typing.Optional[int] = OMIT,
        embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT,
        dense_weight: typing.Optional[float] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT,
        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
        scaleserp_search_field: typing.Optional[str] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[RelatedQnAPageOutput]:
        """
        Run the "People Also Ask" related-QnA recipe
        (``POST v3/related-qna-maker/async``) and return the run's output.

        Parameters
        ----------
        search_query : str

        site_filter : str

        variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        dense_weight : typing.Optional[float]
            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.

        scaleserp_locations : typing.Optional[typing.Sequence[str]]
            DEPRECATED: use `serp_search_location` instead

        scaleserp_search_field : typing.Optional[str]
            DEPRECATED: use `serp_search_type` instead

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Other keyword arguments are optional recipe settings forwarded verbatim
        in the request body (types per the annotations above).

        Returns
        -------
        typing.Optional[RelatedQnAPageOutput]
            Successful Response

        Examples
        --------
        import asyncio

        from gooey import AsyncGooey

        client = AsyncGooey(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.seo_people_also_ask(
                search_query="search_query",
                site_filter="site_filter",
            )


        asyncio.run(main())
        """
        _response = await self._client_wrapper.httpx_client.request(
            "v3/related-qna-maker/async",
            method="POST",
            params={
                "example_id": example_id,
            },
            json={
                "functions": functions,
                "variables": variables,
                "search_query": search_query,
                "site_filter": site_filter,
                "task_instructions": task_instructions,
                "query_instructions": query_instructions,
                "selected_model": selected_model,
                "max_search_urls": max_search_urls,
                "max_references": max_references,
                "max_context_words": max_context_words,
                "scroll_jump": scroll_jump,
                "embedding_model": embedding_model,
                "dense_weight": dense_weight,
                "avoid_repetition": avoid_repetition,
                "num_outputs": num_outputs,
                "quality": quality,
                "max_tokens": max_tokens,
                "sampling_temperature": sampling_temperature,
                "response_format_type": response_format_type,
                "serp_search_location": serp_search_location,
                "scaleserp_locations": scaleserp_locations,
                "serp_search_type": serp_search_type,
                "scaleserp_search_field": scaleserp_search_field,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,  # arguments left as OMIT are dropped from the payload
        )
        try:
            if 200 <= _response.status_code < 300:
                _parsed_response = typing.cast(
                    RelatedQnAPageStatusResponse,
                    parse_obj_as(
                        type_=RelatedQnAPageStatusResponse,  # type: ignore
                        object_=_response.json(),
                    ),
                )
                return _parsed_response.output
            if _response.status_code == 402:
                raise PaymentRequiredError(
                    typing.cast(
                        typing.Optional[typing.Any],
                        parse_obj_as(
                            type_=typing.Optional[typing.Any],  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    typing.cast(
                        HttpValidationError,
                        parse_obj_as(
                            type_=HttpValidationError,  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    typing.cast(
                        GenericErrorResponse,
                        parse_obj_as(
                            type_=GenericErrorResponse,  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Non-JSON body (e.g. an HTML error page): surface the raw text.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
    async def seo_content(
        self,
        *,
        search_query: str,
        keywords: str,
        title: str,
        company_url: str,
        example_id: typing.Optional[str] = None,
        task_instructions: typing.Optional[str] = OMIT,
        enable_html: typing.Optional[bool] = OMIT,
        selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT,
        max_search_urls: typing.Optional[int] = OMIT,
        enable_crosslinks: typing.Optional[bool] = OMIT,
        seed: typing.Optional[int] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT,
        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
        scaleserp_search_field: typing.Optional[str] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[SeoSummaryPageOutput]:
        """
        Run the SEO content-summary recipe (``POST v3/SEOSummary/async``)
        and return the run's output.

        NOTE: unlike sibling methods, this endpoint takes no ``functions`` /
        ``variables`` parameters.

        Parameters
        ----------
        search_query : str

        keywords : str

        title : str

        company_url : str

        scaleserp_locations : typing.Optional[typing.Sequence[str]]
            DEPRECATED: use `serp_search_location` instead

        scaleserp_search_field : typing.Optional[str]
            DEPRECATED: use `serp_search_type` instead

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Other keyword arguments are optional recipe settings forwarded verbatim
        in the request body (types per the annotations above).

        Returns
        -------
        typing.Optional[SeoSummaryPageOutput]
            Successful Response

        Examples
        --------
        import asyncio

        from gooey import AsyncGooey

        client = AsyncGooey(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.seo_content(
                search_query="search_query",
                keywords="keywords",
                title="title",
                company_url="company_url",
            )


        asyncio.run(main())
        """
        _response = await self._client_wrapper.httpx_client.request(
            "v3/SEOSummary/async",
            method="POST",
            params={
                "example_id": example_id,
            },
            json={
                "search_query": search_query,
                "keywords": keywords,
                "title": title,
                "company_url": company_url,
                "task_instructions": task_instructions,
                "enable_html": enable_html,
                "selected_model": selected_model,
                "max_search_urls": max_search_urls,
                "enable_crosslinks": enable_crosslinks,
                "seed": seed,
                "avoid_repetition": avoid_repetition,
                "num_outputs": num_outputs,
                "quality": quality,
                "max_tokens": max_tokens,
                "sampling_temperature": sampling_temperature,
                "response_format_type": response_format_type,
                "serp_search_location": serp_search_location,
                "scaleserp_locations": scaleserp_locations,
                "serp_search_type": serp_search_type,
                "scaleserp_search_field": scaleserp_search_field,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,  # arguments left as OMIT are dropped from the payload
        )
        try:
            if 200 <= _response.status_code < 300:
                _parsed_response = typing.cast(
                    SeoSummaryPageStatusResponse,
                    parse_obj_as(
                        type_=SeoSummaryPageStatusResponse,  # type: ignore
                        object_=_response.json(),
                    ),
                )
                return _parsed_response.output
            if _response.status_code == 402:
                raise PaymentRequiredError(
                    typing.cast(
                        typing.Optional[typing.Any],
                        parse_obj_as(
                            type_=typing.Optional[typing.Any],  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    typing.cast(
                        HttpValidationError,
                        parse_obj_as(
                            type_=HttpValidationError,  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    typing.cast(
                        GenericErrorResponse,
                        parse_obj_as(
                            type_=GenericErrorResponse,  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Non-JSON body (e.g. an HTML error page): surface the raw text.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
    async def web_search_llm(
        self,
        *,
        search_query: str,
        site_filter: str,
        example_id: typing.Optional[str] = None,
        functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
        variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
        task_instructions: typing.Optional[str] = OMIT,
        query_instructions: typing.Optional[str] = OMIT,
        selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT,
        max_search_urls: typing.Optional[int] = OMIT,
        max_references: typing.Optional[int] = OMIT,
        max_context_words: typing.Optional[int] = OMIT,
        scroll_jump: typing.Optional[int] = OMIT,
        embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT,
        dense_weight: typing.Optional[float] = OMIT,
        avoid_repetition: typing.Optional[bool] = OMIT,
        num_outputs: typing.Optional[int] = OMIT,
        quality: typing.Optional[float] = OMIT,
        max_tokens: typing.Optional[int] = OMIT,
        sampling_temperature: typing.Optional[float] = OMIT,
        response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT,
        serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
        scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
        serp_search_type: typing.Optional[SerpSearchType] = OMIT,
        scaleserp_search_field: typing.Optional[str] = OMIT,
        settings: typing.Optional[RunSettings] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[GoogleGptPageOutput]:
        """
        Run the web-search + LLM recipe (``POST v3/google-gpt/async``)
        and return the run's output.

        Parameters
        ----------
        search_query : str

        site_filter : str

        variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
            Variables to be used as Jinja prompt templates and in functions as arguments

        dense_weight : typing.Optional[float]
            Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
            Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.

        scaleserp_locations : typing.Optional[typing.Sequence[str]]
            DEPRECATED: use `serp_search_location` instead

        scaleserp_search_field : typing.Optional[str]
            DEPRECATED: use `serp_search_type` instead

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Other keyword arguments are optional recipe settings forwarded verbatim
        in the request body (types per the annotations above).

        Returns
        -------
        typing.Optional[GoogleGptPageOutput]
            Successful Response

        Examples
        --------
        import asyncio

        from gooey import AsyncGooey

        client = AsyncGooey(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.web_search_llm(
                search_query="search_query",
                site_filter="site_filter",
            )


        asyncio.run(main())
        """
        _response = await self._client_wrapper.httpx_client.request(
            "v3/google-gpt/async",
            method="POST",
            params={
                "example_id": example_id,
            },
            json={
                "functions": functions,
                "variables": variables,
                "search_query": search_query,
                "site_filter": site_filter,
                "task_instructions": task_instructions,
                "query_instructions": query_instructions,
                "selected_model": selected_model,
                "max_search_urls": max_search_urls,
                "max_references": max_references,
                "max_context_words": max_context_words,
                "scroll_jump": scroll_jump,
                "embedding_model": embedding_model,
                "dense_weight": dense_weight,
                "avoid_repetition": avoid_repetition,
                "num_outputs": num_outputs,
                "quality": quality,
                "max_tokens": max_tokens,
                "sampling_temperature": sampling_temperature,
                "response_format_type": response_format_type,
                "serp_search_location": serp_search_location,
                "scaleserp_locations": scaleserp_locations,
                "serp_search_type": serp_search_type,
                "scaleserp_search_field": scaleserp_search_field,
                "settings": settings,
            },
            request_options=request_options,
            omit=OMIT,  # arguments left as OMIT are dropped from the payload
        )
        try:
            if 200 <= _response.status_code < 300:
                _parsed_response = typing.cast(
                    GoogleGptPageStatusResponse,
                    parse_obj_as(
                        type_=GoogleGptPageStatusResponse,  # type: ignore
                        object_=_response.json(),
                    ),
                )
                return _parsed_response.output
            if _response.status_code == 402:
                raise PaymentRequiredError(
                    typing.cast(
                        typing.Optional[typing.Any],
                        parse_obj_as(
                            type_=typing.Optional[typing.Any],  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    typing.cast(
                        HttpValidationError,
                        parse_obj_as(
                            type_=HttpValidationError,  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            if _response.status_code == 429:
                raise TooManyRequestsError(
                    typing.cast(
                        GenericErrorResponse,
                        parse_obj_as(
                            type_=GenericErrorResponse,  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Non-JSON body (e.g. an HTML error page): surface the raw text.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
"embedding_model": embedding_model, + "dense_weight": dense_weight, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + _parsed_response = typing.cast( + GoogleGptPageStatusResponse, + parse_obj_as( + type_=GoogleGptPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3social_lookup_email_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Any: + async def personalize_email( + self, + *, + email_address: str, + example_id: typing.Optional[str] = None, + functions: 
typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + input_prompt: typing.Optional[str] = OMIT, + selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[SocialLookupEmailPageOutput]: """ Parameters ---------- + email_address : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_prompt : typing.Optional[str] + + selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[SocialLookupEmailPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + typing.Optional[SocialLookupEmailPageOutput] Successful Response Examples -------- - from gooey import Gooey + import asyncio - client = Gooey( + from gooey import AsyncGooey + + client = AsyncGooey( api_key="YOUR_API_KEY", ) - client.post_v3social_lookup_email_async() + + + async def main() -> None: + await client.personalize_email( + email_address="email_address", + ) + + + asyncio.run(main()) """ - _response = self._client_wrapper.httpx_client.request( - "v3/SocialLookupEmail/async", method="POST", request_options=request_options + _response = await self._client_wrapper.httpx_client.request( + "v3/SocialLookupEmail/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "email_address": email_address, + "input_prompt": input_prompt, + "selected_model": selected_model, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + _parsed_response = typing.cast( + SocialLookupEmailPageStatusResponse, + parse_obj_as( + type_=SocialLookupEmailPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if 
_response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3text_to_speech_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + async def bulk_run( + self, + *, + documents: typing.Sequence[str], + run_urls: typing.Sequence[str], + input_columns: typing.Dict[str, str], + output_columns: typing.Dict[str, str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + eval_urls: typing.Optional[typing.Sequence[str]] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[BulkRunnerPageOutput]: """ Parameters ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. + documents : typing.Sequence[str] - Returns - ------- - typing.Any - Successful Response + Upload or link to a CSV or google sheet that contains your sample input data. + For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. + Remember to includes header names in your CSV too. 
- Examples - -------- - from gooey import Gooey - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3text_to_speech_async() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/TextToSpeech/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + run_urls : typing.Sequence[str] + + Provide one or more Gooey.AI workflow runs. + You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. + + + input_columns : typing.Dict[str, str] + + For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. + + + output_columns : typing.Dict[str, str] + + For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. + + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + eval_urls : typing.Optional[typing.Sequence[str]] + + _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. + + + settings : typing.Optional[RunSettings] - def post_v3art_qr_code_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: - """ - Parameters - ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + typing.Optional[BulkRunnerPageOutput] Successful Response Examples -------- - from gooey import Gooey + import asyncio - client = Gooey( + from gooey import AsyncGooey + + client = AsyncGooey( api_key="YOUR_API_KEY", ) - client.post_v3art_qr_code_async() + + + async def main() -> None: + await client.bulk_run( + documents=["documents"], + run_urls=["run_urls"], + input_columns={"key": "value"}, + output_columns={"key": "value"}, + ) + + + asyncio.run(main()) """ - _response = self._client_wrapper.httpx_client.request( - "v3/art-qr-code/async", method="POST", request_options=request_options + _response = await self._client_wrapper.httpx_client.request( + "v3/bulk-runner/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "documents": documents, + "run_urls": run_urls, + "input_columns": input_columns, + "output_columns": output_columns, + "eval_urls": eval_urls, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + _parsed_response = typing.cast( + BulkRunnerPageStatusResponse, + parse_obj_as( + type_=BulkRunnerPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # 
type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3asr_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + async def synthesize_data( + self, + *, + documents: typing.Sequence[str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + sheet_url: typing.Optional[str] = OMIT, + selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT, + google_translate_target: typing.Optional[str] = OMIT, + glossary_document: typing.Optional[str] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[DocExtractPageOutput]: """ Parameters ---------- + documents : typing.Sequence[str] + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + sheet_url : typing.Optional[str] + + selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel] + + google_translate_target : typing.Optional[str] + + glossary_document : 
typing.Optional[str] + Provide a glossary to customize translation and improve accuracy of domain-specific terms. + If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). + + task_instructions : typing.Optional[str] + + selected_model : typing.Optional[DocExtractPageRequestSelectedModel] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[DocExtractPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Any + typing.Optional[DocExtractPageOutput] Successful Response Examples -------- - from gooey import Gooey + import asyncio - client = Gooey( + from gooey import AsyncGooey + + client = AsyncGooey( api_key="YOUR_API_KEY", ) - client.post_v3asr_async() + + + async def main() -> None: + await client.synthesize_data( + documents=["documents"], + ) + + + asyncio.run(main()) """ - _response = self._client_wrapper.httpx_client.request( - "v3/asr/async", method="POST", request_options=request_options + _response = await self._client_wrapper.httpx_client.request( + "v3/doc-extract/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "documents": documents, + "sheet_url": sheet_url, + "selected_asr_model": selected_asr_model, + "google_translate_target": google_translate_target, + "glossary_document": glossary_document, + "task_instructions": task_instructions, + "selected_model": selected_model, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": 
sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + _parsed_response = typing.cast( + DocExtractPageStatusResponse, + parse_obj_as( + type_=DocExtractPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3bulk_eval_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + async def llm( + self, + *, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + input_prompt: typing.Optional[str] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, 
+ sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[CompareLlmPageOutput]: """ Parameters ---------- + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_prompt : typing.Optional[str] + + selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + typing.Optional[CompareLlmPageOutput] Successful Response Examples -------- - from gooey import Gooey + import asyncio - client = Gooey( + from gooey import AsyncGooey + + client = AsyncGooey( api_key="YOUR_API_KEY", ) - client.post_v3bulk_eval_async() + + + async def main() -> None: + await client.llm() + + + asyncio.run(main()) """ - _response = self._client_wrapper.httpx_client.request( - "v3/bulk-eval/async", method="POST", request_options=request_options + _response = await self._client_wrapper.httpx_client.request( + "v3/CompareLLM/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "input_prompt": input_prompt, + "selected_models": selected_models, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + _parsed_response = typing.cast( + CompareLlmPageStatusResponse, + parse_obj_as( + type_=CompareLlmPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + 
type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3bulk_runner_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + async def rag( + self, + *, + search_query: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT, + documents: typing.Optional[typing.Sequence[str]] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + doc_extract_url: typing.Optional[str] = OMIT, + embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT, + citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[DocSearchPageOutput]: """ Parameters ---------- + search_query : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : 
typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery] + + documents : typing.Optional[typing.Sequence[str]] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + doc_extract_url : typing.Optional[str] + + embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[DocSearchPageRequestSelectedModel] + + citation_style : typing.Optional[DocSearchPageRequestCitationStyle] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[DocSearchPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + typing.Optional[DocSearchPageOutput] Successful Response Examples -------- - from gooey import Gooey + import asyncio - client = Gooey( + from gooey import AsyncGooey + + client = AsyncGooey( api_key="YOUR_API_KEY", ) - client.post_v3bulk_runner_async() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/bulk-runner/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3compare_ai_upscalers_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Any: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- Returns - ------- - typing.Any - Successful Response + async def main() -> None: + await client.rag( + search_query="search_query", + ) - Examples - -------- - from gooey import Gooey - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3compare_ai_upscalers_async() + asyncio.run(main()) """ - _response = self._client_wrapper.httpx_client.request( - "v3/compare-ai-upscalers/async", method="POST", request_options=request_options + _response = await self._client_wrapper.httpx_client.request( + "v3/doc-search/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "keyword_query": keyword_query, + "documents": documents, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "doc_extract_url": doc_extract_url, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "citation_style": citation_style, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + _parsed_response = typing.cast( + DocSearchPageStatusResponse, + parse_obj_as( + type_=DocSearchPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if 
_response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3doc_extract_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + async def doc_summary( + self, + *, + documents: typing.Sequence[str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + merge_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = OMIT, + chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT, + selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT, + google_translate_target: typing.Optional[str] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[DocSummaryPageOutput]: """ Parameters ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
+ documents : typing.Sequence[str] - Returns - ------- - typing.Any - Successful Response + example_id : typing.Optional[str] - Examples - -------- - from gooey import Gooey + functions : typing.Optional[typing.Sequence[RecipeFunction]] - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3doc_extract_async() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/doc-extract/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + task_instructions : typing.Optional[str] + + merge_instructions : typing.Optional[str] + + selected_model : typing.Optional[DocSummaryPageRequestSelectedModel] + + chain_type : typing.Optional[typing.Literal["map_reduce"]] + + selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel] + + google_translate_target : typing.Optional[str] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[DocSummaryPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] - def post_v3doc_search_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: - """ - Parameters - ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + typing.Optional[DocSummaryPageOutput] Successful Response Examples -------- - from gooey import Gooey + import asyncio - client = Gooey( + from gooey import AsyncGooey + + client = AsyncGooey( api_key="YOUR_API_KEY", ) - client.post_v3doc_search_async() + + + async def main() -> None: + await client.doc_summary( + documents=["documents"], + ) + + + asyncio.run(main()) """ - _response = self._client_wrapper.httpx_client.request( - "v3/doc-search/async", method="POST", request_options=request_options + _response = await self._client_wrapper.httpx_client.request( + "v3/doc-summary/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "documents": documents, + "task_instructions": task_instructions, + "merge_instructions": merge_instructions, + "selected_model": selected_model, + "chain_type": chain_type, + "selected_asr_model": selected_asr_model, + "google_translate_target": google_translate_target, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + _parsed_response = typing.cast( + DocSummaryPageStatusResponse, + parse_obj_as( + type_=DocSummaryPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + 
parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3doc_summary_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + async def lipsync_tts( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT, + uberduck_voice_name: typing.Optional[str] = OMIT, + uberduck_speaking_rate: typing.Optional[float] = OMIT, + google_voice_name: typing.Optional[str] = OMIT, + google_speaking_rate: typing.Optional[float] = OMIT, + google_pitch: typing.Optional[float] = OMIT, + bark_history_prompt: typing.Optional[str] = OMIT, + elevenlabs_voice_name: typing.Optional[str] = OMIT, + elevenlabs_api_key: typing.Optional[str] = OMIT, + elevenlabs_voice_id: typing.Optional[str] = OMIT, + elevenlabs_model: typing.Optional[str] = OMIT, + elevenlabs_stability: typing.Optional[float] = OMIT, + elevenlabs_similarity_boost: typing.Optional[float] = OMIT, + elevenlabs_style: typing.Optional[float] = OMIT, + elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, + azure_voice_name: typing.Optional[str] = OMIT, + openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT, + openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT, + input_face: typing.Optional[str] = OMIT, + face_padding_top: typing.Optional[int] = OMIT, + 
face_padding_bottom: typing.Optional[int] = OMIT, + face_padding_left: typing.Optional[int] = OMIT, + face_padding_right: typing.Optional[int] = OMIT, + sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, + selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[LipsyncTtsPageOutput]: """ Parameters ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. + text_prompt : str - Returns - ------- - typing.Any - Successful Response + example_id : typing.Optional[str] - Examples - -------- - from gooey import Gooey + functions : typing.Optional[typing.Sequence[RecipeFunction]] - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3doc_summary_async() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/doc-summary/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + tts_provider : typing.Optional[LipsyncTtsPageRequestTtsProvider] + + uberduck_voice_name : typing.Optional[str] + + uberduck_speaking_rate : typing.Optional[float] + + google_voice_name : typing.Optional[str] + + google_speaking_rate : typing.Optional[float] + + google_pitch : typing.Optional[float] + + bark_history_prompt : typing.Optional[str] + + elevenlabs_voice_name : typing.Optional[str] + Use `elevenlabs_voice_id` instead + + elevenlabs_api_key : 
typing.Optional[str] + + elevenlabs_voice_id : typing.Optional[str] + + elevenlabs_model : typing.Optional[str] + + elevenlabs_stability : typing.Optional[float] + + elevenlabs_similarity_boost : typing.Optional[float] + + elevenlabs_style : typing.Optional[float] + + elevenlabs_speaker_boost : typing.Optional[bool] + + azure_voice_name : typing.Optional[str] + + openai_voice_name : typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] + + openai_tts_model : typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] + + input_face : typing.Optional[str] + + face_padding_top : typing.Optional[int] + + face_padding_bottom : typing.Optional[int] + + face_padding_left : typing.Optional[int] + + face_padding_right : typing.Optional[int] + + sadtalker_settings : typing.Optional[SadTalkerSettings] + + selected_model : typing.Optional[LipsyncTtsPageRequestSelectedModel] + + settings : typing.Optional[RunSettings] - def post_v3embeddings_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: - """ - Parameters - ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + typing.Optional[LipsyncTtsPageOutput] Successful Response Examples -------- - from gooey import Gooey + import asyncio - client = Gooey( + from gooey import AsyncGooey + + client = AsyncGooey( api_key="YOUR_API_KEY", ) - client.post_v3embeddings_async() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/embeddings/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3functions_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- Returns - ------- - typing.Any - Successful Response + async def main() -> None: + await client.lipsync_tts( + text_prompt="text_prompt", + ) - Examples - -------- - from gooey import Gooey - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3functions_async() + asyncio.run(main()) """ - _response = self._client_wrapper.httpx_client.request( - "v3/functions/async", method="POST", request_options=request_options + _response = await self._client_wrapper.httpx_client.request( + "v3/LipsyncTTS/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "tts_provider": tts_provider, + "uberduck_voice_name": uberduck_voice_name, + "uberduck_speaking_rate": uberduck_speaking_rate, + "google_voice_name": google_voice_name, + "google_speaking_rate": google_speaking_rate, + "google_pitch": google_pitch, + "bark_history_prompt": bark_history_prompt, + "elevenlabs_voice_name": elevenlabs_voice_name, + "elevenlabs_api_key": elevenlabs_api_key, + "elevenlabs_voice_id": elevenlabs_voice_id, + "elevenlabs_model": elevenlabs_model, + "elevenlabs_stability": elevenlabs_stability, + "elevenlabs_similarity_boost": elevenlabs_similarity_boost, + "elevenlabs_style": elevenlabs_style, + "elevenlabs_speaker_boost": elevenlabs_speaker_boost, + "azure_voice_name": azure_voice_name, + "openai_voice_name": openai_voice_name, + "openai_tts_model": openai_tts_model, + "input_face": input_face, + "face_padding_top": face_padding_top, + "face_padding_bottom": face_padding_bottom, + "face_padding_left": face_padding_left, + "face_padding_right": face_padding_right, + "sadtalker_settings": sadtalker_settings, + "selected_model": selected_model, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + 
_parsed_response = typing.cast( + LipsyncTtsPageStatusResponse, + parse_obj_as( + type_=LipsyncTtsPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3google_gpt_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + async def text_to_speech( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT, + uberduck_voice_name: typing.Optional[str] = OMIT, + uberduck_speaking_rate: typing.Optional[float] = OMIT, + google_voice_name: typing.Optional[str] = OMIT, + google_speaking_rate: typing.Optional[float] = OMIT, + google_pitch: typing.Optional[float] = OMIT, + bark_history_prompt: typing.Optional[str] = OMIT, + elevenlabs_voice_name: typing.Optional[str] = OMIT, + elevenlabs_api_key: typing.Optional[str] = OMIT, + elevenlabs_voice_id: typing.Optional[str] = OMIT, + elevenlabs_model: typing.Optional[str] = 
OMIT, + elevenlabs_stability: typing.Optional[float] = OMIT, + elevenlabs_similarity_boost: typing.Optional[float] = OMIT, + elevenlabs_style: typing.Optional[float] = OMIT, + elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, + azure_voice_name: typing.Optional[str] = OMIT, + openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT, + openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[TextToSpeechPageOutput]: """ Parameters ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. + text_prompt : str - Returns - ------- - typing.Any - Successful Response + example_id : typing.Optional[str] - Examples - -------- - from gooey import Gooey + functions : typing.Optional[typing.Sequence[RecipeFunction]] - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3google_gpt_async() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/google-gpt/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - def post_v3related_qna_maker_doc_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Any: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
+ tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider] - Returns - ------- - typing.Any - Successful Response + uberduck_voice_name : typing.Optional[str] - Examples - -------- - from gooey import Gooey + uberduck_speaking_rate : typing.Optional[float] - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.post_v3related_qna_maker_doc_async() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/related-qna-maker-doc/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + google_voice_name : typing.Optional[str] + + google_speaking_rate : typing.Optional[float] + + google_pitch : typing.Optional[float] + + bark_history_prompt : typing.Optional[str] + + elevenlabs_voice_name : typing.Optional[str] + Use `elevenlabs_voice_id` instead + + elevenlabs_api_key : typing.Optional[str] + + elevenlabs_voice_id : typing.Optional[str] + + elevenlabs_model : typing.Optional[str] + + elevenlabs_stability : typing.Optional[float] + + elevenlabs_similarity_boost : typing.Optional[float] + + elevenlabs_style : typing.Optional[float] + + elevenlabs_speaker_boost : typing.Optional[bool] + + azure_voice_name : typing.Optional[str] + + openai_voice_name : typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] + + openai_tts_model : typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] + + settings : typing.Optional[RunSettings] - def post_v3related_qna_maker_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: - """ - Parameters - ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + typing.Optional[TextToSpeechPageOutput] Successful Response Examples -------- - from gooey import Gooey + import asyncio - client = Gooey( + from gooey import AsyncGooey + + client = AsyncGooey( api_key="YOUR_API_KEY", ) - client.post_v3related_qna_maker_async() + + + async def main() -> None: + await client.text_to_speech( + text_prompt="text_prompt", + ) + + + asyncio.run(main()) """ - _response = self._client_wrapper.httpx_client.request( - "v3/related-qna-maker/async", method="POST", request_options=request_options + _response = await self._client_wrapper.httpx_client.request( + "v3/TextToSpeech/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "tts_provider": tts_provider, + "uberduck_voice_name": uberduck_voice_name, + "uberduck_speaking_rate": uberduck_speaking_rate, + "google_voice_name": google_voice_name, + "google_speaking_rate": google_speaking_rate, + "google_pitch": google_pitch, + "bark_history_prompt": bark_history_prompt, + "elevenlabs_voice_name": elevenlabs_voice_name, + "elevenlabs_api_key": elevenlabs_api_key, + "elevenlabs_voice_id": elevenlabs_voice_id, + "elevenlabs_model": elevenlabs_model, + "elevenlabs_stability": elevenlabs_stability, + "elevenlabs_similarity_boost": elevenlabs_similarity_boost, + "elevenlabs_style": elevenlabs_style, + "elevenlabs_speaker_boost": elevenlabs_speaker_boost, + "azure_voice_name": azure_voice_name, + "openai_voice_name": openai_voice_name, + "openai_tts_model": openai_tts_model, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + _parsed_response = typing.cast( + TextToSpeechPageStatusResponse, + parse_obj_as( + type_=TextToSpeechPageStatusResponse, # type: ignore + object_=_response.json(), 
+ ), + ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3text2audio_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + async def speech_recognition( + self, + *, + documents: typing.Sequence[str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT, + language: typing.Optional[str] = OMIT, + translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT, + output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT, + google_translate_target: typing.Optional[str] = OMIT, + translation_source: typing.Optional[str] = OMIT, + translation_target: typing.Optional[str] = OMIT, + glossary_document: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[AsrPageOutput]: """ Parameters ---------- + documents : typing.Sequence[str] + + example_id : typing.Optional[str] + + functions : 
typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + selected_model : typing.Optional[AsrPageRequestSelectedModel] + + language : typing.Optional[str] + + translation_model : typing.Optional[AsrPageRequestTranslationModel] + + output_format : typing.Optional[AsrPageRequestOutputFormat] + + google_translate_target : typing.Optional[str] + use `translation_model` & `translation_target` instead. + + translation_source : typing.Optional[str] + + translation_target : typing.Optional[str] + + glossary_document : typing.Optional[str] + Provide a glossary to customize translation and improve accuracy of domain-specific terms. + If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + typing.Optional[AsrPageOutput] Successful Response Examples -------- - from gooey import Gooey + import asyncio - client = Gooey( + from gooey import AsyncGooey + + client = AsyncGooey( api_key="YOUR_API_KEY", ) - client.post_v3text2audio_async() + + + async def main() -> None: + await client.speech_recognition( + documents=["documents"], + ) + + + asyncio.run(main()) """ - _response = self._client_wrapper.httpx_client.request( - "v3/text2audio/async", method="POST", request_options=request_options + _response = await self._client_wrapper.httpx_client.request( + "v3/asr/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "documents": documents, + "selected_model": selected_model, + "language": language, + "translation_model": translation_model, + "output_format": output_format, + "google_translate_target": google_translate_target, + "translation_source": translation_source, + "translation_target": translation_target, + "glossary_document": glossary_document, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + _parsed_response = typing.cast( + AsrPageStatusResponse, + parse_obj_as( + type_=AsrPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + 
typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3translate_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + async def text_to_music( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + duration_sec: typing.Optional[float] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + seed: typing.Optional[int] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[Text2AudioPageOutput]: """ Parameters ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + negative_prompt : typing.Optional[str] + + duration_sec : typing.Optional[float] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + seed : typing.Optional[int] + + sd2upscaling : typing.Optional[bool] + + selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] + + settings : typing.Optional[RunSettings] + request_options : 
typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Any + typing.Optional[Text2AudioPageOutput] Successful Response Examples -------- - from gooey import Gooey + import asyncio - client = Gooey( + from gooey import AsyncGooey + + client = AsyncGooey( api_key="YOUR_API_KEY", ) - client.post_v3translate_async() + + + async def main() -> None: + await client.text_to_music( + text_prompt="text_prompt", + ) + + + asyncio.run(main()) """ - _response = self._client_wrapper.httpx_client.request( - "v3/translate/async", method="POST", request_options=request_options + _response = await self._client_wrapper.httpx_client.request( + "v3/text2audio/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "duration_sec": duration_sec, + "num_outputs": num_outputs, + "quality": quality, + "guidance_scale": guidance_scale, + "seed": seed, + "sd_2_upscaling": sd2upscaling, + "selected_models": selected_models, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + _parsed_response = typing.cast( + Text2AudioPageStatusResponse, + parse_obj_as( + type_=Text2AudioPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise 
TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def post_v3video_bots_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + async def translate( + self, + *, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + texts: typing.Optional[typing.Sequence[str]] = OMIT, + selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT, + translation_source: typing.Optional[str] = OMIT, + translation_target: typing.Optional[str] = OMIT, + glossary_document: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[TranslationPageOutput]: """ Parameters ---------- + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + texts : typing.Optional[typing.Sequence[str]] + + selected_model : typing.Optional[TranslationPageRequestSelectedModel] + + translation_source : typing.Optional[str] + + translation_target : typing.Optional[str] + + glossary_document : typing.Optional[str] + Provide a glossary to customize translation and improve accuracy of domain-specific terms. + If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). 
+ + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Any + typing.Optional[TranslationPageOutput] Successful Response Examples -------- - from gooey import Gooey + import asyncio - client = Gooey( + from gooey import AsyncGooey + + client = AsyncGooey( api_key="YOUR_API_KEY", ) - client.post_v3video_bots_async() + + + async def main() -> None: + await client.translate() + + + asyncio.run(main()) """ - _response = self._client_wrapper.httpx_client.request( - "v3/video-bots/async", method="POST", request_options=request_options + _response = await self._client_wrapper.httpx_client.request( + "v3/translate/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "texts": texts, + "selected_model": selected_model, + "translation_source": translation_source, + "translation_target": translation_target, + "glossary_document": glossary_document, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + _parsed_response = typing.cast( + TranslationPageStatusResponse, + parse_obj_as( + type_=TranslationPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + 
parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - -class AsyncGooey: - """ - Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configuration that will propagate to these functions. - - Parameters - ---------- - base_url : typing.Optional[str] - The base url to use for requests from the client. - - environment : GooeyEnvironment - The environment to use for requests from the client. from .environment import GooeyEnvironment - - - - Defaults to GooeyEnvironment.DEFAULT - - - - api_key : typing.Optional[typing.Union[str, typing.Callable[[], str]]] - timeout : typing.Optional[float] - The timeout to be used, in seconds, for requests. By default the timeout is 60 seconds, unless a custom httpx client is used, in which case this default is not enforced. - - follow_redirects : typing.Optional[bool] - Whether the default httpx client follows redirects or not, this is irrelevant if a custom httpx client is passed in. - - httpx_client : typing.Optional[httpx.AsyncClient] - The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration. 
- - Examples - -------- - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - """ - - def __init__( - self, - *, - base_url: typing.Optional[str] = None, - environment: GooeyEnvironment = GooeyEnvironment.DEFAULT, - api_key: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = os.getenv("GOOEY_API_KEY"), - timeout: typing.Optional[float] = None, - follow_redirects: typing.Optional[bool] = True, - httpx_client: typing.Optional[httpx.AsyncClient] = None - ): - _defaulted_timeout = timeout if timeout is not None else 60 if httpx_client is None else None - if api_key is None: - raise ApiError(body="The client must be instantiated be either passing in api_key or setting GOOEY_API_KEY") - self._client_wrapper = AsyncClientWrapper( - base_url=_get_base_url(base_url=base_url, environment=environment), - api_key=api_key, - httpx_client=httpx_client - if httpx_client is not None - else httpx.AsyncClient(timeout=_defaulted_timeout, follow_redirects=follow_redirects) - if follow_redirects is not None - else httpx.AsyncClient(timeout=_defaulted_timeout), - timeout=_defaulted_timeout, - ) - self.copilot_integrations = AsyncCopilotIntegrationsClient(client_wrapper=self._client_wrapper) - self.copilot_for_your_enterprise = AsyncCopilotForYourEnterpriseClient(client_wrapper=self._client_wrapper) - self.ai_animation_generator = AsyncAiAnimationGeneratorClient(client_wrapper=self._client_wrapper) - self.ai_art_qr_code = AsyncAiArtQrCodeClient(client_wrapper=self._client_wrapper) - self.generate_people_also_ask_seo_content = AsyncGeneratePeopleAlsoAskSeoContentClient( - client_wrapper=self._client_wrapper - ) - self.create_a_perfect_seo_optimized_title_paragraph = AsyncCreateAPerfectSeoOptimizedTitleParagraphClient( - client_wrapper=self._client_wrapper - ) - self.web_search_gpt3 = AsyncWebSearchGpt3Client(client_wrapper=self._client_wrapper) - self.profile_lookup_gpt3for_ai_personalized_emails = 
AsyncProfileLookupGpt3ForAiPersonalizedEmailsClient( - client_wrapper=self._client_wrapper - ) - self.bulk_runner = AsyncBulkRunnerClient(client_wrapper=self._client_wrapper) - self.evaluator = AsyncEvaluatorClient(client_wrapper=self._client_wrapper) - self.synthetic_data_maker_for_videos_pd_fs = AsyncSyntheticDataMakerForVideosPdFsClient( - client_wrapper=self._client_wrapper - ) - self.large_language_models_gpt3 = AsyncLargeLanguageModelsGpt3Client(client_wrapper=self._client_wrapper) - self.search_your_docs_with_gpt = AsyncSearchYourDocsWithGptClient(client_wrapper=self._client_wrapper) - self.smart_gpt = AsyncSmartGptClient(client_wrapper=self._client_wrapper) - self.summarize_your_docs_with_gpt = AsyncSummarizeYourDocsWithGptClient(client_wrapper=self._client_wrapper) - self.functions = AsyncFunctionsClient(client_wrapper=self._client_wrapper) - self.lip_syncing = AsyncLipSyncingClient(client_wrapper=self._client_wrapper) - self.lipsync_video_with_any_text = AsyncLipsyncVideoWithAnyTextClient(client_wrapper=self._client_wrapper) - self.compare_ai_voice_generators = AsyncCompareAiVoiceGeneratorsClient(client_wrapper=self._client_wrapper) - self.speech_recognition_translation = AsyncSpeechRecognitionTranslationClient( - client_wrapper=self._client_wrapper - ) - self.text_guided_audio_generator = AsyncTextGuidedAudioGeneratorClient(client_wrapper=self._client_wrapper) - self.compare_ai_translations = AsyncCompareAiTranslationsClient(client_wrapper=self._client_wrapper) - self.edit_an_image_with_ai_prompt = AsyncEditAnImageWithAiPromptClient(client_wrapper=self._client_wrapper) - self.compare_ai_image_generators = AsyncCompareAiImageGeneratorsClient(client_wrapper=self._client_wrapper) - self.generate_product_photo_backgrounds = AsyncGenerateProductPhotoBackgroundsClient( - client_wrapper=self._client_wrapper - ) - self.ai_image_with_a_face = AsyncAiImageWithAFaceClient(client_wrapper=self._client_wrapper) - self.ai_generated_photo_from_email_profile_lookup = 
AsyncAiGeneratedPhotoFromEmailProfileLookupClient( - client_wrapper=self._client_wrapper - ) - self.render_image_search_results_with_ai = AsyncRenderImageSearchResultsWithAiClient( - client_wrapper=self._client_wrapper - ) - self.ai_background_changer = AsyncAiBackgroundChangerClient(client_wrapper=self._client_wrapper) - self.compare_ai_image_upscalers = AsyncCompareAiImageUpscalersClient(client_wrapper=self._client_wrapper) - self.chyron_plant_bot = AsyncChyronPlantBotClient(client_wrapper=self._client_wrapper) - self.letter_writer = AsyncLetterWriterClient(client_wrapper=self._client_wrapper) - self.embeddings = AsyncEmbeddingsClient(client_wrapper=self._client_wrapper) - self.people_also_ask_answers_from_a_doc = AsyncPeopleAlsoAskAnswersFromADocClient( - client_wrapper=self._client_wrapper - ) - self.misc = AsyncMiscClient(client_wrapper=self._client_wrapper) - - async def animate( + async def remix_image( self, *, - animation_prompts: typing.List[AnimationPrompt], + input_image: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - max_frames: typing.Optional[int] = None, - selected_model: typing.Optional[AnimateRequestSelectedModel] = None, - animation_mode: typing.Optional[str] = None, - zoom: typing.Optional[str] = None, - translation_x: typing.Optional[str] = None, - translation_y: typing.Optional[str] = None, - rotation3d_x: typing.Optional[str] = None, - rotation3d_y: typing.Optional[str] = None, - rotation3d_z: typing.Optional[str] = None, - fps: typing.Optional[int] = None, - seed: typing.Optional[int] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> DeforumSdPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + text_prompt: 
typing.Optional[str] = OMIT, + selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT, + selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + prompt_strength: typing.Optional[float] = OMIT, + controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, + seed: typing.Optional[int] = OMIT, + image_guidance_scale: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[Img2ImgPageOutput]: """ Parameters ---------- - animation_prompts : typing.List[AnimationPrompt] + input_image : str example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - max_frames : typing.Optional[int] + text_prompt : typing.Optional[str] - selected_model : typing.Optional[AnimateRequestSelectedModel] + selected_model : typing.Optional[Img2ImgPageRequestSelectedModel] - animation_mode : typing.Optional[str] + selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel] - zoom : typing.Optional[str] + negative_prompt : typing.Optional[str] - translation_x : typing.Optional[str] + num_outputs : typing.Optional[int] - translation_y : typing.Optional[str] + quality : typing.Optional[int] - rotation3d_x : typing.Optional[str] + output_width : typing.Optional[int] - rotation3d_y : typing.Optional[str] + output_height : 
typing.Optional[int] - rotation3d_z : typing.Optional[str] + guidance_scale : typing.Optional[float] - fps : typing.Optional[int] + prompt_strength : typing.Optional[float] + + controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]] seed : typing.Optional[int] + image_guidance_scale : typing.Optional[float] + settings : typing.Optional[RunSettings] request_options : typing.Optional[RequestOptions] @@ -5191,14 +11410,14 @@ async def animate( Returns ------- - DeforumSdPageStatusResponse + typing.Optional[Img2ImgPageOutput] Successful Response Examples -------- import asyncio - from gooey import AnimationPrompt, AsyncGooey + from gooey import AsyncGooey client = AsyncGooey( api_key="YOUR_API_KEY", @@ -5206,108 +11425,110 @@ async def animate( async def main() -> None: - await client.animate( - animation_prompts=[ - AnimationPrompt( - frame="frame", - prompt="prompt", - ) - ], + await client.remix_image( + input_image="input_image", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/DeforumSD/async/form", + "v3/Img2Img/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, - "animation_prompts": animation_prompts, - "max_frames": max_frames, + "input_image": input_image, + "text_prompt": text_prompt, "selected_model": selected_model, - "animation_mode": animation_mode, - "zoom": zoom, - "translation_x": translation_x, - "translation_y": translation_y, - "rotation_3d_x": rotation3d_x, - "rotation_3d_y": rotation3d_y, - "rotation_3d_z": rotation3d_z, - "fps": fps, + "selected_controlnet_model": selected_controlnet_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "prompt_strength": prompt_strength, + "controlnet_conditioning_scale": 
controlnet_conditioning_scale, "seed": seed, + "image_guidance_scale": image_guidance_scale, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(DeforumSdPageStatusResponse, parse_obj_as(type_=DeforumSdPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + Img2ImgPageStatusResponse, + parse_obj_as( + type_=Img2ImgPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, 
body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def qr_code( + async def text_to_image( self, *, text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - qr_code_data: typing.Optional[str] = None, - qr_code_input_image: typing.Optional[str] = None, - qr_code_vcard: typing.Optional[Vcard] = None, - qr_code_file: typing.Optional[str] = None, - use_url_shortener: typing.Optional[bool] = None, - negative_prompt: typing.Optional[str] = None, - image_prompt: typing.Optional[str] = None, - image_prompt_controlnet_models: typing.Optional[ - typing.List[QrCodeRequestImagePromptControlnetModelsItem] - ] = None, - image_prompt_strength: typing.Optional[float] = None, - image_prompt_scale: typing.Optional[float] = None, - image_prompt_pos_x: typing.Optional[float] = None, - image_prompt_pos_y: typing.Optional[float] = None, - selected_model: typing.Optional[QrCodeRequestSelectedModel] = None, - selected_controlnet_model: typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] = None, - output_width: typing.Optional[int] = None, - output_height: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - scheduler: typing.Optional[QrCodeRequestScheduler] = None, - seed: typing.Optional[int] = None, - obj_scale: typing.Optional[float] = None, - obj_pos_x: typing.Optional[float] = None, - obj_pos_y: typing.Optional[float] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> QrCodeGeneratorPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + dall_e3quality: typing.Optional[str] = OMIT, + dall_e3style: typing.Optional[str] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + seed: typing.Optional[int] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT, + scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT, + edit_instruction: typing.Optional[str] = OMIT, + image_guidance_scale: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[CompareText2ImgPageOutput]: """ Parameters ---------- @@ -5315,54 +11536,179 @@ async def qr_code( example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - qr_code_data : typing.Optional[str] + negative_prompt : typing.Optional[str] - qr_code_input_image : typing.Optional[str] + output_width : typing.Optional[int] - qr_code_vcard : typing.Optional[Vcard] + output_height : typing.Optional[int] - qr_code_file : typing.Optional[str] + num_outputs : typing.Optional[int] - use_url_shortener : typing.Optional[bool] + quality : typing.Optional[int] - negative_prompt : typing.Optional[str] + dall_e3quality : typing.Optional[str] - image_prompt : typing.Optional[str] + dall_e3style : typing.Optional[str] - image_prompt_controlnet_models : 
typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]] + guidance_scale : typing.Optional[float] - image_prompt_strength : typing.Optional[float] + seed : typing.Optional[int] - image_prompt_scale : typing.Optional[float] + sd2upscaling : typing.Optional[bool] - image_prompt_pos_x : typing.Optional[float] + selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] - image_prompt_pos_y : typing.Optional[float] + scheduler : typing.Optional[CompareText2ImgPageRequestScheduler] - selected_model : typing.Optional[QrCodeRequestSelectedModel] + edit_instruction : typing.Optional[str] - selected_controlnet_model : typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] + image_guidance_scale : typing.Optional[float] - output_width : typing.Optional[int] + settings : typing.Optional[RunSettings] - output_height : typing.Optional[int] + request_options : typing.Optional[RequestOptions] + Request-specific configuration. - guidance_scale : typing.Optional[float] + Returns + ------- + typing.Optional[CompareText2ImgPageOutput] + Successful Response - controlnet_conditioning_scale : typing.Optional[typing.List[float]] + Examples + -------- + import asyncio - num_outputs : typing.Optional[int] + from gooey import AsyncGooey - quality : typing.Optional[int] + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) - scheduler : typing.Optional[QrCodeRequestScheduler] - seed : typing.Optional[int] + async def main() -> None: + await client.text_to_image( + text_prompt="text_prompt", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/CompareText2Img/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "output_width": output_width, + "output_height": output_height, + "num_outputs": num_outputs, + "quality": quality, + 
"dall_e_3_quality": dall_e3quality, + "dall_e_3_style": dall_e3style, + "guidance_scale": guidance_scale, + "seed": seed, + "sd_2_upscaling": sd2upscaling, + "selected_models": selected_models, + "scheduler": scheduler, + "edit_instruction": edit_instruction, + "image_guidance_scale": image_guidance_scale, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _parsed_response = typing.cast( + CompareText2ImgPageStatusResponse, + parse_obj_as( + type_=CompareText2ImgPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def product_image( + self, + *, + input_image: str, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + obj_scale: typing.Optional[float] = OMIT, + obj_pos_x: typing.Optional[float] = OMIT, + obj_pos_y: typing.Optional[float] = OMIT, + mask_threshold: typing.Optional[float] = OMIT, + selected_model: 
typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + seed: typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[ObjectInpaintingPageOutput]: + """ + Parameters + ---------- + input_image : str + + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments obj_scale : typing.Optional[float] @@ -5370,6 +11716,26 @@ async def qr_code( obj_pos_y : typing.Optional[float] + mask_threshold : typing.Optional[float] + + selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + sd2upscaling : typing.Optional[bool] + + seed : typing.Optional[int] + settings : typing.Optional[RunSettings] request_options : typing.Optional[RequestOptions] @@ -5377,7 +11743,7 @@ async def qr_code( Returns ------- - QrCodeGeneratorPageStatusResponse + typing.Optional[ObjectInpaintingPageOutput] Successful Response Examples @@ -5392,7 +11758,8 @@ async def qr_code( async def main() -> None: - await client.qr_code( + await client.product_image( + input_image="input_image", text_prompt="text_prompt", ) @@ -5400,159 +11767,139 @@ async def main() -> None: asyncio.run(main()) """ _response = 
await self._client_wrapper.httpx_client.request( - "v3/art-qr-code/async/form", + "v3/ObjectInpainting/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, - "variables": variables, - "qr_code_data": qr_code_data, - "qr_code_input_image": qr_code_input_image, - "qr_code_vcard": qr_code_vcard, - "qr_code_file": qr_code_file, - "use_url_shortener": use_url_shortener, + "variables": variables, + "input_image": input_image, "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "image_prompt": image_prompt, - "image_prompt_controlnet_models": image_prompt_controlnet_models, - "image_prompt_strength": image_prompt_strength, - "image_prompt_scale": image_prompt_scale, - "image_prompt_pos_x": image_prompt_pos_x, - "image_prompt_pos_y": image_prompt_pos_y, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, + "mask_threshold": mask_threshold, "selected_model": selected_model, - "selected_controlnet_model": selected_controlnet_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, "output_width": output_width, "output_height": output_height, "guidance_scale": guidance_scale, - "controlnet_conditioning_scale": controlnet_conditioning_scale, - "num_outputs": num_outputs, - "quality": quality, - "scheduler": scheduler, + "sd_2_upscaling": sd2upscaling, "seed": seed, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(QrCodeGeneratorPageStatusResponse, parse_obj_as(type_=QrCodeGeneratorPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + 
_parsed_response = typing.cast( + ObjectInpaintingPageStatusResponse, + parse_obj_as( + type_=ObjectInpaintingPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def seo_people_also_ask( + async def portrait( self, *, - search_query: str, - site_filter: str, + input_image: str, + text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - selected_model: 
typing.Optional[SeoPeopleAlsoAskRequestSelectedModel] = None, - max_search_urls: typing.Optional[int] = None, - max_references: typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - embedding_model: typing.Optional[SeoPeopleAlsoAskRequestEmbeddingModel] = None, - dense_weight: typing.Optional[float] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[SeoPeopleAlsoAskRequestResponseFormatType] = None, - serp_search_location: typing.Optional[SerpSearchLocation] = None, - scaleserp_locations: typing.Optional[typing.List[str]] = None, - serp_search_type: typing.Optional[SerpSearchType] = None, - scaleserp_search_field: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> RelatedQnAPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + face_scale: typing.Optional[float] = OMIT, + face_pos_x: typing.Optional[float] = OMIT, + face_pos_y: typing.Optional[float] = OMIT, + selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + upscale_factor: typing.Optional[float] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + seed: typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[FaceInpaintingPageOutput]: """ Parameters 
---------- - search_query : str + input_image : str - site_filter : str + text_prompt : str example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[SeoPeopleAlsoAskRequestSelectedModel] - - max_search_urls : typing.Optional[int] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[SeoPeopleAlsoAskRequestEmbeddingModel] + face_scale : typing.Optional[float] - dense_weight : typing.Optional[float] + face_pos_x : typing.Optional[float] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
+ face_pos_y : typing.Optional[float] + selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel] - avoid_repetition : typing.Optional[bool] + negative_prompt : typing.Optional[str] num_outputs : typing.Optional[int] - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] + quality : typing.Optional[int] - response_format_type : typing.Optional[SeoPeopleAlsoAskRequestResponseFormatType] + upscale_factor : typing.Optional[float] - serp_search_location : typing.Optional[SerpSearchLocation] + output_width : typing.Optional[int] - scaleserp_locations : typing.Optional[typing.List[str]] - DEPRECATED: use `serp_search_location` instead + output_height : typing.Optional[int] - serp_search_type : typing.Optional[SerpSearchType] + guidance_scale : typing.Optional[float] - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead + seed : typing.Optional[int] settings : typing.Optional[RunSettings] @@ -5561,7 +11908,7 @@ async def seo_people_also_ask( Returns ------- - RelatedQnAPageStatusResponse + typing.Optional[FaceInpaintingPageOutput] Successful Response Examples @@ -5576,149 +11923,174 @@ async def seo_people_also_ask( async def main() -> None: - await client.seo_people_also_ask( - search_query="search_query", - site_filter="site_filter", + await client.portrait( + input_image="input_image", + text_prompt="tony stark from the iron man", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/related-qna-maker/async/form", + "v3/FaceInpainting/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, - "search_query": search_query, - "site_filter": site_filter, - "task_instructions": task_instructions, - "query_instructions": query_instructions, + "input_image": input_image, + "text_prompt": text_prompt, + 
"face_scale": face_scale, + "face_pos_x": face_pos_x, + "face_pos_y": face_pos_y, "selected_model": selected_model, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "avoid_repetition": avoid_repetition, + "negative_prompt": negative_prompt, "num_outputs": num_outputs, "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, + "upscale_factor": upscale_factor, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "seed": seed, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(RelatedQnAPageStatusResponse, parse_obj_as(type_=RelatedQnAPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + FaceInpaintingPageStatusResponse, + parse_obj_as( + type_=FaceInpaintingPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, 
parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def seo_content( + async def image_from_email( self, *, - search_query: str, - keywords: str, - title: str, - company_url: str, + text_prompt: str, example_id: typing.Optional[str] = None, - task_instructions: typing.Optional[str] = None, - enable_html: typing.Optional[bool] = None, - selected_model: typing.Optional[SeoContentRequestSelectedModel] = None, - max_search_urls: typing.Optional[int] = None, - enable_crosslinks: typing.Optional[bool] = None, - seed: typing.Optional[int] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[SeoContentRequestResponseFormatType] = None, - serp_search_location: typing.Optional[SerpSearchLocation] = None, - scaleserp_locations: typing.Optional[typing.List[str]] = None, - serp_search_type: typing.Optional[SerpSearchType] = None, - scaleserp_search_field: typing.Optional[str] = None, - 
settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> SeoSummaryPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + email_address: typing.Optional[str] = OMIT, + twitter_handle: typing.Optional[str] = OMIT, + face_scale: typing.Optional[float] = OMIT, + face_pos_x: typing.Optional[float] = OMIT, + face_pos_y: typing.Optional[float] = OMIT, + selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + upscale_factor: typing.Optional[float] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + should_send_email: typing.Optional[bool] = OMIT, + email_from: typing.Optional[str] = OMIT, + email_cc: typing.Optional[str] = OMIT, + email_bcc: typing.Optional[str] = OMIT, + email_subject: typing.Optional[str] = OMIT, + email_body: typing.Optional[str] = OMIT, + email_body_enable_html: typing.Optional[bool] = OMIT, + fallback_email_body: typing.Optional[str] = OMIT, + seed: typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[EmailFaceInpaintingPageOutput]: """ Parameters ---------- - search_query : str - - keywords : str + text_prompt : str - title : str + example_id : typing.Optional[str] - company_url : str + functions : typing.Optional[typing.Sequence[RecipeFunction]] - example_id : typing.Optional[str] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - task_instructions : typing.Optional[str] + email_address : typing.Optional[str] 
- enable_html : typing.Optional[bool] + twitter_handle : typing.Optional[str] - selected_model : typing.Optional[SeoContentRequestSelectedModel] + face_scale : typing.Optional[float] - max_search_urls : typing.Optional[int] + face_pos_x : typing.Optional[float] - enable_crosslinks : typing.Optional[bool] + face_pos_y : typing.Optional[float] - seed : typing.Optional[int] + selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] - avoid_repetition : typing.Optional[bool] + negative_prompt : typing.Optional[str] num_outputs : typing.Optional[int] - quality : typing.Optional[float] + quality : typing.Optional[int] - max_tokens : typing.Optional[int] + upscale_factor : typing.Optional[float] - sampling_temperature : typing.Optional[float] + output_width : typing.Optional[int] - response_format_type : typing.Optional[SeoContentRequestResponseFormatType] + output_height : typing.Optional[int] - serp_search_location : typing.Optional[SerpSearchLocation] + guidance_scale : typing.Optional[float] - scaleserp_locations : typing.Optional[typing.List[str]] - DEPRECATED: use `serp_search_location` instead + should_send_email : typing.Optional[bool] - serp_search_type : typing.Optional[SerpSearchType] + email_from : typing.Optional[str] - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead + email_cc : typing.Optional[str] + + email_bcc : typing.Optional[str] + + email_subject : typing.Optional[str] + + email_body : typing.Optional[str] + + email_body_enable_html : typing.Optional[bool] + + fallback_email_body : typing.Optional[str] + + seed : typing.Optional[int] settings : typing.Optional[RunSettings] @@ -5727,7 +12099,7 @@ async def seo_content( Returns ------- - SeoSummaryPageStatusResponse + typing.Optional[EmailFaceInpaintingPageOutput] Successful Response Examples @@ -5742,162 +12114,154 @@ async def seo_content( async def main() -> None: - await client.seo_content( - search_query="search_query", - 
keywords="keywords", - title="title", - company_url="company_url", + await client.image_from_email( + email_address="sean@dara.network", + text_prompt="winter's day in paris", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/SEOSummary/async/form", + "v3/EmailFaceInpainting/async", method="POST", - params={"example_id": example_id}, - data={ - "search_query": search_query, - "keywords": keywords, - "title": title, - "company_url": company_url, - "task_instructions": task_instructions, - "enable_html": enable_html, + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "email_address": email_address, + "twitter_handle": twitter_handle, + "text_prompt": text_prompt, + "face_scale": face_scale, + "face_pos_x": face_pos_x, + "face_pos_y": face_pos_y, "selected_model": selected_model, - "max_search_urls": max_search_urls, - "enable_crosslinks": enable_crosslinks, - "seed": seed, - "avoid_repetition": avoid_repetition, + "negative_prompt": negative_prompt, "num_outputs": num_outputs, "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, + "upscale_factor": upscale_factor, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "should_send_email": should_send_email, + "email_from": email_from, + "email_cc": email_cc, + "email_bcc": email_bcc, + "email_subject": email_subject, + "email_body": email_body, + "email_body_enable_html": email_body_enable_html, + "fallback_email_body": fallback_email_body, + "seed": seed, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return 
typing.cast(SeoSummaryPageStatusResponse, parse_obj_as(type_=SeoSummaryPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + EmailFaceInpaintingPageStatusResponse, + parse_obj_as( + type_=EmailFaceInpaintingPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def web_search_llm( + async def image_from_web_search( self, *, search_query: str, - site_filter: str, + 
text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - selected_model: typing.Optional[WebSearchLlmRequestSelectedModel] = None, - max_search_urls: typing.Optional[int] = None, - max_references: typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - embedding_model: typing.Optional[WebSearchLlmRequestEmbeddingModel] = None, - dense_weight: typing.Optional[float] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[WebSearchLlmRequestResponseFormatType] = None, - serp_search_location: typing.Optional[SerpSearchLocation] = None, - scaleserp_locations: typing.Optional[typing.List[str]] = None, - serp_search_type: typing.Optional[SerpSearchType] = None, - scaleserp_search_field: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> GoogleGptPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + prompt_strength: typing.Optional[float] = OMIT, 
+ sd2upscaling: typing.Optional[bool] = OMIT, + seed: typing.Optional[int] = OMIT, + image_guidance_scale: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[GoogleImageGenPageOutput]: """ Parameters ---------- search_query : str - site_filter : str + text_prompt : str example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[WebSearchLlmRequestSelectedModel] - - max_search_urls : typing.Optional[int] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[WebSearchLlmRequestEmbeddingModel] - - dense_weight : typing.Optional[float] + serp_search_location : typing.Optional[SerpSearchLocation] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
+ scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead + selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel] - avoid_repetition : typing.Optional[bool] + negative_prompt : typing.Optional[str] num_outputs : typing.Optional[int] - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] + quality : typing.Optional[int] - response_format_type : typing.Optional[WebSearchLlmRequestResponseFormatType] + guidance_scale : typing.Optional[float] - serp_search_location : typing.Optional[SerpSearchLocation] + prompt_strength : typing.Optional[float] - scaleserp_locations : typing.Optional[typing.List[str]] - DEPRECATED: use `serp_search_location` instead + sd2upscaling : typing.Optional[bool] - serp_search_type : typing.Optional[SerpSearchType] + seed : typing.Optional[int] - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead + image_guidance_scale : typing.Optional[float] settings : typing.Optional[RunSettings] @@ -5906,7 +12270,7 @@ async def web_search_llm( Returns ------- - GoogleGptPageStatusResponse + typing.Optional[GoogleImageGenPageOutput] Successful Response Examples @@ -5921,121 +12285,128 @@ async def web_search_llm( async def main() -> None: - await client.web_search_llm( + await client.image_from_web_search( search_query="search_query", - site_filter="site_filter", + text_prompt="text_prompt", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/google-gpt/async/form", + "v3/GoogleImageGen/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, "search_query": search_query, - "site_filter": site_filter, - "task_instructions": 
task_instructions, - "query_instructions": query_instructions, + "text_prompt": text_prompt, "selected_model": selected_model, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "avoid_repetition": avoid_repetition, + "negative_prompt": negative_prompt, "num_outputs": num_outputs, "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, + "guidance_scale": guidance_scale, + "prompt_strength": prompt_strength, + "sd_2_upscaling": sd2upscaling, + "seed": seed, + "image_guidance_scale": image_guidance_scale, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(GoogleGptPageStatusResponse, parse_obj_as(type_=GoogleGptPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + GoogleImageGenPageStatusResponse, + parse_obj_as( + type_=GoogleImageGenPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - 
typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def personalize_email( + async def remove_background( self, *, - email_address: str, + input_image: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - input_prompt: typing.Optional[str] = None, - selected_model: typing.Optional[PersonalizeEmailRequestSelectedModel] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[PersonalizeEmailRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> SocialLookupEmailPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, 
+ selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT, + mask_threshold: typing.Optional[float] = OMIT, + rect_persepective_transform: typing.Optional[bool] = OMIT, + reflection_opacity: typing.Optional[float] = OMIT, + obj_scale: typing.Optional[float] = OMIT, + obj_pos_x: typing.Optional[float] = OMIT, + obj_pos_y: typing.Optional[float] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[ImageSegmentationPageOutput]: """ Parameters ---------- - email_address : str + input_image : str example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - input_prompt : typing.Optional[str] - - selected_model : typing.Optional[PersonalizeEmailRequestSelectedModel] + selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel] - avoid_repetition : typing.Optional[bool] + mask_threshold : typing.Optional[float] - num_outputs : typing.Optional[int] + rect_persepective_transform : typing.Optional[bool] - quality : typing.Optional[float] + reflection_opacity : typing.Optional[float] - max_tokens : typing.Optional[int] + obj_scale : typing.Optional[float] - sampling_temperature : typing.Optional[float] + obj_pos_x : typing.Optional[float] - response_format_type : typing.Optional[PersonalizeEmailRequestResponseFormatType] + obj_pos_y : typing.Optional[float] settings : typing.Optional[RunSettings] @@ -6044,7 +12415,7 @@ async def personalize_email( Returns ------- - SocialLookupEmailPageStatusResponse + typing.Optional[ImageSegmentationPageOutput] Successful Response Examples @@ -6059,114 +12430,234 @@ async def personalize_email( async def 
main() -> None: - await client.personalize_email( - email_address="email_address", + await client.remove_background( + input_image="input_image", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/SocialLookupEmail/async/form", + "v3/ImageSegmentation/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, - "email_address": email_address, - "input_prompt": input_prompt, + "input_image": input_image, "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, + "mask_threshold": mask_threshold, + "rect_persepective_transform": rect_persepective_transform, + "reflection_opacity": reflection_opacity, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(SocialLookupEmailPageStatusResponse, parse_obj_as(type_=SocialLookupEmailPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + ImageSegmentationPageStatusResponse, + parse_obj_as( + type_=ImageSegmentationPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + 
object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def bulk_run( + async def upscale( self, *, - documents: typing.List[str], - run_urls: typing.List[str], - input_columns: typing.Dict[str, str], - output_columns: typing.Dict[str, str], + scale: int, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - eval_urls: typing.Optional[typing.List[str]] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> BulkRunnerPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + input_image: typing.Optional[str] = OMIT, + input_video: typing.Optional[str] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT, + 
selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[CompareUpscalerPageOutput]: """ Parameters ---------- - documents : typing.List[str] + scale : int + The final upsampling scale of the image - Upload or link to a CSV or google sheet that contains your sample input data. - For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. - Remember to includes header names in your CSV too. + example_id : typing.Optional[str] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - run_urls : typing.List[str] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - Provide one or more Gooey.AI workflow runs. - You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. + input_image : typing.Optional[str] + Input Image + input_video : typing.Optional[str] + Input Video - input_columns : typing.Dict[str, str] + selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] - For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. + selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] + settings : typing.Optional[RunSettings] - output_columns : typing.Dict[str, str] + request_options : typing.Optional[RequestOptions] + Request-specific configuration. - For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. 
+ Returns + ------- + typing.Optional[CompareUpscalerPageOutput] + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.upscale( + scale=1, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/compare-ai-upscalers/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "input_image": input_image, + "input_video": input_video, + "scale": scale, + "selected_models": selected_models, + "selected_bg_model": selected_bg_model, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _parsed_response = typing.cast( + CompareUpscalerPageStatusResponse, + parse_obj_as( + type_=CompareUpscalerPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + async def embed( + self, + *, + texts: typing.Sequence[str], + example_id: typing.Optional[str] = None, + 
functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[EmbeddingsPageOutput]: + """ + Parameters + ---------- + texts : typing.Sequence[str] example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - eval_urls : typing.Optional[typing.List[str]] - - _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. - + selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel] settings : typing.Optional[RunSettings] @@ -6175,7 +12666,7 @@ async def bulk_run( Returns ------- - BulkRunnerPageStatusResponse + typing.Optional[EmbeddingsPageOutput] Successful Response Examples @@ -6190,109 +12681,145 @@ async def bulk_run( async def main() -> None: - await client.bulk_run( - documents=["documents"], - run_urls=["run_urls"], - input_columns={"key": "value"}, - output_columns={"key": "value"}, + await client.embed( + texts=["texts"], ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/bulk-runner/async/form", + "v3/embeddings/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, - "documents": documents, - "run_urls": run_urls, - "input_columns": input_columns, - "output_columns": output_columns, - "eval_urls": eval_urls, + "texts": texts, + "selected_model": 
selected_model, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(BulkRunnerPageStatusResponse, parse_obj_as(type_=BulkRunnerPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + EmbeddingsPageStatusResponse, + parse_obj_as( + type_=EmbeddingsPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, 
body=_response_json) - async def synthesize_data( + async def seo_people_also_ask_doc( self, *, - documents: typing.List[str], + search_query: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - sheet_url: typing.Optional[str] = None, - selected_asr_model: typing.Optional[SynthesizeDataRequestSelectedAsrModel] = None, - google_translate_target: typing.Optional[str] = None, - glossary_document: typing.Optional[str] = None, - task_instructions: typing.Optional[str] = None, - selected_model: typing.Optional[SynthesizeDataRequestSelectedModel] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[SynthesizeDataRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> DocExtractPageStatusResponse: + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT, + documents: typing.Optional[typing.Sequence[str]] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + doc_extract_url: typing.Optional[str] = OMIT, + embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT, + citation_style: 
typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[RelatedQnADocPageOutput]: """ Parameters ---------- - documents : typing.List[str] + search_query : str example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - sheet_url : typing.Optional[str] + keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery] - selected_asr_model : typing.Optional[SynthesizeDataRequestSelectedAsrModel] + documents : typing.Optional[typing.Sequence[str]] - google_translate_target : typing.Optional[str] + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + doc_extract_url : typing.Optional[str] + + embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. 
+ Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). task_instructions : typing.Optional[str] - selected_model : typing.Optional[SynthesizeDataRequestSelectedModel] + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel] + + citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle] avoid_repetition : typing.Optional[bool] @@ -6304,7 +12831,17 @@ async def synthesize_data( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[SynthesizeDataRequestResponseFormatType] + response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead settings : typing.Optional[RunSettings] @@ -6313,7 +12850,7 @@ async def synthesize_data( Returns ------- - DocExtractPageStatusResponse + typing.Optional[RelatedQnADocPageOutput] Successful Response Examples @@ -6328,109 +12865,169 @@ async def synthesize_data( async def main() -> None: - await client.synthesize_data( - documents=["documents"], + await client.seo_people_also_ask_doc( + search_query="search_query", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/doc-extract/async/form", + "v3/related-qna-maker-doc/async", method="POST", - 
params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, + "search_query": search_query, + "keyword_query": keyword_query, "documents": documents, - "sheet_url": sheet_url, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "glossary_document": glossary_document, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "doc_extract_url": doc_extract_url, + "embedding_model": embedding_model, + "dense_weight": dense_weight, "task_instructions": task_instructions, + "query_instructions": query_instructions, "selected_model": selected_model, + "citation_style": citation_style, "avoid_repetition": avoid_repetition, "num_outputs": num_outputs, "quality": quality, "max_tokens": max_tokens, "sampling_temperature": sampling_temperature, "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(DocExtractPageStatusResponse, parse_obj_as(type_=DocExtractPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) + _parsed_response = typing.cast( + RelatedQnADocPageStatusResponse, + parse_obj_as( + type_=RelatedQnADocPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + return _parsed_response.output if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( 
+ typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def health_status_get( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.health_status_get() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "status", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def llm( + async def post_v3chyron_plant_async_form( self, *, - example_id: typing.Optional[str] = None, + midi_notes: str, functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - input_prompt: typing.Optional[str] = None, - selected_models: typing.Optional[typing.List[LlmRequestSelectedModelsItem]] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[LlmRequestResponseFormatType] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + midi_notes_prompt: typing.Optional[str] = None, + chyron_prompt: typing.Optional[str] = None, settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> CompareLlmPageStatusResponse: + request_options: typing.Optional[RequestOptions] = None, + ) -> ChyronPlantPageStatusResponse: """ Parameters ---------- - example_id 
: typing.Optional[str] + midi_notes : str functions : typing.Optional[typing.List[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - input_prompt : typing.Optional[str] - - selected_models : typing.Optional[typing.List[LlmRequestSelectedModelsItem]] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] + midi_notes_prompt : typing.Optional[str] - response_format_type : typing.Optional[LlmRequestResponseFormatType] + chyron_prompt : typing.Optional[str] settings : typing.Optional[RunSettings] @@ -6439,7 +13036,7 @@ async def llm( Returns ------- - CompareLlmPageStatusResponse + ChyronPlantPageStatusResponse Successful Response Examples @@ -6454,26 +13051,22 @@ async def llm( async def main() -> None: - await client.llm() + await client.post_v3chyron_plant_async_form( + midi_notes="midi_notes", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/CompareLLM/async/form", + "v3/ChyronPlant/async/form", method="POST", - params={"example_id": example_id}, data={ "functions": functions, "variables": variables, - "input_prompt": input_prompt, - "selected_models": selected_models, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, + "midi_notes": midi_notes, + "midi_notes_prompt": midi_notes_prompt, + "chyron_prompt": chyron_prompt, "settings": settings, }, files={}, @@ -6482,99 +13075,45 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(CompareLlmPageStatusResponse, 
parse_obj_as(type_=CompareLlmPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + return typing.cast( + ChyronPlantPageStatusResponse, + parse_obj_as( + type_=ChyronPlantPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def rag( + async def post_v3compare_llm_async_form( self, *, - search_query: str, - example_id: typing.Optional[str] = None, functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - keyword_query: typing.Optional[RagRequestKeywordQuery] = None, - documents: typing.Optional[typing.List[str]] = None, - max_references: typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - doc_extract_url: typing.Optional[str] = None, - embedding_model: typing.Optional[RagRequestEmbeddingModel] = 
None, - dense_weight: typing.Optional[float] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - selected_model: typing.Optional[RagRequestSelectedModel] = None, - citation_style: typing.Optional[RagRequestCitationStyle] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + input_prompt: typing.Optional[str] = None, + selected_models: typing.Optional[typing.List[PostV3CompareLlmAsyncFormRequestSelectedModelsItem]] = None, avoid_repetition: typing.Optional[bool] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[float] = None, max_tokens: typing.Optional[int] = None, sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[RagRequestResponseFormatType] = None, + response_format_type: typing.Optional[PostV3CompareLlmAsyncFormRequestResponseFormatType] = None, settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> DocSearchPageStatusResponse: + request_options: typing.Optional[RequestOptions] = None, + ) -> CompareLlmPageStatusResponse: """ Parameters ---------- - search_query : str - - example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - keyword_query : typing.Optional[RagRequestKeywordQuery] - - documents : typing.Optional[typing.List[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : typing.Optional[str] - - embedding_model : typing.Optional[RagRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. 
`0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[RagRequestSelectedModel] + input_prompt : typing.Optional[str] - citation_style : typing.Optional[RagRequestCitationStyle] + selected_models : typing.Optional[typing.List[PostV3CompareLlmAsyncFormRequestSelectedModelsItem]] avoid_repetition : typing.Optional[bool] @@ -6586,7 +13125,7 @@ async def rag( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[RagRequestResponseFormatType] + response_format_type : typing.Optional[PostV3CompareLlmAsyncFormRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -6595,7 +13134,7 @@ async def rag( Returns ------- - DocSearchPageStatusResponse + CompareLlmPageStatusResponse Successful Response Examples @@ -6609,34 +13148,20 @@ async def rag( ) - async def main() -> None: - await client.rag( - search_query="search_query", - ) + async def main() -> None: + await client.post_v3compare_llm_async_form() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/doc-search/async/form", + "v3/CompareLLM/async/form", method="POST", - params={"example_id": example_id}, data={ "functions": functions, "variables": variables, - "search_query": search_query, - "keyword_query": keyword_query, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "citation_style": citation_style, + "input_prompt": input_prompt, + "selected_models": 
selected_models, "avoid_repetition": avoid_repetition, "num_outputs": num_outputs, "quality": quality, @@ -6651,89 +13176,78 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(DocSearchPageStatusResponse, parse_obj_as(type_=DocSearchPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + return typing.cast( + CompareLlmPageStatusResponse, + parse_obj_as( + type_=CompareLlmPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def doc_summary( + async def post_v3compare_text2img_async_form( self, *, - documents: typing.List[str], - example_id: typing.Optional[str] = None, + text_prompt: str, functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - task_instructions: typing.Optional[str] = None, - merge_instructions: 
typing.Optional[str] = None, - selected_model: typing.Optional[DocSummaryRequestSelectedModel] = None, - chain_type: typing.Optional[typing.Literal["map_reduce"]] = None, - selected_asr_model: typing.Optional[DocSummaryRequestSelectedAsrModel] = None, - google_translate_target: typing.Optional[str] = None, - avoid_repetition: typing.Optional[bool] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + negative_prompt: typing.Optional[str] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[DocSummaryRequestResponseFormatType] = None, + quality: typing.Optional[int] = None, + dall_e3quality: typing.Optional[str] = None, + dall_e3style: typing.Optional[str] = None, + guidance_scale: typing.Optional[float] = None, + seed: typing.Optional[int] = None, + sd2upscaling: typing.Optional[bool] = None, + selected_models: typing.Optional[typing.List[PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem]] = None, + scheduler: typing.Optional[PostV3CompareText2ImgAsyncFormRequestScheduler] = None, + edit_instruction: typing.Optional[str] = None, + image_guidance_scale: typing.Optional[float] = None, settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> DocSummaryPageStatusResponse: + request_options: typing.Optional[RequestOptions] = None, + ) -> CompareText2ImgPageStatusResponse: """ Parameters ---------- - documents : typing.List[str] - - example_id : typing.Optional[str] + text_prompt : str functions : typing.Optional[typing.List[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja 
prompt templates and in functions as arguments - task_instructions : typing.Optional[str] + negative_prompt : typing.Optional[str] - merge_instructions : typing.Optional[str] + output_width : typing.Optional[int] - selected_model : typing.Optional[DocSummaryRequestSelectedModel] + output_height : typing.Optional[int] - chain_type : typing.Optional[typing.Literal["map_reduce"]] + num_outputs : typing.Optional[int] - selected_asr_model : typing.Optional[DocSummaryRequestSelectedAsrModel] + quality : typing.Optional[int] - google_translate_target : typing.Optional[str] + dall_e3quality : typing.Optional[str] - avoid_repetition : typing.Optional[bool] + dall_e3style : typing.Optional[str] - num_outputs : typing.Optional[int] + guidance_scale : typing.Optional[float] - quality : typing.Optional[float] + seed : typing.Optional[int] - max_tokens : typing.Optional[int] + sd2upscaling : typing.Optional[bool] - sampling_temperature : typing.Optional[float] + selected_models : typing.Optional[typing.List[PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem]] - response_format_type : typing.Optional[DocSummaryRequestResponseFormatType] + scheduler : typing.Optional[PostV3CompareText2ImgAsyncFormRequestScheduler] + + edit_instruction : typing.Optional[str] + + image_guidance_scale : typing.Optional[float] settings : typing.Optional[RunSettings] @@ -6742,7 +13256,7 @@ async def doc_summary( Returns ------- - DocSummaryPageStatusResponse + CompareText2ImgPageStatusResponse Successful Response Examples @@ -6757,33 +13271,34 @@ async def doc_summary( async def main() -> None: - await client.doc_summary( - documents=["documents"], + await client.post_v3compare_text2img_async_form( + text_prompt="text_prompt", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/doc-summary/async/form", + "v3/CompareText2Img/async/form", method="POST", - params={"example_id": example_id}, data={ "functions": functions, "variables": variables, - "documents": 
documents, - "task_instructions": task_instructions, - "merge_instructions": merge_instructions, - "selected_model": selected_model, - "chain_type": chain_type, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "avoid_repetition": avoid_repetition, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "output_width": output_width, + "output_height": output_height, "num_outputs": num_outputs, "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, + "dall_e_3_quality": dall_e3quality, + "dall_e_3_style": dall_e3style, + "guidance_scale": guidance_scale, + "seed": seed, + "sd_2_upscaling": sd2upscaling, + "selected_models": selected_models, + "scheduler": scheduler, + "edit_instruction": edit_instruction, + "image_guidance_scale": image_guidance_scale, "settings": settings, }, files={}, @@ -6792,129 +13307,69 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(DocSummaryPageStatusResponse, parse_obj_as(type_=DocSummaryPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - 
typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + return typing.cast( + CompareText2ImgPageStatusResponse, + parse_obj_as( + type_=CompareText2ImgPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def lipsync_tts( + async def post_v3deforum_sd_async_form( self, *, - text_prompt: str, - example_id: typing.Optional[str] = None, + animation_prompts: typing.List[AnimationPrompt], functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - tts_provider: typing.Optional[LipsyncTtsRequestTtsProvider] = None, - uberduck_voice_name: typing.Optional[str] = None, - uberduck_speaking_rate: typing.Optional[float] = None, - google_voice_name: typing.Optional[str] = None, - google_speaking_rate: typing.Optional[float] = None, - google_pitch: typing.Optional[float] = None, - bark_history_prompt: typing.Optional[str] = None, - elevenlabs_voice_name: typing.Optional[str] = None, - elevenlabs_api_key: typing.Optional[str] = None, - elevenlabs_voice_id: typing.Optional[str] = None, - elevenlabs_model: typing.Optional[str] = None, - elevenlabs_stability: typing.Optional[float] = None, - elevenlabs_similarity_boost: typing.Optional[float] = None, - elevenlabs_style: typing.Optional[float] = None, - elevenlabs_speaker_boost: typing.Optional[bool] = None, - azure_voice_name: typing.Optional[str] = None, - openai_voice_name: typing.Optional[LipsyncTtsRequestOpenaiVoiceName] = None, - openai_tts_model: typing.Optional[LipsyncTtsRequestOpenaiTtsModel] = None, - input_face: typing.Optional[str] = None, - face_padding_top: typing.Optional[int] = None, - face_padding_bottom: typing.Optional[int] = None, - face_padding_left: 
typing.Optional[int] = None, - face_padding_right: typing.Optional[int] = None, - sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - selected_model: typing.Optional[LipsyncTtsRequestSelectedModel] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + max_frames: typing.Optional[int] = None, + selected_model: typing.Optional[PostV3DeforumSdAsyncFormRequestSelectedModel] = None, + animation_mode: typing.Optional[str] = None, + zoom: typing.Optional[str] = None, + translation_x: typing.Optional[str] = None, + translation_y: typing.Optional[str] = None, + rotation3d_x: typing.Optional[str] = None, + rotation3d_y: typing.Optional[str] = None, + rotation3d_z: typing.Optional[str] = None, + fps: typing.Optional[int] = None, + seed: typing.Optional[int] = None, settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> LipsyncTtsPageStatusResponse: + request_options: typing.Optional[RequestOptions] = None, + ) -> DeforumSdPageStatusResponse: """ Parameters ---------- - text_prompt : str - - example_id : typing.Optional[str] + animation_prompts : typing.List[AnimationPrompt] functions : typing.Optional[typing.List[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - tts_provider : typing.Optional[LipsyncTtsRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - 
elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] + max_frames : typing.Optional[int] - azure_voice_name : typing.Optional[str] + selected_model : typing.Optional[PostV3DeforumSdAsyncFormRequestSelectedModel] - openai_voice_name : typing.Optional[LipsyncTtsRequestOpenaiVoiceName] + animation_mode : typing.Optional[str] - openai_tts_model : typing.Optional[LipsyncTtsRequestOpenaiTtsModel] + zoom : typing.Optional[str] - input_face : typing.Optional[str] + translation_x : typing.Optional[str] - face_padding_top : typing.Optional[int] + translation_y : typing.Optional[str] - face_padding_bottom : typing.Optional[int] + rotation3d_x : typing.Optional[str] - face_padding_left : typing.Optional[int] + rotation3d_y : typing.Optional[str] - face_padding_right : typing.Optional[int] + rotation3d_z : typing.Optional[str] - sadtalker_settings : typing.Optional[SadTalkerSettings] + fps : typing.Optional[int] - selected_model : typing.Optional[LipsyncTtsRequestSelectedModel] + seed : typing.Optional[int] settings : typing.Optional[RunSettings] @@ -6923,14 +13378,14 @@ async def lipsync_tts( Returns ------- - LipsyncTtsPageStatusResponse + DeforumSdPageStatusResponse Successful Response Examples -------- import asyncio - from gooey import AsyncGooey + from gooey import AnimationPrompt, AsyncGooey client = AsyncGooey( api_key="YOUR_API_KEY", @@ -6938,46 +13393,36 @@ async def lipsync_tts( async def main() -> None: - await client.lipsync_tts( - text_prompt="text_prompt", + await client.post_v3deforum_sd_async_form( + animation_prompts=[ + AnimationPrompt( + frame="frame", + prompt="prompt", + ) + ], ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/LipsyncTTS/async/form", + "v3/DeforumSD/async/form", method="POST", - params={"example_id": 
example_id}, data={ "functions": functions, "variables": variables, - "text_prompt": text_prompt, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, + "animation_prompts": animation_prompts, + "max_frames": max_frames, "selected_model": selected_model, + "animation_mode": animation_mode, + "zoom": zoom, + "translation_x": translation_x, + "translation_y": translation_y, + "rotation_3d_x": rotation3d_x, + "rotation_3d_y": rotation3d_y, + "rotation_3d_z": rotation3d_z, + "fps": fps, + "seed": seed, "settings": settings, }, files={}, @@ -6986,108 +13431,102 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(LipsyncTtsPageStatusResponse, parse_obj_as(type_=LipsyncTtsPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - 
raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + return typing.cast( + DeforumSdPageStatusResponse, + parse_obj_as( + type_=DeforumSdPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def text_to_speech( + async def post_v3email_face_inpainting_async_form( self, *, text_prompt: str, - example_id: typing.Optional[str] = None, functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - tts_provider: typing.Optional[TextToSpeechRequestTtsProvider] = None, - uberduck_voice_name: typing.Optional[str] = None, - uberduck_speaking_rate: typing.Optional[float] = None, - google_voice_name: typing.Optional[str] = None, - google_speaking_rate: typing.Optional[float] = None, - google_pitch: typing.Optional[float] = None, - bark_history_prompt: typing.Optional[str] = None, - elevenlabs_voice_name: typing.Optional[str] = None, - elevenlabs_api_key: typing.Optional[str] = None, - elevenlabs_voice_id: typing.Optional[str] = None, - elevenlabs_model: typing.Optional[str] = None, - elevenlabs_stability: typing.Optional[float] = None, - 
elevenlabs_similarity_boost: typing.Optional[float] = None, - elevenlabs_style: typing.Optional[float] = None, - elevenlabs_speaker_boost: typing.Optional[bool] = None, - azure_voice_name: typing.Optional[str] = None, - openai_voice_name: typing.Optional[TextToSpeechRequestOpenaiVoiceName] = None, - openai_tts_model: typing.Optional[TextToSpeechRequestOpenaiTtsModel] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + email_address: typing.Optional[str] = None, + twitter_handle: typing.Optional[str] = None, + face_scale: typing.Optional[float] = None, + face_pos_x: typing.Optional[float] = None, + face_pos_y: typing.Optional[float] = None, + selected_model: typing.Optional[PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel] = None, + negative_prompt: typing.Optional[str] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + upscale_factor: typing.Optional[float] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + should_send_email: typing.Optional[bool] = None, + email_from: typing.Optional[str] = None, + email_cc: typing.Optional[str] = None, + email_bcc: typing.Optional[str] = None, + email_subject: typing.Optional[str] = None, + email_body: typing.Optional[str] = None, + email_body_enable_html: typing.Optional[bool] = None, + fallback_email_body: typing.Optional[str] = None, + seed: typing.Optional[int] = None, settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> TextToSpeechPageStatusResponse: + request_options: typing.Optional[RequestOptions] = None, + ) -> EmailFaceInpaintingPageStatusResponse: """ Parameters ---------- text_prompt : str - example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : 
typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - tts_provider : typing.Optional[TextToSpeechRequestTtsProvider] + email_address : typing.Optional[str] - uberduck_voice_name : typing.Optional[str] + twitter_handle : typing.Optional[str] - uberduck_speaking_rate : typing.Optional[float] + face_scale : typing.Optional[float] - google_voice_name : typing.Optional[str] + face_pos_x : typing.Optional[float] - google_speaking_rate : typing.Optional[float] + face_pos_y : typing.Optional[float] - google_pitch : typing.Optional[float] + selected_model : typing.Optional[PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel] - bark_history_prompt : typing.Optional[str] + negative_prompt : typing.Optional[str] - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead + num_outputs : typing.Optional[int] - elevenlabs_api_key : typing.Optional[str] + quality : typing.Optional[int] - elevenlabs_voice_id : typing.Optional[str] + upscale_factor : typing.Optional[float] - elevenlabs_model : typing.Optional[str] + output_width : typing.Optional[int] - elevenlabs_stability : typing.Optional[float] + output_height : typing.Optional[int] - elevenlabs_similarity_boost : typing.Optional[float] + guidance_scale : typing.Optional[float] - elevenlabs_style : typing.Optional[float] + should_send_email : typing.Optional[bool] - elevenlabs_speaker_boost : typing.Optional[bool] + email_from : typing.Optional[str] - azure_voice_name : typing.Optional[str] + email_cc : typing.Optional[str] + + email_bcc : typing.Optional[str] + + email_subject : typing.Optional[str] + + email_body : typing.Optional[str] + + email_body_enable_html : typing.Optional[bool] - openai_voice_name : typing.Optional[TextToSpeechRequestOpenaiVoiceName] + fallback_email_body : typing.Optional[str] - openai_tts_model : typing.Optional[TextToSpeechRequestOpenaiTtsModel] + seed : typing.Optional[int] settings : 
typing.Optional[RunSettings] @@ -7096,7 +13535,7 @@ async def text_to_speech( Returns ------- - TextToSpeechPageStatusResponse + EmailFaceInpaintingPageStatusResponse Successful Response Examples @@ -7111,7 +13550,7 @@ async def text_to_speech( async def main() -> None: - await client.text_to_speech( + await client.post_v3email_face_inpainting_async_form( text_prompt="text_prompt", ) @@ -7119,31 +13558,34 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/TextToSpeech/async/form", + "v3/EmailFaceInpainting/async/form", method="POST", - params={"example_id": example_id}, data={ "functions": functions, "variables": variables, + "email_address": email_address, + "twitter_handle": twitter_handle, "text_prompt": text_prompt, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, + "face_scale": face_scale, + "face_pos_x": face_pos_x, + "face_pos_y": face_pos_y, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "upscale_factor": upscale_factor, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "should_send_email": should_send_email, + 
"email_from": email_from, + "email_cc": email_cc, + "email_bcc": email_bcc, + "email_subject": email_subject, + "email_body": email_body, + "email_body_enable_html": email_body_enable_html, + "fallback_email_body": fallback_email_body, + "seed": seed, "settings": settings, }, files={}, @@ -7152,80 +13594,75 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(TextToSpeechPageStatusResponse, parse_obj_as(type_=TextToSpeechPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + return typing.cast( + EmailFaceInpaintingPageStatusResponse, + parse_obj_as( + type_=EmailFaceInpaintingPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def speech_recognition( + async def post_v3face_inpainting_async_form( self, *, - documents: typing.List[str], - example_id: typing.Optional[str] = None, + input_image: str, 
+ text_prompt: str, functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - selected_model: typing.Optional[SpeechRecognitionRequestSelectedModel] = None, - language: typing.Optional[str] = None, - translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = None, - output_format: typing.Optional[SpeechRecognitionRequestOutputFormat] = None, - google_translate_target: typing.Optional[str] = None, - translation_source: typing.Optional[str] = None, - translation_target: typing.Optional[str] = None, - glossary_document: typing.Optional[str] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + face_scale: typing.Optional[float] = None, + face_pos_x: typing.Optional[float] = None, + face_pos_y: typing.Optional[float] = None, + selected_model: typing.Optional[PostV3FaceInpaintingAsyncFormRequestSelectedModel] = None, + negative_prompt: typing.Optional[str] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + upscale_factor: typing.Optional[float] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + seed: typing.Optional[int] = None, settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> AsrPageStatusResponse: + request_options: typing.Optional[RequestOptions] = None, + ) -> FaceInpaintingPageStatusResponse: """ Parameters ---------- - documents : typing.List[str] + input_image : str - example_id : typing.Optional[str] + text_prompt : str functions : typing.Optional[typing.List[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : 
typing.Optional[SpeechRecognitionRequestSelectedModel] + face_scale : typing.Optional[float] - language : typing.Optional[str] + face_pos_x : typing.Optional[float] - translation_model : typing.Optional[SpeechRecognitionRequestTranslationModel] + face_pos_y : typing.Optional[float] - output_format : typing.Optional[SpeechRecognitionRequestOutputFormat] + selected_model : typing.Optional[PostV3FaceInpaintingAsyncFormRequestSelectedModel] - google_translate_target : typing.Optional[str] - use `translation_model` & `translation_target` instead. + negative_prompt : typing.Optional[str] - translation_source : typing.Optional[str] + num_outputs : typing.Optional[int] - translation_target : typing.Optional[str] + quality : typing.Optional[int] - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). 
+ upscale_factor : typing.Optional[float] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + seed : typing.Optional[int] settings : typing.Optional[RunSettings] @@ -7234,7 +13671,7 @@ async def speech_recognition( Returns ------- - AsrPageStatusResponse + FaceInpaintingPageStatusResponse Successful Response Examples @@ -7249,29 +13686,34 @@ async def speech_recognition( async def main() -> None: - await client.speech_recognition( - documents=["documents"], + await client.post_v3face_inpainting_async_form( + input_image="input_image", + text_prompt="text_prompt", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/asr/async/form", + "v3/FaceInpainting/async/form", method="POST", - params={"example_id": example_id}, data={ "functions": functions, "variables": variables, - "documents": documents, + "input_image": input_image, + "text_prompt": text_prompt, + "face_scale": face_scale, + "face_pos_x": face_pos_x, + "face_pos_y": face_pos_y, "selected_model": selected_model, - "language": language, - "translation_model": translation_model, - "output_format": output_format, - "google_translate_target": google_translate_target, - "translation_source": translation_source, - "translation_target": translation_target, - "glossary_document": glossary_document, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "upscale_factor": upscale_factor, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "seed": seed, "settings": settings, }, files={}, @@ -7280,65 +13722,59 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(AsrPageStatusResponse, parse_obj_as(type_=AsrPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, 
parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + return typing.cast( + FaceInpaintingPageStatusResponse, + parse_obj_as( + type_=FaceInpaintingPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def text_to_music( + async def post_v3google_image_gen_async_form( self, *, + search_query: str, text_prompt: str, - example_id: typing.Optional[str] = None, functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + serp_search_location: typing.Optional[SerpSearchLocation] = None, + scaleserp_locations: typing.Optional[typing.List[str]] = None, + selected_model: typing.Optional[PostV3GoogleImageGenAsyncFormRequestSelectedModel] = None, negative_prompt: typing.Optional[str] = None, - duration_sec: typing.Optional[float] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[int] = None, guidance_scale: 
typing.Optional[float] = None, - seed: typing.Optional[int] = None, + prompt_strength: typing.Optional[float] = None, sd2upscaling: typing.Optional[bool] = None, - selected_models: typing.Optional[typing.List[typing.Literal["audio_ldm"]]] = None, + seed: typing.Optional[int] = None, + image_guidance_scale: typing.Optional[float] = None, settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> Text2AudioPageStatusResponse: + request_options: typing.Optional[RequestOptions] = None, + ) -> GoogleImageGenPageStatusResponse: """ Parameters ---------- - text_prompt : str + search_query : str - example_id : typing.Optional[str] + text_prompt : str functions : typing.Optional[typing.List[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - negative_prompt : typing.Optional[str] + serp_search_location : typing.Optional[SerpSearchLocation] - duration_sec : typing.Optional[float] + scaleserp_locations : typing.Optional[typing.List[str]] + DEPRECATED: use `serp_search_location` instead + + selected_model : typing.Optional[PostV3GoogleImageGenAsyncFormRequestSelectedModel] + + negative_prompt : typing.Optional[str] num_outputs : typing.Optional[int] @@ -7346,11 +13782,13 @@ async def text_to_music( guidance_scale : typing.Optional[float] - seed : typing.Optional[int] + prompt_strength : typing.Optional[float] sd2upscaling : typing.Optional[bool] - selected_models : typing.Optional[typing.List[typing.Literal["audio_ldm"]]] + seed : typing.Optional[int] + + image_guidance_scale : typing.Optional[float] settings : typing.Optional[RunSettings] @@ -7359,7 +13797,7 @@ async def text_to_music( Returns ------- - Text2AudioPageStatusResponse + GoogleImageGenPageStatusResponse Successful Response Examples @@ -7374,7 +13812,8 @@ async def text_to_music( 
async def main() -> None: - await client.text_to_music( + await client.post_v3google_image_gen_async_form( + search_query="search_query", text_prompt="text_prompt", ) @@ -7382,21 +13821,24 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/text2audio/async/form", + "v3/GoogleImageGen/async/form", method="POST", - params={"example_id": example_id}, data={ "functions": functions, "variables": variables, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "search_query": search_query, "text_prompt": text_prompt, + "selected_model": selected_model, "negative_prompt": negative_prompt, - "duration_sec": duration_sec, "num_outputs": num_outputs, "quality": quality, "guidance_scale": guidance_scale, - "seed": seed, + "prompt_strength": prompt_strength, "sd_2_upscaling": sd2upscaling, - "selected_models": selected_models, + "seed": seed, + "image_guidance_scale": image_guidance_scale, "settings": settings, }, files={}, @@ -7405,67 +13847,57 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(Text2AudioPageStatusResponse, parse_obj_as(type_=Text2AudioPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: 
ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + return typing.cast( + GoogleImageGenPageStatusResponse, + parse_obj_as( + type_=GoogleImageGenPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def translate( + async def post_v3image_segmentation_async_form( self, *, - example_id: typing.Optional[str] = None, + input_image: str, functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - texts: typing.Optional[typing.List[str]] = None, - selected_model: typing.Optional[TranslateRequestSelectedModel] = None, - translation_source: typing.Optional[str] = None, - translation_target: typing.Optional[str] = None, - glossary_document: typing.Optional[str] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + selected_model: typing.Optional[PostV3ImageSegmentationAsyncFormRequestSelectedModel] = None, + mask_threshold: typing.Optional[float] = None, + rect_persepective_transform: typing.Optional[bool] = None, + reflection_opacity: typing.Optional[float] = None, + obj_scale: typing.Optional[float] = None, + obj_pos_x: typing.Optional[float] = None, + obj_pos_y: typing.Optional[float] = None, settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> TranslationPageStatusResponse: + request_options: typing.Optional[RequestOptions] = None, + ) -> ImageSegmentationPageStatusResponse: """ Parameters ---------- - example_id : typing.Optional[str] + input_image : str functions : typing.Optional[typing.List[RecipeFunction]] - variables : 
typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - texts : typing.Optional[typing.List[str]] + selected_model : typing.Optional[PostV3ImageSegmentationAsyncFormRequestSelectedModel] - selected_model : typing.Optional[TranslateRequestSelectedModel] + mask_threshold : typing.Optional[float] - translation_source : typing.Optional[str] + rect_persepective_transform : typing.Optional[bool] - translation_target : typing.Optional[str] + reflection_opacity : typing.Optional[float] - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). + obj_scale : typing.Optional[float] + + obj_pos_x : typing.Optional[float] + + obj_pos_y : typing.Optional[float] settings : typing.Optional[RunSettings] @@ -7474,7 +13906,7 @@ async def translate( Returns ------- - TranslationPageStatusResponse + ImageSegmentationPageStatusResponse Successful Response Examples @@ -7489,23 +13921,27 @@ async def translate( async def main() -> None: - await client.translate() + await client.post_v3image_segmentation_async_form( + input_image="input_image", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/translate/async/form", + "v3/ImageSegmentation/async/form", method="POST", - params={"example_id": example_id}, data={ "functions": functions, "variables": variables, - "texts": texts, + "input_image": input_image, "selected_model": selected_model, - "translation_source": translation_source, - "translation_target": translation_target, - "glossary_document": glossary_document, + "mask_threshold": mask_threshold, + "rect_persepective_transform": 
rect_persepective_transform, + "reflection_opacity": reflection_opacity, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, "settings": settings, }, files={}, @@ -7514,42 +13950,27 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(TranslationPageStatusResponse, parse_obj_as(type_=TranslationPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + return typing.cast( + ImageSegmentationPageStatusResponse, + parse_obj_as( + type_=ImageSegmentationPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def remix_image( + async def post_v3img2img_async_form( self, *, input_image: str, - example_id: typing.Optional[str] = None, functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, + variables: 
typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, text_prompt: typing.Optional[str] = None, - selected_model: typing.Optional[RemixImageRequestSelectedModel] = None, - selected_controlnet_model: typing.Optional[RemixImageRequestSelectedControlnetModel] = None, + selected_model: typing.Optional[PostV3Img2ImgAsyncFormRequestSelectedModel] = None, + selected_controlnet_model: typing.Optional[PostV3Img2ImgAsyncFormRequestSelectedControlnetModel] = None, negative_prompt: typing.Optional[str] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[int] = None, @@ -7561,25 +13982,23 @@ async def remix_image( seed: typing.Optional[int] = None, image_guidance_scale: typing.Optional[float] = None, settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None + request_options: typing.Optional[RequestOptions] = None, ) -> Img2ImgPageStatusResponse: """ Parameters ---------- input_image : str - example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments text_prompt : typing.Optional[str] - selected_model : typing.Optional[RemixImageRequestSelectedModel] + selected_model : typing.Optional[PostV3Img2ImgAsyncFormRequestSelectedModel] - selected_controlnet_model : typing.Optional[RemixImageRequestSelectedControlnetModel] + selected_controlnet_model : typing.Optional[PostV3Img2ImgAsyncFormRequestSelectedControlnetModel] negative_prompt : typing.Optional[str] @@ -7623,7 +14042,7 @@ async def remix_image( async def main() -> None: - await client.remix_image( + await client.post_v3img2img_async_form( input_image="input_image", ) @@ -7633,7 +14052,6 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v3/Img2Img/async/form", 
method="POST", - params={"example_id": example_id}, data={ "functions": functions, "variables": variables, @@ -7659,95 +14077,75 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(Img2ImgPageStatusResponse, parse_obj_as(type_=Img2ImgPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + return typing.cast( + Img2ImgPageStatusResponse, + parse_obj_as( + type_=Img2ImgPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def text_to_image( + async def post_v3letter_writer_async_form( self, *, - text_prompt: str, - example_id: typing.Optional[str] = None, + action_id: str, functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - negative_prompt: typing.Optional[str] = None, - output_width: typing.Optional[int] = None, - 
output_height: typing.Optional[int] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + prompt_header: typing.Optional[str] = None, + example_letters: typing.Optional[typing.List[TrainingDataModel]] = None, + lm_selected_api: typing.Optional[str] = None, + lm_selected_engine: typing.Optional[str] = None, num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - dall_e3quality: typing.Optional[str] = None, - dall_e3style: typing.Optional[str] = None, - guidance_scale: typing.Optional[float] = None, - seed: typing.Optional[int] = None, - sd2upscaling: typing.Optional[bool] = None, - selected_models: typing.Optional[typing.List[TextToImageRequestSelectedModelsItem]] = None, - scheduler: typing.Optional[TextToImageRequestScheduler] = None, - edit_instruction: typing.Optional[str] = None, - image_guidance_scale: typing.Optional[float] = None, + quality: typing.Optional[float] = None, + lm_sampling_temperature: typing.Optional[float] = None, + api_http_method: typing.Optional[str] = None, + api_url: typing.Optional[str] = None, + api_headers: typing.Optional[str] = None, + api_json_body: typing.Optional[str] = None, + input_prompt: typing.Optional[str] = None, + strip_html2text: typing.Optional[bool] = None, settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> CompareText2ImgPageStatusResponse: + request_options: typing.Optional[RequestOptions] = None, + ) -> LetterWriterPageStatusResponse: """ Parameters ---------- - text_prompt : str - - example_id : typing.Optional[str] + action_id : str functions : typing.Optional[typing.List[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - negative_prompt : typing.Optional[str] - - output_width : typing.Optional[int] + 
prompt_header : typing.Optional[str] - output_height : typing.Optional[int] + example_letters : typing.Optional[typing.List[TrainingDataModel]] - num_outputs : typing.Optional[int] + lm_selected_api : typing.Optional[str] - quality : typing.Optional[int] + lm_selected_engine : typing.Optional[str] - dall_e3quality : typing.Optional[str] + num_outputs : typing.Optional[int] - dall_e3style : typing.Optional[str] + quality : typing.Optional[float] - guidance_scale : typing.Optional[float] + lm_sampling_temperature : typing.Optional[float] - seed : typing.Optional[int] + api_http_method : typing.Optional[str] - sd2upscaling : typing.Optional[bool] + api_url : typing.Optional[str] - selected_models : typing.Optional[typing.List[TextToImageRequestSelectedModelsItem]] + api_headers : typing.Optional[str] - scheduler : typing.Optional[TextToImageRequestScheduler] + api_json_body : typing.Optional[str] - edit_instruction : typing.Optional[str] + input_prompt : typing.Optional[str] - image_guidance_scale : typing.Optional[float] + strip_html2text : typing.Optional[bool] settings : typing.Optional[RunSettings] @@ -7756,7 +14154,7 @@ async def text_to_image( Returns ------- - CompareText2ImgPageStatusResponse + LetterWriterPageStatusResponse Successful Response Examples @@ -7771,35 +14169,33 @@ async def text_to_image( async def main() -> None: - await client.text_to_image( - text_prompt="text_prompt", + await client.post_v3letter_writer_async_form( + action_id="action_id", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/CompareText2Img/async/form", + "v3/LetterWriter/async/form", method="POST", - params={"example_id": example_id}, data={ "functions": functions, "variables": variables, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "output_width": output_width, - "output_height": output_height, + "action_id": action_id, + "prompt_header": prompt_header, + "example_letters": example_letters, + 
"lm_selected_api": lm_selected_api, + "lm_selected_engine": lm_selected_engine, "num_outputs": num_outputs, "quality": quality, - "dall_e_3_quality": dall_e3quality, - "dall_e_3_style": dall_e3style, - "guidance_scale": guidance_scale, - "seed": seed, - "sd_2_upscaling": sd2upscaling, - "selected_models": selected_models, - "scheduler": scheduler, - "edit_instruction": edit_instruction, - "image_guidance_scale": image_guidance_scale, + "lm_sampling_temperature": lm_sampling_temperature, + "api_http_method": api_http_method, + "api_url": api_url, + "api_headers": api_headers, + "api_json_body": api_json_body, + "input_prompt": input_prompt, + "strip_html_2_text": strip_html2text, "settings": settings, }, files={}, @@ -7808,95 +14204,213 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(CompareText2ImgPageStatusResponse, parse_obj_as(type_=CompareText2ImgPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + return typing.cast( + LetterWriterPageStatusResponse, + parse_obj_as( + type_=LetterWriterPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, 
parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def post_v3lipsync_async_form( + self, + *, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + input_face: typing.Optional[str] = None, + face_padding_top: typing.Optional[int] = None, + face_padding_bottom: typing.Optional[int] = None, + face_padding_left: typing.Optional[int] = None, + face_padding_right: typing.Optional[int] = None, + sadtalker_settings: typing.Optional[SadTalkerSettings] = None, + selected_model: typing.Optional[PostV3LipsyncAsyncFormRequestSelectedModel] = None, + input_audio: typing.Optional[str] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> LipsyncPageStatusResponse: + """ + Parameters + ---------- + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_face : typing.Optional[str] + + face_padding_top : typing.Optional[int] + + face_padding_bottom : typing.Optional[int] + + face_padding_left : typing.Optional[int] + + face_padding_right : typing.Optional[int] + + sadtalker_settings : typing.Optional[SadTalkerSettings] + + selected_model : typing.Optional[PostV3LipsyncAsyncFormRequestSelectedModel] + + input_audio : typing.Optional[str] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + LipsyncPageStatusResponse + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.post_v3lipsync_async_form() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/Lipsync/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "input_face": input_face, + "face_padding_top": face_padding_top, + "face_padding_bottom": face_padding_bottom, + "face_padding_left": face_padding_left, + "face_padding_right": face_padding_right, + "sadtalker_settings": sadtalker_settings, + "selected_model": selected_model, + "input_audio": input_audio, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + LipsyncPageStatusResponse, + parse_obj_as( + type_=LipsyncPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def product_image( + async def post_v3lipsync_tts_async_form( self, *, - input_image: str, text_prompt: str, - example_id: typing.Optional[str] = None, functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - obj_scale: typing.Optional[float] = None, - obj_pos_x: typing.Optional[float] = None, - obj_pos_y: typing.Optional[float] = None, - mask_threshold: typing.Optional[float] = None, - selected_model: typing.Optional[ProductImageRequestSelectedModel] = None, - negative_prompt: typing.Optional[str] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - output_width: typing.Optional[int] = 
None, - output_height: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - sd2upscaling: typing.Optional[bool] = None, - seed: typing.Optional[int] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + tts_provider: typing.Optional[PostV3LipsyncTtsAsyncFormRequestTtsProvider] = None, + uberduck_voice_name: typing.Optional[str] = None, + uberduck_speaking_rate: typing.Optional[float] = None, + google_voice_name: typing.Optional[str] = None, + google_speaking_rate: typing.Optional[float] = None, + google_pitch: typing.Optional[float] = None, + bark_history_prompt: typing.Optional[str] = None, + elevenlabs_voice_name: typing.Optional[str] = None, + elevenlabs_api_key: typing.Optional[str] = None, + elevenlabs_voice_id: typing.Optional[str] = None, + elevenlabs_model: typing.Optional[str] = None, + elevenlabs_stability: typing.Optional[float] = None, + elevenlabs_similarity_boost: typing.Optional[float] = None, + elevenlabs_style: typing.Optional[float] = None, + elevenlabs_speaker_boost: typing.Optional[bool] = None, + azure_voice_name: typing.Optional[str] = None, + openai_voice_name: typing.Optional[PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName] = None, + openai_tts_model: typing.Optional[PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel] = None, + input_face: typing.Optional[str] = None, + face_padding_top: typing.Optional[int] = None, + face_padding_bottom: typing.Optional[int] = None, + face_padding_left: typing.Optional[int] = None, + face_padding_right: typing.Optional[int] = None, + sadtalker_settings: typing.Optional[SadTalkerSettings] = None, + selected_model: typing.Optional[PostV3LipsyncTtsAsyncFormRequestSelectedModel] = None, settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> ObjectInpaintingPageStatusResponse: + request_options: typing.Optional[RequestOptions] = None, + ) -> LipsyncTtsPageStatusResponse: """ Parameters 
---------- - input_image : str - text_prompt : str - example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - obj_scale : typing.Optional[float] + tts_provider : typing.Optional[PostV3LipsyncTtsAsyncFormRequestTtsProvider] - obj_pos_x : typing.Optional[float] + uberduck_voice_name : typing.Optional[str] - obj_pos_y : typing.Optional[float] + uberduck_speaking_rate : typing.Optional[float] - mask_threshold : typing.Optional[float] + google_voice_name : typing.Optional[str] - selected_model : typing.Optional[ProductImageRequestSelectedModel] + google_speaking_rate : typing.Optional[float] - negative_prompt : typing.Optional[str] + google_pitch : typing.Optional[float] - num_outputs : typing.Optional[int] + bark_history_prompt : typing.Optional[str] - quality : typing.Optional[int] + elevenlabs_voice_name : typing.Optional[str] + Use `elevenlabs_voice_id` instead - output_width : typing.Optional[int] + elevenlabs_api_key : typing.Optional[str] - output_height : typing.Optional[int] + elevenlabs_voice_id : typing.Optional[str] - guidance_scale : typing.Optional[float] + elevenlabs_model : typing.Optional[str] - sd2upscaling : typing.Optional[bool] + elevenlabs_stability : typing.Optional[float] - seed : typing.Optional[int] + elevenlabs_similarity_boost : typing.Optional[float] + + elevenlabs_style : typing.Optional[float] + + elevenlabs_speaker_boost : typing.Optional[bool] + + azure_voice_name : typing.Optional[str] + + openai_voice_name : typing.Optional[PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName] + + openai_tts_model : typing.Optional[PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel] + + input_face : typing.Optional[str] + + face_padding_top : typing.Optional[int] + + face_padding_bottom : typing.Optional[int] + + 
face_padding_left : typing.Optional[int] + + face_padding_right : typing.Optional[int] + + sadtalker_settings : typing.Optional[SadTalkerSettings] + + selected_model : typing.Optional[PostV3LipsyncTtsAsyncFormRequestSelectedModel] settings : typing.Optional[RunSettings] @@ -7905,7 +14419,7 @@ async def product_image( Returns ------- - ObjectInpaintingPageStatusResponse + LipsyncTtsPageStatusResponse Successful Response Examples @@ -7920,8 +14434,7 @@ async def product_image( async def main() -> None: - await client.product_image( - input_image="input_image", + await client.post_v3lipsync_tts_async_form( text_prompt="text_prompt", ) @@ -7929,27 +14442,37 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/ObjectInpainting/async/form", + "v3/LipsyncTTS/async/form", method="POST", - params={"example_id": example_id}, data={ "functions": functions, "variables": variables, - "input_image": input_image, "text_prompt": text_prompt, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "mask_threshold": mask_threshold, + "tts_provider": tts_provider, + "uberduck_voice_name": uberduck_voice_name, + "uberduck_speaking_rate": uberduck_speaking_rate, + "google_voice_name": google_voice_name, + "google_speaking_rate": google_speaking_rate, + "google_pitch": google_pitch, + "bark_history_prompt": bark_history_prompt, + "elevenlabs_voice_name": elevenlabs_voice_name, + "elevenlabs_api_key": elevenlabs_api_key, + "elevenlabs_voice_id": elevenlabs_voice_id, + "elevenlabs_model": elevenlabs_model, + "elevenlabs_stability": elevenlabs_stability, + "elevenlabs_similarity_boost": elevenlabs_similarity_boost, + "elevenlabs_style": elevenlabs_style, + "elevenlabs_speaker_boost": elevenlabs_speaker_boost, + "azure_voice_name": azure_voice_name, + "openai_voice_name": openai_voice_name, + "openai_tts_model": openai_tts_model, + "input_face": input_face, + "face_padding_top": face_padding_top, + 
"face_padding_bottom": face_padding_bottom, + "face_padding_left": face_padding_left, + "face_padding_right": face_padding_right, + "sadtalker_settings": sadtalker_settings, "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "sd_2_upscaling": sd2upscaling, - "seed": seed, "settings": settings, }, files={}, @@ -7958,55 +14481,41 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(ObjectInpaintingPageStatusResponse, parse_obj_as(type_=ObjectInpaintingPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + return typing.cast( + LipsyncTtsPageStatusResponse, + parse_obj_as( + type_=LipsyncTtsPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, 
body=_response_json) - async def portrait( + async def post_v3object_inpainting_async_form( self, *, input_image: str, text_prompt: str, - example_id: typing.Optional[str] = None, functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - face_scale: typing.Optional[float] = None, - face_pos_x: typing.Optional[float] = None, - face_pos_y: typing.Optional[float] = None, - selected_model: typing.Optional[PortraitRequestSelectedModel] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + obj_scale: typing.Optional[float] = None, + obj_pos_x: typing.Optional[float] = None, + obj_pos_y: typing.Optional[float] = None, + mask_threshold: typing.Optional[float] = None, + selected_model: typing.Optional[PostV3ObjectInpaintingAsyncFormRequestSelectedModel] = None, negative_prompt: typing.Optional[str] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[int] = None, - upscale_factor: typing.Optional[float] = None, output_width: typing.Optional[int] = None, output_height: typing.Optional[int] = None, guidance_scale: typing.Optional[float] = None, + sd2upscaling: typing.Optional[bool] = None, seed: typing.Optional[int] = None, settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> FaceInpaintingPageStatusResponse: + request_options: typing.Optional[RequestOptions] = None, + ) -> ObjectInpaintingPageStatusResponse: """ Parameters ---------- @@ -8014,20 +14523,20 @@ async def portrait( text_prompt : str - example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - face_scale : typing.Optional[float] + obj_scale : typing.Optional[float] - 
face_pos_x : typing.Optional[float] + obj_pos_x : typing.Optional[float] - face_pos_y : typing.Optional[float] + obj_pos_y : typing.Optional[float] - selected_model : typing.Optional[PortraitRequestSelectedModel] + mask_threshold : typing.Optional[float] + + selected_model : typing.Optional[PostV3ObjectInpaintingAsyncFormRequestSelectedModel] negative_prompt : typing.Optional[str] @@ -8035,14 +14544,14 @@ async def portrait( quality : typing.Optional[int] - upscale_factor : typing.Optional[float] - output_width : typing.Optional[int] output_height : typing.Optional[int] guidance_scale : typing.Optional[float] + sd2upscaling : typing.Optional[bool] + seed : typing.Optional[int] settings : typing.Optional[RunSettings] @@ -8052,7 +14561,7 @@ async def portrait( Returns ------- - FaceInpaintingPageStatusResponse + ObjectInpaintingPageStatusResponse Successful Response Examples @@ -8067,7 +14576,7 @@ async def portrait( async def main() -> None: - await client.portrait( + await client.post_v3object_inpainting_async_form( input_image="input_image", text_prompt="text_prompt", ) @@ -8076,25 +14585,25 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/FaceInpainting/async/form", + "v3/ObjectInpainting/async/form", method="POST", - params={"example_id": example_id}, data={ "functions": functions, "variables": variables, "input_image": input_image, "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, + "mask_threshold": mask_threshold, "selected_model": selected_model, "negative_prompt": negative_prompt, "num_outputs": num_outputs, "quality": quality, - "upscale_factor": upscale_factor, "output_width": output_width, "output_height": output_height, "guidance_scale": guidance_scale, + "sd_2_upscaling": sd2upscaling, "seed": seed, "settings": settings, }, @@ -8104,119 
+14613,88 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(FaceInpaintingPageStatusResponse, parse_obj_as(type_=FaceInpaintingPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + return typing.cast( + ObjectInpaintingPageStatusResponse, + parse_obj_as( + type_=ObjectInpaintingPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def image_from_email( + async def post_v3seo_summary_async_form( self, *, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - email_address: typing.Optional[str] = None, - twitter_handle: typing.Optional[str] = None, - face_scale: typing.Optional[float] = None, - face_pos_x: typing.Optional[float] = None, - face_pos_y: 
typing.Optional[float] = None, - selected_model: typing.Optional[ImageFromEmailRequestSelectedModel] = None, - negative_prompt: typing.Optional[str] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - upscale_factor: typing.Optional[float] = None, - output_width: typing.Optional[int] = None, - output_height: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - should_send_email: typing.Optional[bool] = None, - email_from: typing.Optional[str] = None, - email_cc: typing.Optional[str] = None, - email_bcc: typing.Optional[str] = None, - email_subject: typing.Optional[str] = None, - email_body: typing.Optional[str] = None, - email_body_enable_html: typing.Optional[bool] = None, - fallback_email_body: typing.Optional[str] = None, + search_query: str, + keywords: str, + title: str, + company_url: str, + task_instructions: typing.Optional[str] = None, + enable_html: typing.Optional[bool] = None, + selected_model: typing.Optional[PostV3SeoSummaryAsyncFormRequestSelectedModel] = None, + max_search_urls: typing.Optional[int] = None, + enable_crosslinks: typing.Optional[bool] = None, seed: typing.Optional[int] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[PostV3SeoSummaryAsyncFormRequestResponseFormatType] = None, + serp_search_location: typing.Optional[SerpSearchLocation] = None, + scaleserp_locations: typing.Optional[typing.List[str]] = None, + serp_search_type: typing.Optional[SerpSearchType] = None, + scaleserp_search_field: typing.Optional[str] = None, settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> EmailFaceInpaintingPageStatusResponse: + request_options: typing.Optional[RequestOptions] = None, + ) -> 
SeoSummaryPageStatusResponse: """ Parameters ---------- - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - email_address : typing.Optional[str] - - twitter_handle : typing.Optional[str] - - face_scale : typing.Optional[float] + search_query : str - face_pos_x : typing.Optional[float] + keywords : str - face_pos_y : typing.Optional[float] + title : str - selected_model : typing.Optional[ImageFromEmailRequestSelectedModel] + company_url : str - negative_prompt : typing.Optional[str] + task_instructions : typing.Optional[str] - num_outputs : typing.Optional[int] + enable_html : typing.Optional[bool] - quality : typing.Optional[int] + selected_model : typing.Optional[PostV3SeoSummaryAsyncFormRequestSelectedModel] - upscale_factor : typing.Optional[float] + max_search_urls : typing.Optional[int] - output_width : typing.Optional[int] + enable_crosslinks : typing.Optional[bool] - output_height : typing.Optional[int] + seed : typing.Optional[int] - guidance_scale : typing.Optional[float] + avoid_repetition : typing.Optional[bool] - should_send_email : typing.Optional[bool] + num_outputs : typing.Optional[int] - email_from : typing.Optional[str] + quality : typing.Optional[float] - email_cc : typing.Optional[str] + max_tokens : typing.Optional[int] - email_bcc : typing.Optional[str] + sampling_temperature : typing.Optional[float] - email_subject : typing.Optional[str] + response_format_type : typing.Optional[PostV3SeoSummaryAsyncFormRequestResponseFormatType] - email_body : typing.Optional[str] + serp_search_location : typing.Optional[SerpSearchLocation] - email_body_enable_html : typing.Optional[bool] + scaleserp_locations : typing.Optional[typing.List[str]] + DEPRECATED: use `serp_search_location` instead - fallback_email_body : typing.Optional[str] + 
serp_search_type : typing.Optional[SerpSearchType] - seed : typing.Optional[int] + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead settings : typing.Optional[RunSettings] @@ -8225,7 +14703,7 @@ async def image_from_email( Returns ------- - EmailFaceInpaintingPageStatusResponse + SeoSummaryPageStatusResponse Successful Response Examples @@ -8240,43 +14718,40 @@ async def image_from_email( async def main() -> None: - await client.image_from_email( - text_prompt="text_prompt", + await client.post_v3seo_summary_async_form( + search_query="search_query", + keywords="keywords", + title="title", + company_url="company_url", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/EmailFaceInpainting/async/form", + "v3/SEOSummary/async/form", method="POST", - params={"example_id": example_id}, data={ - "functions": functions, - "variables": variables, - "email_address": email_address, - "twitter_handle": twitter_handle, - "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, + "search_query": search_query, + "keywords": keywords, + "title": title, + "company_url": company_url, + "task_instructions": task_instructions, + "enable_html": enable_html, "selected_model": selected_model, - "negative_prompt": negative_prompt, + "max_search_urls": max_search_urls, + "enable_crosslinks": enable_crosslinks, + "seed": seed, + "avoid_repetition": avoid_repetition, "num_outputs": num_outputs, "quality": quality, - "upscale_factor": upscale_factor, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "should_send_email": should_send_email, - "email_from": email_from, - "email_cc": email_cc, - "email_bcc": email_bcc, - "email_subject": email_subject, - "email_body": email_body, - "email_body_enable_html": email_body_enable_html, - "fallback_email_body": fallback_email_body, - "seed": seed, + 
"max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, "settings": settings, }, files={}, @@ -8285,90 +14760,66 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(EmailFaceInpaintingPageStatusResponse, parse_obj_as(type_=EmailFaceInpaintingPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + return typing.cast( + SeoSummaryPageStatusResponse, + parse_obj_as( + type_=SeoSummaryPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def image_from_web_search( + async def post_v3smart_gpt_async_form( self, *, - search_query: str, - text_prompt: str, - 
example_id: typing.Optional[str] = None, + input_prompt: str, functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - serp_search_location: typing.Optional[SerpSearchLocation] = None, - scaleserp_locations: typing.Optional[typing.List[str]] = None, - selected_model: typing.Optional[ImageFromWebSearchRequestSelectedModel] = None, - negative_prompt: typing.Optional[str] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + cot_prompt: typing.Optional[str] = None, + reflexion_prompt: typing.Optional[str] = None, + dera_prompt: typing.Optional[str] = None, + selected_model: typing.Optional[PostV3SmartGptAsyncFormRequestSelectedModel] = None, + avoid_repetition: typing.Optional[bool] = None, num_outputs: typing.Optional[int] = None, - quality: typing.Optional[int] = None, - guidance_scale: typing.Optional[float] = None, - prompt_strength: typing.Optional[float] = None, - sd2upscaling: typing.Optional[bool] = None, - seed: typing.Optional[int] = None, - image_guidance_scale: typing.Optional[float] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[PostV3SmartGptAsyncFormRequestResponseFormatType] = None, settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> GoogleImageGenPageStatusResponse: + request_options: typing.Optional[RequestOptions] = None, + ) -> SmartGptPageStatusResponse: """ Parameters ---------- - search_query : str - - text_prompt : str - - example_id : typing.Optional[str] + input_prompt : str functions : typing.Optional[typing.List[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as 
arguments - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.List[str]] - DEPRECATED: use `serp_search_location` instead + cot_prompt : typing.Optional[str] - selected_model : typing.Optional[ImageFromWebSearchRequestSelectedModel] + reflexion_prompt : typing.Optional[str] - negative_prompt : typing.Optional[str] + dera_prompt : typing.Optional[str] - num_outputs : typing.Optional[int] + selected_model : typing.Optional[PostV3SmartGptAsyncFormRequestSelectedModel] - quality : typing.Optional[int] + avoid_repetition : typing.Optional[bool] - guidance_scale : typing.Optional[float] + num_outputs : typing.Optional[int] - prompt_strength : typing.Optional[float] + quality : typing.Optional[float] - sd2upscaling : typing.Optional[bool] + max_tokens : typing.Optional[int] - seed : typing.Optional[int] + sampling_temperature : typing.Optional[float] - image_guidance_scale : typing.Optional[float] + response_format_type : typing.Optional[PostV3SmartGptAsyncFormRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -8377,7 +14828,7 @@ async def image_from_web_search( Returns ------- - GoogleImageGenPageStatusResponse + SmartGptPageStatusResponse Successful Response Examples @@ -8392,34 +14843,30 @@ async def image_from_web_search( async def main() -> None: - await client.image_from_web_search( - search_query="search_query", - text_prompt="text_prompt", + await client.post_v3smart_gpt_async_form( + input_prompt="input_prompt", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/GoogleImageGen/async/form", + "v3/SmartGPT/async/form", method="POST", - params={"example_id": example_id}, data={ "functions": functions, "variables": variables, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "search_query": search_query, - "text_prompt": text_prompt, + "input_prompt": input_prompt, + "cot_prompt": cot_prompt, + 
"reflexion_prompt": reflexion_prompt, + "dera_prompt": dera_prompt, "selected_model": selected_model, - "negative_prompt": negative_prompt, + "avoid_repetition": avoid_repetition, "num_outputs": num_outputs, "quality": quality, - "guidance_scale": guidance_scale, - "prompt_strength": prompt_strength, - "sd_2_upscaling": sd2upscaling, - "seed": seed, - "image_guidance_scale": image_guidance_scale, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, "settings": settings, }, files={}, @@ -8428,74 +14875,60 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(GoogleImageGenPageStatusResponse, parse_obj_as(type_=GoogleImageGenPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + return typing.cast( + SmartGptPageStatusResponse, + parse_obj_as( + type_=SmartGptPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, 
body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def remove_background( + async def post_v3social_lookup_email_async_form( self, *, - input_image: str, - example_id: typing.Optional[str] = None, + email_address: str, functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - selected_model: typing.Optional[RemoveBackgroundRequestSelectedModel] = None, - mask_threshold: typing.Optional[float] = None, - rect_persepective_transform: typing.Optional[bool] = None, - reflection_opacity: typing.Optional[float] = None, - obj_scale: typing.Optional[float] = None, - obj_pos_x: typing.Optional[float] = None, - obj_pos_y: typing.Optional[float] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + input_prompt: typing.Optional[str] = None, + selected_model: typing.Optional[PostV3SocialLookupEmailAsyncFormRequestSelectedModel] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[PostV3SocialLookupEmailAsyncFormRequestResponseFormatType] = None, settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> ImageSegmentationPageStatusResponse: + request_options: typing.Optional[RequestOptions] = None, + ) -> SocialLookupEmailPageStatusResponse: """ Parameters ---------- - input_image : str - - example_id : typing.Optional[str] + email_address : str functions : typing.Optional[typing.List[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : 
typing.Optional[RemoveBackgroundRequestSelectedModel] + input_prompt : typing.Optional[str] - mask_threshold : typing.Optional[float] + selected_model : typing.Optional[PostV3SocialLookupEmailAsyncFormRequestSelectedModel] - rect_persepective_transform : typing.Optional[bool] + avoid_repetition : typing.Optional[bool] - reflection_opacity : typing.Optional[float] + num_outputs : typing.Optional[int] - obj_scale : typing.Optional[float] + quality : typing.Optional[float] - obj_pos_x : typing.Optional[float] + max_tokens : typing.Optional[int] - obj_pos_y : typing.Optional[float] + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[PostV3SocialLookupEmailAsyncFormRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -8504,7 +14937,7 @@ async def remove_background( Returns ------- - ImageSegmentationPageStatusResponse + SocialLookupEmailPageStatusResponse Successful Response Examples @@ -8519,28 +14952,28 @@ async def remove_background( async def main() -> None: - await client.remove_background( - input_image="input_image", + await client.post_v3social_lookup_email_async_form( + email_address="email_address", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/ImageSegmentation/async/form", + "v3/SocialLookupEmail/async/form", method="POST", - params={"example_id": example_id}, data={ "functions": functions, "variables": variables, - "input_image": input_image, + "email_address": email_address, + "input_prompt": input_prompt, "selected_model": selected_model, - "mask_threshold": mask_threshold, - "rect_persepective_transform": rect_persepective_transform, - "reflection_opacity": reflection_opacity, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": 
response_format_type, "settings": settings, }, files={}, @@ -8549,68 +14982,91 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(ImageSegmentationPageStatusResponse, parse_obj_as(type_=ImageSegmentationPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + return typing.cast( + SocialLookupEmailPageStatusResponse, + parse_obj_as( + type_=SocialLookupEmailPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def upscale( + async def post_v3text_to_speech_async_form( self, *, - scale: int, - example_id: typing.Optional[str] = None, + text_prompt: str, functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - input_image: typing.Optional[str] = None, - input_video: typing.Optional[str] = None, - selected_models: 
typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] = None, - selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + tts_provider: typing.Optional[PostV3TextToSpeechAsyncFormRequestTtsProvider] = None, + uberduck_voice_name: typing.Optional[str] = None, + uberduck_speaking_rate: typing.Optional[float] = None, + google_voice_name: typing.Optional[str] = None, + google_speaking_rate: typing.Optional[float] = None, + google_pitch: typing.Optional[float] = None, + bark_history_prompt: typing.Optional[str] = None, + elevenlabs_voice_name: typing.Optional[str] = None, + elevenlabs_api_key: typing.Optional[str] = None, + elevenlabs_voice_id: typing.Optional[str] = None, + elevenlabs_model: typing.Optional[str] = None, + elevenlabs_stability: typing.Optional[float] = None, + elevenlabs_similarity_boost: typing.Optional[float] = None, + elevenlabs_style: typing.Optional[float] = None, + elevenlabs_speaker_boost: typing.Optional[bool] = None, + azure_voice_name: typing.Optional[str] = None, + openai_voice_name: typing.Optional[PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName] = None, + openai_tts_model: typing.Optional[PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel] = None, settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> CompareUpscalerPageStatusResponse: + request_options: typing.Optional[RequestOptions] = None, + ) -> TextToSpeechPageStatusResponse: """ Parameters ---------- - scale : int - The final upsampling scale of the image - - example_id : typing.Optional[str] + text_prompt : str functions : typing.Optional[typing.List[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - input_image : typing.Optional[str] - 
Input Image + tts_provider : typing.Optional[PostV3TextToSpeechAsyncFormRequestTtsProvider] - input_video : typing.Optional[str] - Input Video + uberduck_voice_name : typing.Optional[str] - selected_models : typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] + uberduck_speaking_rate : typing.Optional[float] - selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] + google_voice_name : typing.Optional[str] + + google_speaking_rate : typing.Optional[float] + + google_pitch : typing.Optional[float] + + bark_history_prompt : typing.Optional[str] + + elevenlabs_voice_name : typing.Optional[str] + Use `elevenlabs_voice_id` instead + + elevenlabs_api_key : typing.Optional[str] + + elevenlabs_voice_id : typing.Optional[str] + + elevenlabs_model : typing.Optional[str] + + elevenlabs_stability : typing.Optional[float] + + elevenlabs_similarity_boost : typing.Optional[float] + + elevenlabs_style : typing.Optional[float] + + elevenlabs_speaker_boost : typing.Optional[bool] + + azure_voice_name : typing.Optional[str] + + openai_voice_name : typing.Optional[PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName] + + openai_tts_model : typing.Optional[PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel] settings : typing.Optional[RunSettings] @@ -8619,7 +15075,7 @@ async def upscale( Returns ------- - CompareUpscalerPageStatusResponse + TextToSpeechPageStatusResponse Successful Response Examples @@ -8634,25 +15090,38 @@ async def upscale( async def main() -> None: - await client.upscale( - scale=1, + await client.post_v3text_to_speech_async_form( + text_prompt="text_prompt", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/compare-ai-upscalers/async/form", + "v3/TextToSpeech/async/form", method="POST", - params={"example_id": example_id}, data={ "functions": functions, "variables": variables, - "input_image": input_image, - "input_video": input_video, - "scale": scale, - "selected_models": selected_models, - 
"selected_bg_model": selected_bg_model, + "text_prompt": text_prompt, + "tts_provider": tts_provider, + "uberduck_voice_name": uberduck_voice_name, + "uberduck_speaking_rate": uberduck_speaking_rate, + "google_voice_name": google_voice_name, + "google_speaking_rate": google_speaking_rate, + "google_pitch": google_pitch, + "bark_history_prompt": bark_history_prompt, + "elevenlabs_voice_name": elevenlabs_voice_name, + "elevenlabs_api_key": elevenlabs_api_key, + "elevenlabs_voice_id": elevenlabs_voice_id, + "elevenlabs_model": elevenlabs_model, + "elevenlabs_stability": elevenlabs_stability, + "elevenlabs_similarity_boost": elevenlabs_similarity_boost, + "elevenlabs_style": elevenlabs_style, + "elevenlabs_speaker_boost": elevenlabs_speaker_boost, + "azure_voice_name": azure_voice_name, + "openai_voice_name": openai_voice_name, + "openai_tts_model": openai_tts_model, "settings": settings, }, files={}, @@ -8661,56 +15130,115 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(CompareUpscalerPageStatusResponse, parse_obj_as(type_=CompareUpscalerPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, 
parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + return typing.cast( + TextToSpeechPageStatusResponse, + parse_obj_as( + type_=TextToSpeechPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def embed( + async def post_v3art_qr_code_async_form( self, *, - texts: typing.List[str], - example_id: typing.Optional[str] = None, + text_prompt: str, functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - selected_model: typing.Optional[EmbedRequestSelectedModel] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + qr_code_data: typing.Optional[str] = None, + qr_code_input_image: typing.Optional[str] = None, + qr_code_vcard: typing.Optional[Vcard] = None, + qr_code_file: typing.Optional[str] = None, + use_url_shortener: typing.Optional[bool] = None, + negative_prompt: typing.Optional[str] = None, + image_prompt: typing.Optional[str] = None, + image_prompt_controlnet_models: typing.Optional[ + typing.List[PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem] + ] = None, + image_prompt_strength: typing.Optional[float] = None, + image_prompt_scale: typing.Optional[float] = None, + image_prompt_pos_x: typing.Optional[float] = None, + image_prompt_pos_y: typing.Optional[float] = None, + selected_model: typing.Optional[PostV3ArtQrCodeAsyncFormRequestSelectedModel] = None, + selected_controlnet_model: typing.Optional[ + typing.List[PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem] + ] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + controlnet_conditioning_scale: 
typing.Optional[typing.List[float]] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + scheduler: typing.Optional[PostV3ArtQrCodeAsyncFormRequestScheduler] = None, + seed: typing.Optional[int] = None, + obj_scale: typing.Optional[float] = None, + obj_pos_x: typing.Optional[float] = None, + obj_pos_y: typing.Optional[float] = None, settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> EmbeddingsPageStatusResponse: + request_options: typing.Optional[RequestOptions] = None, + ) -> QrCodeGeneratorPageStatusResponse: """ Parameters ---------- - texts : typing.List[str] - - example_id : typing.Optional[str] + text_prompt : str functions : typing.Optional[typing.List[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[EmbedRequestSelectedModel] + qr_code_data : typing.Optional[str] + + qr_code_input_image : typing.Optional[str] + + qr_code_vcard : typing.Optional[Vcard] + + qr_code_file : typing.Optional[str] + + use_url_shortener : typing.Optional[bool] + + negative_prompt : typing.Optional[str] + + image_prompt : typing.Optional[str] + + image_prompt_controlnet_models : typing.Optional[typing.List[PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem]] + + image_prompt_strength : typing.Optional[float] + + image_prompt_scale : typing.Optional[float] + + image_prompt_pos_x : typing.Optional[float] + + image_prompt_pos_y : typing.Optional[float] + + selected_model : typing.Optional[PostV3ArtQrCodeAsyncFormRequestSelectedModel] + + selected_controlnet_model : typing.Optional[typing.List[PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem]] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : 
typing.Optional[float] + + controlnet_conditioning_scale : typing.Optional[typing.List[float]] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + scheduler : typing.Optional[PostV3ArtQrCodeAsyncFormRequestScheduler] + + seed : typing.Optional[int] + + obj_scale : typing.Optional[float] + + obj_pos_x : typing.Optional[float] + + obj_pos_y : typing.Optional[float] settings : typing.Optional[RunSettings] @@ -8719,7 +15247,7 @@ async def embed( Returns ------- - EmbeddingsPageStatusResponse + QrCodeGeneratorPageStatusResponse Successful Response Examples @@ -8734,22 +15262,45 @@ async def embed( async def main() -> None: - await client.embed( - texts=["texts"], + await client.post_v3art_qr_code_async_form( + text_prompt="text_prompt", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/embeddings/async/form", + "v3/art-qr-code/async/form", method="POST", - params={"example_id": example_id}, data={ "functions": functions, "variables": variables, - "texts": texts, + "qr_code_data": qr_code_data, + "qr_code_input_image": qr_code_input_image, + "qr_code_vcard": qr_code_vcard, + "qr_code_file": qr_code_file, + "use_url_shortener": use_url_shortener, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "image_prompt": image_prompt, + "image_prompt_controlnet_models": image_prompt_controlnet_models, + "image_prompt_strength": image_prompt_strength, + "image_prompt_scale": image_prompt_scale, + "image_prompt_pos_x": image_prompt_pos_x, + "image_prompt_pos_y": image_prompt_pos_y, "selected_model": selected_model, + "selected_controlnet_model": selected_controlnet_model, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "controlnet_conditioning_scale": controlnet_conditioning_scale, + "num_outputs": num_outputs, + "quality": quality, + "scheduler": scheduler, + "seed": seed, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": 
obj_pos_y, "settings": settings, }, files={}, @@ -8758,125 +15309,63 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(EmbeddingsPageStatusResponse, parse_obj_as(type_=EmbeddingsPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore + return typing.cast( + QrCodeGeneratorPageStatusResponse, + parse_obj_as( + type_=QrCodeGeneratorPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def seo_people_also_ask_doc( + async def post_v3asr_async_form( self, *, - search_query: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - keyword_query: typing.Optional[SeoPeopleAlsoAskDocRequestKeywordQuery] = None, - documents: typing.Optional[typing.List[str]] = None, - max_references: 
typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - doc_extract_url: typing.Optional[str] = None, - embedding_model: typing.Optional[SeoPeopleAlsoAskDocRequestEmbeddingModel] = None, - dense_weight: typing.Optional[float] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - selected_model: typing.Optional[SeoPeopleAlsoAskDocRequestSelectedModel] = None, - citation_style: typing.Optional[SeoPeopleAlsoAskDocRequestCitationStyle] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[SeoPeopleAlsoAskDocRequestResponseFormatType] = None, - serp_search_location: typing.Optional[SerpSearchLocation] = None, - scaleserp_locations: typing.Optional[typing.List[str]] = None, - serp_search_type: typing.Optional[SerpSearchType] = None, - scaleserp_search_field: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> RelatedQnADocPageStatusResponse: - """ - Parameters - ---------- - search_query : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.List[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Any]] - Variables to be used as Jinja prompt templates and in functions as arguments - - keyword_query : typing.Optional[SeoPeopleAlsoAskDocRequestKeywordQuery] - - documents : typing.Optional[typing.List[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : typing.Optional[str] - - embedding_model : typing.Optional[SeoPeopleAlsoAskDocRequestEmbeddingModel] - - dense_weight : 
typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[SeoPeopleAlsoAskDocRequestSelectedModel] - - citation_style : typing.Optional[SeoPeopleAlsoAskDocRequestCitationStyle] + documents: typing.List[str], + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + selected_model: typing.Optional[PostV3AsrAsyncFormRequestSelectedModel] = None, + language: typing.Optional[str] = None, + translation_model: typing.Optional[PostV3AsrAsyncFormRequestTranslationModel] = None, + output_format: typing.Optional[PostV3AsrAsyncFormRequestOutputFormat] = None, + google_translate_target: typing.Optional[str] = None, + translation_source: typing.Optional[str] = None, + translation_target: typing.Optional[str] = None, + glossary_document: typing.Optional[str] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsrPageStatusResponse: + """ + Parameters + ---------- + documents : typing.List[str] - avoid_repetition : typing.Optional[bool] + functions : typing.Optional[typing.List[RecipeFunction]] - num_outputs : typing.Optional[int] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - quality : typing.Optional[float] + selected_model : typing.Optional[PostV3AsrAsyncFormRequestSelectedModel] - max_tokens : typing.Optional[int] + language : typing.Optional[str] - sampling_temperature : typing.Optional[float] + translation_model : typing.Optional[PostV3AsrAsyncFormRequestTranslationModel] - 
response_format_type : typing.Optional[SeoPeopleAlsoAskDocRequestResponseFormatType] + output_format : typing.Optional[PostV3AsrAsyncFormRequestOutputFormat] - serp_search_location : typing.Optional[SerpSearchLocation] + google_translate_target : typing.Optional[str] + use `translation_model` & `translation_target` instead. - scaleserp_locations : typing.Optional[typing.List[str]] - DEPRECATED: use `serp_search_location` instead + translation_source : typing.Optional[str] - serp_search_type : typing.Optional[SerpSearchType] + translation_target : typing.Optional[str] - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead + glossary_document : typing.Optional[str] + Provide a glossary to customize translation and improve accuracy of domain-specific terms. + If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). settings : typing.Optional[RunSettings] @@ -8885,7 +15374,7 @@ async def seo_people_also_ask_doc( Returns ------- - RelatedQnADocPageStatusResponse + AsrPageStatusResponse Successful Response Examples @@ -8900,43 +15389,28 @@ async def seo_people_also_ask_doc( async def main() -> None: - await client.seo_people_also_ask_doc( - search_query="search_query", + await client.post_v3asr_async_form( + documents=["documents"], ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/related-qna-maker-doc/async/form", + "v3/asr/async/form", method="POST", - params={"example_id": example_id}, data={ "functions": functions, "variables": variables, - "search_query": search_query, - "keyword_query": keyword_query, "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - 
"task_instructions": task_instructions, - "query_instructions": query_instructions, "selected_model": selected_model, - "citation_style": citation_style, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, + "language": language, + "translation_model": translation_model, + "output_format": output_format, + "google_translate_target": google_translate_target, + "translation_source": translation_source, + "translation_target": translation_target, + "glossary_document": glossary_document, "settings": settings, }, files={}, @@ -8945,122 +15419,84 @@ async def main() -> None: ) try: if 200 <= _response.status_code < 300: - return typing.cast(RelatedQnADocPageStatusResponse, parse_obj_as(type_=RelatedQnADocPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, 
object_=_response.json())) # type: ignore + return typing.cast( + AsrPageStatusResponse, + parse_obj_as( + type_=AsrPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def health_status_get(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + async def post_v3bulk_eval_async_form( + self, + *, + documents: typing.List[str], + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + eval_prompts: typing.Optional[typing.List[EvalPrompt]] = None, + agg_functions: typing.Optional[typing.List[AggFunction]] = None, + selected_model: typing.Optional[PostV3BulkEvalAsyncFormRequestSelectedModel] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[PostV3BulkEvalAsyncFormRequestResponseFormatType] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> BulkEvalPageStatusResponse: """ Parameters ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. + documents : typing.List[str] - Returns - ------- - typing.Any - Successful Response + Upload or link to a CSV or google sheet that contains your sample input data. + For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. + Remember to includes header names in your CSV too. 
- Examples - -------- - import asyncio - from gooey import AsyncGooey + functions : typing.Optional[typing.List[RecipeFunction]] - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + eval_prompts : typing.Optional[typing.List[EvalPrompt]] - async def main() -> None: - await client.health_status_get() + Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. + _The `columns` dictionary can be used to reference the spreadsheet columns._ - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "status", method="GET", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + agg_functions : typing.Optional[typing.List[AggFunction]] - async def post_v3chyron_plant_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). 
- Returns - ------- - typing.Any - Successful Response - Examples - -------- - import asyncio + selected_model : typing.Optional[PostV3BulkEvalAsyncFormRequestSelectedModel] - from gooey import AsyncGooey + avoid_repetition : typing.Optional[bool] - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + num_outputs : typing.Optional[int] + quality : typing.Optional[float] - async def main() -> None: - await client.post_v3chyron_plant_async() + max_tokens : typing.Optional[int] + sampling_temperature : typing.Optional[float] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/ChyronPlant/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response_format_type : typing.Optional[PostV3BulkEvalAsyncFormRequestResponseFormatType] + + settings : typing.Optional[RunSettings] - async def post_v3compare_llm_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: - """ - Parameters - ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + BulkEvalPageStatusResponse Successful Response Examples @@ -9075,116 +15511,106 @@ async def post_v3compare_llm_async(self, *, request_options: typing.Optional[Req async def main() -> None: - await client.post_v3compare_llm_async() + await client.post_v3bulk_eval_async_form( + documents=["documents"], + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/CompareLLM/async", method="POST", request_options=request_options + "v3/bulk-eval/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "documents": documents, + "eval_prompts": eval_prompts, + "agg_functions": agg_functions, + "selected_model": selected_model, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + BulkEvalPageStatusResponse, + parse_obj_as( + type_=BulkEvalPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3compare_text2img_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Any: + async def post_v3bulk_runner_async_form( + self, + *, + documents: typing.List[str], + run_urls: typing.List[str], + input_columns: typing.Dict[str, str], + output_columns: typing.Dict[str, str], + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] = None, + eval_urls: typing.Optional[typing.List[str]] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> BulkRunnerPageStatusResponse: """ Parameters ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Any - Successful Response + documents : typing.List[str] - Examples - -------- - import asyncio + Upload or link to a CSV or google sheet that contains your sample input data. + For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. + Remember to includes header names in your CSV too. - from gooey import AsyncGooey - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + run_urls : typing.List[str] + Provide one or more Gooey.AI workflow runs. + You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. - async def main() -> None: - await client.post_v3compare_text2img_async() + input_columns : typing.Dict[str, str] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/CompareText2Img/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. 
- async def post_v3deforum_sd_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - Returns - ------- - typing.Any - Successful Response + output_columns : typing.Dict[str, str] - Examples - -------- - import asyncio + For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. - from gooey import AsyncGooey - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + functions : typing.Optional[typing.List[RecipeFunction]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - async def main() -> None: - await client.post_v3deforum_sd_async() + eval_urls : typing.Optional[typing.List[str]] + _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/DeforumSD/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3email_face_inpainting_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Any: - """ - Parameters - ---------- + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + BulkRunnerPageStatusResponse Successful Response Examples @@ -9199,34 +15625,91 @@ async def post_v3email_face_inpainting_async( async def main() -> None: - await client.post_v3email_face_inpainting_async() + await client.post_v3bulk_runner_async_form( + documents=["documents"], + run_urls=["run_urls"], + input_columns={"key": "value"}, + output_columns={"key": "value"}, + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/EmailFaceInpainting/async", method="POST", request_options=request_options + "v3/bulk-runner/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "documents": documents, + "run_urls": run_urls, + "input_columns": input_columns, + "output_columns": output_columns, + "eval_urls": eval_urls, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + BulkRunnerPageStatusResponse, + parse_obj_as( + type_=BulkRunnerPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3face_inpainting_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Any: + async def post_v3compare_ai_upscalers_async_form( + self, + *, + scale: int, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + input_image: typing.Optional[str] = None, + input_video: typing.Optional[str] = None, + selected_models: typing.Optional[ + typing.List[PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem] + ] = None, + 
selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> CompareUpscalerPageStatusResponse: """ Parameters ---------- + scale : int + The final upsampling scale of the image + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_image : typing.Optional[str] + Input Image + + input_video : typing.Optional[str] + Input Video + + selected_models : typing.Optional[typing.List[PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem]] + + selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Any + CompareUpscalerPageStatusResponse Successful Response Examples @@ -9241,76 +15724,109 @@ async def post_v3face_inpainting_async( async def main() -> None: - await client.post_v3face_inpainting_async() + await client.post_v3compare_ai_upscalers_async_form( + scale=1, + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/FaceInpainting/async", method="POST", request_options=request_options + "v3/compare-ai-upscalers/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "input_image": input_image, + "input_video": input_video, + "scale": scale, + "selected_models": selected_models, + "selected_bg_model": selected_bg_model, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + CompareUpscalerPageStatusResponse, + 
parse_obj_as( + type_=CompareUpscalerPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3google_image_gen_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Any: + async def post_v3doc_extract_async_form( + self, + *, + documents: typing.List[str], + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + sheet_url: typing.Optional[str] = None, + selected_asr_model: typing.Optional[PostV3DocExtractAsyncFormRequestSelectedAsrModel] = None, + google_translate_target: typing.Optional[str] = None, + glossary_document: typing.Optional[str] = None, + task_instructions: typing.Optional[str] = None, + selected_model: typing.Optional[PostV3DocExtractAsyncFormRequestSelectedModel] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[PostV3DocExtractAsyncFormRequestResponseFormatType] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> DocExtractPageStatusResponse: """ Parameters ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
+ documents : typing.List[str] - Returns - ------- - typing.Any - Successful Response + functions : typing.Optional[typing.List[RecipeFunction]] - Examples - -------- - import asyncio + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - from gooey import AsyncGooey + sheet_url : typing.Optional[str] - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + selected_asr_model : typing.Optional[PostV3DocExtractAsyncFormRequestSelectedAsrModel] + google_translate_target : typing.Optional[str] - async def main() -> None: - await client.post_v3google_image_gen_async() + glossary_document : typing.Optional[str] + Provide a glossary to customize translation and improve accuracy of domain-specific terms. + If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). 
+ task_instructions : typing.Optional[str] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/GoogleImageGen/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + selected_model : typing.Optional[PostV3DocExtractAsyncFormRequestSelectedModel] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[PostV3DocExtractAsyncFormRequestResponseFormatType] + + settings : typing.Optional[RunSettings] - async def post_v3image_segmentation_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Any: - """ - Parameters - ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + DocExtractPageStatusResponse Successful Response Examples @@ -9325,114 +15841,137 @@ async def post_v3image_segmentation_async( async def main() -> None: - await client.post_v3image_segmentation_async() + await client.post_v3doc_extract_async_form( + documents=["documents"], + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/ImageSegmentation/async", method="POST", request_options=request_options + "v3/doc-extract/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "documents": documents, + "sheet_url": sheet_url, + "selected_asr_model": selected_asr_model, + "google_translate_target": google_translate_target, + "glossary_document": glossary_document, + "task_instructions": task_instructions, + "selected_model": selected_model, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + DocExtractPageStatusResponse, + parse_obj_as( + type_=DocExtractPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3img2img_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + async def post_v3doc_search_async_form( + self, + *, + search_query: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] = None, + keyword_query: typing.Optional[PostV3DocSearchAsyncFormRequestKeywordQuery] = None, + documents: typing.Optional[typing.List[str]] = None, + max_references: typing.Optional[int] = None, + max_context_words: typing.Optional[int] = None, + scroll_jump: typing.Optional[int] = None, + doc_extract_url: typing.Optional[str] = None, + embedding_model: typing.Optional[PostV3DocSearchAsyncFormRequestEmbeddingModel] = None, + dense_weight: typing.Optional[float] = None, + task_instructions: typing.Optional[str] = None, + query_instructions: typing.Optional[str] = None, + selected_model: typing.Optional[PostV3DocSearchAsyncFormRequestSelectedModel] = None, + citation_style: typing.Optional[PostV3DocSearchAsyncFormRequestCitationStyle] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[PostV3DocSearchAsyncFormRequestResponseFormatType] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> DocSearchPageStatusResponse: """ Parameters ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
+ search_query : str - Returns - ------- - typing.Any - Successful Response + functions : typing.Optional[typing.List[RecipeFunction]] - Examples - -------- - import asyncio + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - from gooey import AsyncGooey + keyword_query : typing.Optional[PostV3DocSearchAsyncFormRequestKeywordQuery] - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + documents : typing.Optional[typing.List[str]] + max_references : typing.Optional[int] - async def main() -> None: - await client.post_v3img2img_async() + max_context_words : typing.Optional[int] + scroll_jump : typing.Optional[int] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/Img2Img/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + doc_extract_url : typing.Optional[str] - async def post_v3letter_writer_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Any: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. + embedding_model : typing.Optional[PostV3DocSearchAsyncFormRequestEmbeddingModel] - Returns - ------- - typing.Any - Successful Response + dense_weight : typing.Optional[float] - Examples - -------- - import asyncio + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
- from gooey import AsyncGooey - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + task_instructions : typing.Optional[str] + query_instructions : typing.Optional[str] - async def main() -> None: - await client.post_v3letter_writer_async() + selected_model : typing.Optional[PostV3DocSearchAsyncFormRequestSelectedModel] + citation_style : typing.Optional[PostV3DocSearchAsyncFormRequestCitationStyle] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/LetterWriter/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[PostV3DocSearchAsyncFormRequestResponseFormatType] + + settings : typing.Optional[RunSettings] - async def post_v3lipsync_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: - """ - Parameters - ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + DocSearchPageStatusResponse Successful Response Examples @@ -9447,74 +15986,121 @@ async def post_v3lipsync_async(self, *, request_options: typing.Optional[Request async def main() -> None: - await client.post_v3lipsync_async() + await client.post_v3doc_search_async_form( + search_query="search_query", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/Lipsync/async", method="POST", request_options=request_options + "v3/doc-search/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "keyword_query": keyword_query, + "documents": documents, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "doc_extract_url": doc_extract_url, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "citation_style": citation_style, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + DocSearchPageStatusResponse, + parse_obj_as( + type_=DocSearchPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3lipsync_tts_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: 
+ async def post_v3doc_summary_async_form( + self, + *, + documents: typing.List[str], + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + task_instructions: typing.Optional[str] = None, + merge_instructions: typing.Optional[str] = None, + selected_model: typing.Optional[PostV3DocSummaryAsyncFormRequestSelectedModel] = None, + chain_type: typing.Optional[typing.Literal["map_reduce"]] = None, + selected_asr_model: typing.Optional[PostV3DocSummaryAsyncFormRequestSelectedAsrModel] = None, + google_translate_target: typing.Optional[str] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[PostV3DocSummaryAsyncFormRequestResponseFormatType] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> DocSummaryPageStatusResponse: """ Parameters ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
+ documents : typing.List[str] - Returns - ------- - typing.Any - Successful Response + functions : typing.Optional[typing.List[RecipeFunction]] - Examples - -------- - import asyncio + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - from gooey import AsyncGooey + task_instructions : typing.Optional[str] - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + merge_instructions : typing.Optional[str] + selected_model : typing.Optional[PostV3DocSummaryAsyncFormRequestSelectedModel] - async def main() -> None: - await client.post_v3lipsync_tts_async() + chain_type : typing.Optional[typing.Literal["map_reduce"]] + selected_asr_model : typing.Optional[PostV3DocSummaryAsyncFormRequestSelectedAsrModel] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/LipsyncTTS/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + google_translate_target : typing.Optional[str] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[PostV3DocSummaryAsyncFormRequestResponseFormatType] + + settings : typing.Optional[RunSettings] - async def post_v3object_inpainting_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Any: - """ - Parameters - ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + DocSummaryPageStatusResponse Successful Response Examples @@ -9529,32 +16115,82 @@ async def post_v3object_inpainting_async( async def main() -> None: - await client.post_v3object_inpainting_async() + await client.post_v3doc_summary_async_form( + documents=["documents"], + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/ObjectInpainting/async", method="POST", request_options=request_options + "v3/doc-summary/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "documents": documents, + "task_instructions": task_instructions, + "merge_instructions": merge_instructions, + "selected_model": selected_model, + "chain_type": chain_type, + "selected_asr_model": selected_asr_model, + "google_translate_target": google_translate_target, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + DocSummaryPageStatusResponse, + parse_obj_as( + type_=DocSummaryPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3seo_summary_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + async def post_v3embeddings_async_form( + self, + *, + texts: typing.List[str], + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] = None, + selected_model: typing.Optional[PostV3EmbeddingsAsyncFormRequestSelectedModel] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> EmbeddingsPageStatusResponse: """ Parameters ---------- + texts : typing.List[str] + + functions : typing.Optional[typing.List[RecipeFunction]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + selected_model : typing.Optional[PostV3EmbeddingsAsyncFormRequestSelectedModel] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Any + EmbeddingsPageStatusResponse Successful Response Examples @@ -9569,32 +16205,66 @@ async def post_v3seo_summary_async(self, *, request_options: typing.Optional[Req async def main() -> None: - await client.post_v3seo_summary_async() + await client.post_v3embeddings_async_form( + texts=["texts"], + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/SEOSummary/async", method="POST", request_options=request_options + "v3/embeddings/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "texts": texts, + "selected_model": selected_model, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + EmbeddingsPageStatusResponse, + parse_obj_as( + type_=EmbeddingsPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, 
body=_response_json) - async def post_v3smart_gpt_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + async def post_v3functions_async_form( + self, + *, + code: typing.Optional[str] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> FunctionsPageStatusResponse: """ Parameters ---------- + code : typing.Optional[str] + The JS code to be executed. + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used in the code + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Any + FunctionsPageStatusResponse Successful Response Examples @@ -9609,76 +16279,130 @@ async def post_v3smart_gpt_async(self, *, request_options: typing.Optional[Reque async def main() -> None: - await client.post_v3smart_gpt_async() + await client.post_v3functions_async_form() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/SmartGPT/async", method="POST", request_options=request_options + "v3/functions/async/form", + method="POST", + data={ + "code": code, + "variables": variables, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + FunctionsPageStatusResponse, + parse_obj_as( + type_=FunctionsPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3social_lookup_email_async( - self, *, 
request_options: typing.Optional[RequestOptions] = None - ) -> typing.Any: + async def post_v3google_gpt_async_form( + self, + *, + search_query: str, + site_filter: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + task_instructions: typing.Optional[str] = None, + query_instructions: typing.Optional[str] = None, + selected_model: typing.Optional[PostV3GoogleGptAsyncFormRequestSelectedModel] = None, + max_search_urls: typing.Optional[int] = None, + max_references: typing.Optional[int] = None, + max_context_words: typing.Optional[int] = None, + scroll_jump: typing.Optional[int] = None, + embedding_model: typing.Optional[PostV3GoogleGptAsyncFormRequestEmbeddingModel] = None, + dense_weight: typing.Optional[float] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[PostV3GoogleGptAsyncFormRequestResponseFormatType] = None, + serp_search_location: typing.Optional[SerpSearchLocation] = None, + scaleserp_locations: typing.Optional[typing.List[str]] = None, + serp_search_type: typing.Optional[SerpSearchType] = None, + scaleserp_search_field: typing.Optional[str] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> GoogleGptPageStatusResponse: """ Parameters ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
+ search_query : str - Returns - ------- - typing.Any - Successful Response + site_filter : str - Examples - -------- - import asyncio + functions : typing.Optional[typing.List[RecipeFunction]] - from gooey import AsyncGooey + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + task_instructions : typing.Optional[str] + query_instructions : typing.Optional[str] - async def main() -> None: - await client.post_v3social_lookup_email_async() + selected_model : typing.Optional[PostV3GoogleGptAsyncFormRequestSelectedModel] + max_search_urls : typing.Optional[int] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/SocialLookupEmail/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + embedding_model : typing.Optional[PostV3GoogleGptAsyncFormRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
+ + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[PostV3GoogleGptAsyncFormRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.List[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] - async def post_v3text_to_speech_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Any: - """ - Parameters - ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Any + GoogleGptPageStatusResponse Successful Response Examples @@ -9693,72 +16417,160 @@ async def post_v3text_to_speech_async( async def main() -> None: - await client.post_v3text_to_speech_async() + await client.post_v3google_gpt_async_form( + search_query="search_query", + site_filter="site_filter", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/TextToSpeech/async", method="POST", request_options=request_options + "v3/google-gpt/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "site_filter": site_filter, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "max_search_urls": max_search_urls, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": 
quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + GoogleGptPageStatusResponse, + parse_obj_as( + type_=GoogleGptPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3art_qr_code_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + async def post_v3related_qna_maker_doc_async_form( + self, + *, + search_query: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + keyword_query: typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery] = None, + documents: typing.Optional[typing.List[str]] = None, + max_references: typing.Optional[int] = None, + max_context_words: typing.Optional[int] = None, + scroll_jump: typing.Optional[int] = None, + doc_extract_url: typing.Optional[str] = None, + embedding_model: typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel] = None, + dense_weight: typing.Optional[float] = None, + task_instructions: typing.Optional[str] = None, + query_instructions: typing.Optional[str] = None, + selected_model: typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel] = None, + citation_style: 
typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType] = None, + serp_search_location: typing.Optional[SerpSearchLocation] = None, + scaleserp_locations: typing.Optional[typing.List[str]] = None, + serp_search_type: typing.Optional[SerpSearchType] = None, + scaleserp_search_field: typing.Optional[str] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> RelatedQnADocPageStatusResponse: """ Parameters ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. + search_query : str - Returns - ------- - typing.Any - Successful Response + functions : typing.Optional[typing.List[RecipeFunction]] - Examples - -------- - import asyncio + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - from gooey import AsyncGooey + keyword_query : typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery] - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + documents : typing.Optional[typing.List[str]] + max_references : typing.Optional[int] - async def main() -> None: - await client.post_v3art_qr_code_async() + max_context_words : typing.Optional[int] + scroll_jump : typing.Optional[int] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/art-qr-code/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - 
_response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + doc_extract_url : typing.Optional[str] + + embedding_model : typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel] + + citation_style : typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.List[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] - async def post_v3asr_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: - """ - Parameters - ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + RelatedQnADocPageStatusResponse Successful Response Examples @@ -9773,72 +16585,155 @@ async def post_v3asr_async(self, *, request_options: typing.Optional[RequestOpti async def main() -> None: - await client.post_v3asr_async() + await client.post_v3related_qna_maker_doc_async_form( + search_query="search_query", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/asr/async", method="POST", request_options=request_options + "v3/related-qna-maker-doc/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "keyword_query": keyword_query, + "documents": documents, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "doc_extract_url": doc_extract_url, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "citation_style": citation_style, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + RelatedQnADocPageStatusResponse, + parse_obj_as( + type_=RelatedQnADocPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise 
ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3bulk_eval_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + async def post_v3related_qna_maker_async_form( + self, + *, + search_query: str, + site_filter: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + task_instructions: typing.Optional[str] = None, + query_instructions: typing.Optional[str] = None, + selected_model: typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestSelectedModel] = None, + max_search_urls: typing.Optional[int] = None, + max_references: typing.Optional[int] = None, + max_context_words: typing.Optional[int] = None, + scroll_jump: typing.Optional[int] = None, + embedding_model: typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel] = None, + dense_weight: typing.Optional[float] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType] = None, + serp_search_location: typing.Optional[SerpSearchLocation] = None, + scaleserp_locations: typing.Optional[typing.List[str]] = None, + serp_search_type: typing.Optional[SerpSearchType] = None, + scaleserp_search_field: typing.Optional[str] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> RelatedQnAPageStatusResponse: """ Parameters ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
+ search_query : str - Returns - ------- - typing.Any - Successful Response + site_filter : str - Examples - -------- - import asyncio + functions : typing.Optional[typing.List[RecipeFunction]] - from gooey import AsyncGooey + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + task_instructions : typing.Optional[str] + query_instructions : typing.Optional[str] - async def main() -> None: - await client.post_v3bulk_eval_async() + selected_model : typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestSelectedModel] + max_search_urls : typing.Optional[int] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/bulk-eval/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + embedding_model : typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
+ + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.List[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] - async def post_v3bulk_runner_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: - """ - Parameters - ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Any + RelatedQnAPageStatusResponse Successful Response Examples @@ -9853,74 +16748,112 @@ async def post_v3bulk_runner_async(self, *, request_options: typing.Optional[Req async def main() -> None: - await client.post_v3bulk_runner_async() + await client.post_v3related_qna_maker_async_form( + search_query="search_query", + site_filter="site_filter", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/bulk-runner/async", method="POST", request_options=request_options + "v3/related-qna-maker/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "site_filter": site_filter, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "max_search_urls": max_search_urls, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "avoid_repetition": 
avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + RelatedQnAPageStatusResponse, + parse_obj_as( + type_=RelatedQnAPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3compare_ai_upscalers_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Any: + async def post_v3text2audio_async_form( + self, + *, + text_prompt: str, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + negative_prompt: typing.Optional[str] = None, + duration_sec: typing.Optional[float] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + seed: typing.Optional[int] = None, + sd2upscaling: typing.Optional[bool] = None, + selected_models: typing.Optional[typing.List[typing.Literal["audio_ldm"]]] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> Text2AudioPageStatusResponse: """ Parameters ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
+ text_prompt : str - Returns - ------- - typing.Any - Successful Response + functions : typing.Optional[typing.List[RecipeFunction]] - Examples - -------- - import asyncio + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - from gooey import AsyncGooey + negative_prompt : typing.Optional[str] - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + duration_sec : typing.Optional[float] + num_outputs : typing.Optional[int] - async def main() -> None: - await client.post_v3compare_ai_upscalers_async() + quality : typing.Optional[int] + guidance_scale : typing.Optional[float] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/compare-ai-upscalers/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + seed : typing.Optional[int] + + sd2upscaling : typing.Optional[bool] + + selected_models : typing.Optional[typing.List[typing.Literal["audio_ldm"]]] + + settings : typing.Optional[RunSettings] - async def post_v3doc_extract_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: - """ - Parameters - ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + Text2AudioPageStatusResponse Successful Response Examples @@ -9935,72 +16868,89 @@ async def post_v3doc_extract_async(self, *, request_options: typing.Optional[Req async def main() -> None: - await client.post_v3doc_extract_async() + await client.post_v3text2audio_async_form( + text_prompt="text_prompt", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/doc-extract/async", method="POST", request_options=request_options + "v3/text2audio/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "duration_sec": duration_sec, + "num_outputs": num_outputs, + "quality": quality, + "guidance_scale": guidance_scale, + "seed": seed, + "sd_2_upscaling": sd2upscaling, + "selected_models": selected_models, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + Text2AudioPageStatusResponse, + parse_obj_as( + type_=Text2AudioPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3doc_search_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + async def post_v3translate_async_form( + self, + *, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + texts: typing.Optional[typing.List[str]] = None, + selected_model: typing.Optional[PostV3TranslateAsyncFormRequestSelectedModel] = None, + translation_source: typing.Optional[str] = None, + 
translation_target: typing.Optional[str] = None, + glossary_document: typing.Optional[str] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> TranslationPageStatusResponse: """ Parameters ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Any - Successful Response + functions : typing.Optional[typing.List[RecipeFunction]] - Examples - -------- - import asyncio + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - from gooey import AsyncGooey + texts : typing.Optional[typing.List[str]] - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + selected_model : typing.Optional[PostV3TranslateAsyncFormRequestSelectedModel] + translation_source : typing.Optional[str] - async def main() -> None: - await client.post_v3doc_search_async() + translation_target : typing.Optional[str] + glossary_document : typing.Optional[str] + Provide a glossary to customize translation and improve accuracy of domain-specific terms. + If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). 
- asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/doc-search/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + settings : typing.Optional[RunSettings] - async def post_v3doc_summary_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: - """ - Parameters - ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Any + TranslationPageStatusResponse Successful Response Examples @@ -10015,316 +16965,257 @@ async def post_v3doc_summary_async(self, *, request_options: typing.Optional[Req async def main() -> None: - await client.post_v3doc_summary_async() + await client.post_v3translate_async_form() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/doc-summary/async", method="POST", request_options=request_options + "v3/translate/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "texts": texts, + "selected_model": selected_model, + "translation_source": translation_source, + "translation_target": translation_target, + "glossary_document": glossary_document, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + TranslationPageStatusResponse, + parse_obj_as( + type_=TranslationPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = 
_response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def post_v3embeddings_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: + async def post_v3video_bots_async_form( + self, + *, + functions: typing.Optional[typing.List[RecipeFunction]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + input_prompt: typing.Optional[str] = None, + input_audio: typing.Optional[str] = None, + input_images: typing.Optional[typing.List[str]] = None, + input_documents: typing.Optional[typing.List[str]] = None, + doc_extract_url: typing.Optional[str] = None, + messages: typing.Optional[typing.List[ConversationEntry]] = None, + bot_script: typing.Optional[str] = None, + selected_model: typing.Optional[PostV3VideoBotsAsyncFormRequestSelectedModel] = None, + document_model: typing.Optional[str] = None, + task_instructions: typing.Optional[str] = None, + query_instructions: typing.Optional[str] = None, + keyword_instructions: typing.Optional[str] = None, + documents: typing.Optional[typing.List[str]] = None, + max_references: typing.Optional[int] = None, + max_context_words: typing.Optional[int] = None, + scroll_jump: typing.Optional[int] = None, + embedding_model: typing.Optional[PostV3VideoBotsAsyncFormRequestEmbeddingModel] = None, + dense_weight: typing.Optional[float] = None, + citation_style: typing.Optional[PostV3VideoBotsAsyncFormRequestCitationStyle] = None, + use_url_shortener: typing.Optional[bool] = None, + asr_model: typing.Optional[PostV3VideoBotsAsyncFormRequestAsrModel] = None, + asr_language: typing.Optional[str] = None, + translation_model: typing.Optional[PostV3VideoBotsAsyncFormRequestTranslationModel] = None, + user_language: typing.Optional[str] = None, + input_glossary_document: typing.Optional[str] = None, + output_glossary_document: 
typing.Optional[str] = None, + lipsync_model: typing.Optional[PostV3VideoBotsAsyncFormRequestLipsyncModel] = None, + tools: typing.Optional[typing.List[LlmTools]] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[PostV3VideoBotsAsyncFormRequestResponseFormatType] = None, + tts_provider: typing.Optional[PostV3VideoBotsAsyncFormRequestTtsProvider] = None, + uberduck_voice_name: typing.Optional[str] = None, + uberduck_speaking_rate: typing.Optional[float] = None, + google_voice_name: typing.Optional[str] = None, + google_speaking_rate: typing.Optional[float] = None, + google_pitch: typing.Optional[float] = None, + bark_history_prompt: typing.Optional[str] = None, + elevenlabs_voice_name: typing.Optional[str] = None, + elevenlabs_api_key: typing.Optional[str] = None, + elevenlabs_voice_id: typing.Optional[str] = None, + elevenlabs_model: typing.Optional[str] = None, + elevenlabs_stability: typing.Optional[float] = None, + elevenlabs_similarity_boost: typing.Optional[float] = None, + elevenlabs_style: typing.Optional[float] = None, + elevenlabs_speaker_boost: typing.Optional[bool] = None, + azure_voice_name: typing.Optional[str] = None, + openai_voice_name: typing.Optional[PostV3VideoBotsAsyncFormRequestOpenaiVoiceName] = None, + openai_tts_model: typing.Optional[PostV3VideoBotsAsyncFormRequestOpenaiTtsModel] = None, + input_face: typing.Optional[str] = None, + face_padding_top: typing.Optional[int] = None, + face_padding_bottom: typing.Optional[int] = None, + face_padding_left: typing.Optional[int] = None, + face_padding_right: typing.Optional[int] = None, + sadtalker_settings: typing.Optional[SadTalkerSettings] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> 
VideoBotsPageStatusResponse: """ Parameters ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. + functions : typing.Optional[typing.List[RecipeFunction]] - Returns - ------- - typing.Any - Successful Response + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - Examples - -------- - import asyncio + input_prompt : typing.Optional[str] - from gooey import AsyncGooey + input_audio : typing.Optional[str] - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + input_images : typing.Optional[typing.List[str]] + input_documents : typing.Optional[typing.List[str]] - async def main() -> None: - await client.post_v3embeddings_async() + doc_extract_url : typing.Optional[str] + Select a workflow to extract text from documents and images. + messages : typing.Optional[typing.List[ConversationEntry]] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/embeddings/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + bot_script : typing.Optional[str] + + selected_model : typing.Optional[PostV3VideoBotsAsyncFormRequestSelectedModel] - async def post_v3functions_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. + document_model : typing.Optional[str] + When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? 
(via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) - Returns - ------- - typing.Any - Successful Response + task_instructions : typing.Optional[str] - Examples - -------- - import asyncio + query_instructions : typing.Optional[str] - from gooey import AsyncGooey + keyword_instructions : typing.Optional[str] - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + documents : typing.Optional[typing.List[str]] + max_references : typing.Optional[int] - async def main() -> None: - await client.post_v3functions_async() + max_context_words : typing.Optional[int] + scroll_jump : typing.Optional[int] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/functions/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + embedding_model : typing.Optional[PostV3VideoBotsAsyncFormRequestEmbeddingModel] - async def post_v3google_gpt_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. + dense_weight : typing.Optional[float] - Returns - ------- - typing.Any - Successful Response + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
- Examples - -------- - import asyncio - from gooey import AsyncGooey + citation_style : typing.Optional[PostV3VideoBotsAsyncFormRequestCitationStyle] - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + use_url_shortener : typing.Optional[bool] + asr_model : typing.Optional[PostV3VideoBotsAsyncFormRequestAsrModel] + Choose a model to transcribe incoming audio messages to text. - async def main() -> None: - await client.post_v3google_gpt_async() + asr_language : typing.Optional[str] + Choose a language to transcribe incoming audio messages to text. + translation_model : typing.Optional[PostV3VideoBotsAsyncFormRequestTranslationModel] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/google-gpt/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + user_language : typing.Optional[str] + Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. - async def post_v3related_qna_maker_doc_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Any: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
+ input_glossary_document : typing.Optional[str] - Returns - ------- - typing.Any - Successful Response + Translation Glossary for User Langauge -> LLM Language (English) - Examples - -------- - import asyncio - from gooey import AsyncGooey + output_glossary_document : typing.Optional[str] - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + Translation Glossary for LLM Language (English) -> User Langauge - async def main() -> None: - await client.post_v3related_qna_maker_doc_async() + lipsync_model : typing.Optional[PostV3VideoBotsAsyncFormRequestLipsyncModel] + tools : typing.Optional[typing.List[LlmTools]] + Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/related-qna-maker-doc/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + avoid_repetition : typing.Optional[bool] - async def post_v3related_qna_maker_async( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Any: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
+ num_outputs : typing.Optional[int] - Returns - ------- - typing.Any - Successful Response + quality : typing.Optional[float] - Examples - -------- - import asyncio + max_tokens : typing.Optional[int] - from gooey import AsyncGooey + sampling_temperature : typing.Optional[float] - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + response_format_type : typing.Optional[PostV3VideoBotsAsyncFormRequestResponseFormatType] + tts_provider : typing.Optional[PostV3VideoBotsAsyncFormRequestTtsProvider] - async def main() -> None: - await client.post_v3related_qna_maker_async() + uberduck_voice_name : typing.Optional[str] + uberduck_speaking_rate : typing.Optional[float] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/related-qna-maker/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + google_voice_name : typing.Optional[str] - async def post_v3text2audio_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
+ google_speaking_rate : typing.Optional[float] - Returns - ------- - typing.Any - Successful Response + google_pitch : typing.Optional[float] - Examples - -------- - import asyncio + bark_history_prompt : typing.Optional[str] - from gooey import AsyncGooey + elevenlabs_voice_name : typing.Optional[str] + Use `elevenlabs_voice_id` instead - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + elevenlabs_api_key : typing.Optional[str] + elevenlabs_voice_id : typing.Optional[str] - async def main() -> None: - await client.post_v3text2audio_async() + elevenlabs_model : typing.Optional[str] + elevenlabs_stability : typing.Optional[float] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/text2audio/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + elevenlabs_similarity_boost : typing.Optional[float] - async def post_v3translate_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: - """ - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
+ elevenlabs_style : typing.Optional[float] - Returns - ------- - typing.Any - Successful Response + elevenlabs_speaker_boost : typing.Optional[bool] - Examples - -------- - import asyncio + azure_voice_name : typing.Optional[str] - from gooey import AsyncGooey + openai_voice_name : typing.Optional[PostV3VideoBotsAsyncFormRequestOpenaiVoiceName] - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) + openai_tts_model : typing.Optional[PostV3VideoBotsAsyncFormRequestOpenaiTtsModel] + input_face : typing.Optional[str] - async def main() -> None: - await client.post_v3translate_async() + face_padding_top : typing.Optional[int] + face_padding_bottom : typing.Optional[int] - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/translate/async", method="POST", request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + face_padding_left : typing.Optional[int] + + face_padding_right : typing.Optional[int] + + sadtalker_settings : typing.Optional[SadTalkerSettings] + + settings : typing.Optional[RunSettings] - async def post_v3video_bots_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.Any: - """ - Parameters - ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Any + VideoBotsPageStatusResponse Successful Response Examples @@ -10339,17 +17230,90 @@ async def post_v3video_bots_async(self, *, request_options: typing.Optional[Requ async def main() -> None: - await client.post_v3video_bots_async() + await client.post_v3video_bots_async_form() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/video-bots/async", method="POST", request_options=request_options + "v3/video-bots/async/form", + method="POST", + data={ + "functions": functions, + "variables": variables, + "input_prompt": input_prompt, + "input_audio": input_audio, + "input_images": input_images, + "input_documents": input_documents, + "doc_extract_url": doc_extract_url, + "messages": messages, + "bot_script": bot_script, + "selected_model": selected_model, + "document_model": document_model, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "keyword_instructions": keyword_instructions, + "documents": documents, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "citation_style": citation_style, + "use_url_shortener": use_url_shortener, + "asr_model": asr_model, + "asr_language": asr_language, + "translation_model": translation_model, + "user_language": user_language, + "input_glossary_document": input_glossary_document, + "output_glossary_document": output_glossary_document, + "lipsync_model": lipsync_model, + "tools": tools, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "tts_provider": tts_provider, + "uberduck_voice_name": uberduck_voice_name, + "uberduck_speaking_rate": uberduck_speaking_rate, + "google_voice_name": google_voice_name, + "google_speaking_rate": 
google_speaking_rate, + "google_pitch": google_pitch, + "bark_history_prompt": bark_history_prompt, + "elevenlabs_voice_name": elevenlabs_voice_name, + "elevenlabs_api_key": elevenlabs_api_key, + "elevenlabs_voice_id": elevenlabs_voice_id, + "elevenlabs_model": elevenlabs_model, + "elevenlabs_stability": elevenlabs_stability, + "elevenlabs_similarity_boost": elevenlabs_similarity_boost, + "elevenlabs_style": elevenlabs_style, + "elevenlabs_speaker_boost": elevenlabs_speaker_boost, + "azure_voice_name": azure_voice_name, + "openai_voice_name": openai_voice_name, + "openai_tts_model": openai_tts_model, + "input_face": input_face, + "face_padding_top": face_padding_top, + "face_padding_bottom": face_padding_bottom, + "face_padding_left": face_padding_left, + "face_padding_right": face_padding_right, + "sadtalker_settings": sadtalker_settings, + "settings": settings, + }, + files={}, + request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + VideoBotsPageStatusResponse, + parse_obj_as( + type_=VideoBotsPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) diff --git a/src/gooey/compare_ai_image_generators/__init__.py b/src/gooey/compare_ai_image_generators/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/compare_ai_image_generators/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/compare_ai_image_generators/client.py b/src/gooey/compare_ai_image_generators/client.py deleted file mode 100644 index f2fb2fa..0000000 --- a/src/gooey/compare_ai_image_generators/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.compare_text2img_page_status_response import CompareText2ImgPageStatusResponse -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError - - -class CompareAiImageGeneratorsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_compare_text2img( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> CompareText2ImgPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - CompareText2ImgPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.compare_ai_image_generators.status_compare_text2img( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/CompareText2Img/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(CompareText2ImgPageStatusResponse, parse_obj_as(type_=CompareText2ImgPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncCompareAiImageGeneratorsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_compare_text2img( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> CompareText2ImgPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - CompareText2ImgPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.compare_ai_image_generators.status_compare_text2img( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/CompareText2Img/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(CompareText2ImgPageStatusResponse, parse_obj_as(type_=CompareText2ImgPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/compare_ai_image_upscalers/__init__.py b/src/gooey/compare_ai_image_upscalers/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/compare_ai_image_upscalers/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/compare_ai_image_upscalers/client.py b/src/gooey/compare_ai_image_upscalers/client.py deleted file mode 100644 index 259bad1..0000000 --- a/src/gooey/compare_ai_image_upscalers/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.compare_upscaler_page_status_response import CompareUpscalerPageStatusResponse -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError - - -class CompareAiImageUpscalersClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_compare_ai_upscalers( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> CompareUpscalerPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - CompareUpscalerPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.compare_ai_image_upscalers.status_compare_ai_upscalers( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/compare-ai-upscalers/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(CompareUpscalerPageStatusResponse, parse_obj_as(type_=CompareUpscalerPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncCompareAiImageUpscalersClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_compare_ai_upscalers( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> CompareUpscalerPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - CompareUpscalerPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.compare_ai_image_upscalers.status_compare_ai_upscalers( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/compare-ai-upscalers/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(CompareUpscalerPageStatusResponse, parse_obj_as(type_=CompareUpscalerPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/compare_ai_translations/__init__.py b/src/gooey/compare_ai_translations/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/compare_ai_translations/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/compare_ai_translations/client.py b/src/gooey/compare_ai_translations/client.py deleted file mode 100644 index 54852b0..0000000 --- a/src/gooey/compare_ai_translations/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.translation_page_status_response import TranslationPageStatusResponse - - -class CompareAiTranslationsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_translate( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> TranslationPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - TranslationPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.compare_ai_translations.status_translate( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/translate/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(TranslationPageStatusResponse, parse_obj_as(type_=TranslationPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncCompareAiTranslationsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_translate( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> TranslationPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - TranslationPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.compare_ai_translations.status_translate( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/translate/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(TranslationPageStatusResponse, parse_obj_as(type_=TranslationPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/compare_ai_voice_generators/__init__.py b/src/gooey/compare_ai_voice_generators/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/compare_ai_voice_generators/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/compare_ai_voice_generators/client.py b/src/gooey/compare_ai_voice_generators/client.py deleted file mode 100644 index 6b0f88c..0000000 --- a/src/gooey/compare_ai_voice_generators/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.text_to_speech_page_status_response import TextToSpeechPageStatusResponse - - -class CompareAiVoiceGeneratorsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_text_to_speech( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> TextToSpeechPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - TextToSpeechPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.compare_ai_voice_generators.status_text_to_speech( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/TextToSpeech/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(TextToSpeechPageStatusResponse, parse_obj_as(type_=TextToSpeechPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncCompareAiVoiceGeneratorsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_text_to_speech( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> TextToSpeechPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - TextToSpeechPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.compare_ai_voice_generators.status_text_to_speech( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/TextToSpeech/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(TextToSpeechPageStatusResponse, parse_obj_as(type_=TextToSpeechPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/copilot_for_your_enterprise/__init__.py b/src/gooey/copilot_for_your_enterprise/__init__.py index 28df997..f1637db 100644 --- a/src/gooey/copilot_for_your_enterprise/__init__.py +++ b/src/gooey/copilot_for_your_enterprise/__init__.py @@ -1,27 +1,27 @@ # This file was auto-generated by Fern from our API Definition. 
from .types import ( - AsyncFormVideoBotsRequestAsrModel, - AsyncFormVideoBotsRequestCitationStyle, - AsyncFormVideoBotsRequestEmbeddingModel, - AsyncFormVideoBotsRequestLipsyncModel, - AsyncFormVideoBotsRequestOpenaiTtsModel, - AsyncFormVideoBotsRequestOpenaiVoiceName, - AsyncFormVideoBotsRequestResponseFormatType, - AsyncFormVideoBotsRequestSelectedModel, - AsyncFormVideoBotsRequestTranslationModel, - AsyncFormVideoBotsRequestTtsProvider, + VideoBotsPageRequestAsrModel, + VideoBotsPageRequestCitationStyle, + VideoBotsPageRequestEmbeddingModel, + VideoBotsPageRequestLipsyncModel, + VideoBotsPageRequestOpenaiTtsModel, + VideoBotsPageRequestOpenaiVoiceName, + VideoBotsPageRequestResponseFormatType, + VideoBotsPageRequestSelectedModel, + VideoBotsPageRequestTranslationModel, + VideoBotsPageRequestTtsProvider, ) __all__ = [ - "AsyncFormVideoBotsRequestAsrModel", - "AsyncFormVideoBotsRequestCitationStyle", - "AsyncFormVideoBotsRequestEmbeddingModel", - "AsyncFormVideoBotsRequestLipsyncModel", - "AsyncFormVideoBotsRequestOpenaiTtsModel", - "AsyncFormVideoBotsRequestOpenaiVoiceName", - "AsyncFormVideoBotsRequestResponseFormatType", - "AsyncFormVideoBotsRequestSelectedModel", - "AsyncFormVideoBotsRequestTranslationModel", - "AsyncFormVideoBotsRequestTtsProvider", + "VideoBotsPageRequestAsrModel", + "VideoBotsPageRequestCitationStyle", + "VideoBotsPageRequestEmbeddingModel", + "VideoBotsPageRequestLipsyncModel", + "VideoBotsPageRequestOpenaiTtsModel", + "VideoBotsPageRequestOpenaiVoiceName", + "VideoBotsPageRequestResponseFormatType", + "VideoBotsPageRequestSelectedModel", + "VideoBotsPageRequestTranslationModel", + "VideoBotsPageRequestTtsProvider", ] diff --git a/src/gooey/copilot_for_your_enterprise/client.py b/src/gooey/copilot_for_your_enterprise/client.py index c5d0114..5668f9f 100644 --- a/src/gooey/copilot_for_your_enterprise/client.py +++ b/src/gooey/copilot_for_your_enterprise/client.py @@ -1,36 +1,33 @@ # This file was auto-generated by Fern from our API 
Definition. import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.bad_request_error import BadRequestError -from ..errors.internal_server_error import InternalServerError -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..core.client_wrapper import SyncClientWrapper +from ..types.recipe_function import RecipeFunction from ..types.conversation_entry import ConversationEntry -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError +from .types.video_bots_page_request_selected_model import VideoBotsPageRequestSelectedModel +from .types.video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel +from .types.video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle +from .types.video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel +from .types.video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel +from .types.video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel from ..types.llm_tools import LlmTools -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings +from .types.video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType +from .types.video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider +from .types.video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName +from 
.types.video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel from ..types.sad_talker_settings import SadTalkerSettings +from ..types.run_settings import RunSettings +from ..core.request_options import RequestOptions from ..types.video_bots_page_status_response import VideoBotsPageStatusResponse -from .types.async_form_video_bots_request_asr_model import AsyncFormVideoBotsRequestAsrModel -from .types.async_form_video_bots_request_citation_style import AsyncFormVideoBotsRequestCitationStyle -from .types.async_form_video_bots_request_embedding_model import AsyncFormVideoBotsRequestEmbeddingModel -from .types.async_form_video_bots_request_lipsync_model import AsyncFormVideoBotsRequestLipsyncModel -from .types.async_form_video_bots_request_openai_tts_model import AsyncFormVideoBotsRequestOpenaiTtsModel -from .types.async_form_video_bots_request_openai_voice_name import AsyncFormVideoBotsRequestOpenaiVoiceName -from .types.async_form_video_bots_request_response_format_type import AsyncFormVideoBotsRequestResponseFormatType -from .types.async_form_video_bots_request_selected_model import AsyncFormVideoBotsRequestSelectedModel -from .types.async_form_video_bots_request_translation_model import AsyncFormVideoBotsRequestTranslationModel -from .types.async_form_video_bots_request_tts_provider import AsyncFormVideoBotsRequestTtsProvider +from ..core.pydantic_utilities import parse_obj_as +from ..errors.payment_required_error import PaymentRequiredError +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from ..errors.too_many_requests_error import TooManyRequestsError +from ..types.generic_error_response import GenericErrorResponse +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
@@ -40,99 +37,99 @@ class CopilotForYourEnterpriseClient: def __init__(self, *, client_wrapper: SyncClientWrapper): self._client_wrapper = client_wrapper - def async_form_video_bots( + def async_video_bots( self, *, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - input_prompt: typing.Optional[str] = None, - input_audio: typing.Optional[str] = None, - input_images: typing.Optional[typing.List[str]] = None, - input_documents: typing.Optional[typing.List[str]] = None, - doc_extract_url: typing.Optional[str] = None, - messages: typing.Optional[typing.List[ConversationEntry]] = None, - bot_script: typing.Optional[str] = None, - selected_model: typing.Optional[AsyncFormVideoBotsRequestSelectedModel] = None, - document_model: typing.Optional[str] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - keyword_instructions: typing.Optional[str] = None, - documents: typing.Optional[typing.List[str]] = None, - max_references: typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - embedding_model: typing.Optional[AsyncFormVideoBotsRequestEmbeddingModel] = None, - dense_weight: typing.Optional[float] = None, - citation_style: typing.Optional[AsyncFormVideoBotsRequestCitationStyle] = None, - use_url_shortener: typing.Optional[bool] = None, - asr_model: typing.Optional[AsyncFormVideoBotsRequestAsrModel] = None, - asr_language: typing.Optional[str] = None, - translation_model: typing.Optional[AsyncFormVideoBotsRequestTranslationModel] = None, - user_language: typing.Optional[str] = None, - input_glossary_document: typing.Optional[str] = None, - output_glossary_document: typing.Optional[str] = None, - lipsync_model: typing.Optional[AsyncFormVideoBotsRequestLipsyncModel] = None, - tools: typing.Optional[typing.List[LlmTools]] = None, 
- avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[AsyncFormVideoBotsRequestResponseFormatType] = None, - tts_provider: typing.Optional[AsyncFormVideoBotsRequestTtsProvider] = None, - uberduck_voice_name: typing.Optional[str] = None, - uberduck_speaking_rate: typing.Optional[float] = None, - google_voice_name: typing.Optional[str] = None, - google_speaking_rate: typing.Optional[float] = None, - google_pitch: typing.Optional[float] = None, - bark_history_prompt: typing.Optional[str] = None, - elevenlabs_voice_name: typing.Optional[str] = None, - elevenlabs_api_key: typing.Optional[str] = None, - elevenlabs_voice_id: typing.Optional[str] = None, - elevenlabs_model: typing.Optional[str] = None, - elevenlabs_stability: typing.Optional[float] = None, - elevenlabs_similarity_boost: typing.Optional[float] = None, - elevenlabs_style: typing.Optional[float] = None, - elevenlabs_speaker_boost: typing.Optional[bool] = None, - azure_voice_name: typing.Optional[str] = None, - openai_voice_name: typing.Optional[AsyncFormVideoBotsRequestOpenaiVoiceName] = None, - openai_tts_model: typing.Optional[AsyncFormVideoBotsRequestOpenaiTtsModel] = None, - input_face: typing.Optional[str] = None, - face_padding_top: typing.Optional[int] = None, - face_padding_bottom: typing.Optional[int] = None, - face_padding_left: typing.Optional[int] = None, - face_padding_right: typing.Optional[int] = None, - sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + input_prompt: typing.Optional[str] = OMIT, + 
input_audio: typing.Optional[str] = OMIT, + input_images: typing.Optional[typing.Sequence[str]] = OMIT, + input_documents: typing.Optional[typing.Sequence[str]] = OMIT, + doc_extract_url: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT, + bot_script: typing.Optional[str] = OMIT, + selected_model: typing.Optional[VideoBotsPageRequestSelectedModel] = OMIT, + document_model: typing.Optional[str] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + keyword_instructions: typing.Optional[str] = OMIT, + documents: typing.Optional[typing.Sequence[str]] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = OMIT, + use_url_shortener: typing.Optional[bool] = OMIT, + asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = OMIT, + asr_language: typing.Optional[str] = OMIT, + translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = OMIT, + user_language: typing.Optional[str] = OMIT, + input_glossary_document: typing.Optional[str] = OMIT, + output_glossary_document: typing.Optional[str] = OMIT, + lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = OMIT, + tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[VideoBotsPageRequestResponseFormatType] = OMIT, + tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = OMIT, + uberduck_voice_name: typing.Optional[str] = OMIT, 
+ uberduck_speaking_rate: typing.Optional[float] = OMIT, + google_voice_name: typing.Optional[str] = OMIT, + google_speaking_rate: typing.Optional[float] = OMIT, + google_pitch: typing.Optional[float] = OMIT, + bark_history_prompt: typing.Optional[str] = OMIT, + elevenlabs_voice_name: typing.Optional[str] = OMIT, + elevenlabs_api_key: typing.Optional[str] = OMIT, + elevenlabs_voice_id: typing.Optional[str] = OMIT, + elevenlabs_model: typing.Optional[str] = OMIT, + elevenlabs_stability: typing.Optional[float] = OMIT, + elevenlabs_similarity_boost: typing.Optional[float] = OMIT, + elevenlabs_style: typing.Optional[float] = OMIT, + elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, + azure_voice_name: typing.Optional[str] = OMIT, + openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = OMIT, + openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = OMIT, + input_face: typing.Optional[str] = OMIT, + face_padding_top: typing.Optional[int] = OMIT, + face_padding_bottom: typing.Optional[int] = OMIT, + face_padding_left: typing.Optional[int] = OMIT, + face_padding_right: typing.Optional[int] = OMIT, + sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> VideoBotsPageStatusResponse: """ Parameters ---------- example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments input_prompt : typing.Optional[str] input_audio : typing.Optional[str] - input_images : typing.Optional[typing.List[str]] + input_images : typing.Optional[typing.Sequence[str]] - input_documents : typing.Optional[typing.List[str]] + 
input_documents : typing.Optional[typing.Sequence[str]] doc_extract_url : typing.Optional[str] Select a workflow to extract text from documents and images. - messages : typing.Optional[typing.List[ConversationEntry]] + messages : typing.Optional[typing.Sequence[ConversationEntry]] bot_script : typing.Optional[str] - selected_model : typing.Optional[AsyncFormVideoBotsRequestSelectedModel] + selected_model : typing.Optional[VideoBotsPageRequestSelectedModel] document_model : typing.Optional[str] When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) @@ -143,7 +140,7 @@ def async_form_video_bots( keyword_instructions : typing.Optional[str] - documents : typing.Optional[typing.List[str]] + documents : typing.Optional[typing.Sequence[str]] max_references : typing.Optional[int] @@ -151,7 +148,7 @@ def async_form_video_bots( scroll_jump : typing.Optional[int] - embedding_model : typing.Optional[AsyncFormVideoBotsRequestEmbeddingModel] + embedding_model : typing.Optional[VideoBotsPageRequestEmbeddingModel] dense_weight : typing.Optional[float] @@ -159,17 +156,17 @@ def async_form_video_bots( Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - citation_style : typing.Optional[AsyncFormVideoBotsRequestCitationStyle] + citation_style : typing.Optional[VideoBotsPageRequestCitationStyle] use_url_shortener : typing.Optional[bool] - asr_model : typing.Optional[AsyncFormVideoBotsRequestAsrModel] + asr_model : typing.Optional[VideoBotsPageRequestAsrModel] Choose a model to transcribe incoming audio messages to text. asr_language : typing.Optional[str] Choose a language to transcribe incoming audio messages to text. 
- translation_model : typing.Optional[AsyncFormVideoBotsRequestTranslationModel] + translation_model : typing.Optional[VideoBotsPageRequestTranslationModel] user_language : typing.Optional[str] Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. @@ -184,9 +181,9 @@ def async_form_video_bots( Translation Glossary for LLM Language (English) -> User Langauge - lipsync_model : typing.Optional[AsyncFormVideoBotsRequestLipsyncModel] + lipsync_model : typing.Optional[VideoBotsPageRequestLipsyncModel] - tools : typing.Optional[typing.List[LlmTools]] + tools : typing.Optional[typing.Sequence[LlmTools]] Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). avoid_repetition : typing.Optional[bool] @@ -199,9 +196,9 @@ def async_form_video_bots( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[AsyncFormVideoBotsRequestResponseFormatType] + response_format_type : typing.Optional[VideoBotsPageRequestResponseFormatType] - tts_provider : typing.Optional[AsyncFormVideoBotsRequestTtsProvider] + tts_provider : typing.Optional[VideoBotsPageRequestTtsProvider] uberduck_voice_name : typing.Optional[str] @@ -234,9 +231,9 @@ def async_form_video_bots( azure_voice_name : typing.Optional[str] - openai_voice_name : typing.Optional[AsyncFormVideoBotsRequestOpenaiVoiceName] + openai_voice_name : typing.Optional[VideoBotsPageRequestOpenaiVoiceName] - openai_tts_model : typing.Optional[AsyncFormVideoBotsRequestOpenaiTtsModel] + openai_tts_model : typing.Optional[VideoBotsPageRequestOpenaiTtsModel] input_face : typing.Optional[str] @@ -267,13 +264,15 @@ def async_form_video_bots( client = Gooey( api_key="YOUR_API_KEY", ) - client.copilot_for_your_enterprise.async_form_video_bots() + client.copilot_for_your_enterprise.async_video_bots() """ _response = 
self._client_wrapper.httpx_client.request( - "v3/video-bots/async/form", + "v3/video-bots/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "input_prompt": input_prompt, @@ -336,82 +335,47 @@ def async_form_video_bots( "sadtalker_settings": sadtalker_settings, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(VideoBotsPageStatusResponse, parse_obj_as(type_=VideoBotsPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + return typing.cast( + VideoBotsPageStatusResponse, + parse_obj_as( + type_=VideoBotsPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_video_bots( - self, *, run_id: str, request_options: 
typing.Optional[RequestOptions] = None - ) -> VideoBotsPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - VideoBotsPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.copilot_for_your_enterprise.status_video_bots( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/video-bots/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(VideoBotsPageStatusResponse, parse_obj_as(type_=VideoBotsPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -423,99 +387,99 @@ class AsyncCopilotForYourEnterpriseClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): self._client_wrapper = client_wrapper - async 
def async_form_video_bots( + async def async_video_bots( self, *, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - input_prompt: typing.Optional[str] = None, - input_audio: typing.Optional[str] = None, - input_images: typing.Optional[typing.List[str]] = None, - input_documents: typing.Optional[typing.List[str]] = None, - doc_extract_url: typing.Optional[str] = None, - messages: typing.Optional[typing.List[ConversationEntry]] = None, - bot_script: typing.Optional[str] = None, - selected_model: typing.Optional[AsyncFormVideoBotsRequestSelectedModel] = None, - document_model: typing.Optional[str] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - keyword_instructions: typing.Optional[str] = None, - documents: typing.Optional[typing.List[str]] = None, - max_references: typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - embedding_model: typing.Optional[AsyncFormVideoBotsRequestEmbeddingModel] = None, - dense_weight: typing.Optional[float] = None, - citation_style: typing.Optional[AsyncFormVideoBotsRequestCitationStyle] = None, - use_url_shortener: typing.Optional[bool] = None, - asr_model: typing.Optional[AsyncFormVideoBotsRequestAsrModel] = None, - asr_language: typing.Optional[str] = None, - translation_model: typing.Optional[AsyncFormVideoBotsRequestTranslationModel] = None, - user_language: typing.Optional[str] = None, - input_glossary_document: typing.Optional[str] = None, - output_glossary_document: typing.Optional[str] = None, - lipsync_model: typing.Optional[AsyncFormVideoBotsRequestLipsyncModel] = None, - tools: typing.Optional[typing.List[LlmTools]] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: 
typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[AsyncFormVideoBotsRequestResponseFormatType] = None, - tts_provider: typing.Optional[AsyncFormVideoBotsRequestTtsProvider] = None, - uberduck_voice_name: typing.Optional[str] = None, - uberduck_speaking_rate: typing.Optional[float] = None, - google_voice_name: typing.Optional[str] = None, - google_speaking_rate: typing.Optional[float] = None, - google_pitch: typing.Optional[float] = None, - bark_history_prompt: typing.Optional[str] = None, - elevenlabs_voice_name: typing.Optional[str] = None, - elevenlabs_api_key: typing.Optional[str] = None, - elevenlabs_voice_id: typing.Optional[str] = None, - elevenlabs_model: typing.Optional[str] = None, - elevenlabs_stability: typing.Optional[float] = None, - elevenlabs_similarity_boost: typing.Optional[float] = None, - elevenlabs_style: typing.Optional[float] = None, - elevenlabs_speaker_boost: typing.Optional[bool] = None, - azure_voice_name: typing.Optional[str] = None, - openai_voice_name: typing.Optional[AsyncFormVideoBotsRequestOpenaiVoiceName] = None, - openai_tts_model: typing.Optional[AsyncFormVideoBotsRequestOpenaiTtsModel] = None, - input_face: typing.Optional[str] = None, - face_padding_top: typing.Optional[int] = None, - face_padding_bottom: typing.Optional[int] = None, - face_padding_left: typing.Optional[int] = None, - face_padding_right: typing.Optional[int] = None, - sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + input_prompt: typing.Optional[str] = OMIT, + input_audio: typing.Optional[str] = OMIT, + input_images: typing.Optional[typing.Sequence[str]] = OMIT, + input_documents: typing.Optional[typing.Sequence[str]] 
= OMIT, + doc_extract_url: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT, + bot_script: typing.Optional[str] = OMIT, + selected_model: typing.Optional[VideoBotsPageRequestSelectedModel] = OMIT, + document_model: typing.Optional[str] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + keyword_instructions: typing.Optional[str] = OMIT, + documents: typing.Optional[typing.Sequence[str]] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = OMIT, + use_url_shortener: typing.Optional[bool] = OMIT, + asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = OMIT, + asr_language: typing.Optional[str] = OMIT, + translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = OMIT, + user_language: typing.Optional[str] = OMIT, + input_glossary_document: typing.Optional[str] = OMIT, + output_glossary_document: typing.Optional[str] = OMIT, + lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = OMIT, + tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[VideoBotsPageRequestResponseFormatType] = OMIT, + tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = OMIT, + uberduck_voice_name: typing.Optional[str] = OMIT, + uberduck_speaking_rate: typing.Optional[float] = OMIT, + google_voice_name: typing.Optional[str] = OMIT, + google_speaking_rate: typing.Optional[float] = 
OMIT, + google_pitch: typing.Optional[float] = OMIT, + bark_history_prompt: typing.Optional[str] = OMIT, + elevenlabs_voice_name: typing.Optional[str] = OMIT, + elevenlabs_api_key: typing.Optional[str] = OMIT, + elevenlabs_voice_id: typing.Optional[str] = OMIT, + elevenlabs_model: typing.Optional[str] = OMIT, + elevenlabs_stability: typing.Optional[float] = OMIT, + elevenlabs_similarity_boost: typing.Optional[float] = OMIT, + elevenlabs_style: typing.Optional[float] = OMIT, + elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, + azure_voice_name: typing.Optional[str] = OMIT, + openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = OMIT, + openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = OMIT, + input_face: typing.Optional[str] = OMIT, + face_padding_top: typing.Optional[int] = OMIT, + face_padding_bottom: typing.Optional[int] = OMIT, + face_padding_left: typing.Optional[int] = OMIT, + face_padding_right: typing.Optional[int] = OMIT, + sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> VideoBotsPageStatusResponse: """ Parameters ---------- example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments input_prompt : typing.Optional[str] input_audio : typing.Optional[str] - input_images : typing.Optional[typing.List[str]] + input_images : typing.Optional[typing.Sequence[str]] - input_documents : typing.Optional[typing.List[str]] + input_documents : typing.Optional[typing.Sequence[str]] doc_extract_url : typing.Optional[str] Select a workflow to extract text from documents and images. 
- messages : typing.Optional[typing.List[ConversationEntry]] + messages : typing.Optional[typing.Sequence[ConversationEntry]] bot_script : typing.Optional[str] - selected_model : typing.Optional[AsyncFormVideoBotsRequestSelectedModel] + selected_model : typing.Optional[VideoBotsPageRequestSelectedModel] document_model : typing.Optional[str] When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) @@ -526,7 +490,7 @@ async def async_form_video_bots( keyword_instructions : typing.Optional[str] - documents : typing.Optional[typing.List[str]] + documents : typing.Optional[typing.Sequence[str]] max_references : typing.Optional[int] @@ -534,7 +498,7 @@ async def async_form_video_bots( scroll_jump : typing.Optional[int] - embedding_model : typing.Optional[AsyncFormVideoBotsRequestEmbeddingModel] + embedding_model : typing.Optional[VideoBotsPageRequestEmbeddingModel] dense_weight : typing.Optional[float] @@ -542,17 +506,17 @@ async def async_form_video_bots( Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - citation_style : typing.Optional[AsyncFormVideoBotsRequestCitationStyle] + citation_style : typing.Optional[VideoBotsPageRequestCitationStyle] use_url_shortener : typing.Optional[bool] - asr_model : typing.Optional[AsyncFormVideoBotsRequestAsrModel] + asr_model : typing.Optional[VideoBotsPageRequestAsrModel] Choose a model to transcribe incoming audio messages to text. asr_language : typing.Optional[str] Choose a language to transcribe incoming audio messages to text. 
- translation_model : typing.Optional[AsyncFormVideoBotsRequestTranslationModel] + translation_model : typing.Optional[VideoBotsPageRequestTranslationModel] user_language : typing.Optional[str] Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. @@ -567,9 +531,9 @@ async def async_form_video_bots( Translation Glossary for LLM Language (English) -> User Langauge - lipsync_model : typing.Optional[AsyncFormVideoBotsRequestLipsyncModel] + lipsync_model : typing.Optional[VideoBotsPageRequestLipsyncModel] - tools : typing.Optional[typing.List[LlmTools]] + tools : typing.Optional[typing.Sequence[LlmTools]] Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). avoid_repetition : typing.Optional[bool] @@ -582,9 +546,9 @@ async def async_form_video_bots( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[AsyncFormVideoBotsRequestResponseFormatType] + response_format_type : typing.Optional[VideoBotsPageRequestResponseFormatType] - tts_provider : typing.Optional[AsyncFormVideoBotsRequestTtsProvider] + tts_provider : typing.Optional[VideoBotsPageRequestTtsProvider] uberduck_voice_name : typing.Optional[str] @@ -617,9 +581,9 @@ async def async_form_video_bots( azure_voice_name : typing.Optional[str] - openai_voice_name : typing.Optional[AsyncFormVideoBotsRequestOpenaiVoiceName] + openai_voice_name : typing.Optional[VideoBotsPageRequestOpenaiVoiceName] - openai_tts_model : typing.Optional[AsyncFormVideoBotsRequestOpenaiTtsModel] + openai_tts_model : typing.Optional[VideoBotsPageRequestOpenaiTtsModel] input_face : typing.Optional[str] @@ -655,16 +619,18 @@ async def async_form_video_bots( async def main() -> None: - await client.copilot_for_your_enterprise.async_form_video_bots() + await client.copilot_for_your_enterprise.async_video_bots() 
asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/video-bots/async/form", + "v3/video-bots/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "input_prompt": input_prompt, @@ -727,90 +693,47 @@ async def main() -> None: "sadtalker_settings": sadtalker_settings, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(VideoBotsPageStatusResponse, parse_obj_as(type_=VideoBotsPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + return typing.cast( + VideoBotsPageStatusResponse, + parse_obj_as( + type_=VideoBotsPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_video_bots( - 
self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> VideoBotsPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - VideoBotsPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.copilot_for_your_enterprise.status_video_bots( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/video-bots/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(VideoBotsPageStatusResponse, parse_obj_as(type_=VideoBotsPageStatusResponse, object_=_response.json())) # type: ignore if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: diff --git 
a/src/gooey/copilot_for_your_enterprise/types/__init__.py b/src/gooey/copilot_for_your_enterprise/types/__init__.py index a638966..dd7ed8b 100644 --- a/src/gooey/copilot_for_your_enterprise/types/__init__.py +++ b/src/gooey/copilot_for_your_enterprise/types/__init__.py @@ -1,25 +1,25 @@ # This file was auto-generated by Fern from our API Definition. -from .async_form_video_bots_request_asr_model import AsyncFormVideoBotsRequestAsrModel -from .async_form_video_bots_request_citation_style import AsyncFormVideoBotsRequestCitationStyle -from .async_form_video_bots_request_embedding_model import AsyncFormVideoBotsRequestEmbeddingModel -from .async_form_video_bots_request_lipsync_model import AsyncFormVideoBotsRequestLipsyncModel -from .async_form_video_bots_request_openai_tts_model import AsyncFormVideoBotsRequestOpenaiTtsModel -from .async_form_video_bots_request_openai_voice_name import AsyncFormVideoBotsRequestOpenaiVoiceName -from .async_form_video_bots_request_response_format_type import AsyncFormVideoBotsRequestResponseFormatType -from .async_form_video_bots_request_selected_model import AsyncFormVideoBotsRequestSelectedModel -from .async_form_video_bots_request_translation_model import AsyncFormVideoBotsRequestTranslationModel -from .async_form_video_bots_request_tts_provider import AsyncFormVideoBotsRequestTtsProvider +from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel +from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle +from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel +from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel +from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel +from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName +from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType +from 
.video_bots_page_request_selected_model import VideoBotsPageRequestSelectedModel +from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel +from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider __all__ = [ - "AsyncFormVideoBotsRequestAsrModel", - "AsyncFormVideoBotsRequestCitationStyle", - "AsyncFormVideoBotsRequestEmbeddingModel", - "AsyncFormVideoBotsRequestLipsyncModel", - "AsyncFormVideoBotsRequestOpenaiTtsModel", - "AsyncFormVideoBotsRequestOpenaiVoiceName", - "AsyncFormVideoBotsRequestResponseFormatType", - "AsyncFormVideoBotsRequestSelectedModel", - "AsyncFormVideoBotsRequestTranslationModel", - "AsyncFormVideoBotsRequestTtsProvider", + "VideoBotsPageRequestAsrModel", + "VideoBotsPageRequestCitationStyle", + "VideoBotsPageRequestEmbeddingModel", + "VideoBotsPageRequestLipsyncModel", + "VideoBotsPageRequestOpenaiTtsModel", + "VideoBotsPageRequestOpenaiVoiceName", + "VideoBotsPageRequestResponseFormatType", + "VideoBotsPageRequestSelectedModel", + "VideoBotsPageRequestTranslationModel", + "VideoBotsPageRequestTtsProvider", ] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_lipsync_model.py b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_lipsync_model.py deleted file mode 100644 index 88f876c..0000000 --- a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_lipsync_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AsyncFormVideoBotsRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_openai_tts_model.py b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_openai_tts_model.py deleted file mode 100644 index 66ac856..0000000 --- a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_openai_tts_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AsyncFormVideoBotsRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_response_format_type.py b/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_response_format_type.py deleted file mode 100644 index fa42a29..0000000 --- a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AsyncFormVideoBotsRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_asr_model.py b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_asr_model.py similarity index 90% rename from src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_asr_model.py rename to src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_asr_model.py index 6fb72ad..7db13bc 100644 --- a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_asr_model.py +++ b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_asr_model.py @@ -2,7 +2,7 @@ import typing -AsyncFormVideoBotsRequestAsrModel = typing.Union[ +VideoBotsPageRequestAsrModel = typing.Union[ typing.Literal[ "whisper_large_v2", "whisper_large_v3", diff --git a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_citation_style.py b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_citation_style.py similarity index 89% rename from src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_citation_style.py rename to src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_citation_style.py index 340070c..dc3630b 100644 --- a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_citation_style.py +++ b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_citation_style.py @@ -2,7 +2,7 @@ import typing -AsyncFormVideoBotsRequestCitationStyle = typing.Union[ +VideoBotsPageRequestCitationStyle = typing.Union[ typing.Literal[ "number", "title", diff --git a/src/gooey/types/web_search_llm_request_embedding_model.py b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_embedding_model.py similarity index 87% rename from src/gooey/types/web_search_llm_request_embedding_model.py rename to 
src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_embedding_model.py index 1e9a6c5..19c8972 100644 --- a/src/gooey/types/web_search_llm_request_embedding_model.py +++ b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_embedding_model.py @@ -2,7 +2,7 @@ import typing -WebSearchLlmRequestEmbeddingModel = typing.Union[ +VideoBotsPageRequestEmbeddingModel = typing.Union[ typing.Literal[ "openai_3_large", "openai_3_small", diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_lipsync_model.py b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_lipsync_model.py new file mode 100644 index 0000000..3bb98e0 --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_lipsync_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +VideoBotsPageRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_openai_tts_model.py b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_openai_tts_model.py new file mode 100644 index 0000000..1df5de0 --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_openai_tts_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +VideoBotsPageRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/types/lipsync_tts_request_openai_voice_name.py b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_openai_voice_name.py similarity index 76% rename from src/gooey/types/lipsync_tts_request_openai_voice_name.py rename to src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_openai_voice_name.py index 7ea601b..a08f96c 100644 --- a/src/gooey/types/lipsync_tts_request_openai_voice_name.py +++ b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_openai_voice_name.py @@ -2,6 +2,6 @@ import typing -LipsyncTtsRequestOpenaiVoiceName = typing.Union[ +VideoBotsPageRequestOpenaiVoiceName = typing.Union[ typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any ] diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_response_format_type.py b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_response_format_type.py new file mode 100644 index 0000000..25cc8f1 --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +VideoBotsPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/rag_request_selected_model.py b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_selected_model.py similarity index 95% rename from src/gooey/types/rag_request_selected_model.py rename to src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_selected_model.py index 8904215..e327a7d 100644 --- a/src/gooey/types/rag_request_selected_model.py +++ b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_selected_model.py @@ -2,7 +2,7 @@ import typing -RagRequestSelectedModel = typing.Union[ +VideoBotsPageRequestSelectedModel = typing.Union[ typing.Literal[ "gpt_4_o", "gpt_4_o_mini", diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_translation_model.py b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_translation_model.py new file mode 100644 index 0000000..0373c0c --- /dev/null +++ b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_translation_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +VideoBotsPageRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/text_to_speech_request_tts_provider.py b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_tts_provider.py similarity index 79% rename from src/gooey/types/text_to_speech_request_tts_provider.py rename to src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_tts_provider.py index ffabe23..3fc8d0a 100644 --- a/src/gooey/types/text_to_speech_request_tts_provider.py +++ b/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_tts_provider.py @@ -2,6 +2,6 @@ import typing -TextToSpeechRequestTtsProvider = typing.Union[ +VideoBotsPageRequestTtsProvider = typing.Union[ typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any ] diff --git a/src/gooey/copilot_integrations/__init__.py b/src/gooey/copilot_integrations/__init__.py index 87847bb..8d66257 100644 --- a/src/gooey/copilot_integrations/__init__.py +++ b/src/gooey/copilot_integrations/__init__.py @@ -1,29 +1,29 @@ # This file was auto-generated by Fern from our API Definition. 
from .types import ( - VideoBotsStreamCreateRequestAsrModel, - VideoBotsStreamCreateRequestCitationStyle, - VideoBotsStreamCreateRequestEmbeddingModel, - VideoBotsStreamCreateRequestLipsyncModel, - VideoBotsStreamCreateRequestOpenaiTtsModel, - VideoBotsStreamCreateRequestOpenaiVoiceName, - VideoBotsStreamCreateRequestResponseFormatType, - VideoBotsStreamCreateRequestSelectedModel, - VideoBotsStreamCreateRequestTranslationModel, - VideoBotsStreamCreateRequestTtsProvider, + CreateStreamRequestAsrModel, + CreateStreamRequestCitationStyle, + CreateStreamRequestEmbeddingModel, + CreateStreamRequestLipsyncModel, + CreateStreamRequestOpenaiTtsModel, + CreateStreamRequestOpenaiVoiceName, + CreateStreamRequestResponseFormatType, + CreateStreamRequestSelectedModel, + CreateStreamRequestTranslationModel, + CreateStreamRequestTtsProvider, VideoBotsStreamResponse, ) __all__ = [ - "VideoBotsStreamCreateRequestAsrModel", - "VideoBotsStreamCreateRequestCitationStyle", - "VideoBotsStreamCreateRequestEmbeddingModel", - "VideoBotsStreamCreateRequestLipsyncModel", - "VideoBotsStreamCreateRequestOpenaiTtsModel", - "VideoBotsStreamCreateRequestOpenaiVoiceName", - "VideoBotsStreamCreateRequestResponseFormatType", - "VideoBotsStreamCreateRequestSelectedModel", - "VideoBotsStreamCreateRequestTranslationModel", - "VideoBotsStreamCreateRequestTtsProvider", + "CreateStreamRequestAsrModel", + "CreateStreamRequestCitationStyle", + "CreateStreamRequestEmbeddingModel", + "CreateStreamRequestLipsyncModel", + "CreateStreamRequestOpenaiTtsModel", + "CreateStreamRequestOpenaiVoiceName", + "CreateStreamRequestResponseFormatType", + "CreateStreamRequestSelectedModel", + "CreateStreamRequestTranslationModel", + "CreateStreamRequestTtsProvider", "VideoBotsStreamResponse", ] diff --git a/src/gooey/copilot_integrations/client.py b/src/gooey/copilot_integrations/client.py index 0feaff7..5606096 100644 --- a/src/gooey/copilot_integrations/client.py +++ b/src/gooey/copilot_integrations/client.py @@ -1,33 
+1,33 @@ # This file was auto-generated by Fern from our API Definition. import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.jsonable_encoder import jsonable_encoder -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..core.client_wrapper import SyncClientWrapper from ..types.button_pressed import ButtonPressed +from ..types.recipe_function import RecipeFunction from ..types.conversation_entry import ConversationEntry -from ..types.create_stream_response import CreateStreamResponse -from ..types.http_validation_error import HttpValidationError +from .types.create_stream_request_selected_model import CreateStreamRequestSelectedModel +from .types.create_stream_request_embedding_model import CreateStreamRequestEmbeddingModel +from .types.create_stream_request_citation_style import CreateStreamRequestCitationStyle +from .types.create_stream_request_asr_model import CreateStreamRequestAsrModel +from .types.create_stream_request_translation_model import CreateStreamRequestTranslationModel +from .types.create_stream_request_lipsync_model import CreateStreamRequestLipsyncModel from ..types.llm_tools import LlmTools -from ..types.recipe_function import RecipeFunction +from .types.create_stream_request_response_format_type import CreateStreamRequestResponseFormatType +from .types.create_stream_request_tts_provider import CreateStreamRequestTtsProvider +from .types.create_stream_request_openai_voice_name import CreateStreamRequestOpenaiVoiceName +from .types.create_stream_request_openai_tts_model import CreateStreamRequestOpenaiTtsModel from ..types.sad_talker_settings import SadTalkerSettings -from .types.video_bots_stream_create_request_asr_model 
import VideoBotsStreamCreateRequestAsrModel -from .types.video_bots_stream_create_request_citation_style import VideoBotsStreamCreateRequestCitationStyle -from .types.video_bots_stream_create_request_embedding_model import VideoBotsStreamCreateRequestEmbeddingModel -from .types.video_bots_stream_create_request_lipsync_model import VideoBotsStreamCreateRequestLipsyncModel -from .types.video_bots_stream_create_request_openai_tts_model import VideoBotsStreamCreateRequestOpenaiTtsModel -from .types.video_bots_stream_create_request_openai_voice_name import VideoBotsStreamCreateRequestOpenaiVoiceName -from .types.video_bots_stream_create_request_response_format_type import VideoBotsStreamCreateRequestResponseFormatType -from .types.video_bots_stream_create_request_selected_model import VideoBotsStreamCreateRequestSelectedModel -from .types.video_bots_stream_create_request_translation_model import VideoBotsStreamCreateRequestTranslationModel -from .types.video_bots_stream_create_request_tts_provider import VideoBotsStreamCreateRequestTtsProvider +from ..core.request_options import RequestOptions +from ..types.create_stream_response import CreateStreamResponse +from ..core.pydantic_utilities import parse_obj_as +from ..errors.payment_required_error import PaymentRequiredError +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError from .types.video_bots_stream_response import VideoBotsStreamResponse +from ..core.jsonable_encoder import jsonable_encoder +from ..core.client_wrapper import AsyncClientWrapper # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
@@ -41,71 +41,71 @@ def video_bots_stream_create( self, *, integration_id: str, - conversation_id: typing.Optional[str] = None, - user_id: typing.Optional[str] = None, - user_message_id: typing.Optional[str] = None, - button_pressed: typing.Optional[ButtonPressed] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - input_prompt: typing.Optional[str] = None, - input_audio: typing.Optional[str] = None, - input_images: typing.Optional[typing.List[str]] = None, - input_documents: typing.Optional[typing.List[str]] = None, - doc_extract_url: typing.Optional[str] = None, - messages: typing.Optional[typing.List[ConversationEntry]] = None, - bot_script: typing.Optional[str] = None, - selected_model: typing.Optional[VideoBotsStreamCreateRequestSelectedModel] = None, - document_model: typing.Optional[str] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - keyword_instructions: typing.Optional[str] = None, - documents: typing.Optional[typing.List[str]] = None, - max_references: typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - embedding_model: typing.Optional[VideoBotsStreamCreateRequestEmbeddingModel] = None, - dense_weight: typing.Optional[float] = None, - citation_style: typing.Optional[VideoBotsStreamCreateRequestCitationStyle] = None, - use_url_shortener: typing.Optional[bool] = None, - asr_model: typing.Optional[VideoBotsStreamCreateRequestAsrModel] = None, - asr_language: typing.Optional[str] = None, - translation_model: typing.Optional[VideoBotsStreamCreateRequestTranslationModel] = None, - user_language: typing.Optional[str] = None, - input_glossary_document: typing.Optional[str] = None, - output_glossary_document: typing.Optional[str] = None, - lipsync_model: typing.Optional[VideoBotsStreamCreateRequestLipsyncModel] = None, - tools: 
typing.Optional[typing.List[LlmTools]] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[VideoBotsStreamCreateRequestResponseFormatType] = None, - tts_provider: typing.Optional[VideoBotsStreamCreateRequestTtsProvider] = None, - uberduck_voice_name: typing.Optional[str] = None, - uberduck_speaking_rate: typing.Optional[float] = None, - google_voice_name: typing.Optional[str] = None, - google_speaking_rate: typing.Optional[float] = None, - google_pitch: typing.Optional[float] = None, - bark_history_prompt: typing.Optional[str] = None, - elevenlabs_voice_name: typing.Optional[str] = None, - elevenlabs_api_key: typing.Optional[str] = None, - elevenlabs_voice_id: typing.Optional[str] = None, - elevenlabs_model: typing.Optional[str] = None, - elevenlabs_stability: typing.Optional[float] = None, - elevenlabs_similarity_boost: typing.Optional[float] = None, - elevenlabs_style: typing.Optional[float] = None, - elevenlabs_speaker_boost: typing.Optional[bool] = None, - azure_voice_name: typing.Optional[str] = None, - openai_voice_name: typing.Optional[VideoBotsStreamCreateRequestOpenaiVoiceName] = None, - openai_tts_model: typing.Optional[VideoBotsStreamCreateRequestOpenaiTtsModel] = None, - input_face: typing.Optional[str] = None, - face_padding_top: typing.Optional[int] = None, - face_padding_bottom: typing.Optional[int] = None, - face_padding_left: typing.Optional[int] = None, - face_padding_right: typing.Optional[int] = None, - sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - input_text: typing.Optional[str] = None, + conversation_id: typing.Optional[str] = OMIT, + user_id: typing.Optional[str] = OMIT, + user_message_id: typing.Optional[str] = OMIT, + button_pressed: typing.Optional[ButtonPressed] = OMIT, + functions: 
typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + input_prompt: typing.Optional[str] = OMIT, + input_audio: typing.Optional[str] = OMIT, + input_images: typing.Optional[typing.Sequence[str]] = OMIT, + input_documents: typing.Optional[typing.Sequence[str]] = OMIT, + doc_extract_url: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT, + bot_script: typing.Optional[str] = OMIT, + selected_model: typing.Optional[CreateStreamRequestSelectedModel] = OMIT, + document_model: typing.Optional[str] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + keyword_instructions: typing.Optional[str] = OMIT, + documents: typing.Optional[typing.Sequence[str]] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + embedding_model: typing.Optional[CreateStreamRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + citation_style: typing.Optional[CreateStreamRequestCitationStyle] = OMIT, + use_url_shortener: typing.Optional[bool] = OMIT, + asr_model: typing.Optional[CreateStreamRequestAsrModel] = OMIT, + asr_language: typing.Optional[str] = OMIT, + translation_model: typing.Optional[CreateStreamRequestTranslationModel] = OMIT, + user_language: typing.Optional[str] = OMIT, + input_glossary_document: typing.Optional[str] = OMIT, + output_glossary_document: typing.Optional[str] = OMIT, + lipsync_model: typing.Optional[CreateStreamRequestLipsyncModel] = OMIT, + tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: 
typing.Optional[CreateStreamRequestResponseFormatType] = OMIT, + tts_provider: typing.Optional[CreateStreamRequestTtsProvider] = OMIT, + uberduck_voice_name: typing.Optional[str] = OMIT, + uberduck_speaking_rate: typing.Optional[float] = OMIT, + google_voice_name: typing.Optional[str] = OMIT, + google_speaking_rate: typing.Optional[float] = OMIT, + google_pitch: typing.Optional[float] = OMIT, + bark_history_prompt: typing.Optional[str] = OMIT, + elevenlabs_voice_name: typing.Optional[str] = OMIT, + elevenlabs_api_key: typing.Optional[str] = OMIT, + elevenlabs_voice_id: typing.Optional[str] = OMIT, + elevenlabs_model: typing.Optional[str] = OMIT, + elevenlabs_stability: typing.Optional[float] = OMIT, + elevenlabs_similarity_boost: typing.Optional[float] = OMIT, + elevenlabs_style: typing.Optional[float] = OMIT, + elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, + azure_voice_name: typing.Optional[str] = OMIT, + openai_voice_name: typing.Optional[CreateStreamRequestOpenaiVoiceName] = OMIT, + openai_tts_model: typing.Optional[CreateStreamRequestOpenaiTtsModel] = OMIT, + input_face: typing.Optional[str] = OMIT, + face_padding_top: typing.Optional[int] = OMIT, + face_padding_bottom: typing.Optional[int] = OMIT, + face_padding_left: typing.Optional[int] = OMIT, + face_padding_right: typing.Optional[int] = OMIT, + sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, + input_text: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> CreateStreamResponse: """ @@ -134,27 +134,27 @@ def video_bots_stream_create( button_pressed : typing.Optional[ButtonPressed] The button that was pressed by the user. 
- functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments input_prompt : typing.Optional[str] input_audio : typing.Optional[str] - input_images : typing.Optional[typing.List[str]] + input_images : typing.Optional[typing.Sequence[str]] - input_documents : typing.Optional[typing.List[str]] + input_documents : typing.Optional[typing.Sequence[str]] doc_extract_url : typing.Optional[str] Select a workflow to extract text from documents and images. - messages : typing.Optional[typing.List[ConversationEntry]] + messages : typing.Optional[typing.Sequence[ConversationEntry]] bot_script : typing.Optional[str] - selected_model : typing.Optional[VideoBotsStreamCreateRequestSelectedModel] + selected_model : typing.Optional[CreateStreamRequestSelectedModel] document_model : typing.Optional[str] When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? 
(via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) @@ -165,7 +165,7 @@ def video_bots_stream_create( keyword_instructions : typing.Optional[str] - documents : typing.Optional[typing.List[str]] + documents : typing.Optional[typing.Sequence[str]] max_references : typing.Optional[int] @@ -173,7 +173,7 @@ def video_bots_stream_create( scroll_jump : typing.Optional[int] - embedding_model : typing.Optional[VideoBotsStreamCreateRequestEmbeddingModel] + embedding_model : typing.Optional[CreateStreamRequestEmbeddingModel] dense_weight : typing.Optional[float] @@ -181,17 +181,17 @@ def video_bots_stream_create( Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - citation_style : typing.Optional[VideoBotsStreamCreateRequestCitationStyle] + citation_style : typing.Optional[CreateStreamRequestCitationStyle] use_url_shortener : typing.Optional[bool] - asr_model : typing.Optional[VideoBotsStreamCreateRequestAsrModel] + asr_model : typing.Optional[CreateStreamRequestAsrModel] Choose a model to transcribe incoming audio messages to text. asr_language : typing.Optional[str] Choose a language to transcribe incoming audio messages to text. - translation_model : typing.Optional[VideoBotsStreamCreateRequestTranslationModel] + translation_model : typing.Optional[CreateStreamRequestTranslationModel] user_language : typing.Optional[str] Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. 
@@ -206,9 +206,9 @@ def video_bots_stream_create( Translation Glossary for LLM Language (English) -> User Langauge - lipsync_model : typing.Optional[VideoBotsStreamCreateRequestLipsyncModel] + lipsync_model : typing.Optional[CreateStreamRequestLipsyncModel] - tools : typing.Optional[typing.List[LlmTools]] + tools : typing.Optional[typing.Sequence[LlmTools]] Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). avoid_repetition : typing.Optional[bool] @@ -221,9 +221,9 @@ def video_bots_stream_create( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[VideoBotsStreamCreateRequestResponseFormatType] + response_format_type : typing.Optional[CreateStreamRequestResponseFormatType] - tts_provider : typing.Optional[VideoBotsStreamCreateRequestTtsProvider] + tts_provider : typing.Optional[CreateStreamRequestTtsProvider] uberduck_voice_name : typing.Optional[str] @@ -256,9 +256,9 @@ def video_bots_stream_create( azure_voice_name : typing.Optional[str] - openai_voice_name : typing.Optional[VideoBotsStreamCreateRequestOpenaiVoiceName] + openai_voice_name : typing.Optional[CreateStreamRequestOpenaiVoiceName] - openai_tts_model : typing.Optional[VideoBotsStreamCreateRequestOpenaiTtsModel] + openai_tts_model : typing.Optional[CreateStreamRequestOpenaiTtsModel] input_face : typing.Optional[str] @@ -297,7 +297,7 @@ def video_bots_stream_create( _response = self._client_wrapper.httpx_client.request( "v3/integrations/stream", method="POST", - data={ + json={ "integration_id": integration_id, "conversation_id": conversation_id, "user_id": user_id, @@ -365,20 +365,37 @@ def video_bots_stream_create( "sadtalker_settings": sadtalker_settings, "input_text": input_text, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(CreateStreamResponse, parse_obj_as(type_=CreateStreamResponse, 
object_=_response.json())) # type: ignore + return typing.cast( + CreateStreamResponse, + parse_obj_as( + type_=CreateStreamResponse, # type: ignore + object_=_response.json(), + ), + ) if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -413,18 +430,38 @@ def video_bots_stream( ) """ _response = self._client_wrapper.httpx_client.request( - f"v3/integrations/stream/{jsonable_encoder(request_id)}", method="GET", request_options=request_options + f"v3/integrations/stream/{jsonable_encoder(request_id)}", + method="GET", + request_options=request_options, ) try: if 200 <= _response.status_code < 300: - return typing.cast(VideoBotsStreamResponse, parse_obj_as(type_=VideoBotsStreamResponse, object_=_response.json())) # type: ignore + return typing.cast( + VideoBotsStreamResponse, + parse_obj_as( + type_=VideoBotsStreamResponse, # type: ignore + object_=_response.json(), + ), + ) if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, 
object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -440,71 +477,71 @@ async def video_bots_stream_create( self, *, integration_id: str, - conversation_id: typing.Optional[str] = None, - user_id: typing.Optional[str] = None, - user_message_id: typing.Optional[str] = None, - button_pressed: typing.Optional[ButtonPressed] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - input_prompt: typing.Optional[str] = None, - input_audio: typing.Optional[str] = None, - input_images: typing.Optional[typing.List[str]] = None, - input_documents: typing.Optional[typing.List[str]] = None, - doc_extract_url: typing.Optional[str] = None, - messages: typing.Optional[typing.List[ConversationEntry]] = None, - bot_script: typing.Optional[str] = None, - selected_model: typing.Optional[VideoBotsStreamCreateRequestSelectedModel] = None, - document_model: typing.Optional[str] = None, - task_instructions: typing.Optional[str] = None, - query_instructions: typing.Optional[str] = None, - keyword_instructions: typing.Optional[str] = None, - documents: typing.Optional[typing.List[str]] = None, - max_references: typing.Optional[int] = None, - max_context_words: typing.Optional[int] = None, - scroll_jump: typing.Optional[int] = None, - embedding_model: typing.Optional[VideoBotsStreamCreateRequestEmbeddingModel] = None, - dense_weight: typing.Optional[float] = None, - citation_style: typing.Optional[VideoBotsStreamCreateRequestCitationStyle] = None, - use_url_shortener: typing.Optional[bool] = None, - asr_model: typing.Optional[VideoBotsStreamCreateRequestAsrModel] = None, - asr_language: typing.Optional[str] = None, - translation_model: typing.Optional[VideoBotsStreamCreateRequestTranslationModel] = None, - user_language: 
typing.Optional[str] = None, - input_glossary_document: typing.Optional[str] = None, - output_glossary_document: typing.Optional[str] = None, - lipsync_model: typing.Optional[VideoBotsStreamCreateRequestLipsyncModel] = None, - tools: typing.Optional[typing.List[LlmTools]] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[VideoBotsStreamCreateRequestResponseFormatType] = None, - tts_provider: typing.Optional[VideoBotsStreamCreateRequestTtsProvider] = None, - uberduck_voice_name: typing.Optional[str] = None, - uberduck_speaking_rate: typing.Optional[float] = None, - google_voice_name: typing.Optional[str] = None, - google_speaking_rate: typing.Optional[float] = None, - google_pitch: typing.Optional[float] = None, - bark_history_prompt: typing.Optional[str] = None, - elevenlabs_voice_name: typing.Optional[str] = None, - elevenlabs_api_key: typing.Optional[str] = None, - elevenlabs_voice_id: typing.Optional[str] = None, - elevenlabs_model: typing.Optional[str] = None, - elevenlabs_stability: typing.Optional[float] = None, - elevenlabs_similarity_boost: typing.Optional[float] = None, - elevenlabs_style: typing.Optional[float] = None, - elevenlabs_speaker_boost: typing.Optional[bool] = None, - azure_voice_name: typing.Optional[str] = None, - openai_voice_name: typing.Optional[VideoBotsStreamCreateRequestOpenaiVoiceName] = None, - openai_tts_model: typing.Optional[VideoBotsStreamCreateRequestOpenaiTtsModel] = None, - input_face: typing.Optional[str] = None, - face_padding_top: typing.Optional[int] = None, - face_padding_bottom: typing.Optional[int] = None, - face_padding_left: typing.Optional[int] = None, - face_padding_right: typing.Optional[int] = None, - sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - input_text: 
typing.Optional[str] = None, + conversation_id: typing.Optional[str] = OMIT, + user_id: typing.Optional[str] = OMIT, + user_message_id: typing.Optional[str] = OMIT, + button_pressed: typing.Optional[ButtonPressed] = OMIT, + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + input_prompt: typing.Optional[str] = OMIT, + input_audio: typing.Optional[str] = OMIT, + input_images: typing.Optional[typing.Sequence[str]] = OMIT, + input_documents: typing.Optional[typing.Sequence[str]] = OMIT, + doc_extract_url: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT, + bot_script: typing.Optional[str] = OMIT, + selected_model: typing.Optional[CreateStreamRequestSelectedModel] = OMIT, + document_model: typing.Optional[str] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + keyword_instructions: typing.Optional[str] = OMIT, + documents: typing.Optional[typing.Sequence[str]] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + embedding_model: typing.Optional[CreateStreamRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + citation_style: typing.Optional[CreateStreamRequestCitationStyle] = OMIT, + use_url_shortener: typing.Optional[bool] = OMIT, + asr_model: typing.Optional[CreateStreamRequestAsrModel] = OMIT, + asr_language: typing.Optional[str] = OMIT, + translation_model: typing.Optional[CreateStreamRequestTranslationModel] = OMIT, + user_language: typing.Optional[str] = OMIT, + input_glossary_document: typing.Optional[str] = OMIT, + output_glossary_document: typing.Optional[str] = OMIT, + lipsync_model: typing.Optional[CreateStreamRequestLipsyncModel] = OMIT, + tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT, + avoid_repetition: 
typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[CreateStreamRequestResponseFormatType] = OMIT, + tts_provider: typing.Optional[CreateStreamRequestTtsProvider] = OMIT, + uberduck_voice_name: typing.Optional[str] = OMIT, + uberduck_speaking_rate: typing.Optional[float] = OMIT, + google_voice_name: typing.Optional[str] = OMIT, + google_speaking_rate: typing.Optional[float] = OMIT, + google_pitch: typing.Optional[float] = OMIT, + bark_history_prompt: typing.Optional[str] = OMIT, + elevenlabs_voice_name: typing.Optional[str] = OMIT, + elevenlabs_api_key: typing.Optional[str] = OMIT, + elevenlabs_voice_id: typing.Optional[str] = OMIT, + elevenlabs_model: typing.Optional[str] = OMIT, + elevenlabs_stability: typing.Optional[float] = OMIT, + elevenlabs_similarity_boost: typing.Optional[float] = OMIT, + elevenlabs_style: typing.Optional[float] = OMIT, + elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, + azure_voice_name: typing.Optional[str] = OMIT, + openai_voice_name: typing.Optional[CreateStreamRequestOpenaiVoiceName] = OMIT, + openai_tts_model: typing.Optional[CreateStreamRequestOpenaiTtsModel] = OMIT, + input_face: typing.Optional[str] = OMIT, + face_padding_top: typing.Optional[int] = OMIT, + face_padding_bottom: typing.Optional[int] = OMIT, + face_padding_left: typing.Optional[int] = OMIT, + face_padding_right: typing.Optional[int] = OMIT, + sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, + input_text: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> CreateStreamResponse: """ @@ -533,27 +570,27 @@ async def video_bots_stream_create( button_pressed : typing.Optional[ButtonPressed] The button that was pressed by the user. 
- functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments input_prompt : typing.Optional[str] input_audio : typing.Optional[str] - input_images : typing.Optional[typing.List[str]] + input_images : typing.Optional[typing.Sequence[str]] - input_documents : typing.Optional[typing.List[str]] + input_documents : typing.Optional[typing.Sequence[str]] doc_extract_url : typing.Optional[str] Select a workflow to extract text from documents and images. - messages : typing.Optional[typing.List[ConversationEntry]] + messages : typing.Optional[typing.Sequence[ConversationEntry]] bot_script : typing.Optional[str] - selected_model : typing.Optional[VideoBotsStreamCreateRequestSelectedModel] + selected_model : typing.Optional[CreateStreamRequestSelectedModel] document_model : typing.Optional[str] When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? 
(via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) @@ -564,7 +601,7 @@ async def video_bots_stream_create( keyword_instructions : typing.Optional[str] - documents : typing.Optional[typing.List[str]] + documents : typing.Optional[typing.Sequence[str]] max_references : typing.Optional[int] @@ -572,7 +609,7 @@ async def video_bots_stream_create( scroll_jump : typing.Optional[int] - embedding_model : typing.Optional[VideoBotsStreamCreateRequestEmbeddingModel] + embedding_model : typing.Optional[CreateStreamRequestEmbeddingModel] dense_weight : typing.Optional[float] @@ -580,17 +617,17 @@ async def video_bots_stream_create( Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - citation_style : typing.Optional[VideoBotsStreamCreateRequestCitationStyle] + citation_style : typing.Optional[CreateStreamRequestCitationStyle] use_url_shortener : typing.Optional[bool] - asr_model : typing.Optional[VideoBotsStreamCreateRequestAsrModel] + asr_model : typing.Optional[CreateStreamRequestAsrModel] Choose a model to transcribe incoming audio messages to text. asr_language : typing.Optional[str] Choose a language to transcribe incoming audio messages to text. - translation_model : typing.Optional[VideoBotsStreamCreateRequestTranslationModel] + translation_model : typing.Optional[CreateStreamRequestTranslationModel] user_language : typing.Optional[str] Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. 
@@ -605,9 +642,9 @@ async def video_bots_stream_create( Translation Glossary for LLM Language (English) -> User Langauge - lipsync_model : typing.Optional[VideoBotsStreamCreateRequestLipsyncModel] + lipsync_model : typing.Optional[CreateStreamRequestLipsyncModel] - tools : typing.Optional[typing.List[LlmTools]] + tools : typing.Optional[typing.Sequence[LlmTools]] Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). avoid_repetition : typing.Optional[bool] @@ -620,9 +657,9 @@ async def video_bots_stream_create( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[VideoBotsStreamCreateRequestResponseFormatType] + response_format_type : typing.Optional[CreateStreamRequestResponseFormatType] - tts_provider : typing.Optional[VideoBotsStreamCreateRequestTtsProvider] + tts_provider : typing.Optional[CreateStreamRequestTtsProvider] uberduck_voice_name : typing.Optional[str] @@ -655,9 +692,9 @@ async def video_bots_stream_create( azure_voice_name : typing.Optional[str] - openai_voice_name : typing.Optional[VideoBotsStreamCreateRequestOpenaiVoiceName] + openai_voice_name : typing.Optional[CreateStreamRequestOpenaiVoiceName] - openai_tts_model : typing.Optional[VideoBotsStreamCreateRequestOpenaiTtsModel] + openai_tts_model : typing.Optional[CreateStreamRequestOpenaiTtsModel] input_face : typing.Optional[str] @@ -704,7 +741,7 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v3/integrations/stream", method="POST", - data={ + json={ "integration_id": integration_id, "conversation_id": conversation_id, "user_id": user_id, @@ -772,20 +809,37 @@ async def main() -> None: "sadtalker_settings": sadtalker_settings, "input_text": input_text, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(CreateStreamResponse, 
parse_obj_as(type_=CreateStreamResponse, object_=_response.json())) # type: ignore + return typing.cast( + CreateStreamResponse, + parse_obj_as( + type_=CreateStreamResponse, # type: ignore + object_=_response.json(), + ), + ) if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -828,18 +882,38 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v3/integrations/stream/{jsonable_encoder(request_id)}", method="GET", request_options=request_options + f"v3/integrations/stream/{jsonable_encoder(request_id)}", + method="GET", + request_options=request_options, ) try: if 200 <= _response.status_code < 300: - return typing.cast(VideoBotsStreamResponse, parse_obj_as(type_=VideoBotsStreamResponse, object_=_response.json())) # type: ignore + return typing.cast( + VideoBotsStreamResponse, + parse_obj_as( + type_=VideoBotsStreamResponse, # type: ignore + object_=_response.json(), + ), + ) if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - 
typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: diff --git a/src/gooey/copilot_integrations/types/__init__.py b/src/gooey/copilot_integrations/types/__init__.py index 6e93ace..1224051 100644 --- a/src/gooey/copilot_integrations/types/__init__.py +++ b/src/gooey/copilot_integrations/types/__init__.py @@ -1,27 +1,27 @@ # This file was auto-generated by Fern from our API Definition. -from .video_bots_stream_create_request_asr_model import VideoBotsStreamCreateRequestAsrModel -from .video_bots_stream_create_request_citation_style import VideoBotsStreamCreateRequestCitationStyle -from .video_bots_stream_create_request_embedding_model import VideoBotsStreamCreateRequestEmbeddingModel -from .video_bots_stream_create_request_lipsync_model import VideoBotsStreamCreateRequestLipsyncModel -from .video_bots_stream_create_request_openai_tts_model import VideoBotsStreamCreateRequestOpenaiTtsModel -from .video_bots_stream_create_request_openai_voice_name import VideoBotsStreamCreateRequestOpenaiVoiceName -from .video_bots_stream_create_request_response_format_type import VideoBotsStreamCreateRequestResponseFormatType -from .video_bots_stream_create_request_selected_model import VideoBotsStreamCreateRequestSelectedModel -from .video_bots_stream_create_request_translation_model import VideoBotsStreamCreateRequestTranslationModel -from .video_bots_stream_create_request_tts_provider import VideoBotsStreamCreateRequestTtsProvider +from .create_stream_request_asr_model import CreateStreamRequestAsrModel +from .create_stream_request_citation_style import CreateStreamRequestCitationStyle +from .create_stream_request_embedding_model import CreateStreamRequestEmbeddingModel +from .create_stream_request_lipsync_model import 
CreateStreamRequestLipsyncModel +from .create_stream_request_openai_tts_model import CreateStreamRequestOpenaiTtsModel +from .create_stream_request_openai_voice_name import CreateStreamRequestOpenaiVoiceName +from .create_stream_request_response_format_type import CreateStreamRequestResponseFormatType +from .create_stream_request_selected_model import CreateStreamRequestSelectedModel +from .create_stream_request_translation_model import CreateStreamRequestTranslationModel +from .create_stream_request_tts_provider import CreateStreamRequestTtsProvider from .video_bots_stream_response import VideoBotsStreamResponse __all__ = [ - "VideoBotsStreamCreateRequestAsrModel", - "VideoBotsStreamCreateRequestCitationStyle", - "VideoBotsStreamCreateRequestEmbeddingModel", - "VideoBotsStreamCreateRequestLipsyncModel", - "VideoBotsStreamCreateRequestOpenaiTtsModel", - "VideoBotsStreamCreateRequestOpenaiVoiceName", - "VideoBotsStreamCreateRequestResponseFormatType", - "VideoBotsStreamCreateRequestSelectedModel", - "VideoBotsStreamCreateRequestTranslationModel", - "VideoBotsStreamCreateRequestTtsProvider", + "CreateStreamRequestAsrModel", + "CreateStreamRequestCitationStyle", + "CreateStreamRequestEmbeddingModel", + "CreateStreamRequestLipsyncModel", + "CreateStreamRequestOpenaiTtsModel", + "CreateStreamRequestOpenaiVoiceName", + "CreateStreamRequestResponseFormatType", + "CreateStreamRequestSelectedModel", + "CreateStreamRequestTranslationModel", + "CreateStreamRequestTtsProvider", "VideoBotsStreamResponse", ] diff --git a/src/gooey/types/doc_summary_request_selected_asr_model.py b/src/gooey/copilot_integrations/types/create_stream_request_asr_model.py similarity index 90% rename from src/gooey/types/doc_summary_request_selected_asr_model.py rename to src/gooey/copilot_integrations/types/create_stream_request_asr_model.py index 8b8a338..af166fa 100644 --- a/src/gooey/types/doc_summary_request_selected_asr_model.py +++ 
b/src/gooey/copilot_integrations/types/create_stream_request_asr_model.py @@ -2,7 +2,7 @@ import typing -DocSummaryRequestSelectedAsrModel = typing.Union[ +CreateStreamRequestAsrModel = typing.Union[ typing.Literal[ "whisper_large_v2", "whisper_large_v3", diff --git a/src/gooey/types/rag_request_citation_style.py b/src/gooey/copilot_integrations/types/create_stream_request_citation_style.py similarity index 90% rename from src/gooey/types/rag_request_citation_style.py rename to src/gooey/copilot_integrations/types/create_stream_request_citation_style.py index 521a218..e57bab1 100644 --- a/src/gooey/types/rag_request_citation_style.py +++ b/src/gooey/copilot_integrations/types/create_stream_request_citation_style.py @@ -2,7 +2,7 @@ import typing -RagRequestCitationStyle = typing.Union[ +CreateStreamRequestCitationStyle = typing.Union[ typing.Literal[ "number", "title", diff --git a/src/gooey/types/embed_request_selected_model.py b/src/gooey/copilot_integrations/types/create_stream_request_embedding_model.py similarity index 87% rename from src/gooey/types/embed_request_selected_model.py rename to src/gooey/copilot_integrations/types/create_stream_request_embedding_model.py index 91f89cd..cef26bf 100644 --- a/src/gooey/types/embed_request_selected_model.py +++ b/src/gooey/copilot_integrations/types/create_stream_request_embedding_model.py @@ -2,7 +2,7 @@ import typing -EmbedRequestSelectedModel = typing.Union[ +CreateStreamRequestEmbeddingModel = typing.Union[ typing.Literal[ "openai_3_large", "openai_3_small", diff --git a/src/gooey/copilot_integrations/types/create_stream_request_lipsync_model.py b/src/gooey/copilot_integrations/types/create_stream_request_lipsync_model.py new file mode 100644 index 0000000..c207d45 --- /dev/null +++ b/src/gooey/copilot_integrations/types/create_stream_request_lipsync_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +CreateStreamRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/text_to_speech_request_openai_tts_model.py b/src/gooey/copilot_integrations/types/create_stream_request_openai_tts_model.py similarity index 64% rename from src/gooey/types/text_to_speech_request_openai_tts_model.py rename to src/gooey/copilot_integrations/types/create_stream_request_openai_tts_model.py index 29e0dbe..475ca67 100644 --- a/src/gooey/types/text_to_speech_request_openai_tts_model.py +++ b/src/gooey/copilot_integrations/types/create_stream_request_openai_tts_model.py @@ -2,4 +2,4 @@ import typing -TextToSpeechRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] +CreateStreamRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/types/text_to_speech_request_openai_voice_name.py b/src/gooey/copilot_integrations/types/create_stream_request_openai_voice_name.py similarity index 76% rename from src/gooey/types/text_to_speech_request_openai_voice_name.py rename to src/gooey/copilot_integrations/types/create_stream_request_openai_voice_name.py index 495482a..4f3dd7a 100644 --- a/src/gooey/types/text_to_speech_request_openai_voice_name.py +++ b/src/gooey/copilot_integrations/types/create_stream_request_openai_voice_name.py @@ -2,6 +2,6 @@ import typing -TextToSpeechRequestOpenaiVoiceName = typing.Union[ +CreateStreamRequestOpenaiVoiceName = typing.Union[ typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any ] diff --git a/src/gooey/types/web_search_llm_request_response_format_type.py b/src/gooey/copilot_integrations/types/create_stream_request_response_format_type.py similarity index 65% rename from src/gooey/types/web_search_llm_request_response_format_type.py rename to src/gooey/copilot_integrations/types/create_stream_request_response_format_type.py index 4989a6b..dc5024d 100644 --- 
a/src/gooey/types/web_search_llm_request_response_format_type.py +++ b/src/gooey/copilot_integrations/types/create_stream_request_response_format_type.py @@ -2,4 +2,4 @@ import typing -WebSearchLlmRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] +CreateStreamRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/llm_request_selected_models_item.py b/src/gooey/copilot_integrations/types/create_stream_request_selected_model.py similarity index 95% rename from src/gooey/types/llm_request_selected_models_item.py rename to src/gooey/copilot_integrations/types/create_stream_request_selected_model.py index 019f9b9..7227a94 100644 --- a/src/gooey/types/llm_request_selected_models_item.py +++ b/src/gooey/copilot_integrations/types/create_stream_request_selected_model.py @@ -2,7 +2,7 @@ import typing -LlmRequestSelectedModelsItem = typing.Union[ +CreateStreamRequestSelectedModel = typing.Union[ typing.Literal[ "gpt_4_o", "gpt_4_o_mini", diff --git a/src/gooey/copilot_integrations/types/create_stream_request_translation_model.py b/src/gooey/copilot_integrations/types/create_stream_request_translation_model.py new file mode 100644 index 0000000..3876937 --- /dev/null +++ b/src/gooey/copilot_integrations/types/create_stream_request_translation_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +CreateStreamRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/lipsync_tts_request_tts_provider.py b/src/gooey/copilot_integrations/types/create_stream_request_tts_provider.py similarity index 79% rename from src/gooey/types/lipsync_tts_request_tts_provider.py rename to src/gooey/copilot_integrations/types/create_stream_request_tts_provider.py index 1a23fe3..cad602d 100644 --- a/src/gooey/types/lipsync_tts_request_tts_provider.py +++ b/src/gooey/copilot_integrations/types/create_stream_request_tts_provider.py @@ -2,6 +2,6 @@ import typing -LipsyncTtsRequestTtsProvider = typing.Union[ +CreateStreamRequestTtsProvider = typing.Union[ typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any ] diff --git a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_lipsync_model.py b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_lipsync_model.py deleted file mode 100644 index a499eec..0000000 --- a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_lipsync_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -VideoBotsStreamCreateRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_openai_tts_model.py b/src/gooey/copilot_integrations/types/video_bots_stream_create_request_openai_tts_model.py deleted file mode 100644 index 05802c9..0000000 --- a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_openai_tts_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -VideoBotsStreamCreateRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/copilot_integrations/types/video_bots_stream_response.py b/src/gooey/copilot_integrations/types/video_bots_stream_response.py index a8f1ad1..22f8858 100644 --- a/src/gooey/copilot_integrations/types/video_bots_stream_response.py +++ b/src/gooey/copilot_integrations/types/video_bots_stream_response.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. import typing - from ...types.conversation_start import ConversationStart -from ...types.final_response import FinalResponse -from ...types.message_part import MessagePart from ...types.run_start import RunStart +from ...types.message_part import MessagePart +from ...types.final_response import FinalResponse from ...types.stream_error import StreamError VideoBotsStreamResponse = typing.Union[ConversationStart, RunStart, MessagePart, FinalResponse, StreamError] diff --git a/src/gooey/core/__init__.py b/src/gooey/core/__init__.py index 5a0bee3..4213c34 100644 --- a/src/gooey/core/__init__.py +++ b/src/gooey/core/__init__.py @@ -10,7 +10,6 @@ IS_PYDANTIC_V2, UniversalBaseModel, UniversalRootModel, - deep_union_pydantic_dicts, parse_obj_as, universal_field_validator, universal_root_validator, @@ -36,7 +35,6 @@ "UniversalRootModel", "convert_and_respect_annotation_metadata", "convert_file_dict_to_httpx_tuples", - "deep_union_pydantic_dicts", "encode_query", "jsonable_encoder", "parse_obj_as", diff --git a/src/gooey/core/client_wrapper.py b/src/gooey/core/client_wrapper.py index 7f52b8d..4793299 100644 --- a/src/gooey/core/client_wrapper.py +++ b/src/gooey/core/client_wrapper.py @@ -1,10 +1,9 @@ # This file was auto-generated by Fern from our API Definition. 
import typing - import httpx - -from .http_client import AsyncHttpClient, HttpClient +from .http_client import HttpClient +from .http_client import AsyncHttpClient class BaseClientWrapper: @@ -23,7 +22,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "gooeyai", - "X-Fern-SDK-Version": "0.0.1-beta6", + "X-Fern-SDK-Version": "0.0.1-beta7", } headers["Authorization"] = f"Bearer {self._get_api_key()}" return headers diff --git a/src/gooey/core/file.py b/src/gooey/core/file.py index cb0d40b..6e0f92b 100644 --- a/src/gooey/core/file.py +++ b/src/gooey/core/file.py @@ -13,12 +13,17 @@ # (filename, file (or bytes), content_type) typing.Tuple[typing.Optional[str], FileContent, typing.Optional[str]], # (filename, file (or bytes), content_type, headers) - typing.Tuple[typing.Optional[str], FileContent, typing.Optional[str], typing.Mapping[str, str]], + typing.Tuple[ + typing.Optional[str], + FileContent, + typing.Optional[str], + typing.Mapping[str, str], + ], ] def convert_file_dict_to_httpx_tuples( - d: typing.Dict[str, typing.Union[File, typing.List[File]]] + d: typing.Dict[str, typing.Union[File, typing.List[File]]], ) -> typing.List[typing.Tuple[str, File]]: """ The format we use is a list of tuples, where the first element is the diff --git a/src/gooey/core/jsonable_encoder.py b/src/gooey/core/jsonable_encoder.py index 9251cd5..1b631e9 100644 --- a/src/gooey/core/jsonable_encoder.py +++ b/src/gooey/core/jsonable_encoder.py @@ -19,7 +19,11 @@ import pydantic from .datetime_utils import serialize_datetime -from .pydantic_utilities import IS_PYDANTIC_V2, encode_by_type, to_jsonable_with_fallback +from .pydantic_utilities import ( + IS_PYDANTIC_V2, + encode_by_type, + to_jsonable_with_fallback, +) SetIntStr = Set[Union[int, str]] DictIntStrAny = Dict[Union[int, str], Any] @@ -48,7 +52,7 @@ def jsonable_encoder(obj: Any, custom_encoder: Optional[Dict[Any, Callable[[Any] obj_dict = 
obj_dict["root"] return jsonable_encoder(obj_dict, custom_encoder=encoder) if dataclasses.is_dataclass(obj): - obj_dict = dataclasses.asdict(obj) + obj_dict = dataclasses.asdict(obj) # type: ignore return jsonable_encoder(obj_dict, custom_encoder=custom_encoder) if isinstance(obj, bytes): return base64.b64encode(obj).decode("utf-8") diff --git a/src/gooey/core/pydantic_utilities.py b/src/gooey/core/pydantic_utilities.py index 7c5418b..eb42918 100644 --- a/src/gooey/core/pydantic_utilities.py +++ b/src/gooey/core/pydantic_utilities.py @@ -4,7 +4,8 @@ import datetime as dt import typing from collections import defaultdict -from functools import wraps + +import typing_extensions import pydantic @@ -27,11 +28,15 @@ from pydantic.v1.typing import ( # type: ignore # pyright: ignore[reportMissingImports] # Pydantic v2 get_args as get_args, ) - from pydantic.v1.typing import get_origin as get_origin # pyright: ignore[reportMissingImports] # Pydantic v2 + from pydantic.v1.typing import ( # pyright: ignore[reportMissingImports] # Pydantic v2 + get_origin as get_origin, + ) from pydantic.v1.typing import ( # pyright: ignore[reportMissingImports] # Pydantic v2 is_literal_type as is_literal_type, ) - from pydantic.v1.typing import is_union as is_union # pyright: ignore[reportMissingImports] # Pydantic v2 + from pydantic.v1.typing import ( # pyright: ignore[reportMissingImports] # Pydantic v2 + is_union as is_union, + ) from pydantic.v1.fields import ModelField as ModelField # type: ignore # pyright: ignore[reportMissingImports] # Pydantic v2 else: from pydantic.datetime_parse import parse_date as parse_date # type: ignore # Pydantic v1 @@ -50,19 +55,6 @@ Model = typing.TypeVar("Model", bound=pydantic.BaseModel) -def deep_union_pydantic_dicts( - source: typing.Dict[str, typing.Any], destination: typing.Dict[str, typing.Any] -) -> typing.Dict[str, typing.Any]: - for key, value in source.items(): - if isinstance(value, dict): - node = destination.setdefault(key, {}) - 
deep_union_pydantic_dicts(value, node) - else: - destination[key] = value - - return destination - - def parse_obj_as(type_: typing.Type[T], object_: typing.Any) -> T: if IS_PYDANTIC_V2: adapter = pydantic.TypeAdapter(type_) # type: ignore # Pydantic v2 @@ -90,42 +82,61 @@ class Config: json_encoders = {dt.datetime: serialize_datetime} def json(self, **kwargs: typing.Any) -> str: - kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs} + kwargs_with_defaults: typing.Any = { + "by_alias": True, + "exclude_unset": True, + **kwargs, + } if IS_PYDANTIC_V2: return super().model_dump_json(**kwargs_with_defaults) # type: ignore # Pydantic v2 else: return super().json(**kwargs_with_defaults) def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: - kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs} - kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs} + """ + Override the default dict method to `exclude_unset` by default. This function patches + `exclude_unset` to work include fields within non-None default values. + """ + _fields_set = self.__fields_set__ + + fields = _get_model_fields(self.__class__) + for name, field in fields.items(): + if name not in _fields_set: + default = _get_field_default(field) + + # If the default values are non-null act like they've been set + # This effectively allows exclude_unset to work like exclude_none where + # the latter passes through intentionally set none values. 
+ if default != None: + _fields_set.add(name) + + kwargs_with_defaults_exclude_unset: typing.Any = { + "by_alias": True, + "exclude_unset": True, + "include": _fields_set, + **kwargs, + } if IS_PYDANTIC_V2: - return deep_union_pydantic_dicts( - super().model_dump(**kwargs_with_defaults_exclude_unset), # type: ignore # Pydantic v2 - super().model_dump(**kwargs_with_defaults_exclude_none), # type: ignore # Pydantic v2 - ) + return super().model_dump(**kwargs_with_defaults_exclude_unset) # type: ignore # Pydantic v2 else: - return deep_union_pydantic_dicts( - super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none) - ) + return super().dict(**kwargs_with_defaults_exclude_unset) -UniversalRootModel: typing.Type[typing.Any] if IS_PYDANTIC_V2: class V2RootModel(UniversalBaseModel, pydantic.RootModel): # type: ignore # Pydantic v2 pass - UniversalRootModel = V2RootModel + UniversalRootModel: typing_extensions.TypeAlias = V2RootModel # type: ignore else: - UniversalRootModel = UniversalBaseModel + UniversalRootModel: typing_extensions.TypeAlias = UniversalBaseModel # type: ignore def encode_by_type(o: typing.Any) -> typing.Any: - encoders_by_class_tuples: typing.Dict[ - typing.Callable[[typing.Any], typing.Any], typing.Tuple[typing.Any, ...] 
- ] = defaultdict(tuple) + encoders_by_class_tuples: typing.Dict[typing.Callable[[typing.Any], typing.Any], typing.Tuple[typing.Any, ...]] = ( + defaultdict(tuple) + ) for type_, encoder in encoders_by_type.items(): encoders_by_class_tuples[encoder] += (type_,) @@ -136,44 +147,60 @@ def encode_by_type(o: typing.Any) -> typing.Any: return encoder(o) -def update_forward_refs(model: typing.Type["Model"], **localns: typing.Any) -> None: +def update_forward_refs(model: typing.Type["Model"]) -> None: if IS_PYDANTIC_V2: - model.model_rebuild(force=True, raise_errors=False) # type: ignore # Pydantic v2 + model.model_rebuild(raise_errors=False) # type: ignore # Pydantic v2 else: - model.update_forward_refs(**localns) + model.update_forward_refs() # Mirrors Pydantic's internal typing AnyCallable = typing.Callable[..., typing.Any] -def universal_root_validator(pre: bool = False) -> typing.Callable[[AnyCallable], AnyCallable]: +def universal_root_validator( + pre: bool = False, +) -> typing.Callable[[AnyCallable], AnyCallable]: def decorator(func: AnyCallable) -> AnyCallable: - @wraps(func) - def validate(*args: typing.Any, **kwargs: typing.Any) -> AnyCallable: - if IS_PYDANTIC_V2: - wrapped_func = pydantic.model_validator("before" if pre else "after")(func) # type: ignore # Pydantic v2 - else: - wrapped_func = pydantic.root_validator(pre=pre)(func) # type: ignore # Pydantic v1 - - return wrapped_func(*args, **kwargs) - - return validate + if IS_PYDANTIC_V2: + return pydantic.model_validator(mode="before" if pre else "after")(func) # type: ignore # Pydantic v2 + else: + return pydantic.root_validator(pre=pre)(func) # type: ignore # Pydantic v1 return decorator def universal_field_validator(field_name: str, pre: bool = False) -> typing.Callable[[AnyCallable], AnyCallable]: def decorator(func: AnyCallable) -> AnyCallable: - @wraps(func) - def validate(*args: typing.Any, **kwargs: typing.Any) -> AnyCallable: - if IS_PYDANTIC_V2: - wrapped_func = 
pydantic.field_validator(field_name, mode="before" if pre else "after")(func) # type: ignore # Pydantic v2 - else: - wrapped_func = pydantic.validator(field_name, pre=pre)(func) + if IS_PYDANTIC_V2: + return pydantic.field_validator(field_name, mode="before" if pre else "after")(func) # type: ignore # Pydantic v2 + else: + return pydantic.validator(field_name, pre=pre)(func) # type: ignore # Pydantic v1 - return wrapped_func(*args, **kwargs) + return decorator - return validate - return decorator +PydanticField = typing.Union[ModelField, pydantic.fields.FieldInfo] + + +def _get_model_fields( + model: typing.Type["Model"], +) -> typing.Mapping[str, PydanticField]: + if IS_PYDANTIC_V2: + return model.model_fields # type: ignore # Pydantic v2 + else: + return model.__fields__ # type: ignore # Pydantic v1 + + +def _get_field_default(field: PydanticField) -> typing.Any: + try: + value = field.get_default() # type: ignore # Pydantic < v1.10.15 + except: + value = field.default + if IS_PYDANTIC_V2: + from pydantic_core import PydanticUndefined + + if value == PydanticUndefined: + return None + return value + return value diff --git a/src/gooey/core/query_encoder.py b/src/gooey/core/query_encoder.py index 24076d7..3183001 100644 --- a/src/gooey/core/query_encoder.py +++ b/src/gooey/core/query_encoder.py @@ -1,33 +1,58 @@ # This file was auto-generated by Fern from our API Definition. 
-from collections import ChainMap -from typing import Any, Dict, Optional +from typing import Any, Dict, List, Optional, Tuple import pydantic # Flattens dicts to be of the form {"key[subkey][subkey2]": value} where value is not a dict -def traverse_query_dict(dict_flat: Dict[str, Any], key_prefix: Optional[str] = None) -> Dict[str, Any]: - result = {} +def traverse_query_dict(dict_flat: Dict[str, Any], key_prefix: Optional[str] = None) -> List[Tuple[str, Any]]: + result = [] for k, v in dict_flat.items(): key = f"{key_prefix}[{k}]" if key_prefix is not None else k if isinstance(v, dict): - result.update(traverse_query_dict(v, key)) + result.extend(traverse_query_dict(v, key)) + elif isinstance(v, list): + for arr_v in v: + if isinstance(arr_v, dict): + result.extend(traverse_query_dict(arr_v, key)) + else: + result.append((key, arr_v)) else: - result[key] = v + result.append((key, v)) return result -def single_query_encoder(query_key: str, query_value: Any) -> Dict[str, Any]: +def single_query_encoder(query_key: str, query_value: Any) -> List[Tuple[str, Any]]: if isinstance(query_value, pydantic.BaseModel) or isinstance(query_value, dict): if isinstance(query_value, pydantic.BaseModel): obj_dict = query_value.dict(by_alias=True) else: obj_dict = query_value return traverse_query_dict(obj_dict, query_key) + elif isinstance(query_value, list): + encoded_values: List[Tuple[str, Any]] = [] + for value in query_value: + if isinstance(value, pydantic.BaseModel) or isinstance(value, dict): + if isinstance(value, pydantic.BaseModel): + obj_dict = value.dict(by_alias=True) + elif isinstance(value, dict): + obj_dict = value - return {query_key: query_value} + encoded_values.extend(single_query_encoder(query_key, obj_dict)) + else: + encoded_values.append((query_key, value)) + return encoded_values -def encode_query(query: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]: - return dict(ChainMap(*[single_query_encoder(k, v) for k, v in query.items()])) if query is not 
None else None + return [(query_key, query_value)] + + +def encode_query(query: Optional[Dict[str, Any]]) -> Optional[List[Tuple[str, Any]]]: + if query is None: + return None + + encoded_query = [] + for k, v in query.items(): + encoded_query.extend(single_query_encoder(k, v)) + return encoded_query diff --git a/src/gooey/core/serialization.py b/src/gooey/core/serialization.py index 8ad5cf8..36180ac 100644 --- a/src/gooey/core/serialization.py +++ b/src/gooey/core/serialization.py @@ -25,7 +25,10 @@ def __init__(self, *, alias: str) -> None: def convert_and_respect_annotation_metadata( - *, object_: typing.Any, annotation: typing.Any, inner_type: typing.Optional[typing.Any] = None + *, + object_: typing.Any, + annotation: typing.Any, + inner_type: typing.Optional[typing.Any] = None, ) -> typing.Any: """ Respect the metadata annotations on a field, such as aliasing. This function effectively diff --git a/src/gooey/create_a_perfect_seo_optimized_title_paragraph/__init__.py b/src/gooey/create_a_perfect_seo_optimized_title_paragraph/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/create_a_perfect_seo_optimized_title_paragraph/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/create_a_perfect_seo_optimized_title_paragraph/client.py b/src/gooey/create_a_perfect_seo_optimized_title_paragraph/client.py deleted file mode 100644 index da651fc..0000000 --- a/src/gooey/create_a_perfect_seo_optimized_title_paragraph/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.seo_summary_page_status_response import SeoSummaryPageStatusResponse - - -class CreateAPerfectSeoOptimizedTitleParagraphClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_seo_summary( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> SeoSummaryPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SeoSummaryPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.create_a_perfect_seo_optimized_title_paragraph.status_seo_summary( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/SEOSummary/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(SeoSummaryPageStatusResponse, parse_obj_as(type_=SeoSummaryPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncCreateAPerfectSeoOptimizedTitleParagraphClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_seo_summary( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> SeoSummaryPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SeoSummaryPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.create_a_perfect_seo_optimized_title_paragraph.status_seo_summary( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/SEOSummary/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(SeoSummaryPageStatusResponse, parse_obj_as(type_=SeoSummaryPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/edit_an_image_with_ai_prompt/__init__.py b/src/gooey/edit_an_image_with_ai_prompt/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/edit_an_image_with_ai_prompt/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/edit_an_image_with_ai_prompt/client.py b/src/gooey/edit_an_image_with_ai_prompt/client.py deleted file mode 100644 index c8b36ee..0000000 --- a/src/gooey/edit_an_image_with_ai_prompt/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.img2img_page_status_response import Img2ImgPageStatusResponse - - -class EditAnImageWithAiPromptClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_img2img( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> Img2ImgPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - Img2ImgPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.edit_an_image_with_ai_prompt.status_img2img( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/Img2Img/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(Img2ImgPageStatusResponse, parse_obj_as(type_=Img2ImgPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncEditAnImageWithAiPromptClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_img2img( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> Img2ImgPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - Img2ImgPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.edit_an_image_with_ai_prompt.status_img2img( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/Img2Img/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(Img2ImgPageStatusResponse, parse_obj_as(type_=Img2ImgPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/embeddings/__init__.py b/src/gooey/embeddings/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/embeddings/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/embeddings/client.py b/src/gooey/embeddings/client.py deleted file mode 100644 index 3cbe583..0000000 --- a/src/gooey/embeddings/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.embeddings_page_status_response import EmbeddingsPageStatusResponse -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError - - -class EmbeddingsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_embeddings( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> EmbeddingsPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - EmbeddingsPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.embeddings.status_embeddings( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/embeddings/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(EmbeddingsPageStatusResponse, parse_obj_as(type_=EmbeddingsPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncEmbeddingsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_embeddings( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> EmbeddingsPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - EmbeddingsPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.embeddings.status_embeddings( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/embeddings/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(EmbeddingsPageStatusResponse, parse_obj_as(type_=EmbeddingsPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/errors/__init__.py b/src/gooey/errors/__init__.py index 459b921..19ea9c4 100644 --- a/src/gooey/errors/__init__.py +++ b/src/gooey/errors/__init__.py @@ -1,15 +1,7 @@ # This file was auto-generated by Fern from our API Definition. 
-from .bad_request_error import BadRequestError -from .internal_server_error import InternalServerError from .payment_required_error import PaymentRequiredError from .too_many_requests_error import TooManyRequestsError from .unprocessable_entity_error import UnprocessableEntityError -__all__ = [ - "BadRequestError", - "InternalServerError", - "PaymentRequiredError", - "TooManyRequestsError", - "UnprocessableEntityError", -] +__all__ = ["PaymentRequiredError", "TooManyRequestsError", "UnprocessableEntityError"] diff --git a/src/gooey/errors/bad_request_error.py b/src/gooey/errors/bad_request_error.py deleted file mode 100644 index 02f5144..0000000 --- a/src/gooey/errors/bad_request_error.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.api_error import ApiError -from ..types.generic_error_response import GenericErrorResponse - - -class BadRequestError(ApiError): - def __init__(self, body: GenericErrorResponse): - super().__init__(status_code=400, body=body) diff --git a/src/gooey/errors/internal_server_error.py b/src/gooey/errors/internal_server_error.py deleted file mode 100644 index 3be52c0..0000000 --- a/src/gooey/errors/internal_server_error.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.api_error import ApiError -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 - - -class InternalServerError(ApiError): - def __init__(self, body: FailedReponseModelV2): - super().__init__(status_code=500, body=body) diff --git a/src/gooey/errors/payment_required_error.py b/src/gooey/errors/payment_required_error.py index b0cc099..81da343 100644 --- a/src/gooey/errors/payment_required_error.py +++ b/src/gooey/errors/payment_required_error.py @@ -1,10 +1,9 @@ # This file was auto-generated by Fern from our API Definition. 
-import typing - from ..core.api_error import ApiError +import typing class PaymentRequiredError(ApiError): - def __init__(self, body: typing.Any): + def __init__(self, body: typing.Optional[typing.Any]): super().__init__(status_code=402, body=body) diff --git a/src/gooey/evaluator/__init__.py b/src/gooey/evaluator/__init__.py index 753a030..7ceefb0 100644 --- a/src/gooey/evaluator/__init__.py +++ b/src/gooey/evaluator/__init__.py @@ -1,5 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -from .types import AsyncFormBulkEvalRequestResponseFormatType, AsyncFormBulkEvalRequestSelectedModel +from .types import BulkEvalPageRequestResponseFormatType, BulkEvalPageRequestSelectedModel -__all__ = ["AsyncFormBulkEvalRequestResponseFormatType", "AsyncFormBulkEvalRequestSelectedModel"] +__all__ = ["BulkEvalPageRequestResponseFormatType", "BulkEvalPageRequestSelectedModel"] diff --git a/src/gooey/evaluator/client.py b/src/gooey/evaluator/client.py index e2a469c..e3734ec 100644 --- a/src/gooey/evaluator/client.py +++ b/src/gooey/evaluator/client.py @@ -1,27 +1,24 @@ # This file was auto-generated by Fern from our API Definition. 
import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as +from ..core.client_wrapper import SyncClientWrapper +from ..types.recipe_function import RecipeFunction +from ..types.eval_prompt import EvalPrompt +from ..types.agg_function import AggFunction +from .types.bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel +from .types.bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType +from ..types.run_settings import RunSettings from ..core.request_options import RequestOptions -from ..errors.bad_request_error import BadRequestError -from ..errors.internal_server_error import InternalServerError +from ..types.bulk_eval_page_status_response import BulkEvalPageStatusResponse +from ..core.pydantic_utilities import parse_obj_as from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.agg_function import AggFunction -from ..types.bulk_eval_page_status_response import BulkEvalPageStatusResponse -from ..types.eval_prompt import EvalPrompt -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings -from .types.async_form_bulk_eval_request_response_format_type import AsyncFormBulkEvalRequestResponseFormatType -from .types.async_form_bulk_eval_request_selected_model import AsyncFormBulkEvalRequestSelectedModel +from ..errors.too_many_requests_error import TooManyRequestsError +from ..types.generic_error_response import GenericErrorResponse +from json.decoder import 
JSONDecodeError +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -31,29 +28,29 @@ class EvaluatorClient: def __init__(self, *, client_wrapper: SyncClientWrapper): self._client_wrapper = client_wrapper - def async_form_bulk_eval( + def async_bulk_eval( self, *, - documents: typing.List[str], + documents: typing.Sequence[str], example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - eval_prompts: typing.Optional[typing.List[EvalPrompt]] = None, - agg_functions: typing.Optional[typing.List[AggFunction]] = None, - selected_model: typing.Optional[AsyncFormBulkEvalRequestSelectedModel] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[AsyncFormBulkEvalRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + eval_prompts: typing.Optional[typing.Sequence[EvalPrompt]] = OMIT, + agg_functions: typing.Optional[typing.Sequence[AggFunction]] = OMIT, + selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = OMIT, + settings: 
typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> BulkEvalPageStatusResponse: """ Parameters ---------- - documents : typing.List[str] + documents : typing.Sequence[str] Upload or link to a CSV or google sheet that contains your sample input data. For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. @@ -62,23 +59,23 @@ def async_form_bulk_eval( example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - eval_prompts : typing.Optional[typing.List[EvalPrompt]] + eval_prompts : typing.Optional[typing.Sequence[EvalPrompt]] Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. _The `columns` dictionary can be used to reference the spreadsheet columns._ - agg_functions : typing.Optional[typing.List[AggFunction]] + agg_functions : typing.Optional[typing.Sequence[AggFunction]] Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). 
- selected_model : typing.Optional[AsyncFormBulkEvalRequestSelectedModel] + selected_model : typing.Optional[BulkEvalPageRequestSelectedModel] avoid_repetition : typing.Optional[bool] @@ -90,7 +87,7 @@ def async_form_bulk_eval( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[AsyncFormBulkEvalRequestResponseFormatType] + response_format_type : typing.Optional[BulkEvalPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -109,15 +106,17 @@ def async_form_bulk_eval( client = Gooey( api_key="YOUR_API_KEY", ) - client.evaluator.async_form_bulk_eval( + client.evaluator.async_bulk_eval( documents=["documents"], ) """ _response = self._client_wrapper.httpx_client.request( - "v3/bulk-eval/async/form", + "v3/bulk-eval/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "documents": documents, @@ -132,82 +131,47 @@ def async_form_bulk_eval( "response_format_type": response_format_type, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(BulkEvalPageStatusResponse, parse_obj_as(type_=BulkEvalPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + return typing.cast( + BulkEvalPageStatusResponse, + parse_obj_as( + type_=BulkEvalPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if 
_response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_bulk_eval( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> BulkEvalPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - BulkEvalPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.evaluator.status_bulk_eval( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/bulk-eval/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(BulkEvalPageStatusResponse, parse_obj_as(type_=BulkEvalPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -219,29 +183,29 @@ class AsyncEvaluatorClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): self._client_wrapper = client_wrapper - async def async_form_bulk_eval( + async def async_bulk_eval( self, *, - documents: typing.List[str], + documents: typing.Sequence[str], example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - eval_prompts: typing.Optional[typing.List[EvalPrompt]] = None, - agg_functions: typing.Optional[typing.List[AggFunction]] = None, - selected_model: typing.Optional[AsyncFormBulkEvalRequestSelectedModel] = None, - 
avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[AsyncFormBulkEvalRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + eval_prompts: typing.Optional[typing.Sequence[EvalPrompt]] = OMIT, + agg_functions: typing.Optional[typing.Sequence[AggFunction]] = OMIT, + selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> BulkEvalPageStatusResponse: """ Parameters ---------- - documents : typing.List[str] + documents : typing.Sequence[str] Upload or link to a CSV or google sheet that contains your sample input data. For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. 
@@ -250,23 +214,23 @@ async def async_form_bulk_eval( example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - eval_prompts : typing.Optional[typing.List[EvalPrompt]] + eval_prompts : typing.Optional[typing.Sequence[EvalPrompt]] Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. _The `columns` dictionary can be used to reference the spreadsheet columns._ - agg_functions : typing.Optional[typing.List[AggFunction]] + agg_functions : typing.Optional[typing.Sequence[AggFunction]] Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). 
- selected_model : typing.Optional[AsyncFormBulkEvalRequestSelectedModel] + selected_model : typing.Optional[BulkEvalPageRequestSelectedModel] avoid_repetition : typing.Optional[bool] @@ -278,7 +242,7 @@ async def async_form_bulk_eval( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[AsyncFormBulkEvalRequestResponseFormatType] + response_format_type : typing.Optional[BulkEvalPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -302,7 +266,7 @@ async def async_form_bulk_eval( async def main() -> None: - await client.evaluator.async_form_bulk_eval( + await client.evaluator.async_bulk_eval( documents=["documents"], ) @@ -310,10 +274,12 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/bulk-eval/async/form", + "v3/bulk-eval/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "documents": documents, @@ -328,90 +294,47 @@ async def main() -> None: "response_format_type": response_format_type, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(BulkEvalPageStatusResponse, parse_obj_as(type_=BulkEvalPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise 
TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + return typing.cast( + BulkEvalPageStatusResponse, + parse_obj_as( + type_=BulkEvalPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_bulk_eval( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> BulkEvalPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - BulkEvalPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.evaluator.status_bulk_eval( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/bulk-eval/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(BulkEvalPageStatusResponse, parse_obj_as(type_=BulkEvalPageStatusResponse, object_=_response.json())) # type: ignore if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 
422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: diff --git a/src/gooey/evaluator/types/__init__.py b/src/gooey/evaluator/types/__init__.py index 5d6d502..67f1384 100644 --- a/src/gooey/evaluator/types/__init__.py +++ b/src/gooey/evaluator/types/__init__.py @@ -1,6 +1,6 @@ # This file was auto-generated by Fern from our API Definition. -from .async_form_bulk_eval_request_response_format_type import AsyncFormBulkEvalRequestResponseFormatType -from .async_form_bulk_eval_request_selected_model import AsyncFormBulkEvalRequestSelectedModel +from .bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType +from .bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel -__all__ = ["AsyncFormBulkEvalRequestResponseFormatType", "AsyncFormBulkEvalRequestSelectedModel"] +__all__ = ["BulkEvalPageRequestResponseFormatType", "BulkEvalPageRequestSelectedModel"] diff --git a/src/gooey/evaluator/types/bulk_eval_page_request_response_format_type.py b/src/gooey/evaluator/types/bulk_eval_page_request_response_format_type.py new file mode 100644 index 0000000..f1c242f --- /dev/null +++ b/src/gooey/evaluator/types/bulk_eval_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +BulkEvalPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/doc_summary_request_selected_model.py b/src/gooey/evaluator/types/bulk_eval_page_request_selected_model.py similarity index 95% rename from src/gooey/types/doc_summary_request_selected_model.py rename to src/gooey/evaluator/types/bulk_eval_page_request_selected_model.py index db13c45..853cf33 100644 --- a/src/gooey/types/doc_summary_request_selected_model.py +++ b/src/gooey/evaluator/types/bulk_eval_page_request_selected_model.py @@ -2,7 +2,7 @@ import typing -DocSummaryRequestSelectedModel = typing.Union[ +BulkEvalPageRequestSelectedModel = typing.Union[ typing.Literal[ "gpt_4_o", "gpt_4_o_mini", diff --git a/src/gooey/functions/client.py b/src/gooey/functions/client.py index 6daa1ec..0479229 100644 --- a/src/gooey/functions/client.py +++ b/src/gooey/functions/client.py @@ -1,22 +1,19 @@ # This file was auto-generated by Fern from our API Definition. 
import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as +from ..core.client_wrapper import SyncClientWrapper +from ..types.run_settings import RunSettings from ..core.request_options import RequestOptions -from ..errors.bad_request_error import BadRequestError -from ..errors.internal_server_error import InternalServerError +from ..types.functions_page_status_response import FunctionsPageStatusResponse +from ..core.pydantic_utilities import parse_obj_as from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.functions_page_status_response import FunctionsPageStatusResponse -from ..types.generic_error_response import GenericErrorResponse from ..types.http_validation_error import HttpValidationError -from ..types.run_settings import RunSettings +from ..errors.too_many_requests_error import TooManyRequestsError +from ..types.generic_error_response import GenericErrorResponse +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
@@ -26,14 +23,14 @@ class FunctionsClient: def __init__(self, *, client_wrapper: SyncClientWrapper): self._client_wrapper = client_wrapper - def async_form_functions( + def async_functions( self, *, example_id: typing.Optional[str] = None, - code: typing.Optional[str] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None + code: typing.Optional[str] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> FunctionsPageStatusResponse: """ Parameters @@ -43,7 +40,7 @@ def async_form_functions( code : typing.Optional[str] The JS code to be executed. - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used in the code settings : typing.Optional[RunSettings] @@ -63,89 +60,60 @@ def async_form_functions( client = Gooey( api_key="YOUR_API_KEY", ) - client.functions.async_form_functions() + client.functions.async_functions() """ _response = self._client_wrapper.httpx_client.request( - "v3/functions/async/form", + "v3/functions/async", method="POST", - params={"example_id": example_id}, - data={"code": code, "variables": variables, "settings": settings}, - files={}, + params={ + "example_id": example_id, + }, + json={ + "code": code, + "variables": variables, + "settings": settings, + }, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(FunctionsPageStatusResponse, parse_obj_as(type_=FunctionsPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + return 
typing.cast( + FunctionsPageStatusResponse, + parse_obj_as( + type_=FunctionsPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_functions( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> FunctionsPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - FunctionsPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.functions.status_functions( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/functions/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(FunctionsPageStatusResponse, parse_obj_as(type_=FunctionsPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -157,14 +125,14 @@ class AsyncFunctionsClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): self._client_wrapper = client_wrapper - async def async_form_functions( + async def async_functions( self, *, example_id: typing.Optional[str] = None, - code: typing.Optional[str] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - settings: 
typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None + code: typing.Optional[str] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> FunctionsPageStatusResponse: """ Parameters @@ -174,7 +142,7 @@ async def async_form_functions( code : typing.Optional[str] The JS code to be executed. - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used in the code settings : typing.Optional[RunSettings] @@ -199,100 +167,63 @@ async def async_form_functions( async def main() -> None: - await client.functions.async_form_functions() + await client.functions.async_functions() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/functions/async/form", + "v3/functions/async", method="POST", - params={"example_id": example_id}, - data={"code": code, "variables": variables, "settings": settings}, - files={}, + params={ + "example_id": example_id, + }, + json={ + "code": code, + "variables": variables, + "settings": settings, + }, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(FunctionsPageStatusResponse, parse_obj_as(type_=FunctionsPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, 
object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + return typing.cast( + FunctionsPageStatusResponse, + parse_obj_as( + type_=FunctionsPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_functions( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> FunctionsPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - FunctionsPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.functions.status_functions( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/functions/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(FunctionsPageStatusResponse, parse_obj_as(type_=FunctionsPageStatusResponse, object_=_response.json())) # type: ignore if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: diff --git a/src/gooey/generate_people_also_ask_seo_content/__init__.py b/src/gooey/generate_people_also_ask_seo_content/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/generate_people_also_ask_seo_content/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was 
auto-generated by Fern from our API Definition. - diff --git a/src/gooey/generate_people_also_ask_seo_content/client.py b/src/gooey/generate_people_also_ask_seo_content/client.py deleted file mode 100644 index 2db9847..0000000 --- a/src/gooey/generate_people_also_ask_seo_content/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.related_qn_a_page_status_response import RelatedQnAPageStatusResponse - - -class GeneratePeopleAlsoAskSeoContentClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_related_qna_maker( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> RelatedQnAPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - RelatedQnAPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.generate_people_also_ask_seo_content.status_related_qna_maker( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/related-qna-maker/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(RelatedQnAPageStatusResponse, parse_obj_as(type_=RelatedQnAPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncGeneratePeopleAlsoAskSeoContentClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_related_qna_maker( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> RelatedQnAPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - RelatedQnAPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.generate_people_also_ask_seo_content.status_related_qna_maker( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/related-qna-maker/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(RelatedQnAPageStatusResponse, parse_obj_as(type_=RelatedQnAPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/generate_product_photo_backgrounds/__init__.py b/src/gooey/generate_product_photo_backgrounds/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/generate_product_photo_backgrounds/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/generate_product_photo_backgrounds/client.py b/src/gooey/generate_product_photo_backgrounds/client.py deleted file mode 100644 index 2869c08..0000000 --- a/src/gooey/generate_product_photo_backgrounds/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.object_inpainting_page_status_response import ObjectInpaintingPageStatusResponse - - -class GenerateProductPhotoBackgroundsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_object_inpainting( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> ObjectInpaintingPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - ObjectInpaintingPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.generate_product_photo_backgrounds.status_object_inpainting( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/ObjectInpainting/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(ObjectInpaintingPageStatusResponse, parse_obj_as(type_=ObjectInpaintingPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncGenerateProductPhotoBackgroundsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_object_inpainting( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> ObjectInpaintingPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - ObjectInpaintingPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.generate_product_photo_backgrounds.status_object_inpainting( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/ObjectInpainting/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(ObjectInpaintingPageStatusResponse, parse_obj_as(type_=ObjectInpaintingPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/large_language_models_gpt3/__init__.py b/src/gooey/large_language_models_gpt3/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/large_language_models_gpt3/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/large_language_models_gpt3/client.py b/src/gooey/large_language_models_gpt3/client.py deleted file mode 100644 index 9c167fd..0000000 --- a/src/gooey/large_language_models_gpt3/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.compare_llm_page_status_response import CompareLlmPageStatusResponse -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError - - -class LargeLanguageModelsGpt3Client: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_compare_llm( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> CompareLlmPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - CompareLlmPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.large_language_models_gpt3.status_compare_llm( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/CompareLLM/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(CompareLlmPageStatusResponse, parse_obj_as(type_=CompareLlmPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncLargeLanguageModelsGpt3Client: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_compare_llm( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> CompareLlmPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - CompareLlmPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.large_language_models_gpt3.status_compare_llm( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/CompareLLM/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(CompareLlmPageStatusResponse, parse_obj_as(type_=CompareLlmPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/letter_writer/__init__.py b/src/gooey/letter_writer/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/letter_writer/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/letter_writer/client.py b/src/gooey/letter_writer/client.py deleted file mode 100644 index 75b6be2..0000000 --- a/src/gooey/letter_writer/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.letter_writer_page_status_response import LetterWriterPageStatusResponse - - -class LetterWriterClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_letter_writer( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> LetterWriterPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - LetterWriterPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.letter_writer.status_letter_writer( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/LetterWriter/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(LetterWriterPageStatusResponse, parse_obj_as(type_=LetterWriterPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncLetterWriterClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_letter_writer( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> LetterWriterPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - LetterWriterPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.letter_writer.status_letter_writer( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/LetterWriter/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(LetterWriterPageStatusResponse, parse_obj_as(type_=LetterWriterPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/lip_syncing/__init__.py b/src/gooey/lip_syncing/__init__.py index 4575c3f..4d094b1 100644 --- a/src/gooey/lip_syncing/__init__.py +++ b/src/gooey/lip_syncing/__init__.py @@ -1,5 +1,5 @@ # This file was auto-generated by Fern from our API Definition. 
-from .types import AsyncFormLipsyncRequestSelectedModel +from .types import LipsyncPageRequestSelectedModel -__all__ = ["AsyncFormLipsyncRequestSelectedModel"] +__all__ = ["LipsyncPageRequestSelectedModel"] diff --git a/src/gooey/lip_syncing/client.py b/src/gooey/lip_syncing/client.py index 292dd9f..1ece28c 100644 --- a/src/gooey/lip_syncing/client.py +++ b/src/gooey/lip_syncing/client.py @@ -1,25 +1,22 @@ # This file was auto-generated by Fern from our API Definition. import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as +from ..core.client_wrapper import SyncClientWrapper +from ..types.recipe_function import RecipeFunction +from ..types.sad_talker_settings import SadTalkerSettings +from .types.lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel +from ..types.run_settings import RunSettings from ..core.request_options import RequestOptions -from ..errors.bad_request_error import BadRequestError -from ..errors.internal_server_error import InternalServerError +from ..types.lipsync_page_status_response import LipsyncPageStatusResponse +from ..core.pydantic_utilities import parse_obj_as from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse from ..types.http_validation_error import HttpValidationError -from ..types.lipsync_page_status_response import LipsyncPageStatusResponse -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import RunSettings -from ..types.sad_talker_settings import SadTalkerSettings -from .types.async_form_lipsync_request_selected_model import 
AsyncFormLipsyncRequestSelectedModel +from ..errors.too_many_requests_error import TooManyRequestsError +from ..types.generic_error_response import GenericErrorResponse +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -29,31 +26,31 @@ class LipSyncingClient: def __init__(self, *, client_wrapper: SyncClientWrapper): self._client_wrapper = client_wrapper - def async_form_lipsync( + def async_lipsync( self, *, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - input_face: typing.Optional[str] = None, - face_padding_top: typing.Optional[int] = None, - face_padding_bottom: typing.Optional[int] = None, - face_padding_left: typing.Optional[int] = None, - face_padding_right: typing.Optional[int] = None, - sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - selected_model: typing.Optional[AsyncFormLipsyncRequestSelectedModel] = None, - input_audio: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + input_face: typing.Optional[str] = OMIT, + face_padding_top: typing.Optional[int] = OMIT, + face_padding_bottom: typing.Optional[int] = OMIT, + face_padding_left: typing.Optional[int] = OMIT, + face_padding_right: typing.Optional[int] = OMIT, + sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, + selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = OMIT, + input_audio: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = 
None, ) -> LipsyncPageStatusResponse: """ Parameters ---------- example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments input_face : typing.Optional[str] @@ -68,7 +65,7 @@ def async_form_lipsync( sadtalker_settings : typing.Optional[SadTalkerSettings] - selected_model : typing.Optional[AsyncFormLipsyncRequestSelectedModel] + selected_model : typing.Optional[LipsyncPageRequestSelectedModel] input_audio : typing.Optional[str] @@ -89,13 +86,15 @@ def async_form_lipsync( client = Gooey( api_key="YOUR_API_KEY", ) - client.lip_syncing.async_form_lipsync() + client.lip_syncing.async_lipsync() """ _response = self._client_wrapper.httpx_client.request( - "v3/Lipsync/async/form", + "v3/Lipsync/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "input_face": input_face, @@ -108,82 +107,47 @@ def async_form_lipsync( "input_audio": input_audio, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(LipsyncPageStatusResponse, parse_obj_as(type_=LipsyncPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + LipsyncPageStatusResponse, + parse_obj_as( + type_=LipsyncPageStatusResponse, # type: 
ignore + object_=_response.json(), + ), ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_lipsync( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> LipsyncPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - LipsyncPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.lip_syncing.status_lipsync( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/Lipsync/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(LipsyncPageStatusResponse, parse_obj_as(type_=LipsyncPageStatusResponse, object_=_response.json())) # type: ignore if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -195,31 +159,31 @@ class AsyncLipSyncingClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): self._client_wrapper = client_wrapper - async def async_form_lipsync( + async def async_lipsync( self, *, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - input_face: 
typing.Optional[str] = None, - face_padding_top: typing.Optional[int] = None, - face_padding_bottom: typing.Optional[int] = None, - face_padding_left: typing.Optional[int] = None, - face_padding_right: typing.Optional[int] = None, - sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - selected_model: typing.Optional[AsyncFormLipsyncRequestSelectedModel] = None, - input_audio: typing.Optional[str] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + input_face: typing.Optional[str] = OMIT, + face_padding_top: typing.Optional[int] = OMIT, + face_padding_bottom: typing.Optional[int] = OMIT, + face_padding_left: typing.Optional[int] = OMIT, + face_padding_right: typing.Optional[int] = OMIT, + sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, + selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = OMIT, + input_audio: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> LipsyncPageStatusResponse: """ Parameters ---------- example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments input_face : typing.Optional[str] @@ -234,7 +198,7 @@ async def async_form_lipsync( sadtalker_settings : typing.Optional[SadTalkerSettings] - selected_model : typing.Optional[AsyncFormLipsyncRequestSelectedModel] + selected_model : typing.Optional[LipsyncPageRequestSelectedModel] input_audio : typing.Optional[str] @@ -260,16 +224,18 @@ async def 
async_form_lipsync( async def main() -> None: - await client.lip_syncing.async_form_lipsync() + await client.lip_syncing.async_lipsync() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/Lipsync/async/form", + "v3/Lipsync/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "input_face": input_face, @@ -282,90 +248,47 @@ async def main() -> None: "input_audio": input_audio, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(LipsyncPageStatusResponse, parse_obj_as(type_=LipsyncPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + return typing.cast( + LipsyncPageStatusResponse, + parse_obj_as( + type_=LipsyncPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - 
raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_lipsync( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> LipsyncPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - LipsyncPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.lip_syncing.status_lipsync( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/Lipsync/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(LipsyncPageStatusResponse, parse_obj_as(type_=LipsyncPageStatusResponse, object_=_response.json())) # type: ignore if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = 
_response.json() except JSONDecodeError: diff --git a/src/gooey/lip_syncing/types/__init__.py b/src/gooey/lip_syncing/types/__init__.py index 230913e..e7e3b85 100644 --- a/src/gooey/lip_syncing/types/__init__.py +++ b/src/gooey/lip_syncing/types/__init__.py @@ -1,5 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -from .async_form_lipsync_request_selected_model import AsyncFormLipsyncRequestSelectedModel +from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel -__all__ = ["AsyncFormLipsyncRequestSelectedModel"] +__all__ = ["LipsyncPageRequestSelectedModel"] diff --git a/src/gooey/lip_syncing/types/async_form_lipsync_request_selected_model.py b/src/gooey/lip_syncing/types/async_form_lipsync_request_selected_model.py deleted file mode 100644 index 4aeb464..0000000 --- a/src/gooey/lip_syncing/types/async_form_lipsync_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AsyncFormLipsyncRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/lip_syncing/types/lipsync_page_request_selected_model.py b/src/gooey/lip_syncing/types/lipsync_page_request_selected_model.py new file mode 100644 index 0000000..da68ef8 --- /dev/null +++ b/src/gooey/lip_syncing/types/lipsync_page_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +LipsyncPageRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/lipsync_video_with_any_text/__init__.py b/src/gooey/lipsync_video_with_any_text/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/lipsync_video_with_any_text/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/lipsync_video_with_any_text/client.py b/src/gooey/lipsync_video_with_any_text/client.py deleted file mode 100644 index ccfe50a..0000000 --- a/src/gooey/lipsync_video_with_any_text/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.lipsync_tts_page_status_response import LipsyncTtsPageStatusResponse - - -class LipsyncVideoWithAnyTextClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_lipsync_tts( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> LipsyncTtsPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - LipsyncTtsPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.lipsync_video_with_any_text.status_lipsync_tts( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/LipsyncTTS/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(LipsyncTtsPageStatusResponse, parse_obj_as(type_=LipsyncTtsPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncLipsyncVideoWithAnyTextClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_lipsync_tts( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> LipsyncTtsPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - LipsyncTtsPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.lipsync_video_with_any_text.status_lipsync_tts( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/LipsyncTTS/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(LipsyncTtsPageStatusResponse, parse_obj_as(type_=LipsyncTtsPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/misc/client.py b/src/gooey/misc/client.py index bdf585b..8d6d2c2 100644 --- a/src/gooey/misc/client.py +++ b/src/gooey/misc/client.py @@ -1,17 +1,17 @@ # This file was auto-generated by Fern from our API Definition. 
import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as +from ..core.client_wrapper import SyncClientWrapper from ..core.request_options import RequestOptions -from ..errors.unprocessable_entity_error import UnprocessableEntityError from ..types.balance_response import BalanceResponse +from ..core.pydantic_utilities import parse_obj_as +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..types.reply_button import ReplyButton from ..types.bot_broadcast_filters import BotBroadcastFilters +from ..errors.unprocessable_entity_error import UnprocessableEntityError from ..types.http_validation_error import HttpValidationError -from ..types.reply_button import ReplyButton +from ..core.client_wrapper import AsyncClientWrapper # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
@@ -43,11 +43,19 @@ def get_balance(self, *, request_options: typing.Optional[RequestOptions] = None client.misc.get_balance() """ _response = self._client_wrapper.httpx_client.request( - "v1/balance/", method="GET", request_options=request_options + "v1/balance/", + method="GET", + request_options=request_options, ) try: if 200 <= _response.status_code < 300: - return typing.cast(BalanceResponse, parse_obj_as(type_=BalanceResponse, object_=_response.json())) # type: ignore + return typing.cast( + BalanceResponse, + parse_obj_as( + type_=BalanceResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -59,13 +67,13 @@ def video_bots_broadcast( text: str, example_id: typing.Optional[str] = None, run_id: typing.Optional[str] = None, - audio: typing.Optional[str] = None, - video: typing.Optional[str] = None, - documents: typing.Optional[typing.List[str]] = None, - buttons: typing.Optional[typing.List[ReplyButton]] = None, - filters: typing.Optional[BotBroadcastFilters] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> typing.Any: + audio: typing.Optional[str] = OMIT, + video: typing.Optional[str] = OMIT, + documents: typing.Optional[typing.Sequence[str]] = OMIT, + buttons: typing.Optional[typing.Sequence[ReplyButton]] = OMIT, + filters: typing.Optional[BotBroadcastFilters] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: """ Parameters ---------- @@ -82,10 +90,10 @@ def video_bots_broadcast( video : typing.Optional[str] Video URL to send to all users - documents : typing.Optional[typing.List[str]] + documents : typing.Optional[typing.Sequence[str]] Video URL to send to all users - buttons : typing.Optional[typing.List[ReplyButton]] + buttons : typing.Optional[typing.Sequence[ReplyButton]] Buttons to send to all users filters : 
typing.Optional[BotBroadcastFilters] @@ -96,7 +104,7 @@ def video_bots_broadcast( Returns ------- - typing.Any + typing.Optional[typing.Any] Successful Response Examples @@ -113,8 +121,11 @@ def video_bots_broadcast( _response = self._client_wrapper.httpx_client.request( "v2/video-bots/broadcast/send/", method="POST", - params={"example_id": example_id, "run_id": run_id}, - data={ + params={ + "example_id": example_id, + "run_id": run_id, + }, + json={ "text": text, "audio": audio, "video": video, @@ -122,16 +133,27 @@ def video_bots_broadcast( "buttons": buttons, "filters": filters, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -173,11 +195,19 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v1/balance/", method="GET", request_options=request_options + "v1/balance/", + method="GET", + request_options=request_options, ) try: if 200 <= _response.status_code < 300: - return typing.cast(BalanceResponse, parse_obj_as(type_=BalanceResponse, object_=_response.json())) # type: ignore + return typing.cast( + BalanceResponse, + parse_obj_as( + type_=BalanceResponse, # type: ignore + object_=_response.json(), + ), + ) _response_json = _response.json() except JSONDecodeError: raise 
ApiError(status_code=_response.status_code, body=_response.text) @@ -189,13 +219,13 @@ async def video_bots_broadcast( text: str, example_id: typing.Optional[str] = None, run_id: typing.Optional[str] = None, - audio: typing.Optional[str] = None, - video: typing.Optional[str] = None, - documents: typing.Optional[typing.List[str]] = None, - buttons: typing.Optional[typing.List[ReplyButton]] = None, - filters: typing.Optional[BotBroadcastFilters] = None, - request_options: typing.Optional[RequestOptions] = None - ) -> typing.Any: + audio: typing.Optional[str] = OMIT, + video: typing.Optional[str] = OMIT, + documents: typing.Optional[typing.Sequence[str]] = OMIT, + buttons: typing.Optional[typing.Sequence[ReplyButton]] = OMIT, + filters: typing.Optional[BotBroadcastFilters] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Optional[typing.Any]: """ Parameters ---------- @@ -212,10 +242,10 @@ async def video_bots_broadcast( video : typing.Optional[str] Video URL to send to all users - documents : typing.Optional[typing.List[str]] + documents : typing.Optional[typing.Sequence[str]] Video URL to send to all users - buttons : typing.Optional[typing.List[ReplyButton]] + buttons : typing.Optional[typing.Sequence[ReplyButton]] Buttons to send to all users filters : typing.Optional[BotBroadcastFilters] @@ -226,7 +256,7 @@ async def video_bots_broadcast( Returns ------- - typing.Any + typing.Optional[typing.Any] Successful Response Examples @@ -251,8 +281,11 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v2/video-bots/broadcast/send/", method="POST", - params={"example_id": example_id, "run_id": run_id}, - data={ + params={ + "example_id": example_id, + "run_id": run_id, + }, + json={ "text": text, "audio": audio, "video": video, @@ -260,16 +293,27 @@ async def main() -> None: "buttons": buttons, "filters": filters, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= 
_response.status_code < 300: - return typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: diff --git a/src/gooey/people_also_ask_answers_from_a_doc/__init__.py b/src/gooey/people_also_ask_answers_from_a_doc/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/people_also_ask_answers_from_a_doc/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/people_also_ask_answers_from_a_doc/client.py b/src/gooey/people_also_ask_answers_from_a_doc/client.py deleted file mode 100644 index f313451..0000000 --- a/src/gooey/people_also_ask_answers_from_a_doc/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.related_qn_a_doc_page_status_response import RelatedQnADocPageStatusResponse - - -class PeopleAlsoAskAnswersFromADocClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_related_qna_maker_doc( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> RelatedQnADocPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - RelatedQnADocPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.people_also_ask_answers_from_a_doc.status_related_qna_maker_doc( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/related-qna-maker-doc/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(RelatedQnADocPageStatusResponse, parse_obj_as(type_=RelatedQnADocPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncPeopleAlsoAskAnswersFromADocClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_related_qna_maker_doc( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> RelatedQnADocPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - RelatedQnADocPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.people_also_ask_answers_from_a_doc.status_related_qna_maker_doc( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/related-qna-maker-doc/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(RelatedQnADocPageStatusResponse, parse_obj_as(type_=RelatedQnADocPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/__init__.py b/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/client.py b/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/client.py deleted file mode 100644 index e650727..0000000 --- a/src/gooey/profile_lookup_gpt3for_ai_personalized_emails/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse - - -class ProfileLookupGpt3ForAiPersonalizedEmailsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_social_lookup_email( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> SocialLookupEmailPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SocialLookupEmailPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.profile_lookup_gpt3for_ai_personalized_emails.status_social_lookup_email( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/SocialLookupEmail/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(SocialLookupEmailPageStatusResponse, parse_obj_as(type_=SocialLookupEmailPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncProfileLookupGpt3ForAiPersonalizedEmailsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_social_lookup_email( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> SocialLookupEmailPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SocialLookupEmailPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.profile_lookup_gpt3for_ai_personalized_emails.status_social_lookup_email( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/SocialLookupEmail/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(SocialLookupEmailPageStatusResponse, parse_obj_as(type_=SocialLookupEmailPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/render_image_search_results_with_ai/__init__.py b/src/gooey/render_image_search_results_with_ai/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/render_image_search_results_with_ai/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/render_image_search_results_with_ai/client.py b/src/gooey/render_image_search_results_with_ai/client.py deleted file mode 100644 index 8c75b75..0000000 --- a/src/gooey/render_image_search_results_with_ai/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.generic_error_response import GenericErrorResponse -from ..types.google_image_gen_page_status_response import GoogleImageGenPageStatusResponse -from ..types.http_validation_error import HttpValidationError - - -class RenderImageSearchResultsWithAiClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_google_image_gen( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> GoogleImageGenPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - GoogleImageGenPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.render_image_search_results_with_ai.status_google_image_gen( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/GoogleImageGen/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(GoogleImageGenPageStatusResponse, parse_obj_as(type_=GoogleImageGenPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncRenderImageSearchResultsWithAiClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_google_image_gen( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> GoogleImageGenPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - GoogleImageGenPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.render_image_search_results_with_ai.status_google_image_gen( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/GoogleImageGen/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(GoogleImageGenPageStatusResponse, parse_obj_as(type_=GoogleImageGenPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/search_your_docs_with_gpt/__init__.py b/src/gooey/search_your_docs_with_gpt/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/search_your_docs_with_gpt/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/search_your_docs_with_gpt/client.py b/src/gooey/search_your_docs_with_gpt/client.py deleted file mode 100644 index 29abb71..0000000 --- a/src/gooey/search_your_docs_with_gpt/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.doc_search_page_status_response import DocSearchPageStatusResponse -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError - - -class SearchYourDocsWithGptClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_doc_search( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> DocSearchPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - DocSearchPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.search_your_docs_with_gpt.status_doc_search( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/doc-search/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DocSearchPageStatusResponse, parse_obj_as(type_=DocSearchPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncSearchYourDocsWithGptClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_doc_search( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> DocSearchPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - DocSearchPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.search_your_docs_with_gpt.status_doc_search( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/doc-search/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DocSearchPageStatusResponse, parse_obj_as(type_=DocSearchPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/smart_gpt/__init__.py b/src/gooey/smart_gpt/__init__.py index daee63a..fce5f3e 100644 --- a/src/gooey/smart_gpt/__init__.py +++ b/src/gooey/smart_gpt/__init__.py @@ -1,5 +1,5 @@ # This file was auto-generated by Fern from our API Definition. 
-from .types import AsyncFormSmartGptRequestResponseFormatType, AsyncFormSmartGptRequestSelectedModel +from .types import SmartGptPageRequestResponseFormatType, SmartGptPageRequestSelectedModel -__all__ = ["AsyncFormSmartGptRequestResponseFormatType", "AsyncFormSmartGptRequestSelectedModel"] +__all__ = ["SmartGptPageRequestResponseFormatType", "SmartGptPageRequestSelectedModel"] diff --git a/src/gooey/smart_gpt/client.py b/src/gooey/smart_gpt/client.py index b7a2425..bad19d2 100644 --- a/src/gooey/smart_gpt/client.py +++ b/src/gooey/smart_gpt/client.py @@ -1,25 +1,22 @@ # This file was auto-generated by Fern from our API Definition. import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as +from ..core.client_wrapper import SyncClientWrapper +from ..types.recipe_function import RecipeFunction +from .types.smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel +from .types.smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType +from ..types.run_settings import RunSettings from ..core.request_options import RequestOptions -from ..errors.bad_request_error import BadRequestError -from ..errors.internal_server_error import InternalServerError +from ..types.smart_gpt_page_status_response import SmartGptPageStatusResponse +from ..core.pydantic_utilities import parse_obj_as from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.failed_reponse_model_v2 import FailedReponseModelV2 -from ..types.generic_error_response import GenericErrorResponse from ..types.http_validation_error import HttpValidationError -from ..types.recipe_function import RecipeFunction -from ..types.run_settings import 
RunSettings -from ..types.smart_gpt_page_status_response import SmartGptPageStatusResponse -from .types.async_form_smart_gpt_request_response_format_type import AsyncFormSmartGptRequestResponseFormatType -from .types.async_form_smart_gpt_request_selected_model import AsyncFormSmartGptRequestSelectedModel +from ..errors.too_many_requests_error import TooManyRequestsError +from ..types.generic_error_response import GenericErrorResponse +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -29,25 +26,25 @@ class SmartGptClient: def __init__(self, *, client_wrapper: SyncClientWrapper): self._client_wrapper = client_wrapper - def async_form_smart_gpt( + def async_smart_gpt( self, *, input_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - cot_prompt: typing.Optional[str] = None, - reflexion_prompt: typing.Optional[str] = None, - dera_prompt: typing.Optional[str] = None, - selected_model: typing.Optional[AsyncFormSmartGptRequestSelectedModel] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[AsyncFormSmartGptRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + cot_prompt: typing.Optional[str] = OMIT, + reflexion_prompt: typing.Optional[str] = OMIT, + dera_prompt: typing.Optional[str] = OMIT, 
+ selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> SmartGptPageStatusResponse: """ Parameters @@ -56,9 +53,9 @@ def async_form_smart_gpt( example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments cot_prompt : typing.Optional[str] @@ -67,7 +64,7 @@ def async_form_smart_gpt( dera_prompt : typing.Optional[str] - selected_model : typing.Optional[AsyncFormSmartGptRequestSelectedModel] + selected_model : typing.Optional[SmartGptPageRequestSelectedModel] avoid_repetition : typing.Optional[bool] @@ -79,7 +76,7 @@ def async_form_smart_gpt( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[AsyncFormSmartGptRequestResponseFormatType] + response_format_type : typing.Optional[SmartGptPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -98,15 +95,17 @@ def async_form_smart_gpt( client = Gooey( api_key="YOUR_API_KEY", ) - client.smart_gpt.async_form_smart_gpt( + client.smart_gpt.async_smart_gpt( input_prompt="input_prompt", ) """ _response = self._client_wrapper.httpx_client.request( - "v3/SmartGPT/async/form", + "v3/SmartGPT/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": 
functions, "variables": variables, "input_prompt": input_prompt, @@ -122,82 +121,47 @@ def async_form_smart_gpt( "response_format_type": response_format_type, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(SmartGptPageStatusResponse, parse_obj_as(type_=SmartGptPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + return typing.cast( + SmartGptPageStatusResponse, + parse_obj_as( + type_=SmartGptPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def status_smart_gpt( - 
self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> SmartGptPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - SmartGptPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.smart_gpt.status_smart_gpt( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/SmartGPT/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(SmartGptPageStatusResponse, parse_obj_as(type_=SmartGptPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: @@ -209,25 +173,25 @@ class AsyncSmartGptClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): self._client_wrapper = client_wrapper - async def async_form_smart_gpt( + async def async_smart_gpt( self, *, input_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RecipeFunction]] = None, - variables: typing.Optional[typing.Dict[str, typing.Any]] = None, - cot_prompt: 
typing.Optional[str] = None, - reflexion_prompt: typing.Optional[str] = None, - dera_prompt: typing.Optional[str] = None, - selected_model: typing.Optional[AsyncFormSmartGptRequestSelectedModel] = None, - avoid_repetition: typing.Optional[bool] = None, - num_outputs: typing.Optional[int] = None, - quality: typing.Optional[float] = None, - max_tokens: typing.Optional[int] = None, - sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[AsyncFormSmartGptRequestResponseFormatType] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None + functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + cot_prompt: typing.Optional[str] = OMIT, + reflexion_prompt: typing.Optional[str] = OMIT, + dera_prompt: typing.Optional[str] = OMIT, + selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> SmartGptPageStatusResponse: """ Parameters @@ -236,9 +200,9 @@ async def async_form_smart_gpt( example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RecipeFunction]] + functions : typing.Optional[typing.Sequence[RecipeFunction]] - variables : typing.Optional[typing.Dict[str, typing.Any]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments cot_prompt : typing.Optional[str] @@ -247,7 +211,7 @@ async def async_form_smart_gpt( dera_prompt 
: typing.Optional[str] - selected_model : typing.Optional[AsyncFormSmartGptRequestSelectedModel] + selected_model : typing.Optional[SmartGptPageRequestSelectedModel] avoid_repetition : typing.Optional[bool] @@ -259,7 +223,7 @@ async def async_form_smart_gpt( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[AsyncFormSmartGptRequestResponseFormatType] + response_format_type : typing.Optional[SmartGptPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -283,7 +247,7 @@ async def async_form_smart_gpt( async def main() -> None: - await client.smart_gpt.async_form_smart_gpt( + await client.smart_gpt.async_smart_gpt( input_prompt="input_prompt", ) @@ -291,10 +255,12 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/SmartGPT/async/form", + "v3/SmartGPT/async", method="POST", - params={"example_id": example_id}, - data={ + params={ + "example_id": example_id, + }, + json={ "functions": functions, "variables": variables, "input_prompt": input_prompt, @@ -310,90 +276,47 @@ async def main() -> None: "response_format_type": response_format_type, "settings": settings, }, - files={}, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast(SmartGptPageStatusResponse, parse_obj_as(type_=SmartGptPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 400: - raise BadRequestError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if 
_response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + return typing.cast( + SmartGptPageStatusResponse, + parse_obj_as( + type_=SmartGptPageStatusResponse, # type: ignore + object_=_response.json(), + ), ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast(FailedReponseModelV2, parse_obj_as(type_=FailedReponseModelV2, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def status_smart_gpt( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> SmartGptPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SmartGptPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.smart_gpt.status_smart_gpt( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/SmartGPT/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(SmartGptPageStatusResponse, parse_obj_as(type_=SmartGptPageStatusResponse, object_=_response.json())) # type: ignore if _response.status_code == 402: raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore + typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 422: raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) if _response.status_code == 429: raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) ) _response_json = _response.json() except JSONDecodeError: diff --git a/src/gooey/smart_gpt/types/__init__.py b/src/gooey/smart_gpt/types/__init__.py index 1297bd3..3032d41 100644 --- a/src/gooey/smart_gpt/types/__init__.py +++ b/src/gooey/smart_gpt/types/__init__.py @@ -1,6 +1,6 @@ # This file was auto-generated by Fern from our API Definition. 
-from .async_form_smart_gpt_request_response_format_type import AsyncFormSmartGptRequestResponseFormatType -from .async_form_smart_gpt_request_selected_model import AsyncFormSmartGptRequestSelectedModel +from .smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType +from .smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel -__all__ = ["AsyncFormSmartGptRequestResponseFormatType", "AsyncFormSmartGptRequestSelectedModel"] +__all__ = ["SmartGptPageRequestResponseFormatType", "SmartGptPageRequestSelectedModel"] diff --git a/src/gooey/smart_gpt/types/async_form_smart_gpt_request_response_format_type.py b/src/gooey/smart_gpt/types/async_form_smart_gpt_request_response_format_type.py deleted file mode 100644 index 4f73056..0000000 --- a/src/gooey/smart_gpt/types/async_form_smart_gpt_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AsyncFormSmartGptRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/smart_gpt/types/smart_gpt_page_request_response_format_type.py b/src/gooey/smart_gpt/types/smart_gpt_page_request_response_format_type.py new file mode 100644 index 0000000..1eaf901 --- /dev/null +++ b/src/gooey/smart_gpt/types/smart_gpt_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +SmartGptPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/seo_content_request_selected_model.py b/src/gooey/smart_gpt/types/smart_gpt_page_request_selected_model.py similarity index 95% rename from src/gooey/types/seo_content_request_selected_model.py rename to src/gooey/smart_gpt/types/smart_gpt_page_request_selected_model.py index f2d129a..9142b8f 100644 --- a/src/gooey/types/seo_content_request_selected_model.py +++ b/src/gooey/smart_gpt/types/smart_gpt_page_request_selected_model.py @@ -2,7 +2,7 @@ import typing -SeoContentRequestSelectedModel = typing.Union[ +SmartGptPageRequestSelectedModel = typing.Union[ typing.Literal[ "gpt_4_o", "gpt_4_o_mini", diff --git a/src/gooey/speech_recognition_translation/__init__.py b/src/gooey/speech_recognition_translation/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/speech_recognition_translation/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/speech_recognition_translation/client.py b/src/gooey/speech_recognition_translation/client.py deleted file mode 100644 index 3f83966..0000000 --- a/src/gooey/speech_recognition_translation/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.asr_page_status_response import AsrPageStatusResponse -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError - - -class SpeechRecognitionTranslationClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_asr( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> AsrPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsrPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.speech_recognition_translation.status_asr( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/asr/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsrPageStatusResponse, parse_obj_as(type_=AsrPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncSpeechRecognitionTranslationClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_asr( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> AsrPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsrPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.speech_recognition_translation.status_asr( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/asr/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(AsrPageStatusResponse, parse_obj_as(type_=AsrPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/summarize_your_docs_with_gpt/__init__.py b/src/gooey/summarize_your_docs_with_gpt/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/summarize_your_docs_with_gpt/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/summarize_your_docs_with_gpt/client.py b/src/gooey/summarize_your_docs_with_gpt/client.py deleted file mode 100644 index 65f0059..0000000 --- a/src/gooey/summarize_your_docs_with_gpt/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.doc_summary_page_status_response import DocSummaryPageStatusResponse -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError - - -class SummarizeYourDocsWithGptClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_doc_summary( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> DocSummaryPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - DocSummaryPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.summarize_your_docs_with_gpt.status_doc_summary( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/doc-summary/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DocSummaryPageStatusResponse, parse_obj_as(type_=DocSummaryPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncSummarizeYourDocsWithGptClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_doc_summary( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> DocSummaryPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - DocSummaryPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.summarize_your_docs_with_gpt.status_doc_summary( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/doc-summary/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DocSummaryPageStatusResponse, parse_obj_as(type_=DocSummaryPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/synthetic_data_maker_for_videos_pd_fs/__init__.py b/src/gooey/synthetic_data_maker_for_videos_pd_fs/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/synthetic_data_maker_for_videos_pd_fs/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/synthetic_data_maker_for_videos_pd_fs/client.py b/src/gooey/synthetic_data_maker_for_videos_pd_fs/client.py deleted file mode 100644 index 3abe7cc..0000000 --- a/src/gooey/synthetic_data_maker_for_videos_pd_fs/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.doc_extract_page_status_response import DocExtractPageStatusResponse -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError - - -class SyntheticDataMakerForVideosPdFsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_doc_extract( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> DocExtractPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - DocExtractPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.synthetic_data_maker_for_videos_pd_fs.status_doc_extract( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/doc-extract/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DocExtractPageStatusResponse, parse_obj_as(type_=DocExtractPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncSyntheticDataMakerForVideosPdFsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_doc_extract( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> DocExtractPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - DocExtractPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.synthetic_data_maker_for_videos_pd_fs.status_doc_extract( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/doc-extract/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(DocExtractPageStatusResponse, parse_obj_as(type_=DocExtractPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/text_guided_audio_generator/__init__.py b/src/gooey/text_guided_audio_generator/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/text_guided_audio_generator/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/text_guided_audio_generator/client.py b/src/gooey/text_guided_audio_generator/client.py deleted file mode 100644 index d9bd16a..0000000 --- a/src/gooey/text_guided_audio_generator/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.generic_error_response import GenericErrorResponse -from ..types.http_validation_error import HttpValidationError -from ..types.text2audio_page_status_response import Text2AudioPageStatusResponse - - -class TextGuidedAudioGeneratorClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_text2audio( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> Text2AudioPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - Text2AudioPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.text_guided_audio_generator.status_text2audio( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/text2audio/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(Text2AudioPageStatusResponse, parse_obj_as(type_=Text2AudioPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncTextGuidedAudioGeneratorClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_text2audio( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> Text2AudioPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - Text2AudioPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.text_guided_audio_generator.status_text2audio( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/text2audio/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(Text2AudioPageStatusResponse, parse_obj_as(type_=Text2AudioPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/types/__init__.py b/src/gooey/types/__init__.py index e422650..da61628 100644 --- a/src/gooey/types/__init__.py +++ b/src/gooey/types/__init__.py @@ -4,12 +4,14 @@ from .agg_function_function import AggFunctionFunction from .agg_function_result import AggFunctionResult from .agg_function_result_function import AggFunctionResultFunction -from .animate_request_selected_model import AnimateRequestSelectedModel from .animation_prompt import AnimationPrompt from .asr_chunk import AsrChunk from .asr_output_json 
import AsrOutputJson from .asr_page_output import AsrPageOutput from .asr_page_output_output_text_item import AsrPageOutputOutputTextItem +from .asr_page_request_output_format import AsrPageRequestOutputFormat +from .asr_page_request_selected_model import AsrPageRequestSelectedModel +from .asr_page_request_translation_model import AsrPageRequestTranslationModel from .asr_page_status_response import AsrPageStatusResponse from .async_api_response_model_v3 import AsyncApiResponseModelV3 from .balance_response import BalanceResponse @@ -27,10 +29,15 @@ from .chyron_plant_page_request import ChyronPlantPageRequest from .chyron_plant_page_status_response import ChyronPlantPageStatusResponse from .compare_llm_page_output import CompareLlmPageOutput +from .compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType +from .compare_llm_page_request_selected_models_item import CompareLlmPageRequestSelectedModelsItem from .compare_llm_page_status_response import CompareLlmPageStatusResponse from .compare_text2img_page_output import CompareText2ImgPageOutput +from .compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler +from .compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem from .compare_text2img_page_status_response import CompareText2ImgPageStatusResponse from .compare_upscaler_page_output import CompareUpscalerPageOutput +from .compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem from .compare_upscaler_page_status_response import CompareUpscalerPageStatusResponse from .console_logs import ConsoleLogs from .console_logs_level import ConsoleLogsLevel @@ -45,43 +52,58 @@ from .conversation_start import ConversationStart from .create_stream_response import CreateStreamResponse from .deforum_sd_page_output import DeforumSdPageOutput +from .deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel 
from .deforum_sd_page_status_response import DeforumSdPageStatusResponse from .doc_extract_page_output import DocExtractPageOutput +from .doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType +from .doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel +from .doc_extract_page_request_selected_model import DocExtractPageRequestSelectedModel from .doc_extract_page_status_response import DocExtractPageStatusResponse from .doc_search_page_output import DocSearchPageOutput +from .doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle +from .doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel +from .doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery +from .doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType +from .doc_search_page_request_selected_model import DocSearchPageRequestSelectedModel from .doc_search_page_status_response import DocSearchPageStatusResponse from .doc_summary_page_output import DocSummaryPageOutput +from .doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType +from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel +from .doc_summary_page_request_selected_model import DocSummaryPageRequestSelectedModel from .doc_summary_page_status_response import DocSummaryPageStatusResponse -from .doc_summary_request_response_format_type import DocSummaryRequestResponseFormatType -from .doc_summary_request_selected_asr_model import DocSummaryRequestSelectedAsrModel -from .doc_summary_request_selected_model import DocSummaryRequestSelectedModel from .email_face_inpainting_page_output import EmailFaceInpaintingPageOutput +from .email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel from .email_face_inpainting_page_status_response import 
EmailFaceInpaintingPageStatusResponse -from .embed_request_selected_model import EmbedRequestSelectedModel from .embeddings_page_output import EmbeddingsPageOutput +from .embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel from .embeddings_page_status_response import EmbeddingsPageStatusResponse from .eval_prompt import EvalPrompt from .face_inpainting_page_output import FaceInpaintingPageOutput +from .face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel from .face_inpainting_page_status_response import FaceInpaintingPageStatusResponse -from .failed_reponse_model_v2 import FailedReponseModelV2 -from .failed_response_detail import FailedResponseDetail from .final_response import FinalResponse from .functions_page_output import FunctionsPageOutput from .functions_page_status_response import FunctionsPageStatusResponse from .generic_error_response import GenericErrorResponse from .generic_error_response_detail import GenericErrorResponseDetail from .google_gpt_page_output import GoogleGptPageOutput +from .google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel +from .google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType +from .google_gpt_page_request_selected_model import GoogleGptPageRequestSelectedModel from .google_gpt_page_status_response import GoogleGptPageStatusResponse from .google_image_gen_page_output import GoogleImageGenPageOutput +from .google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel from .google_image_gen_page_status_response import GoogleImageGenPageStatusResponse from .http_validation_error import HttpValidationError -from .image_from_email_request_selected_model import ImageFromEmailRequestSelectedModel -from .image_from_web_search_request_selected_model import ImageFromWebSearchRequestSelectedModel from .image_segmentation_page_output import ImageSegmentationPageOutput +from 
.image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel from .image_segmentation_page_status_response import ImageSegmentationPageStatusResponse from .image_url import ImageUrl from .image_url_detail import ImageUrlDetail from .img2img_page_output import Img2ImgPageOutput +from .img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel +from .img2img_page_request_selected_controlnet_model_item import Img2ImgPageRequestSelectedControlnetModelItem +from .img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel from .img2img_page_status_response import Img2ImgPageStatusResponse from .letter_writer_page_output import LetterWriterPageOutput from .letter_writer_page_request import LetterWriterPageRequest @@ -89,47 +111,166 @@ from .lipsync_page_output import LipsyncPageOutput from .lipsync_page_status_response import LipsyncPageStatusResponse from .lipsync_tts_page_output import LipsyncTtsPageOutput +from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel +from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName +from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel +from .lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider from .lipsync_tts_page_status_response import LipsyncTtsPageStatusResponse -from .lipsync_tts_request_openai_tts_model import LipsyncTtsRequestOpenaiTtsModel -from .lipsync_tts_request_openai_voice_name import LipsyncTtsRequestOpenaiVoiceName -from .lipsync_tts_request_selected_model import LipsyncTtsRequestSelectedModel -from .lipsync_tts_request_tts_provider import LipsyncTtsRequestTtsProvider -from .llm_request_response_format_type import LlmRequestResponseFormatType -from .llm_request_selected_models_item import LlmRequestSelectedModelsItem from .llm_tools import LlmTools from .message_part import MessagePart from 
.object_inpainting_page_output import ObjectInpaintingPageOutput +from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel from .object_inpainting_page_status_response import ObjectInpaintingPageStatusResponse -from .personalize_email_request_response_format_type import PersonalizeEmailRequestResponseFormatType -from .personalize_email_request_selected_model import PersonalizeEmailRequestSelectedModel -from .portrait_request_selected_model import PortraitRequestSelectedModel -from .product_image_request_selected_model import ProductImageRequestSelectedModel +from .post_v3art_qr_code_async_form_request_image_prompt_controlnet_models_item import ( + PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem, +) +from .post_v3art_qr_code_async_form_request_scheduler import PostV3ArtQrCodeAsyncFormRequestScheduler +from .post_v3art_qr_code_async_form_request_selected_controlnet_model_item import ( + PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem, +) +from .post_v3art_qr_code_async_form_request_selected_model import PostV3ArtQrCodeAsyncFormRequestSelectedModel +from .post_v3asr_async_form_request_output_format import PostV3AsrAsyncFormRequestOutputFormat +from .post_v3asr_async_form_request_selected_model import PostV3AsrAsyncFormRequestSelectedModel +from .post_v3asr_async_form_request_translation_model import PostV3AsrAsyncFormRequestTranslationModel +from .post_v3bulk_eval_async_form_request_response_format_type import PostV3BulkEvalAsyncFormRequestResponseFormatType +from .post_v3bulk_eval_async_form_request_selected_model import PostV3BulkEvalAsyncFormRequestSelectedModel +from .post_v3compare_ai_upscalers_async_form_request_selected_models_item import ( + PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem, +) +from .post_v3compare_llm_async_form_request_response_format_type import ( + PostV3CompareLlmAsyncFormRequestResponseFormatType, +) +from 
.post_v3compare_llm_async_form_request_selected_models_item import ( + PostV3CompareLlmAsyncFormRequestSelectedModelsItem, +) +from .post_v3compare_text2img_async_form_request_scheduler import PostV3CompareText2ImgAsyncFormRequestScheduler +from .post_v3compare_text2img_async_form_request_selected_models_item import ( + PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem, +) +from .post_v3deforum_sd_async_form_request_selected_model import PostV3DeforumSdAsyncFormRequestSelectedModel +from .post_v3doc_extract_async_form_request_response_format_type import ( + PostV3DocExtractAsyncFormRequestResponseFormatType, +) +from .post_v3doc_extract_async_form_request_selected_asr_model import PostV3DocExtractAsyncFormRequestSelectedAsrModel +from .post_v3doc_extract_async_form_request_selected_model import PostV3DocExtractAsyncFormRequestSelectedModel +from .post_v3doc_search_async_form_request_citation_style import PostV3DocSearchAsyncFormRequestCitationStyle +from .post_v3doc_search_async_form_request_embedding_model import PostV3DocSearchAsyncFormRequestEmbeddingModel +from .post_v3doc_search_async_form_request_keyword_query import PostV3DocSearchAsyncFormRequestKeywordQuery +from .post_v3doc_search_async_form_request_response_format_type import PostV3DocSearchAsyncFormRequestResponseFormatType +from .post_v3doc_search_async_form_request_selected_model import PostV3DocSearchAsyncFormRequestSelectedModel +from .post_v3doc_summary_async_form_request_response_format_type import ( + PostV3DocSummaryAsyncFormRequestResponseFormatType, +) +from .post_v3doc_summary_async_form_request_selected_asr_model import PostV3DocSummaryAsyncFormRequestSelectedAsrModel +from .post_v3doc_summary_async_form_request_selected_model import PostV3DocSummaryAsyncFormRequestSelectedModel +from .post_v3email_face_inpainting_async_form_request_selected_model import ( + PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel, +) +from .post_v3embeddings_async_form_request_selected_model import 
PostV3EmbeddingsAsyncFormRequestSelectedModel +from .post_v3face_inpainting_async_form_request_selected_model import PostV3FaceInpaintingAsyncFormRequestSelectedModel +from .post_v3google_gpt_async_form_request_embedding_model import PostV3GoogleGptAsyncFormRequestEmbeddingModel +from .post_v3google_gpt_async_form_request_response_format_type import PostV3GoogleGptAsyncFormRequestResponseFormatType +from .post_v3google_gpt_async_form_request_selected_model import PostV3GoogleGptAsyncFormRequestSelectedModel +from .post_v3google_image_gen_async_form_request_selected_model import PostV3GoogleImageGenAsyncFormRequestSelectedModel +from .post_v3image_segmentation_async_form_request_selected_model import ( + PostV3ImageSegmentationAsyncFormRequestSelectedModel, +) +from .post_v3img2img_async_form_request_selected_controlnet_model import ( + PostV3Img2ImgAsyncFormRequestSelectedControlnetModel, +) +from .post_v3img2img_async_form_request_selected_controlnet_model_item import ( + PostV3Img2ImgAsyncFormRequestSelectedControlnetModelItem, +) +from .post_v3img2img_async_form_request_selected_model import PostV3Img2ImgAsyncFormRequestSelectedModel +from .post_v3lipsync_async_form_request_selected_model import PostV3LipsyncAsyncFormRequestSelectedModel +from .post_v3lipsync_tts_async_form_request_openai_tts_model import PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel +from .post_v3lipsync_tts_async_form_request_openai_voice_name import PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName +from .post_v3lipsync_tts_async_form_request_selected_model import PostV3LipsyncTtsAsyncFormRequestSelectedModel +from .post_v3lipsync_tts_async_form_request_tts_provider import PostV3LipsyncTtsAsyncFormRequestTtsProvider +from .post_v3object_inpainting_async_form_request_selected_model import ( + PostV3ObjectInpaintingAsyncFormRequestSelectedModel, +) +from .post_v3related_qna_maker_async_form_request_embedding_model import ( + PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel, +) +from 
.post_v3related_qna_maker_async_form_request_response_format_type import ( + PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType, +) +from .post_v3related_qna_maker_async_form_request_selected_model import ( + PostV3RelatedQnaMakerAsyncFormRequestSelectedModel, +) +from .post_v3related_qna_maker_doc_async_form_request_citation_style import ( + PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle, +) +from .post_v3related_qna_maker_doc_async_form_request_embedding_model import ( + PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel, +) +from .post_v3related_qna_maker_doc_async_form_request_keyword_query import ( + PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery, +) +from .post_v3related_qna_maker_doc_async_form_request_response_format_type import ( + PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType, +) +from .post_v3related_qna_maker_doc_async_form_request_selected_model import ( + PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel, +) +from .post_v3seo_summary_async_form_request_response_format_type import ( + PostV3SeoSummaryAsyncFormRequestResponseFormatType, +) +from .post_v3seo_summary_async_form_request_selected_model import PostV3SeoSummaryAsyncFormRequestSelectedModel +from .post_v3smart_gpt_async_form_request_response_format_type import PostV3SmartGptAsyncFormRequestResponseFormatType +from .post_v3smart_gpt_async_form_request_selected_model import PostV3SmartGptAsyncFormRequestSelectedModel +from .post_v3social_lookup_email_async_form_request_response_format_type import ( + PostV3SocialLookupEmailAsyncFormRequestResponseFormatType, +) +from .post_v3social_lookup_email_async_form_request_selected_model import ( + PostV3SocialLookupEmailAsyncFormRequestSelectedModel, +) +from .post_v3text_to_speech_async_form_request_openai_tts_model import PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel +from .post_v3text_to_speech_async_form_request_openai_voice_name import ( + PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName, +) +from 
.post_v3text_to_speech_async_form_request_tts_provider import PostV3TextToSpeechAsyncFormRequestTtsProvider +from .post_v3translate_async_form_request_selected_model import PostV3TranslateAsyncFormRequestSelectedModel +from .post_v3video_bots_async_form_request_asr_model import PostV3VideoBotsAsyncFormRequestAsrModel +from .post_v3video_bots_async_form_request_citation_style import PostV3VideoBotsAsyncFormRequestCitationStyle +from .post_v3video_bots_async_form_request_embedding_model import PostV3VideoBotsAsyncFormRequestEmbeddingModel +from .post_v3video_bots_async_form_request_lipsync_model import PostV3VideoBotsAsyncFormRequestLipsyncModel +from .post_v3video_bots_async_form_request_openai_tts_model import PostV3VideoBotsAsyncFormRequestOpenaiTtsModel +from .post_v3video_bots_async_form_request_openai_voice_name import PostV3VideoBotsAsyncFormRequestOpenaiVoiceName +from .post_v3video_bots_async_form_request_response_format_type import PostV3VideoBotsAsyncFormRequestResponseFormatType +from .post_v3video_bots_async_form_request_selected_model import PostV3VideoBotsAsyncFormRequestSelectedModel +from .post_v3video_bots_async_form_request_translation_model import PostV3VideoBotsAsyncFormRequestTranslationModel +from .post_v3video_bots_async_form_request_tts_provider import PostV3VideoBotsAsyncFormRequestTtsProvider from .prompt_tree_node import PromptTreeNode from .prompt_tree_node_prompt import PromptTreeNodePrompt from .qr_code_generator_page_output import QrCodeGeneratorPageOutput +from .qr_code_generator_page_request_image_prompt_controlnet_models_item import ( + QrCodeGeneratorPageRequestImagePromptControlnetModelsItem, +) +from .qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler +from .qr_code_generator_page_request_selected_controlnet_model_item import ( + QrCodeGeneratorPageRequestSelectedControlnetModelItem, +) +from .qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel from 
.qr_code_generator_page_status_response import QrCodeGeneratorPageStatusResponse -from .qr_code_request_image_prompt_controlnet_models_item import QrCodeRequestImagePromptControlnetModelsItem -from .qr_code_request_scheduler import QrCodeRequestScheduler -from .qr_code_request_selected_controlnet_model_item import QrCodeRequestSelectedControlnetModelItem -from .qr_code_request_selected_model import QrCodeRequestSelectedModel -from .rag_request_citation_style import RagRequestCitationStyle -from .rag_request_embedding_model import RagRequestEmbeddingModel -from .rag_request_keyword_query import RagRequestKeywordQuery -from .rag_request_response_format_type import RagRequestResponseFormatType -from .rag_request_selected_model import RagRequestSelectedModel from .recipe_function import RecipeFunction from .recipe_function_trigger import RecipeFunctionTrigger from .recipe_run_state import RecipeRunState from .related_doc_search_response import RelatedDocSearchResponse from .related_google_gpt_response import RelatedGoogleGptResponse from .related_qn_a_doc_page_output import RelatedQnADocPageOutput +from .related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle +from .related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel +from .related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery +from .related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType +from .related_qn_a_doc_page_request_selected_model import RelatedQnADocPageRequestSelectedModel from .related_qn_a_doc_page_status_response import RelatedQnADocPageStatusResponse from .related_qn_a_page_output import RelatedQnAPageOutput +from .related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel +from .related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType +from .related_qn_a_page_request_selected_model import 
RelatedQnAPageRequestSelectedModel from .related_qn_a_page_status_response import RelatedQnAPageStatusResponse -from .remix_image_request_selected_controlnet_model import RemixImageRequestSelectedControlnetModel -from .remix_image_request_selected_controlnet_model_item import RemixImageRequestSelectedControlnetModelItem -from .remix_image_request_selected_model import RemixImageRequestSelectedModel -from .remove_background_request_selected_model import RemoveBackgroundRequestSelectedModel from .reply_button import ReplyButton from .response_model import ResponseModel from .response_model_final_keyword_query import ResponseModelFinalKeywordQuery @@ -140,45 +281,30 @@ from .sad_talker_settings import SadTalkerSettings from .sad_talker_settings_preprocess import SadTalkerSettingsPreprocess from .search_reference import SearchReference -from .seo_content_request_response_format_type import SeoContentRequestResponseFormatType -from .seo_content_request_selected_model import SeoContentRequestSelectedModel -from .seo_people_also_ask_doc_request_citation_style import SeoPeopleAlsoAskDocRequestCitationStyle -from .seo_people_also_ask_doc_request_embedding_model import SeoPeopleAlsoAskDocRequestEmbeddingModel -from .seo_people_also_ask_doc_request_keyword_query import SeoPeopleAlsoAskDocRequestKeywordQuery -from .seo_people_also_ask_doc_request_response_format_type import SeoPeopleAlsoAskDocRequestResponseFormatType -from .seo_people_also_ask_doc_request_selected_model import SeoPeopleAlsoAskDocRequestSelectedModel -from .seo_people_also_ask_request_embedding_model import SeoPeopleAlsoAskRequestEmbeddingModel -from .seo_people_also_ask_request_response_format_type import SeoPeopleAlsoAskRequestResponseFormatType -from .seo_people_also_ask_request_selected_model import SeoPeopleAlsoAskRequestSelectedModel from .seo_summary_page_output import SeoSummaryPageOutput +from .seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType +from 
.seo_summary_page_request_selected_model import SeoSummaryPageRequestSelectedModel from .seo_summary_page_status_response import SeoSummaryPageStatusResponse from .serp_search_location import SerpSearchLocation from .serp_search_type import SerpSearchType from .smart_gpt_page_output import SmartGptPageOutput from .smart_gpt_page_status_response import SmartGptPageStatusResponse from .social_lookup_email_page_output import SocialLookupEmailPageOutput +from .social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType +from .social_lookup_email_page_request_selected_model import SocialLookupEmailPageRequestSelectedModel from .social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse -from .speech_recognition_request_output_format import SpeechRecognitionRequestOutputFormat -from .speech_recognition_request_selected_model import SpeechRecognitionRequestSelectedModel -from .speech_recognition_request_translation_model import SpeechRecognitionRequestTranslationModel from .stream_error import StreamError -from .synthesize_data_request_response_format_type import SynthesizeDataRequestResponseFormatType -from .synthesize_data_request_selected_asr_model import SynthesizeDataRequestSelectedAsrModel -from .synthesize_data_request_selected_model import SynthesizeDataRequestSelectedModel from .text2audio_page_output import Text2AudioPageOutput from .text2audio_page_status_response import Text2AudioPageStatusResponse -from .text_to_image_request_scheduler import TextToImageRequestScheduler -from .text_to_image_request_selected_models_item import TextToImageRequestSelectedModelsItem from .text_to_speech_page_output import TextToSpeechPageOutput +from .text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel +from .text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName +from .text_to_speech_page_request_tts_provider import 
TextToSpeechPageRequestTtsProvider from .text_to_speech_page_status_response import TextToSpeechPageStatusResponse -from .text_to_speech_request_openai_tts_model import TextToSpeechRequestOpenaiTtsModel -from .text_to_speech_request_openai_voice_name import TextToSpeechRequestOpenaiVoiceName -from .text_to_speech_request_tts_provider import TextToSpeechRequestTtsProvider from .training_data_model import TrainingDataModel -from .translate_request_selected_model import TranslateRequestSelectedModel from .translation_page_output import TranslationPageOutput +from .translation_page_request_selected_model import TranslationPageRequestSelectedModel from .translation_page_status_response import TranslationPageStatusResponse -from .upscale_request_selected_models_item import UpscaleRequestSelectedModelsItem from .validation_error import ValidationError from .validation_error_loc_item import ValidationErrorLocItem from .vcard import Vcard @@ -186,21 +312,20 @@ from .video_bots_page_output_final_keyword_query import VideoBotsPageOutputFinalKeywordQuery from .video_bots_page_output_final_prompt import VideoBotsPageOutputFinalPrompt from .video_bots_page_status_response import VideoBotsPageStatusResponse -from .web_search_llm_request_embedding_model import WebSearchLlmRequestEmbeddingModel -from .web_search_llm_request_response_format_type import WebSearchLlmRequestResponseFormatType -from .web_search_llm_request_selected_model import WebSearchLlmRequestSelectedModel __all__ = [ "AggFunction", "AggFunctionFunction", "AggFunctionResult", "AggFunctionResultFunction", - "AnimateRequestSelectedModel", "AnimationPrompt", "AsrChunk", "AsrOutputJson", "AsrPageOutput", "AsrPageOutputOutputTextItem", + "AsrPageRequestOutputFormat", + "AsrPageRequestSelectedModel", + "AsrPageRequestTranslationModel", "AsrPageStatusResponse", "AsyncApiResponseModelV3", "BalanceResponse", @@ -218,10 +343,15 @@ "ChyronPlantPageRequest", "ChyronPlantPageStatusResponse", "CompareLlmPageOutput", + 
"CompareLlmPageRequestResponseFormatType", + "CompareLlmPageRequestSelectedModelsItem", "CompareLlmPageStatusResponse", "CompareText2ImgPageOutput", + "CompareText2ImgPageRequestScheduler", + "CompareText2ImgPageRequestSelectedModelsItem", "CompareText2ImgPageStatusResponse", "CompareUpscalerPageOutput", + "CompareUpscalerPageRequestSelectedModelsItem", "CompareUpscalerPageStatusResponse", "ConsoleLogs", "ConsoleLogsLevel", @@ -234,43 +364,58 @@ "ConversationStart", "CreateStreamResponse", "DeforumSdPageOutput", + "DeforumSdPageRequestSelectedModel", "DeforumSdPageStatusResponse", "DocExtractPageOutput", + "DocExtractPageRequestResponseFormatType", + "DocExtractPageRequestSelectedAsrModel", + "DocExtractPageRequestSelectedModel", "DocExtractPageStatusResponse", "DocSearchPageOutput", + "DocSearchPageRequestCitationStyle", + "DocSearchPageRequestEmbeddingModel", + "DocSearchPageRequestKeywordQuery", + "DocSearchPageRequestResponseFormatType", + "DocSearchPageRequestSelectedModel", "DocSearchPageStatusResponse", "DocSummaryPageOutput", + "DocSummaryPageRequestResponseFormatType", + "DocSummaryPageRequestSelectedAsrModel", + "DocSummaryPageRequestSelectedModel", "DocSummaryPageStatusResponse", - "DocSummaryRequestResponseFormatType", - "DocSummaryRequestSelectedAsrModel", - "DocSummaryRequestSelectedModel", "EmailFaceInpaintingPageOutput", + "EmailFaceInpaintingPageRequestSelectedModel", "EmailFaceInpaintingPageStatusResponse", - "EmbedRequestSelectedModel", "EmbeddingsPageOutput", + "EmbeddingsPageRequestSelectedModel", "EmbeddingsPageStatusResponse", "EvalPrompt", "FaceInpaintingPageOutput", + "FaceInpaintingPageRequestSelectedModel", "FaceInpaintingPageStatusResponse", - "FailedReponseModelV2", - "FailedResponseDetail", "FinalResponse", "FunctionsPageOutput", "FunctionsPageStatusResponse", "GenericErrorResponse", "GenericErrorResponseDetail", "GoogleGptPageOutput", + "GoogleGptPageRequestEmbeddingModel", + "GoogleGptPageRequestResponseFormatType", + 
"GoogleGptPageRequestSelectedModel", "GoogleGptPageStatusResponse", "GoogleImageGenPageOutput", + "GoogleImageGenPageRequestSelectedModel", "GoogleImageGenPageStatusResponse", "HttpValidationError", - "ImageFromEmailRequestSelectedModel", - "ImageFromWebSearchRequestSelectedModel", "ImageSegmentationPageOutput", + "ImageSegmentationPageRequestSelectedModel", "ImageSegmentationPageStatusResponse", "ImageUrl", "ImageUrlDetail", "Img2ImgPageOutput", + "Img2ImgPageRequestSelectedControlnetModel", + "Img2ImgPageRequestSelectedControlnetModelItem", + "Img2ImgPageRequestSelectedModel", "Img2ImgPageStatusResponse", "LetterWriterPageOutput", "LetterWriterPageRequest", @@ -278,47 +423,112 @@ "LipsyncPageOutput", "LipsyncPageStatusResponse", "LipsyncTtsPageOutput", + "LipsyncTtsPageRequestOpenaiTtsModel", + "LipsyncTtsPageRequestOpenaiVoiceName", + "LipsyncTtsPageRequestSelectedModel", + "LipsyncTtsPageRequestTtsProvider", "LipsyncTtsPageStatusResponse", - "LipsyncTtsRequestOpenaiTtsModel", - "LipsyncTtsRequestOpenaiVoiceName", - "LipsyncTtsRequestSelectedModel", - "LipsyncTtsRequestTtsProvider", - "LlmRequestResponseFormatType", - "LlmRequestSelectedModelsItem", "LlmTools", "MessagePart", "ObjectInpaintingPageOutput", + "ObjectInpaintingPageRequestSelectedModel", "ObjectInpaintingPageStatusResponse", - "PersonalizeEmailRequestResponseFormatType", - "PersonalizeEmailRequestSelectedModel", - "PortraitRequestSelectedModel", - "ProductImageRequestSelectedModel", + "PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem", + "PostV3ArtQrCodeAsyncFormRequestScheduler", + "PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem", + "PostV3ArtQrCodeAsyncFormRequestSelectedModel", + "PostV3AsrAsyncFormRequestOutputFormat", + "PostV3AsrAsyncFormRequestSelectedModel", + "PostV3AsrAsyncFormRequestTranslationModel", + "PostV3BulkEvalAsyncFormRequestResponseFormatType", + "PostV3BulkEvalAsyncFormRequestSelectedModel", + 
"PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem", + "PostV3CompareLlmAsyncFormRequestResponseFormatType", + "PostV3CompareLlmAsyncFormRequestSelectedModelsItem", + "PostV3CompareText2ImgAsyncFormRequestScheduler", + "PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem", + "PostV3DeforumSdAsyncFormRequestSelectedModel", + "PostV3DocExtractAsyncFormRequestResponseFormatType", + "PostV3DocExtractAsyncFormRequestSelectedAsrModel", + "PostV3DocExtractAsyncFormRequestSelectedModel", + "PostV3DocSearchAsyncFormRequestCitationStyle", + "PostV3DocSearchAsyncFormRequestEmbeddingModel", + "PostV3DocSearchAsyncFormRequestKeywordQuery", + "PostV3DocSearchAsyncFormRequestResponseFormatType", + "PostV3DocSearchAsyncFormRequestSelectedModel", + "PostV3DocSummaryAsyncFormRequestResponseFormatType", + "PostV3DocSummaryAsyncFormRequestSelectedAsrModel", + "PostV3DocSummaryAsyncFormRequestSelectedModel", + "PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel", + "PostV3EmbeddingsAsyncFormRequestSelectedModel", + "PostV3FaceInpaintingAsyncFormRequestSelectedModel", + "PostV3GoogleGptAsyncFormRequestEmbeddingModel", + "PostV3GoogleGptAsyncFormRequestResponseFormatType", + "PostV3GoogleGptAsyncFormRequestSelectedModel", + "PostV3GoogleImageGenAsyncFormRequestSelectedModel", + "PostV3ImageSegmentationAsyncFormRequestSelectedModel", + "PostV3Img2ImgAsyncFormRequestSelectedControlnetModel", + "PostV3Img2ImgAsyncFormRequestSelectedControlnetModelItem", + "PostV3Img2ImgAsyncFormRequestSelectedModel", + "PostV3LipsyncAsyncFormRequestSelectedModel", + "PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel", + "PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName", + "PostV3LipsyncTtsAsyncFormRequestSelectedModel", + "PostV3LipsyncTtsAsyncFormRequestTtsProvider", + "PostV3ObjectInpaintingAsyncFormRequestSelectedModel", + "PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel", + "PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType", + 
"PostV3RelatedQnaMakerAsyncFormRequestSelectedModel", + "PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle", + "PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel", + "PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery", + "PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType", + "PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel", + "PostV3SeoSummaryAsyncFormRequestResponseFormatType", + "PostV3SeoSummaryAsyncFormRequestSelectedModel", + "PostV3SmartGptAsyncFormRequestResponseFormatType", + "PostV3SmartGptAsyncFormRequestSelectedModel", + "PostV3SocialLookupEmailAsyncFormRequestResponseFormatType", + "PostV3SocialLookupEmailAsyncFormRequestSelectedModel", + "PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel", + "PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName", + "PostV3TextToSpeechAsyncFormRequestTtsProvider", + "PostV3TranslateAsyncFormRequestSelectedModel", + "PostV3VideoBotsAsyncFormRequestAsrModel", + "PostV3VideoBotsAsyncFormRequestCitationStyle", + "PostV3VideoBotsAsyncFormRequestEmbeddingModel", + "PostV3VideoBotsAsyncFormRequestLipsyncModel", + "PostV3VideoBotsAsyncFormRequestOpenaiTtsModel", + "PostV3VideoBotsAsyncFormRequestOpenaiVoiceName", + "PostV3VideoBotsAsyncFormRequestResponseFormatType", + "PostV3VideoBotsAsyncFormRequestSelectedModel", + "PostV3VideoBotsAsyncFormRequestTranslationModel", + "PostV3VideoBotsAsyncFormRequestTtsProvider", "PromptTreeNode", "PromptTreeNodePrompt", "QrCodeGeneratorPageOutput", + "QrCodeGeneratorPageRequestImagePromptControlnetModelsItem", + "QrCodeGeneratorPageRequestScheduler", + "QrCodeGeneratorPageRequestSelectedControlnetModelItem", + "QrCodeGeneratorPageRequestSelectedModel", "QrCodeGeneratorPageStatusResponse", - "QrCodeRequestImagePromptControlnetModelsItem", - "QrCodeRequestScheduler", - "QrCodeRequestSelectedControlnetModelItem", - "QrCodeRequestSelectedModel", - "RagRequestCitationStyle", - "RagRequestEmbeddingModel", - "RagRequestKeywordQuery", - "RagRequestResponseFormatType", - 
"RagRequestSelectedModel", "RecipeFunction", "RecipeFunctionTrigger", "RecipeRunState", "RelatedDocSearchResponse", "RelatedGoogleGptResponse", "RelatedQnADocPageOutput", + "RelatedQnADocPageRequestCitationStyle", + "RelatedQnADocPageRequestEmbeddingModel", + "RelatedQnADocPageRequestKeywordQuery", + "RelatedQnADocPageRequestResponseFormatType", + "RelatedQnADocPageRequestSelectedModel", "RelatedQnADocPageStatusResponse", "RelatedQnAPageOutput", + "RelatedQnAPageRequestEmbeddingModel", + "RelatedQnAPageRequestResponseFormatType", + "RelatedQnAPageRequestSelectedModel", "RelatedQnAPageStatusResponse", - "RemixImageRequestSelectedControlnetModel", - "RemixImageRequestSelectedControlnetModelItem", - "RemixImageRequestSelectedModel", - "RemoveBackgroundRequestSelectedModel", "ReplyButton", "ResponseModel", "ResponseModelFinalKeywordQuery", @@ -329,45 +539,30 @@ "SadTalkerSettings", "SadTalkerSettingsPreprocess", "SearchReference", - "SeoContentRequestResponseFormatType", - "SeoContentRequestSelectedModel", - "SeoPeopleAlsoAskDocRequestCitationStyle", - "SeoPeopleAlsoAskDocRequestEmbeddingModel", - "SeoPeopleAlsoAskDocRequestKeywordQuery", - "SeoPeopleAlsoAskDocRequestResponseFormatType", - "SeoPeopleAlsoAskDocRequestSelectedModel", - "SeoPeopleAlsoAskRequestEmbeddingModel", - "SeoPeopleAlsoAskRequestResponseFormatType", - "SeoPeopleAlsoAskRequestSelectedModel", "SeoSummaryPageOutput", + "SeoSummaryPageRequestResponseFormatType", + "SeoSummaryPageRequestSelectedModel", "SeoSummaryPageStatusResponse", "SerpSearchLocation", "SerpSearchType", "SmartGptPageOutput", "SmartGptPageStatusResponse", "SocialLookupEmailPageOutput", + "SocialLookupEmailPageRequestResponseFormatType", + "SocialLookupEmailPageRequestSelectedModel", "SocialLookupEmailPageStatusResponse", - "SpeechRecognitionRequestOutputFormat", - "SpeechRecognitionRequestSelectedModel", - "SpeechRecognitionRequestTranslationModel", "StreamError", - "SynthesizeDataRequestResponseFormatType", - 
"SynthesizeDataRequestSelectedAsrModel", - "SynthesizeDataRequestSelectedModel", "Text2AudioPageOutput", "Text2AudioPageStatusResponse", - "TextToImageRequestScheduler", - "TextToImageRequestSelectedModelsItem", "TextToSpeechPageOutput", + "TextToSpeechPageRequestOpenaiTtsModel", + "TextToSpeechPageRequestOpenaiVoiceName", + "TextToSpeechPageRequestTtsProvider", "TextToSpeechPageStatusResponse", - "TextToSpeechRequestOpenaiTtsModel", - "TextToSpeechRequestOpenaiVoiceName", - "TextToSpeechRequestTtsProvider", "TrainingDataModel", - "TranslateRequestSelectedModel", "TranslationPageOutput", + "TranslationPageRequestSelectedModel", "TranslationPageStatusResponse", - "UpscaleRequestSelectedModelsItem", "ValidationError", "ValidationErrorLocItem", "Vcard", @@ -375,7 +570,4 @@ "VideoBotsPageOutputFinalKeywordQuery", "VideoBotsPageOutputFinalPrompt", "VideoBotsPageStatusResponse", - "WebSearchLlmRequestEmbeddingModel", - "WebSearchLlmRequestResponseFormatType", - "WebSearchLlmRequestSelectedModel", ] diff --git a/src/gooey/types/agg_function.py b/src/gooey/types/agg_function.py index ce512c1..a35da99 100644 --- a/src/gooey/types/agg_function.py +++ b/src/gooey/types/agg_function.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .agg_function_function import AggFunctionFunction +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class AggFunction(UniversalBaseModel): diff --git a/src/gooey/types/agg_function_result.py b/src/gooey/types/agg_function_result.py index 6dcdfe0..2c762a1 100644 --- a/src/gooey/types/agg_function_result.py +++ b/src/gooey/types/agg_function_result.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel +from .agg_function_result_function import AggFunctionResultFunction +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing - import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .agg_function_result_function import AggFunctionResultFunction - class AggFunctionResult(UniversalBaseModel): column: str diff --git a/src/gooey/types/animate_request_selected_model.py b/src/gooey/types/animate_request_selected_model.py deleted file mode 100644 index d8ab4b0..0000000 --- a/src/gooey/types/animate_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AnimateRequestSelectedModel = typing.Union[typing.Literal["protogen_2_2", "epicdream"], typing.Any] diff --git a/src/gooey/types/animation_prompt.py b/src/gooey/types/animation_prompt.py index 29d698f..217da8c 100644 --- a/src/gooey/types/animation_prompt.py +++ b/src/gooey/types/animation_prompt.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing - import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel - class AnimationPrompt(UniversalBaseModel): frame: str diff --git a/src/gooey/types/asr_chunk.py b/src/gooey/types/asr_chunk.py index 1e5ee10..c6e4b1d 100644 --- a/src/gooey/types/asr_chunk.py +++ b/src/gooey/types/asr_chunk.py @@ -1,14 +1,13 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel - class AsrChunk(UniversalBaseModel): - timestamp: typing.List[typing.Any] + timestamp: typing.List[typing.Optional[typing.Any]] text: str speaker: int diff --git a/src/gooey/types/asr_output_json.py b/src/gooey/types/asr_output_json.py index 0ebd5eb..57e18e6 100644 --- a/src/gooey/types/asr_output_json.py +++ b/src/gooey/types/asr_output_json.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .asr_chunk import AsrChunk +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class AsrOutputJson(UniversalBaseModel): diff --git a/src/gooey/types/asr_page_output.py b/src/gooey/types/asr_page_output.py index 1f660fc..ec692f5 100644 --- a/src/gooey/types/asr_page_output.py +++ b/src/gooey/types/asr_page_output.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .asr_page_output_output_text_item import AsrPageOutputOutputTextItem from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class AsrPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/asr_page_output_output_text_item.py b/src/gooey/types/asr_page_output_output_text_item.py index c65822d..9cd88af 100644 --- a/src/gooey/types/asr_page_output_output_text_item.py +++ b/src/gooey/types/asr_page_output_output_text_item.py @@ -1,7 +1,6 @@ # This file was auto-generated by Fern from our API Definition. 
import typing - from .asr_output_json import AsrOutputJson AsrPageOutputOutputTextItem = typing.Union[str, AsrOutputJson] diff --git a/src/gooey/types/asr_page_request_output_format.py b/src/gooey/types/asr_page_request_output_format.py new file mode 100644 index 0000000..101e681 --- /dev/null +++ b/src/gooey/types/asr_page_request_output_format.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AsrPageRequestOutputFormat = typing.Union[typing.Literal["text", "json", "srt", "vtt"], typing.Any] diff --git a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_asr_model.py b/src/gooey/types/asr_page_request_selected_model.py similarity index 89% rename from src/gooey/copilot_integrations/types/video_bots_stream_create_request_asr_model.py rename to src/gooey/types/asr_page_request_selected_model.py index 0f2b04b..4e80d3c 100644 --- a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_asr_model.py +++ b/src/gooey/types/asr_page_request_selected_model.py @@ -2,7 +2,7 @@ import typing -VideoBotsStreamCreateRequestAsrModel = typing.Union[ +AsrPageRequestSelectedModel = typing.Union[ typing.Literal[ "whisper_large_v2", "whisper_large_v3", diff --git a/src/gooey/types/asr_page_request_translation_model.py b/src/gooey/types/asr_page_request_translation_model.py new file mode 100644 index 0000000..d5dcef6 --- /dev/null +++ b/src/gooey/types/asr_page_request_translation_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AsrPageRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/asr_page_status_response.py b/src/gooey/types/asr_page_status_response.py index c4e8f2b..46d3a20 100644 --- a/src/gooey/types/asr_page_status_response.py +++ b/src/gooey/types/asr_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. 
-import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .asr_page_output import AsrPageOutput from .recipe_run_state import RecipeRunState +import typing +from .asr_page_output import AsrPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class AsrPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/async_api_response_model_v3.py b/src/gooey/types/async_api_response_model_v3.py index 853fcd3..fdee834 100644 --- a/src/gooey/types/async_api_response_model_v3.py +++ b/src/gooey/types/async_api_response_model_v3.py @@ -1,10 +1,9 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing class AsyncApiResponseModelV3(UniversalBaseModel): diff --git a/src/gooey/types/balance_response.py b/src/gooey/types/balance_response.py index 1176a8f..46a70cc 100644 --- a/src/gooey/types/balance_response.py +++ b/src/gooey/types/balance_response.py @@ -1,10 +1,9 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing class BalanceResponse(UniversalBaseModel): diff --git a/src/gooey/types/bot_broadcast_filters.py b/src/gooey/types/bot_broadcast_filters.py index 9d05e81..0e13605 100644 --- a/src/gooey/types/bot_broadcast_filters.py +++ b/src/gooey/types/bot_broadcast_filters.py @@ -1,10 +1,9 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class BotBroadcastFilters(UniversalBaseModel): diff --git a/src/gooey/types/bulk_eval_page_output.py b/src/gooey/types/bulk_eval_page_output.py index 63897db..545f83c 100644 --- a/src/gooey/types/bulk_eval_page_output.py +++ b/src/gooey/types/bulk_eval_page_output.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .agg_function_result import AggFunctionResult from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class BulkEvalPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/bulk_eval_page_status_response.py b/src/gooey/types/bulk_eval_page_status_response.py index e788c4a..939f938 100644 --- a/src/gooey/types/bulk_eval_page_status_response.py +++ b/src/gooey/types/bulk_eval_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. 
-import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .bulk_eval_page_output import BulkEvalPageOutput from .recipe_run_state import RecipeRunState +import typing +from .bulk_eval_page_output import BulkEvalPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class BulkEvalPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/bulk_runner_page_output.py b/src/gooey/types/bulk_runner_page_output.py index ab68672..e84ed51 100644 --- a/src/gooey/types/bulk_runner_page_output.py +++ b/src/gooey/types/bulk_runner_page_output.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class BulkRunnerPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/bulk_runner_page_status_response.py b/src/gooey/types/bulk_runner_page_status_response.py index dfb46e0..786def3 100644 --- a/src/gooey/types/bulk_runner_page_status_response.py +++ b/src/gooey/types/bulk_runner_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. 
-import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .bulk_runner_page_output import BulkRunnerPageOutput from .recipe_run_state import RecipeRunState +import typing +from .bulk_runner_page_output import BulkRunnerPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class BulkRunnerPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/button_pressed.py b/src/gooey/types/button_pressed.py index a8be53e..b271bab 100644 --- a/src/gooey/types/button_pressed.py +++ b/src/gooey/types/button_pressed.py @@ -1,10 +1,9 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing class ButtonPressed(UniversalBaseModel): diff --git a/src/gooey/types/called_function_response.py b/src/gooey/types/called_function_response.py index 42ea912..9076f2d 100644 --- a/src/gooey/types/called_function_response.py +++ b/src/gooey/types/called_function_response.py @@ -1,17 +1,16 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel +from .called_function_response_trigger import CalledFunctionResponseTrigger import typing - +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .called_function_response_trigger import CalledFunctionResponseTrigger - class CalledFunctionResponse(UniversalBaseModel): url: str trigger: CalledFunctionResponseTrigger - return_value: typing.Optional[typing.Any] = None + return_value: typing.Optional[typing.Optional[typing.Any]] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/gooey/types/chat_completion_content_part_image_param.py b/src/gooey/types/chat_completion_content_part_image_param.py index 41d8b02..0df6e59 100644 --- a/src/gooey/types/chat_completion_content_part_image_param.py +++ b/src/gooey/types/chat_completion_content_part_image_param.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .image_url import ImageUrl +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class ChatCompletionContentPartImageParam(UniversalBaseModel): diff --git a/src/gooey/types/chat_completion_content_part_text_param.py b/src/gooey/types/chat_completion_content_part_text_param.py index 5ed73af..9461761 100644 --- a/src/gooey/types/chat_completion_content_part_text_param.py +++ b/src/gooey/types/chat_completion_content_part_text_param.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel - class ChatCompletionContentPartTextParam(UniversalBaseModel): text: typing.Optional[str] = None diff --git a/src/gooey/types/chyron_plant_page_output.py b/src/gooey/types/chyron_plant_page_output.py index 6ecc7f0..2c49759 100644 --- a/src/gooey/types/chyron_plant_page_output.py +++ b/src/gooey/types/chyron_plant_page_output.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class ChyronPlantPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/chyron_plant_page_request.py b/src/gooey/types/chyron_plant_page_request.py index abdd922..e0733de 100644 --- a/src/gooey/types/chyron_plant_page_request.py +++ b/src/gooey/types/chyron_plant_page_request.py @@ -1,17 +1,16 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .recipe_function import RecipeFunction +import pydantic from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class ChyronPlantPageRequest(UniversalBaseModel): functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) """ Variables to be used as Jinja prompt templates and in functions as arguments """ diff --git a/src/gooey/types/chyron_plant_page_status_response.py b/src/gooey/types/chyron_plant_page_status_response.py index c699269..a118086 100644 --- a/src/gooey/types/chyron_plant_page_status_response.py +++ b/src/gooey/types/chyron_plant_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .chyron_plant_page_output import ChyronPlantPageOutput from .recipe_run_state import RecipeRunState +import typing +from .chyron_plant_page_output import ChyronPlantPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class ChyronPlantPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/compare_llm_page_output.py b/src/gooey/types/compare_llm_page_output.py index 0c4191b..5bd84b8 100644 --- a/src/gooey/types/compare_llm_page_output.py +++ b/src/gooey/types/compare_llm_page_output.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class CompareLlmPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/synthesize_data_request_response_format_type.py b/src/gooey/types/compare_llm_page_request_response_format_type.py similarity index 65% rename from src/gooey/types/synthesize_data_request_response_format_type.py rename to src/gooey/types/compare_llm_page_request_response_format_type.py index 3ab37a9..a846068 100644 --- a/src/gooey/types/synthesize_data_request_response_format_type.py +++ b/src/gooey/types/compare_llm_page_request_response_format_type.py @@ -2,4 +2,4 @@ import typing -SynthesizeDataRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] +CompareLlmPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_selected_model.py b/src/gooey/types/compare_llm_page_request_selected_models_item.py similarity index 95% rename from src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_selected_model.py rename to src/gooey/types/compare_llm_page_request_selected_models_item.py index 72d3fcd..d3564b6 100644 --- a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_selected_model.py +++ b/src/gooey/types/compare_llm_page_request_selected_models_item.py @@ -2,7 +2,7 @@ import typing -AsyncFormVideoBotsRequestSelectedModel = typing.Union[ +CompareLlmPageRequestSelectedModelsItem = typing.Union[ typing.Literal[ "gpt_4_o", "gpt_4_o_mini", diff --git a/src/gooey/types/compare_llm_page_status_response.py b/src/gooey/types/compare_llm_page_status_response.py index b7d0d23..35cc935 100644 --- 
a/src/gooey/types/compare_llm_page_status_response.py +++ b/src/gooey/types/compare_llm_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .compare_llm_page_output import CompareLlmPageOutput from .recipe_run_state import RecipeRunState +import typing +from .compare_llm_page_output import CompareLlmPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class CompareLlmPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/compare_text2img_page_output.py b/src/gooey/types/compare_text2img_page_output.py index 3aa4cc5..1673eea 100644 --- a/src/gooey/types/compare_text2img_page_output.py +++ b/src/gooey/types/compare_text2img_page_output.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class CompareText2ImgPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/qr_code_request_scheduler.py b/src/gooey/types/compare_text2img_page_request_scheduler.py similarity index 89% rename from src/gooey/types/qr_code_request_scheduler.py rename to src/gooey/types/compare_text2img_page_request_scheduler.py index 890b204..29ce840 100644 --- a/src/gooey/types/qr_code_request_scheduler.py +++ b/src/gooey/types/compare_text2img_page_request_scheduler.py @@ -2,7 +2,7 @@ import typing -QrCodeRequestScheduler = typing.Union[ +CompareText2ImgPageRequestScheduler = typing.Union[ typing.Literal[ "singlestep_dpm_solver", "multistep_dpm_solver", diff --git 
a/src/gooey/types/text_to_image_request_selected_models_item.py b/src/gooey/types/compare_text2img_page_request_selected_models_item.py similarity index 87% rename from src/gooey/types/text_to_image_request_selected_models_item.py rename to src/gooey/types/compare_text2img_page_request_selected_models_item.py index 06aef80..4154491 100644 --- a/src/gooey/types/text_to_image_request_selected_models_item.py +++ b/src/gooey/types/compare_text2img_page_request_selected_models_item.py @@ -2,7 +2,7 @@ import typing -TextToImageRequestSelectedModelsItem = typing.Union[ +CompareText2ImgPageRequestSelectedModelsItem = typing.Union[ typing.Literal[ "dream_shaper", "dreamlike_2", diff --git a/src/gooey/types/compare_text2img_page_status_response.py b/src/gooey/types/compare_text2img_page_status_response.py index 73c070b..15b89d8 100644 --- a/src/gooey/types/compare_text2img_page_status_response.py +++ b/src/gooey/types/compare_text2img_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .compare_text2img_page_output import CompareText2ImgPageOutput from .recipe_run_state import RecipeRunState +import typing +from .compare_text2img_page_output import CompareText2ImgPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class CompareText2ImgPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/compare_upscaler_page_output.py b/src/gooey/types/compare_upscaler_page_output.py index ada63f8..6c7fc31 100644 --- a/src/gooey/types/compare_upscaler_page_output.py +++ b/src/gooey/types/compare_upscaler_page_output.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class CompareUpscalerPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/upscale_request_selected_models_item.py b/src/gooey/types/compare_upscaler_page_request_selected_models_item.py similarity index 74% rename from src/gooey/types/upscale_request_selected_models_item.py rename to src/gooey/types/compare_upscaler_page_request_selected_models_item.py index 1a8362e..eff4f6e 100644 --- a/src/gooey/types/upscale_request_selected_models_item.py +++ b/src/gooey/types/compare_upscaler_page_request_selected_models_item.py @@ -2,6 +2,6 @@ import typing -UpscaleRequestSelectedModelsItem = typing.Union[ +CompareUpscalerPageRequestSelectedModelsItem = typing.Union[ typing.Literal["gfpgan_1_4", "real_esrgan_x2", "sd_x4", "real_esrgan", "gfpgan"], typing.Any ] diff --git a/src/gooey/types/compare_upscaler_page_status_response.py b/src/gooey/types/compare_upscaler_page_status_response.py index 179e2c5..eb7da90 100644 --- a/src/gooey/types/compare_upscaler_page_status_response.py +++ b/src/gooey/types/compare_upscaler_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. 
-import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .compare_upscaler_page_output import CompareUpscalerPageOutput from .recipe_run_state import RecipeRunState +import typing +from .compare_upscaler_page_output import CompareUpscalerPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class CompareUpscalerPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/console_logs.py b/src/gooey/types/console_logs.py index c988354..f9dfb22 100644 --- a/src/gooey/types/console_logs.py +++ b/src/gooey/types/console_logs.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel +from .console_logs_level import ConsoleLogsLevel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing - import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .console_logs_level import ConsoleLogsLevel - class ConsoleLogs(UniversalBaseModel): level: ConsoleLogsLevel diff --git a/src/gooey/types/conversation_entry.py b/src/gooey/types/conversation_entry.py index 8980442..c33cbfa 100644 --- a/src/gooey/types/conversation_entry.py +++ b/src/gooey/types/conversation_entry.py @@ -1,13 +1,12 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel +from .conversation_entry_role import ConversationEntryRole +from .conversation_entry_content import ConversationEntryContent import typing - +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .conversation_entry_content import ConversationEntryContent -from .conversation_entry_role import ConversationEntryRole - class ConversationEntry(UniversalBaseModel): role: ConversationEntryRole diff --git a/src/gooey/types/conversation_entry_content.py b/src/gooey/types/conversation_entry_content.py index 377c128..1a73a44 100644 --- a/src/gooey/types/conversation_entry_content.py +++ b/src/gooey/types/conversation_entry_content.py @@ -1,7 +1,6 @@ # This file was auto-generated by Fern from our API Definition. import typing - from .conversation_entry_content_item import ConversationEntryContentItem ConversationEntryContent = typing.Union[str, typing.List[ConversationEntryContentItem]] diff --git a/src/gooey/types/conversation_entry_content_item.py b/src/gooey/types/conversation_entry_content_item.py index 18cbdb8..46337b9 100644 --- a/src/gooey/types/conversation_entry_content_item.py +++ b/src/gooey/types/conversation_entry_content_item.py @@ -1,12 +1,10 @@ # This file was auto-generated by Fern from our API Definition. from __future__ import annotations - +from ..core.pydantic_utilities import UniversalBaseModel import typing - +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .image_url import ImageUrl diff --git a/src/gooey/types/conversation_start.py b/src/gooey/types/conversation_start.py index c0bb43f..47dd8dc 100644 --- a/src/gooey/types/conversation_start.py +++ b/src/gooey/types/conversation_start.py @@ -1,10 +1,9 @@ # This file was auto-generated by Fern from our API Definition. 
-import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class ConversationStart(UniversalBaseModel): diff --git a/src/gooey/types/create_stream_response.py b/src/gooey/types/create_stream_response.py index eefadbc..905b4c4 100644 --- a/src/gooey/types/create_stream_response.py +++ b/src/gooey/types/create_stream_response.py @@ -1,10 +1,9 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing class CreateStreamResponse(UniversalBaseModel): diff --git a/src/gooey/types/deforum_sd_page_output.py b/src/gooey/types/deforum_sd_page_output.py index ef74362..78c03c5 100644 --- a/src/gooey/types/deforum_sd_page_output.py +++ b/src/gooey/types/deforum_sd_page_output.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class DeforumSdPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/deforum_sd_page_request_selected_model.py b/src/gooey/types/deforum_sd_page_request_selected_model.py new file mode 100644 index 0000000..3af657a --- /dev/null +++ b/src/gooey/types/deforum_sd_page_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +DeforumSdPageRequestSelectedModel = typing.Union[typing.Literal["protogen_2_2", "epicdream"], typing.Any] diff --git a/src/gooey/types/deforum_sd_page_status_response.py b/src/gooey/types/deforum_sd_page_status_response.py index 9376f4f..362cb07 100644 --- a/src/gooey/types/deforum_sd_page_status_response.py +++ b/src/gooey/types/deforum_sd_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .deforum_sd_page_output import DeforumSdPageOutput from .recipe_run_state import RecipeRunState +import typing +from .deforum_sd_page_output import DeforumSdPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class DeforumSdPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/doc_extract_page_output.py b/src/gooey/types/doc_extract_page_output.py index afc0077..7fb92f7 100644 --- a/src/gooey/types/doc_extract_page_output.py +++ b/src/gooey/types/doc_extract_page_output.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class DocExtractPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/doc_extract_page_request_response_format_type.py b/src/gooey/types/doc_extract_page_request_response_format_type.py new file mode 100644 index 0000000..0ad7c14 --- /dev/null +++ b/src/gooey/types/doc_extract_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +DocExtractPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/speech_recognition_request_selected_model.py b/src/gooey/types/doc_extract_page_request_selected_asr_model.py similarity index 89% rename from src/gooey/types/speech_recognition_request_selected_model.py rename to src/gooey/types/doc_extract_page_request_selected_asr_model.py index 9d2d28f..a358400 100644 --- a/src/gooey/types/speech_recognition_request_selected_model.py +++ b/src/gooey/types/doc_extract_page_request_selected_asr_model.py @@ -2,7 +2,7 @@ import typing -SpeechRecognitionRequestSelectedModel = typing.Union[ +DocExtractPageRequestSelectedAsrModel = typing.Union[ typing.Literal[ "whisper_large_v2", "whisper_large_v3", diff --git a/src/gooey/types/doc_extract_page_request_selected_model.py b/src/gooey/types/doc_extract_page_request_selected_model.py new file mode 100644 index 0000000..1872929 --- /dev/null +++ b/src/gooey/types/doc_extract_page_request_selected_model.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +DocExtractPageRequestSelectedModel = typing.Union[ + typing.Literal[ + "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", + "gpt_4_turbo_vision", + "gpt_4_vision", + "gpt_4_turbo", + "gpt_4", + "gpt_4_32k", + "gpt_3_5_turbo", + "gpt_3_5_turbo_16k", + "gpt_3_5_turbo_instruct", + "llama3_70b", + "llama_3_groq_70b_tool_use", + "llama3_8b", + "llama_3_groq_8b_tool_use", + "llama2_70b_chat", + "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", + "gemma_7b_it", + "gemini_1_5_flash", + "gemini_1_5_pro", + "gemini_1_pro_vision", + "gemini_1_pro", + "palm2_chat", + "palm2_text", + "claude_3_5_sonnet", + "claude_3_opus", + "claude_3_sonnet", + "claude_3_haiku", + "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", + "text_davinci_003", + "text_davinci_002", + "code_davinci_002", + "text_curie_001", + "text_babbage_001", + "text_ada_001", + ], + typing.Any, +] diff --git a/src/gooey/types/doc_extract_page_status_response.py b/src/gooey/types/doc_extract_page_status_response.py index 409989f..1bb5cc9 100644 --- a/src/gooey/types/doc_extract_page_status_response.py +++ b/src/gooey/types/doc_extract_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. 
-import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .doc_extract_page_output import DocExtractPageOutput from .recipe_run_state import RecipeRunState +import typing +from .doc_extract_page_output import DocExtractPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class DocExtractPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/doc_search_page_output.py b/src/gooey/types/doc_search_page_output.py index a306fd1..9fee844 100644 --- a/src/gooey/types/doc_search_page_output.py +++ b/src/gooey/types/doc_search_page_output.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .called_function_response import CalledFunctionResponse from .search_reference import SearchReference +from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class DocSearchPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/seo_people_also_ask_doc_request_citation_style.py b/src/gooey/types/doc_search_page_request_citation_style.py similarity index 89% rename from src/gooey/types/seo_people_also_ask_doc_request_citation_style.py rename to src/gooey/types/doc_search_page_request_citation_style.py index c5aaac3..b47b3be 100644 --- a/src/gooey/types/seo_people_also_ask_doc_request_citation_style.py +++ b/src/gooey/types/doc_search_page_request_citation_style.py @@ -2,7 +2,7 @@ import typing -SeoPeopleAlsoAskDocRequestCitationStyle = typing.Union[ +DocSearchPageRequestCitationStyle = typing.Union[ typing.Literal[ "number", "title", diff --git a/src/gooey/types/rag_request_embedding_model.py b/src/gooey/types/doc_search_page_request_embedding_model.py similarity index 
87% rename from src/gooey/types/rag_request_embedding_model.py rename to src/gooey/types/doc_search_page_request_embedding_model.py index 0b9fb13..fb35612 100644 --- a/src/gooey/types/rag_request_embedding_model.py +++ b/src/gooey/types/doc_search_page_request_embedding_model.py @@ -2,7 +2,7 @@ import typing -RagRequestEmbeddingModel = typing.Union[ +DocSearchPageRequestEmbeddingModel = typing.Union[ typing.Literal[ "openai_3_large", "openai_3_small", diff --git a/src/gooey/types/rag_request_keyword_query.py b/src/gooey/types/doc_search_page_request_keyword_query.py similarity index 52% rename from src/gooey/types/rag_request_keyword_query.py rename to src/gooey/types/doc_search_page_request_keyword_query.py index 894beca..8083b3d 100644 --- a/src/gooey/types/rag_request_keyword_query.py +++ b/src/gooey/types/doc_search_page_request_keyword_query.py @@ -2,4 +2,4 @@ import typing -RagRequestKeywordQuery = typing.Union[str, typing.List[str]] +DocSearchPageRequestKeywordQuery = typing.Union[str, typing.List[str]] diff --git a/src/gooey/types/doc_search_page_request_response_format_type.py b/src/gooey/types/doc_search_page_request_response_format_type.py new file mode 100644 index 0000000..856b641 --- /dev/null +++ b/src/gooey/types/doc_search_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DocSearchPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/doc_search_page_request_selected_model.py b/src/gooey/types/doc_search_page_request_selected_model.py new file mode 100644 index 0000000..3b793b6 --- /dev/null +++ b/src/gooey/types/doc_search_page_request_selected_model.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +DocSearchPageRequestSelectedModel = typing.Union[ + typing.Literal[ + "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", + "gpt_4_turbo_vision", + "gpt_4_vision", + "gpt_4_turbo", + "gpt_4", + "gpt_4_32k", + "gpt_3_5_turbo", + "gpt_3_5_turbo_16k", + "gpt_3_5_turbo_instruct", + "llama3_70b", + "llama_3_groq_70b_tool_use", + "llama3_8b", + "llama_3_groq_8b_tool_use", + "llama2_70b_chat", + "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", + "gemma_7b_it", + "gemini_1_5_flash", + "gemini_1_5_pro", + "gemini_1_pro_vision", + "gemini_1_pro", + "palm2_chat", + "palm2_text", + "claude_3_5_sonnet", + "claude_3_opus", + "claude_3_sonnet", + "claude_3_haiku", + "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", + "text_davinci_003", + "text_davinci_002", + "code_davinci_002", + "text_curie_001", + "text_babbage_001", + "text_ada_001", + ], + typing.Any, +] diff --git a/src/gooey/types/doc_search_page_status_response.py b/src/gooey/types/doc_search_page_status_response.py index dcbb56a..5b20494 100644 --- a/src/gooey/types/doc_search_page_status_response.py +++ b/src/gooey/types/doc_search_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. 
-import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .doc_search_page_output import DocSearchPageOutput from .recipe_run_state import RecipeRunState +import typing +from .doc_search_page_output import DocSearchPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class DocSearchPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/doc_summary_page_output.py b/src/gooey/types/doc_summary_page_output.py index 9b006ce..4607fdf 100644 --- a/src/gooey/types/doc_summary_page_output.py +++ b/src/gooey/types/doc_summary_page_output.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .called_function_response import CalledFunctionResponse from .prompt_tree_node import PromptTreeNode +from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class DocSummaryPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/doc_summary_page_request_response_format_type.py b/src/gooey/types/doc_summary_page_request_response_format_type.py new file mode 100644 index 0000000..318ad7f --- /dev/null +++ b/src/gooey/types/doc_summary_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +DocSummaryPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/doc_summary_page_request_selected_asr_model.py b/src/gooey/types/doc_summary_page_request_selected_asr_model.py new file mode 100644 index 0000000..c04cc7a --- /dev/null +++ b/src/gooey/types/doc_summary_page_request_selected_asr_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DocSummaryPageRequestSelectedAsrModel = typing.Union[ + typing.Literal[ + "whisper_large_v2", + "whisper_large_v3", + "whisper_hindi_large_v2", + "whisper_telugu_large_v2", + "nemo_english", + "nemo_hindi", + "vakyansh_bhojpuri", + "gcp_v1", + "usm", + "deepgram", + "azure", + "seamless_m4t_v2", + "mms_1b_all", + "seamless_m4t", + ], + typing.Any, +] diff --git a/src/gooey/types/doc_summary_page_request_selected_model.py b/src/gooey/types/doc_summary_page_request_selected_model.py new file mode 100644 index 0000000..6da70f6 --- /dev/null +++ b/src/gooey/types/doc_summary_page_request_selected_model.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +DocSummaryPageRequestSelectedModel = typing.Union[ + typing.Literal[ + "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", + "gpt_4_turbo_vision", + "gpt_4_vision", + "gpt_4_turbo", + "gpt_4", + "gpt_4_32k", + "gpt_3_5_turbo", + "gpt_3_5_turbo_16k", + "gpt_3_5_turbo_instruct", + "llama3_70b", + "llama_3_groq_70b_tool_use", + "llama3_8b", + "llama_3_groq_8b_tool_use", + "llama2_70b_chat", + "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", + "gemma_7b_it", + "gemini_1_5_flash", + "gemini_1_5_pro", + "gemini_1_pro_vision", + "gemini_1_pro", + "palm2_chat", + "palm2_text", + "claude_3_5_sonnet", + "claude_3_opus", + "claude_3_sonnet", + "claude_3_haiku", + "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", + "text_davinci_003", + "text_davinci_002", + "code_davinci_002", + "text_curie_001", + "text_babbage_001", + "text_ada_001", + ], + typing.Any, +] diff --git a/src/gooey/types/doc_summary_page_status_response.py b/src/gooey/types/doc_summary_page_status_response.py index 7899397..3d367a9 100644 --- a/src/gooey/types/doc_summary_page_status_response.py +++ b/src/gooey/types/doc_summary_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. 
-import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .doc_summary_page_output import DocSummaryPageOutput from .recipe_run_state import RecipeRunState +import typing +from .doc_summary_page_output import DocSummaryPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class DocSummaryPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/doc_summary_request_response_format_type.py b/src/gooey/types/doc_summary_request_response_format_type.py deleted file mode 100644 index 8fabf9b..0000000 --- a/src/gooey/types/doc_summary_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -DocSummaryRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/email_face_inpainting_page_output.py b/src/gooey/types/email_face_inpainting_page_output.py index 1f18737..e604c43 100644 --- a/src/gooey/types/email_face_inpainting_page_output.py +++ b/src/gooey/types/email_face_inpainting_page_output.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class EmailFaceInpaintingPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/email_face_inpainting_page_request_selected_model.py b/src/gooey/types/email_face_inpainting_page_request_selected_model.py new file mode 100644 index 0000000..822b5a6 --- /dev/null +++ b/src/gooey/types/email_face_inpainting_page_request_selected_model.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +EmailFaceInpaintingPageRequestSelectedModel = typing.Union[ + typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any +] diff --git a/src/gooey/types/email_face_inpainting_page_status_response.py b/src/gooey/types/email_face_inpainting_page_status_response.py index 97e62e0..8e73499 100644 --- a/src/gooey/types/email_face_inpainting_page_status_response.py +++ b/src/gooey/types/email_face_inpainting_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .email_face_inpainting_page_output import EmailFaceInpaintingPageOutput from .recipe_run_state import RecipeRunState +import typing +from .email_face_inpainting_page_output import EmailFaceInpaintingPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class EmailFaceInpaintingPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/embeddings_page_output.py b/src/gooey/types/embeddings_page_output.py index 1618bf3..46b1282 100644 --- a/src/gooey/types/embeddings_page_output.py +++ b/src/gooey/types/embeddings_page_output.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class EmbeddingsPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/seo_people_also_ask_request_embedding_model.py b/src/gooey/types/embeddings_page_request_selected_model.py similarity index 86% rename from src/gooey/types/seo_people_also_ask_request_embedding_model.py rename to src/gooey/types/embeddings_page_request_selected_model.py index 9ab6037..a03ecc8 100644 --- a/src/gooey/types/seo_people_also_ask_request_embedding_model.py +++ b/src/gooey/types/embeddings_page_request_selected_model.py @@ -2,7 +2,7 @@ import typing -SeoPeopleAlsoAskRequestEmbeddingModel = typing.Union[ +EmbeddingsPageRequestSelectedModel = typing.Union[ typing.Literal[ "openai_3_large", "openai_3_small", diff --git a/src/gooey/types/embeddings_page_status_response.py b/src/gooey/types/embeddings_page_status_response.py index ff72003..f399978 100644 --- a/src/gooey/types/embeddings_page_status_response.py +++ b/src/gooey/types/embeddings_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. 
-import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .embeddings_page_output import EmbeddingsPageOutput from .recipe_run_state import RecipeRunState +import typing +from .embeddings_page_output import EmbeddingsPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class EmbeddingsPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/eval_prompt.py b/src/gooey/types/eval_prompt.py index af92511..5a7e09f 100644 --- a/src/gooey/types/eval_prompt.py +++ b/src/gooey/types/eval_prompt.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing - import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel - class EvalPrompt(UniversalBaseModel): name: str diff --git a/src/gooey/types/face_inpainting_page_output.py b/src/gooey/types/face_inpainting_page_output.py index 122d045..46f3576 100644 --- a/src/gooey/types/face_inpainting_page_output.py +++ b/src/gooey/types/face_inpainting_page_output.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class FaceInpaintingPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/image_from_email_request_selected_model.py b/src/gooey/types/face_inpainting_page_request_selected_model.py similarity index 74% rename from src/gooey/types/image_from_email_request_selected_model.py rename to src/gooey/types/face_inpainting_page_request_selected_model.py index ba5bb3f..9b8eab6 100644 --- a/src/gooey/types/image_from_email_request_selected_model.py +++ b/src/gooey/types/face_inpainting_page_request_selected_model.py @@ -2,6 +2,6 @@ import typing -ImageFromEmailRequestSelectedModel = typing.Union[ +FaceInpaintingPageRequestSelectedModel = typing.Union[ typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any ] diff --git a/src/gooey/types/face_inpainting_page_status_response.py b/src/gooey/types/face_inpainting_page_status_response.py index 42c0b2e..5b06e62 100644 --- a/src/gooey/types/face_inpainting_page_status_response.py +++ b/src/gooey/types/face_inpainting_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. 
-import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .face_inpainting_page_output import FaceInpaintingPageOutput from .recipe_run_state import RecipeRunState +import typing +from .face_inpainting_page_output import FaceInpaintingPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class FaceInpaintingPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/failed_reponse_model_v2.py b/src/gooey/types/failed_reponse_model_v2.py deleted file mode 100644 index 918acbe..0000000 --- a/src/gooey/types/failed_reponse_model_v2.py +++ /dev/null @@ -1,21 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .failed_response_detail import FailedResponseDetail - - -class FailedReponseModelV2(UniversalBaseModel): - detail: FailedResponseDetail - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/failed_response_detail.py b/src/gooey/types/failed_response_detail.py deleted file mode 100644 index 1b3b3cc..0000000 --- a/src/gooey/types/failed_response_detail.py +++ /dev/null @@ -1,38 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel - - -class FailedResponseDetail(UniversalBaseModel): - id: typing.Optional[str] = pydantic.Field(default=None) - """ - Unique ID for this run - """ - - url: typing.Optional[str] = pydantic.Field(default=None) - """ - Web URL for this run - """ - - created_at: typing.Optional[str] = pydantic.Field(default=None) - """ - Time when the run was created as ISO format - """ - - error: typing.Optional[str] = pydantic.Field(default=None) - """ - Error message if the run failed - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/gooey/types/final_response.py b/src/gooey/types/final_response.py index 3987c27..0493f7f 100644 --- a/src/gooey/types/final_response.py +++ b/src/gooey/types/final_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .recipe_run_state import RecipeRunState +import typing from .response_model import ResponseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class FinalResponse(UniversalBaseModel): diff --git a/src/gooey/types/functions_page_output.py b/src/gooey/types/functions_page_output.py index c0be0ef..9ba6ac7 100644 --- a/src/gooey/types/functions_page_output.py +++ b/src/gooey/types/functions_page_output.py @@ -1,16 +1,15 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .called_function_response import CalledFunctionResponse from .console_logs import ConsoleLogs +from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class FunctionsPageOutput(UniversalBaseModel): - return_value: typing.Optional[typing.Any] = None + return_value: typing.Optional[typing.Optional[typing.Any]] = None error: typing.Optional[str] = pydantic.Field(default=None) """ JS Error from the code. If there are no errors, this will be null diff --git a/src/gooey/types/functions_page_status_response.py b/src/gooey/types/functions_page_status_response.py index 0f5d4c2..597c7ea 100644 --- a/src/gooey/types/functions_page_status_response.py +++ b/src/gooey/types/functions_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .functions_page_output import FunctionsPageOutput from .recipe_run_state import RecipeRunState +import typing +from .functions_page_output import FunctionsPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class FunctionsPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/generic_error_response.py b/src/gooey/types/generic_error_response.py index 200bf74..fced9c2 100644 --- a/src/gooey/types/generic_error_response.py +++ b/src/gooey/types/generic_error_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel +from .generic_error_response_detail import GenericErrorResponseDetail +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing - import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .generic_error_response_detail import GenericErrorResponseDetail - class GenericErrorResponse(UniversalBaseModel): detail: GenericErrorResponseDetail diff --git a/src/gooey/types/generic_error_response_detail.py b/src/gooey/types/generic_error_response_detail.py index 30d57c2..45205b9 100644 --- a/src/gooey/types/generic_error_response_detail.py +++ b/src/gooey/types/generic_error_response_detail.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing - import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel - class GenericErrorResponseDetail(UniversalBaseModel): error: str diff --git a/src/gooey/types/google_gpt_page_output.py b/src/gooey/types/google_gpt_page_output.py index 926ca84..40bec31 100644 --- a/src/gooey/types/google_gpt_page_output.py +++ b/src/gooey/types/google_gpt_page_output.py @@ -1,17 +1,16 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .called_function_response import CalledFunctionResponse from .search_reference import SearchReference +from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class GoogleGptPageOutput(UniversalBaseModel): output_text: typing.List[str] - serp_results: typing.Dict[str, typing.Any] + serp_results: typing.Dict[str, typing.Optional[typing.Any]] references: typing.List[SearchReference] final_prompt: str final_search_query: typing.Optional[str] = None diff --git a/src/gooey/types/google_gpt_page_request_embedding_model.py b/src/gooey/types/google_gpt_page_request_embedding_model.py new file mode 100644 index 0000000..66f060f --- /dev/null +++ b/src/gooey/types/google_gpt_page_request_embedding_model.py @@ -0,0 +1,18 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +GoogleGptPageRequestEmbeddingModel = typing.Union[ + typing.Literal[ + "openai_3_large", + "openai_3_small", + "openai_ada_2", + "e5_large_v2", + "e5_base_v2", + "multilingual_e5_base", + "multilingual_e5_large", + "gte_large", + "gte_base", + ], + typing.Any, +] diff --git a/src/gooey/types/google_gpt_page_request_response_format_type.py b/src/gooey/types/google_gpt_page_request_response_format_type.py new file mode 100644 index 0000000..dd04dec --- /dev/null +++ b/src/gooey/types/google_gpt_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +GoogleGptPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/google_gpt_page_request_selected_model.py b/src/gooey/types/google_gpt_page_request_selected_model.py new file mode 100644 index 0000000..719ae61 --- /dev/null +++ b/src/gooey/types/google_gpt_page_request_selected_model.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +GoogleGptPageRequestSelectedModel = typing.Union[ + typing.Literal[ + "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", + "gpt_4_turbo_vision", + "gpt_4_vision", + "gpt_4_turbo", + "gpt_4", + "gpt_4_32k", + "gpt_3_5_turbo", + "gpt_3_5_turbo_16k", + "gpt_3_5_turbo_instruct", + "llama3_70b", + "llama_3_groq_70b_tool_use", + "llama3_8b", + "llama_3_groq_8b_tool_use", + "llama2_70b_chat", + "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", + "gemma_7b_it", + "gemini_1_5_flash", + "gemini_1_5_pro", + "gemini_1_pro_vision", + "gemini_1_pro", + "palm2_chat", + "palm2_text", + "claude_3_5_sonnet", + "claude_3_opus", + "claude_3_sonnet", + "claude_3_haiku", + "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", + "text_davinci_003", + "text_davinci_002", + "code_davinci_002", + "text_curie_001", + "text_babbage_001", + "text_ada_001", + ], + typing.Any, +] diff --git a/src/gooey/types/google_gpt_page_status_response.py b/src/gooey/types/google_gpt_page_status_response.py index 43ea5a7..b3bf200 100644 --- a/src/gooey/types/google_gpt_page_status_response.py +++ b/src/gooey/types/google_gpt_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. 
-import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .google_gpt_page_output import GoogleGptPageOutput from .recipe_run_state import RecipeRunState +import typing +from .google_gpt_page_output import GoogleGptPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class GoogleGptPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/google_image_gen_page_output.py b/src/gooey/types/google_image_gen_page_output.py index 29ee791..b0dee00 100644 --- a/src/gooey/types/google_image_gen_page_output.py +++ b/src/gooey/types/google_image_gen_page_output.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class GoogleImageGenPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/image_from_web_search_request_selected_model.py b/src/gooey/types/google_image_gen_page_request_selected_model.py similarity index 88% rename from src/gooey/types/image_from_web_search_request_selected_model.py rename to src/gooey/types/google_image_gen_page_request_selected_model.py index f4d498f..c872962 100644 --- a/src/gooey/types/image_from_web_search_request_selected_model.py +++ b/src/gooey/types/google_image_gen_page_request_selected_model.py @@ -2,7 +2,7 @@ import typing -ImageFromWebSearchRequestSelectedModel = typing.Union[ +GoogleImageGenPageRequestSelectedModel = typing.Union[ typing.Literal[ "dream_shaper", "dreamlike_2", diff --git a/src/gooey/types/google_image_gen_page_status_response.py b/src/gooey/types/google_image_gen_page_status_response.py index 9aac44f..189f01f 100644 --- 
a/src/gooey/types/google_image_gen_page_status_response.py +++ b/src/gooey/types/google_image_gen_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .google_image_gen_page_output import GoogleImageGenPageOutput from .recipe_run_state import RecipeRunState +import typing +from .google_image_gen_page_output import GoogleImageGenPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class GoogleImageGenPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/http_validation_error.py b/src/gooey/types/http_validation_error.py index 970dbf8..f52507f 100644 --- a/src/gooey/types/http_validation_error.py +++ b/src/gooey/types/http_validation_error.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .validation_error import ValidationError +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class HttpValidationError(UniversalBaseModel): diff --git a/src/gooey/types/image_segmentation_page_output.py b/src/gooey/types/image_segmentation_page_output.py index 2915320..2fa2226 100644 --- a/src/gooey/types/image_segmentation_page_output.py +++ b/src/gooey/types/image_segmentation_page_output.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class ImageSegmentationPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/image_segmentation_page_request_selected_model.py b/src/gooey/types/image_segmentation_page_request_selected_model.py new file mode 100644 index 0000000..9b4b8d7 --- /dev/null +++ b/src/gooey/types/image_segmentation_page_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ImageSegmentationPageRequestSelectedModel = typing.Union[typing.Literal["dis", "u2net"], typing.Any] diff --git a/src/gooey/types/image_segmentation_page_status_response.py b/src/gooey/types/image_segmentation_page_status_response.py index 6ea0ca9..47a8144 100644 --- a/src/gooey/types/image_segmentation_page_status_response.py +++ b/src/gooey/types/image_segmentation_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .image_segmentation_page_output import ImageSegmentationPageOutput from .recipe_run_state import RecipeRunState +import typing +from .image_segmentation_page_output import ImageSegmentationPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class ImageSegmentationPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/image_url.py b/src/gooey/types/image_url.py index 0cd421c..946cc54 100644 --- a/src/gooey/types/image_url.py +++ b/src/gooey/types/image_url.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .image_url_detail import ImageUrlDetail +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class ImageUrl(UniversalBaseModel): diff --git a/src/gooey/types/img2img_page_output.py b/src/gooey/types/img2img_page_output.py index 987728c..b1e0ee4 100644 --- a/src/gooey/types/img2img_page_output.py +++ b/src/gooey/types/img2img_page_output.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class Img2ImgPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/remix_image_request_selected_controlnet_model.py b/src/gooey/types/img2img_page_request_selected_controlnet_model.py similarity index 71% rename from src/gooey/types/remix_image_request_selected_controlnet_model.py rename to src/gooey/types/img2img_page_request_selected_controlnet_model.py index 1b60b48..df9cb36 100644 --- a/src/gooey/types/remix_image_request_selected_controlnet_model.py +++ b/src/gooey/types/img2img_page_request_selected_controlnet_model.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. 
import typing +from .img2img_page_request_selected_controlnet_model_item import Img2ImgPageRequestSelectedControlnetModelItem -from .remix_image_request_selected_controlnet_model_item import RemixImageRequestSelectedControlnetModelItem - -RemixImageRequestSelectedControlnetModel = typing.Union[ - typing.List[RemixImageRequestSelectedControlnetModelItem], +Img2ImgPageRequestSelectedControlnetModel = typing.Union[ + typing.List[Img2ImgPageRequestSelectedControlnetModelItem], typing.Literal["sd_controlnet_canny"], typing.Literal["sd_controlnet_depth"], typing.Literal["sd_controlnet_hed"], diff --git a/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py b/src/gooey/types/img2img_page_request_selected_controlnet_model_item.py similarity index 88% rename from src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py rename to src/gooey/types/img2img_page_request_selected_controlnet_model_item.py index 3be2ab6..1569cf5 100644 --- a/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py +++ b/src/gooey/types/img2img_page_request_selected_controlnet_model_item.py @@ -2,7 +2,7 @@ import typing -QrCodeRequestImagePromptControlnetModelsItem = typing.Union[ +Img2ImgPageRequestSelectedControlnetModelItem = typing.Union[ typing.Literal[ "sd_controlnet_canny", "sd_controlnet_depth", diff --git a/src/gooey/types/remix_image_request_selected_model.py b/src/gooey/types/img2img_page_request_selected_model.py similarity index 89% rename from src/gooey/types/remix_image_request_selected_model.py rename to src/gooey/types/img2img_page_request_selected_model.py index 245d6b0..506c2b1 100644 --- a/src/gooey/types/remix_image_request_selected_model.py +++ b/src/gooey/types/img2img_page_request_selected_model.py @@ -2,7 +2,7 @@ import typing -RemixImageRequestSelectedModel = typing.Union[ +Img2ImgPageRequestSelectedModel = typing.Union[ typing.Literal[ "dream_shaper", "dreamlike_2", diff --git a/src/gooey/types/img2img_page_status_response.py 
b/src/gooey/types/img2img_page_status_response.py index 811697d..eecfae9 100644 --- a/src/gooey/types/img2img_page_status_response.py +++ b/src/gooey/types/img2img_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .img2img_page_output import Img2ImgPageOutput from .recipe_run_state import RecipeRunState +import typing +from .img2img_page_output import Img2ImgPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class Img2ImgPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/letter_writer_page_output.py b/src/gooey/types/letter_writer_page_output.py index de7d93a..83be763 100644 --- a/src/gooey/types/letter_writer_page_output.py +++ b/src/gooey/types/letter_writer_page_output.py @@ -1,16 +1,15 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class LetterWriterPageOutput(UniversalBaseModel): output_letters: typing.List[str] - response_json: typing.Optional[typing.Any] = None + response_json: typing.Optional[typing.Optional[typing.Any]] = None generated_input_prompt: str final_prompt: str called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None diff --git a/src/gooey/types/letter_writer_page_request.py b/src/gooey/types/letter_writer_page_request.py index 8566afe..5706083 100644 --- a/src/gooey/types/letter_writer_page_request.py +++ b/src/gooey/types/letter_writer_page_request.py @@ -1,18 +1,17 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .recipe_function import RecipeFunction -from .run_settings import RunSettings +import pydantic from .training_data_model import TrainingDataModel +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class LetterWriterPageRequest(UniversalBaseModel): functions: typing.Optional[typing.List[RecipeFunction]] = None - variables: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) """ Variables to be used as Jinja prompt templates and in functions as arguments """ diff --git a/src/gooey/types/letter_writer_page_status_response.py b/src/gooey/types/letter_writer_page_status_response.py index 63e8505..1917067 100644 --- a/src/gooey/types/letter_writer_page_status_response.py +++ b/src/gooey/types/letter_writer_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .letter_writer_page_output import LetterWriterPageOutput from .recipe_run_state import RecipeRunState +import typing +from .letter_writer_page_output import LetterWriterPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class LetterWriterPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/lipsync_page_output.py b/src/gooey/types/lipsync_page_output.py index 70469b4..e3b08c3 100644 --- a/src/gooey/types/lipsync_page_output.py +++ b/src/gooey/types/lipsync_page_output.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class LipsyncPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/lipsync_page_status_response.py b/src/gooey/types/lipsync_page_status_response.py index 7060e62..1ec928e 100644 --- a/src/gooey/types/lipsync_page_status_response.py +++ b/src/gooey/types/lipsync_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .lipsync_page_output import LipsyncPageOutput from .recipe_run_state import RecipeRunState +import typing +from .lipsync_page_output import LipsyncPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class LipsyncPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/lipsync_tts_page_output.py b/src/gooey/types/lipsync_tts_page_output.py index 268b57f..e687416 100644 --- a/src/gooey/types/lipsync_tts_page_output.py +++ b/src/gooey/types/lipsync_tts_page_output.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class LipsyncTtsPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/lipsync_tts_page_request_openai_tts_model.py b/src/gooey/types/lipsync_tts_page_request_openai_tts_model.py new file mode 100644 index 0000000..453ab4a --- /dev/null +++ b/src/gooey/types/lipsync_tts_page_request_openai_tts_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +LipsyncTtsPageRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_openai_voice_name.py b/src/gooey/types/lipsync_tts_page_request_openai_voice_name.py similarity index 74% rename from src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_openai_voice_name.py rename to src/gooey/types/lipsync_tts_page_request_openai_voice_name.py index 59f2cc3..4873924 100644 --- a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_openai_voice_name.py +++ b/src/gooey/types/lipsync_tts_page_request_openai_voice_name.py @@ -2,6 +2,6 @@ import typing -AsyncFormVideoBotsRequestOpenaiVoiceName = typing.Union[ +LipsyncTtsPageRequestOpenaiVoiceName = typing.Union[ typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any ] diff --git a/src/gooey/types/lipsync_tts_page_request_selected_model.py b/src/gooey/types/lipsync_tts_page_request_selected_model.py new file mode 100644 index 0000000..538058b --- /dev/null +++ b/src/gooey/types/lipsync_tts_page_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +LipsyncTtsPageRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_tts_provider.py b/src/gooey/types/lipsync_tts_page_request_tts_provider.py similarity index 77% rename from src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_tts_provider.py rename to src/gooey/types/lipsync_tts_page_request_tts_provider.py index 4142fc5..7e73fda 100644 --- a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_tts_provider.py +++ b/src/gooey/types/lipsync_tts_page_request_tts_provider.py @@ -2,6 +2,6 @@ import typing -AsyncFormVideoBotsRequestTtsProvider = typing.Union[ +LipsyncTtsPageRequestTtsProvider = typing.Union[ typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any ] diff --git a/src/gooey/types/lipsync_tts_page_status_response.py b/src/gooey/types/lipsync_tts_page_status_response.py index 6cb65b3..6502e07 100644 --- a/src/gooey/types/lipsync_tts_page_status_response.py +++ b/src/gooey/types/lipsync_tts_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. 
-import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .lipsync_tts_page_output import LipsyncTtsPageOutput from .recipe_run_state import RecipeRunState +import typing +from .lipsync_tts_page_output import LipsyncTtsPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class LipsyncTtsPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/lipsync_tts_request_openai_tts_model.py b/src/gooey/types/lipsync_tts_request_openai_tts_model.py deleted file mode 100644 index 510dcfb..0000000 --- a/src/gooey/types/lipsync_tts_request_openai_tts_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -LipsyncTtsRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/types/lipsync_tts_request_selected_model.py b/src/gooey/types/lipsync_tts_request_selected_model.py deleted file mode 100644 index 9ece5a9..0000000 --- a/src/gooey/types/lipsync_tts_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -LipsyncTtsRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/llm_request_response_format_type.py b/src/gooey/types/llm_request_response_format_type.py deleted file mode 100644 index aa0e5e2..0000000 --- a/src/gooey/types/llm_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -LlmRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/message_part.py b/src/gooey/types/message_part.py index 03c4ab3..13b4a10 100644 --- a/src/gooey/types/message_part.py +++ b/src/gooey/types/message_part.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. -import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel +from ..core.pydantic_utilities import UniversalBaseModel from .recipe_run_state import RecipeRunState +import pydantic +import typing from .reply_button import ReplyButton +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class MessagePart(UniversalBaseModel): diff --git a/src/gooey/types/object_inpainting_page_output.py b/src/gooey/types/object_inpainting_page_output.py index ca27c77..e7ba200 100644 --- a/src/gooey/types/object_inpainting_page_output.py +++ b/src/gooey/types/object_inpainting_page_output.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class ObjectInpaintingPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/object_inpainting_page_request_selected_model.py b/src/gooey/types/object_inpainting_page_request_selected_model.py new file mode 100644 index 0000000..92f1302 --- /dev/null +++ b/src/gooey/types/object_inpainting_page_request_selected_model.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +ObjectInpaintingPageRequestSelectedModel = typing.Union[ + typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any +] diff --git a/src/gooey/types/object_inpainting_page_status_response.py b/src/gooey/types/object_inpainting_page_status_response.py index 96da6e3..1df53cc 100644 --- a/src/gooey/types/object_inpainting_page_status_response.py +++ b/src/gooey/types/object_inpainting_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .object_inpainting_page_output import ObjectInpaintingPageOutput from .recipe_run_state import RecipeRunState +import typing +from .object_inpainting_page_output import ObjectInpaintingPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class ObjectInpaintingPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/personalize_email_request_response_format_type.py b/src/gooey/types/personalize_email_request_response_format_type.py deleted file mode 100644 index 1bedf2e..0000000 --- a/src/gooey/types/personalize_email_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PersonalizeEmailRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/personalize_email_request_selected_model.py b/src/gooey/types/personalize_email_request_selected_model.py deleted file mode 100644 index 3a01b07..0000000 --- a/src/gooey/types/personalize_email_request_selected_model.py +++ /dev/null @@ -1,47 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PersonalizeEmailRequestSelectedModel = typing.Union[ - typing.Literal[ - "gpt_4_o", - "gpt_4_o_mini", - "chatgpt_4_o", - "gpt_4_turbo_vision", - "gpt_4_vision", - "gpt_4_turbo", - "gpt_4", - "gpt_4_32k", - "gpt_3_5_turbo", - "gpt_3_5_turbo_16k", - "gpt_3_5_turbo_instruct", - "llama3_70b", - "llama_3_groq_70b_tool_use", - "llama3_8b", - "llama_3_groq_8b_tool_use", - "llama2_70b_chat", - "mixtral_8x7b_instruct_0_1", - "gemma_2_9b_it", - "gemma_7b_it", - "gemini_1_5_flash", - "gemini_1_5_pro", - "gemini_1_pro_vision", - "gemini_1_pro", - "palm2_chat", - "palm2_text", - "claude_3_5_sonnet", - "claude_3_opus", - "claude_3_sonnet", - "claude_3_haiku", - "sea_lion_7b_instruct", - "llama3_8b_cpt_sea_lion_v2_instruct", - "sarvam_2b", - "text_davinci_003", - "text_davinci_002", - "code_davinci_002", - "text_curie_001", - "text_babbage_001", - "text_ada_001", - ], - typing.Any, -] diff --git a/src/gooey/types/portrait_request_selected_model.py b/src/gooey/types/portrait_request_selected_model.py deleted file mode 100644 index 6c4a5ce..0000000 --- a/src/gooey/types/portrait_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PortraitRequestSelectedModel = typing.Union[typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any] diff --git a/src/gooey/types/post_v3art_qr_code_async_form_request_image_prompt_controlnet_models_item.py b/src/gooey/types/post_v3art_qr_code_async_form_request_image_prompt_controlnet_models_item.py new file mode 100644 index 0000000..6a287e6 --- /dev/null +++ b/src/gooey/types/post_v3art_qr_code_async_form_request_image_prompt_controlnet_models_item.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3ArtQrCodeAsyncFormRequestImagePromptControlnetModelsItem = typing.Union[ + typing.Literal[ + "sd_controlnet_canny", + "sd_controlnet_depth", + "sd_controlnet_hed", + "sd_controlnet_mlsd", + "sd_controlnet_normal", + "sd_controlnet_openpose", + "sd_controlnet_scribble", + "sd_controlnet_seg", + "sd_controlnet_tile", + "sd_controlnet_brightness", + "control_v1p_sd15_qrcode_monster_v2", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3art_qr_code_async_form_request_scheduler.py b/src/gooey/types/post_v3art_qr_code_async_form_request_scheduler.py new file mode 100644 index 0000000..fb1ad97 --- /dev/null +++ b/src/gooey/types/post_v3art_qr_code_async_form_request_scheduler.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3ArtQrCodeAsyncFormRequestScheduler = typing.Union[ + typing.Literal[ + "singlestep_dpm_solver", + "multistep_dpm_solver", + "dpm_sde", + "dpm_discrete", + "dpm_discrete_ancestral", + "unipc", + "lms_discrete", + "heun", + "euler", + "euler_ancestral", + "pndm", + "ddpm", + "ddim", + "deis", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3art_qr_code_async_form_request_selected_controlnet_model_item.py b/src/gooey/types/post_v3art_qr_code_async_form_request_selected_controlnet_model_item.py new file mode 100644 index 0000000..b36bff7 --- /dev/null +++ b/src/gooey/types/post_v3art_qr_code_async_form_request_selected_controlnet_model_item.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3ArtQrCodeAsyncFormRequestSelectedControlnetModelItem = typing.Union[ + typing.Literal[ + "sd_controlnet_canny", + "sd_controlnet_depth", + "sd_controlnet_hed", + "sd_controlnet_mlsd", + "sd_controlnet_normal", + "sd_controlnet_openpose", + "sd_controlnet_scribble", + "sd_controlnet_seg", + "sd_controlnet_tile", + "sd_controlnet_brightness", + "control_v1p_sd15_qrcode_monster_v2", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3art_qr_code_async_form_request_selected_model.py b/src/gooey/types/post_v3art_qr_code_async_form_request_selected_model.py new file mode 100644 index 0000000..5334908 --- /dev/null +++ b/src/gooey/types/post_v3art_qr_code_async_form_request_selected_model.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3ArtQrCodeAsyncFormRequestSelectedModel = typing.Union[ + typing.Literal[ + "dream_shaper", + "dreamlike_2", + "sd_2", + "sd_1_5", + "dall_e", + "dall_e_3", + "openjourney_2", + "openjourney", + "analog_diffusion", + "protogen_5_3", + "jack_qiao", + "rodent_diffusion_1_5", + "deepfloyd_if", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3asr_async_form_request_output_format.py b/src/gooey/types/post_v3asr_async_form_request_output_format.py new file mode 100644 index 0000000..dad1d11 --- /dev/null +++ b/src/gooey/types/post_v3asr_async_form_request_output_format.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3AsrAsyncFormRequestOutputFormat = typing.Union[typing.Literal["text", "json", "srt", "vtt"], typing.Any] diff --git a/src/gooey/types/post_v3asr_async_form_request_selected_model.py b/src/gooey/types/post_v3asr_async_form_request_selected_model.py new file mode 100644 index 0000000..270207d --- /dev/null +++ b/src/gooey/types/post_v3asr_async_form_request_selected_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3AsrAsyncFormRequestSelectedModel = typing.Union[ + typing.Literal[ + "whisper_large_v2", + "whisper_large_v3", + "whisper_hindi_large_v2", + "whisper_telugu_large_v2", + "nemo_english", + "nemo_hindi", + "vakyansh_bhojpuri", + "gcp_v1", + "usm", + "deepgram", + "azure", + "seamless_m4t_v2", + "mms_1b_all", + "seamless_m4t", + ], + typing.Any, +] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_translation_model.py b/src/gooey/types/post_v3asr_async_form_request_translation_model.py similarity index 65% rename from src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_translation_model.py rename to src/gooey/types/post_v3asr_async_form_request_translation_model.py index 38d5296..2a42d5c 100644 --- a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_translation_model.py +++ b/src/gooey/types/post_v3asr_async_form_request_translation_model.py @@ -2,4 +2,4 @@ import typing -AsyncFormVideoBotsRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] +PostV3AsrAsyncFormRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/post_v3bulk_eval_async_form_request_response_format_type.py b/src/gooey/types/post_v3bulk_eval_async_form_request_response_format_type.py new file mode 100644 index 0000000..05fa75a --- /dev/null +++ b/src/gooey/types/post_v3bulk_eval_async_form_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3BulkEvalAsyncFormRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/smart_gpt/types/async_form_smart_gpt_request_selected_model.py b/src/gooey/types/post_v3bulk_eval_async_form_request_selected_model.py similarity index 95% rename from src/gooey/smart_gpt/types/async_form_smart_gpt_request_selected_model.py rename to src/gooey/types/post_v3bulk_eval_async_form_request_selected_model.py index 864c97b..d70df69 100644 --- a/src/gooey/smart_gpt/types/async_form_smart_gpt_request_selected_model.py +++ b/src/gooey/types/post_v3bulk_eval_async_form_request_selected_model.py @@ -2,7 +2,7 @@ import typing -AsyncFormSmartGptRequestSelectedModel = typing.Union[ +PostV3BulkEvalAsyncFormRequestSelectedModel = typing.Union[ typing.Literal[ "gpt_4_o", "gpt_4_o_mini", diff --git a/src/gooey/types/post_v3compare_ai_upscalers_async_form_request_selected_models_item.py b/src/gooey/types/post_v3compare_ai_upscalers_async_form_request_selected_models_item.py new file mode 100644 index 0000000..7d53c0d --- /dev/null +++ b/src/gooey/types/post_v3compare_ai_upscalers_async_form_request_selected_models_item.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3CompareAiUpscalersAsyncFormRequestSelectedModelsItem = typing.Union[ + typing.Literal["gfpgan_1_4", "real_esrgan_x2", "sd_x4", "real_esrgan", "gfpgan"], typing.Any +] diff --git a/src/gooey/types/post_v3compare_llm_async_form_request_response_format_type.py b/src/gooey/types/post_v3compare_llm_async_form_request_response_format_type.py new file mode 100644 index 0000000..b7f1372 --- /dev/null +++ b/src/gooey/types/post_v3compare_llm_async_form_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3CompareLlmAsyncFormRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/post_v3compare_llm_async_form_request_selected_models_item.py b/src/gooey/types/post_v3compare_llm_async_form_request_selected_models_item.py new file mode 100644 index 0000000..58f240c --- /dev/null +++ b/src/gooey/types/post_v3compare_llm_async_form_request_selected_models_item.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3CompareLlmAsyncFormRequestSelectedModelsItem = typing.Union[ + typing.Literal[ + "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", + "gpt_4_turbo_vision", + "gpt_4_vision", + "gpt_4_turbo", + "gpt_4", + "gpt_4_32k", + "gpt_3_5_turbo", + "gpt_3_5_turbo_16k", + "gpt_3_5_turbo_instruct", + "llama3_70b", + "llama_3_groq_70b_tool_use", + "llama3_8b", + "llama_3_groq_8b_tool_use", + "llama2_70b_chat", + "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", + "gemma_7b_it", + "gemini_1_5_flash", + "gemini_1_5_pro", + "gemini_1_pro_vision", + "gemini_1_pro", + "palm2_chat", + "palm2_text", + "claude_3_5_sonnet", + "claude_3_opus", + "claude_3_sonnet", + "claude_3_haiku", + "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", + "text_davinci_003", + "text_davinci_002", + "code_davinci_002", + "text_curie_001", + "text_babbage_001", + "text_ada_001", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3compare_text2img_async_form_request_scheduler.py b/src/gooey/types/post_v3compare_text2img_async_form_request_scheduler.py new file mode 100644 index 0000000..ea82032 --- /dev/null +++ b/src/gooey/types/post_v3compare_text2img_async_form_request_scheduler.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3CompareText2ImgAsyncFormRequestScheduler = typing.Union[ + typing.Literal[ + "singlestep_dpm_solver", + "multistep_dpm_solver", + "dpm_sde", + "dpm_discrete", + "dpm_discrete_ancestral", + "unipc", + "lms_discrete", + "heun", + "euler", + "euler_ancestral", + "pndm", + "ddpm", + "ddim", + "deis", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3compare_text2img_async_form_request_selected_models_item.py b/src/gooey/types/post_v3compare_text2img_async_form_request_selected_models_item.py new file mode 100644 index 0000000..abe78ed --- /dev/null +++ b/src/gooey/types/post_v3compare_text2img_async_form_request_selected_models_item.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3CompareText2ImgAsyncFormRequestSelectedModelsItem = typing.Union[ + typing.Literal[ + "dream_shaper", + "dreamlike_2", + "sd_2", + "sd_1_5", + "dall_e", + "dall_e_3", + "openjourney_2", + "openjourney", + "analog_diffusion", + "protogen_5_3", + "jack_qiao", + "rodent_diffusion_1_5", + "deepfloyd_if", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3deforum_sd_async_form_request_selected_model.py b/src/gooey/types/post_v3deforum_sd_async_form_request_selected_model.py new file mode 100644 index 0000000..8561b6f --- /dev/null +++ b/src/gooey/types/post_v3deforum_sd_async_form_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3DeforumSdAsyncFormRequestSelectedModel = typing.Union[typing.Literal["protogen_2_2", "epicdream"], typing.Any] diff --git a/src/gooey/types/post_v3doc_extract_async_form_request_response_format_type.py b/src/gooey/types/post_v3doc_extract_async_form_request_response_format_type.py new file mode 100644 index 0000000..759e46c --- /dev/null +++ b/src/gooey/types/post_v3doc_extract_async_form_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3DocExtractAsyncFormRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/post_v3doc_extract_async_form_request_selected_asr_model.py b/src/gooey/types/post_v3doc_extract_async_form_request_selected_asr_model.py new file mode 100644 index 0000000..7ac96ae --- /dev/null +++ b/src/gooey/types/post_v3doc_extract_async_form_request_selected_asr_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3DocExtractAsyncFormRequestSelectedAsrModel = typing.Union[ + typing.Literal[ + "whisper_large_v2", + "whisper_large_v3", + "whisper_hindi_large_v2", + "whisper_telugu_large_v2", + "nemo_english", + "nemo_hindi", + "vakyansh_bhojpuri", + "gcp_v1", + "usm", + "deepgram", + "azure", + "seamless_m4t_v2", + "mms_1b_all", + "seamless_m4t", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3doc_extract_async_form_request_selected_model.py b/src/gooey/types/post_v3doc_extract_async_form_request_selected_model.py new file mode 100644 index 0000000..7d72e68 --- /dev/null +++ b/src/gooey/types/post_v3doc_extract_async_form_request_selected_model.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3DocExtractAsyncFormRequestSelectedModel = typing.Union[ + typing.Literal[ + "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", + "gpt_4_turbo_vision", + "gpt_4_vision", + "gpt_4_turbo", + "gpt_4", + "gpt_4_32k", + "gpt_3_5_turbo", + "gpt_3_5_turbo_16k", + "gpt_3_5_turbo_instruct", + "llama3_70b", + "llama_3_groq_70b_tool_use", + "llama3_8b", + "llama_3_groq_8b_tool_use", + "llama2_70b_chat", + "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", + "gemma_7b_it", + "gemini_1_5_flash", + "gemini_1_5_pro", + "gemini_1_pro_vision", + "gemini_1_pro", + "palm2_chat", + "palm2_text", + "claude_3_5_sonnet", + "claude_3_opus", + "claude_3_sonnet", + "claude_3_haiku", + "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", + "text_davinci_003", + "text_davinci_002", + "code_davinci_002", + "text_curie_001", + "text_babbage_001", + "text_ada_001", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3doc_search_async_form_request_citation_style.py b/src/gooey/types/post_v3doc_search_async_form_request_citation_style.py new file mode 100644 index 0000000..cf1bb3c --- /dev/null +++ b/src/gooey/types/post_v3doc_search_async_form_request_citation_style.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3DocSearchAsyncFormRequestCitationStyle = typing.Union[ + typing.Literal[ + "number", + "title", + "url", + "symbol", + "markdown", + "html", + "slack_mrkdwn", + "plaintext", + "number_markdown", + "number_html", + "number_slack_mrkdwn", + "number_plaintext", + "symbol_markdown", + "symbol_html", + "symbol_slack_mrkdwn", + "symbol_plaintext", + ], + typing.Any, +] diff --git a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_embedding_model.py b/src/gooey/types/post_v3doc_search_async_form_request_embedding_model.py similarity index 85% rename from src/gooey/copilot_integrations/types/video_bots_stream_create_request_embedding_model.py rename to src/gooey/types/post_v3doc_search_async_form_request_embedding_model.py index 56f2399..642358a 100644 --- a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_embedding_model.py +++ b/src/gooey/types/post_v3doc_search_async_form_request_embedding_model.py @@ -2,7 +2,7 @@ import typing -VideoBotsStreamCreateRequestEmbeddingModel = typing.Union[ +PostV3DocSearchAsyncFormRequestEmbeddingModel = typing.Union[ typing.Literal[ "openai_3_large", "openai_3_small", diff --git a/src/gooey/types/post_v3doc_search_async_form_request_keyword_query.py b/src/gooey/types/post_v3doc_search_async_form_request_keyword_query.py new file mode 100644 index 0000000..47e1ead --- /dev/null +++ b/src/gooey/types/post_v3doc_search_async_form_request_keyword_query.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3DocSearchAsyncFormRequestKeywordQuery = typing.Union[str, typing.List[str]] diff --git a/src/gooey/types/post_v3doc_search_async_form_request_response_format_type.py b/src/gooey/types/post_v3doc_search_async_form_request_response_format_type.py new file mode 100644 index 0000000..852b2c4 --- /dev/null +++ b/src/gooey/types/post_v3doc_search_async_form_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3DocSearchAsyncFormRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/post_v3doc_search_async_form_request_selected_model.py b/src/gooey/types/post_v3doc_search_async_form_request_selected_model.py new file mode 100644 index 0000000..dff6941 --- /dev/null +++ b/src/gooey/types/post_v3doc_search_async_form_request_selected_model.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3DocSearchAsyncFormRequestSelectedModel = typing.Union[ + typing.Literal[ + "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", + "gpt_4_turbo_vision", + "gpt_4_vision", + "gpt_4_turbo", + "gpt_4", + "gpt_4_32k", + "gpt_3_5_turbo", + "gpt_3_5_turbo_16k", + "gpt_3_5_turbo_instruct", + "llama3_70b", + "llama_3_groq_70b_tool_use", + "llama3_8b", + "llama_3_groq_8b_tool_use", + "llama2_70b_chat", + "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", + "gemma_7b_it", + "gemini_1_5_flash", + "gemini_1_5_pro", + "gemini_1_pro_vision", + "gemini_1_pro", + "palm2_chat", + "palm2_text", + "claude_3_5_sonnet", + "claude_3_opus", + "claude_3_sonnet", + "claude_3_haiku", + "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", + "text_davinci_003", + "text_davinci_002", + "code_davinci_002", + "text_curie_001", + "text_babbage_001", + "text_ada_001", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3doc_summary_async_form_request_response_format_type.py b/src/gooey/types/post_v3doc_summary_async_form_request_response_format_type.py new file mode 100644 index 0000000..6d4d724 --- /dev/null +++ b/src/gooey/types/post_v3doc_summary_async_form_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3DocSummaryAsyncFormRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/post_v3doc_summary_async_form_request_selected_asr_model.py b/src/gooey/types/post_v3doc_summary_async_form_request_selected_asr_model.py new file mode 100644 index 0000000..a696f62 --- /dev/null +++ b/src/gooey/types/post_v3doc_summary_async_form_request_selected_asr_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3DocSummaryAsyncFormRequestSelectedAsrModel = typing.Union[ + typing.Literal[ + "whisper_large_v2", + "whisper_large_v3", + "whisper_hindi_large_v2", + "whisper_telugu_large_v2", + "nemo_english", + "nemo_hindi", + "vakyansh_bhojpuri", + "gcp_v1", + "usm", + "deepgram", + "azure", + "seamless_m4t_v2", + "mms_1b_all", + "seamless_m4t", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3doc_summary_async_form_request_selected_model.py b/src/gooey/types/post_v3doc_summary_async_form_request_selected_model.py new file mode 100644 index 0000000..599fac8 --- /dev/null +++ b/src/gooey/types/post_v3doc_summary_async_form_request_selected_model.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3DocSummaryAsyncFormRequestSelectedModel = typing.Union[ + typing.Literal[ + "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", + "gpt_4_turbo_vision", + "gpt_4_vision", + "gpt_4_turbo", + "gpt_4", + "gpt_4_32k", + "gpt_3_5_turbo", + "gpt_3_5_turbo_16k", + "gpt_3_5_turbo_instruct", + "llama3_70b", + "llama_3_groq_70b_tool_use", + "llama3_8b", + "llama_3_groq_8b_tool_use", + "llama2_70b_chat", + "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", + "gemma_7b_it", + "gemini_1_5_flash", + "gemini_1_5_pro", + "gemini_1_pro_vision", + "gemini_1_pro", + "palm2_chat", + "palm2_text", + "claude_3_5_sonnet", + "claude_3_opus", + "claude_3_sonnet", + "claude_3_haiku", + "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", + "text_davinci_003", + "text_davinci_002", + "code_davinci_002", + "text_curie_001", + "text_babbage_001", + "text_ada_001", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3email_face_inpainting_async_form_request_selected_model.py b/src/gooey/types/post_v3email_face_inpainting_async_form_request_selected_model.py new file mode 100644 index 0000000..1ae0620 --- /dev/null +++ b/src/gooey/types/post_v3email_face_inpainting_async_form_request_selected_model.py 
@@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3EmailFaceInpaintingAsyncFormRequestSelectedModel = typing.Union[ + typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any +] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_embedding_model.py b/src/gooey/types/post_v3embeddings_async_form_request_selected_model.py similarity index 85% rename from src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_embedding_model.py rename to src/gooey/types/post_v3embeddings_async_form_request_selected_model.py index f66aed4..c5bdb16 100644 --- a/src/gooey/copilot_for_your_enterprise/types/async_form_video_bots_request_embedding_model.py +++ b/src/gooey/types/post_v3embeddings_async_form_request_selected_model.py @@ -2,7 +2,7 @@ import typing -AsyncFormVideoBotsRequestEmbeddingModel = typing.Union[ +PostV3EmbeddingsAsyncFormRequestSelectedModel = typing.Union[ typing.Literal[ "openai_3_large", "openai_3_small", diff --git a/src/gooey/types/post_v3face_inpainting_async_form_request_selected_model.py b/src/gooey/types/post_v3face_inpainting_async_form_request_selected_model.py new file mode 100644 index 0000000..2824ac1 --- /dev/null +++ b/src/gooey/types/post_v3face_inpainting_async_form_request_selected_model.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3FaceInpaintingAsyncFormRequestSelectedModel = typing.Union[ + typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any +] diff --git a/src/gooey/types/post_v3google_gpt_async_form_request_embedding_model.py b/src/gooey/types/post_v3google_gpt_async_form_request_embedding_model.py new file mode 100644 index 0000000..f6a3714 --- /dev/null +++ b/src/gooey/types/post_v3google_gpt_async_form_request_embedding_model.py @@ -0,0 +1,18 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3GoogleGptAsyncFormRequestEmbeddingModel = typing.Union[ + typing.Literal[ + "openai_3_large", + "openai_3_small", + "openai_ada_2", + "e5_large_v2", + "e5_base_v2", + "multilingual_e5_base", + "multilingual_e5_large", + "gte_large", + "gte_base", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3google_gpt_async_form_request_response_format_type.py b/src/gooey/types/post_v3google_gpt_async_form_request_response_format_type.py new file mode 100644 index 0000000..b9e609e --- /dev/null +++ b/src/gooey/types/post_v3google_gpt_async_form_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3GoogleGptAsyncFormRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/post_v3google_gpt_async_form_request_selected_model.py b/src/gooey/types/post_v3google_gpt_async_form_request_selected_model.py new file mode 100644 index 0000000..da55a8d --- /dev/null +++ b/src/gooey/types/post_v3google_gpt_async_form_request_selected_model.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3GoogleGptAsyncFormRequestSelectedModel = typing.Union[ + typing.Literal[ + "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", + "gpt_4_turbo_vision", + "gpt_4_vision", + "gpt_4_turbo", + "gpt_4", + "gpt_4_32k", + "gpt_3_5_turbo", + "gpt_3_5_turbo_16k", + "gpt_3_5_turbo_instruct", + "llama3_70b", + "llama_3_groq_70b_tool_use", + "llama3_8b", + "llama_3_groq_8b_tool_use", + "llama2_70b_chat", + "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", + "gemma_7b_it", + "gemini_1_5_flash", + "gemini_1_5_pro", + "gemini_1_pro_vision", + "gemini_1_pro", + "palm2_chat", + "palm2_text", + "claude_3_5_sonnet", + "claude_3_opus", + "claude_3_sonnet", + "claude_3_haiku", + "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", + "text_davinci_003", + "text_davinci_002", + "code_davinci_002", + "text_curie_001", + "text_babbage_001", + "text_ada_001", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3google_image_gen_async_form_request_selected_model.py b/src/gooey/types/post_v3google_image_gen_async_form_request_selected_model.py new file mode 100644 index 0000000..4cc02ae --- /dev/null +++ b/src/gooey/types/post_v3google_image_gen_async_form_request_selected_model.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3GoogleImageGenAsyncFormRequestSelectedModel = typing.Union[ + typing.Literal[ + "dream_shaper", + "dreamlike_2", + "sd_2", + "sd_1_5", + "dall_e", + "instruct_pix2pix", + "openjourney_2", + "openjourney", + "analog_diffusion", + "protogen_5_3", + "jack_qiao", + "rodent_diffusion_1_5", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3image_segmentation_async_form_request_selected_model.py b/src/gooey/types/post_v3image_segmentation_async_form_request_selected_model.py new file mode 100644 index 0000000..2ce98fe --- /dev/null +++ b/src/gooey/types/post_v3image_segmentation_async_form_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3ImageSegmentationAsyncFormRequestSelectedModel = typing.Union[typing.Literal["dis", "u2net"], typing.Any] diff --git a/src/gooey/types/post_v3img2img_async_form_request_selected_controlnet_model.py b/src/gooey/types/post_v3img2img_async_form_request_selected_controlnet_model.py new file mode 100644 index 0000000..8605c47 --- /dev/null +++ b/src/gooey/types/post_v3img2img_async_form_request_selected_controlnet_model.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from .post_v3img2img_async_form_request_selected_controlnet_model_item import ( + PostV3Img2ImgAsyncFormRequestSelectedControlnetModelItem, +) + +PostV3Img2ImgAsyncFormRequestSelectedControlnetModel = typing.Union[ + typing.List[PostV3Img2ImgAsyncFormRequestSelectedControlnetModelItem], + typing.Literal["sd_controlnet_canny"], + typing.Literal["sd_controlnet_depth"], + typing.Literal["sd_controlnet_hed"], + typing.Literal["sd_controlnet_mlsd"], + typing.Literal["sd_controlnet_normal"], + typing.Literal["sd_controlnet_openpose"], + typing.Literal["sd_controlnet_scribble"], + typing.Literal["sd_controlnet_seg"], + typing.Literal["sd_controlnet_tile"], + typing.Literal["sd_controlnet_brightness"], + typing.Literal["control_v1p_sd15_qrcode_monster_v2"], +] diff --git a/src/gooey/types/remix_image_request_selected_controlnet_model_item.py b/src/gooey/types/post_v3img2img_async_form_request_selected_controlnet_model_item.py similarity index 86% rename from src/gooey/types/remix_image_request_selected_controlnet_model_item.py rename to src/gooey/types/post_v3img2img_async_form_request_selected_controlnet_model_item.py index b4f3ff0..e56303b 100644 --- a/src/gooey/types/remix_image_request_selected_controlnet_model_item.py +++ b/src/gooey/types/post_v3img2img_async_form_request_selected_controlnet_model_item.py @@ -2,7 +2,7 @@ import typing -RemixImageRequestSelectedControlnetModelItem = typing.Union[ +PostV3Img2ImgAsyncFormRequestSelectedControlnetModelItem = typing.Union[ typing.Literal[ "sd_controlnet_canny", "sd_controlnet_depth", diff --git a/src/gooey/types/post_v3img2img_async_form_request_selected_model.py b/src/gooey/types/post_v3img2img_async_form_request_selected_model.py new file mode 100644 index 0000000..6218c7b --- /dev/null +++ b/src/gooey/types/post_v3img2img_async_form_request_selected_model.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3Img2ImgAsyncFormRequestSelectedModel = typing.Union[ + typing.Literal[ + "dream_shaper", + "dreamlike_2", + "sd_2", + "sd_1_5", + "dall_e", + "instruct_pix2pix", + "openjourney_2", + "openjourney", + "analog_diffusion", + "protogen_5_3", + "jack_qiao", + "rodent_diffusion_1_5", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3lipsync_async_form_request_selected_model.py b/src/gooey/types/post_v3lipsync_async_form_request_selected_model.py new file mode 100644 index 0000000..19c0255 --- /dev/null +++ b/src/gooey/types/post_v3lipsync_async_form_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3LipsyncAsyncFormRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/post_v3lipsync_tts_async_form_request_openai_tts_model.py b/src/gooey/types/post_v3lipsync_tts_async_form_request_openai_tts_model.py new file mode 100644 index 0000000..a0eff55 --- /dev/null +++ b/src/gooey/types/post_v3lipsync_tts_async_form_request_openai_tts_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3LipsyncTtsAsyncFormRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/types/post_v3lipsync_tts_async_form_request_openai_voice_name.py b/src/gooey/types/post_v3lipsync_tts_async_form_request_openai_voice_name.py new file mode 100644 index 0000000..2bbbd54 --- /dev/null +++ b/src/gooey/types/post_v3lipsync_tts_async_form_request_openai_voice_name.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3LipsyncTtsAsyncFormRequestOpenaiVoiceName = typing.Union[ + typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any +] diff --git a/src/gooey/types/post_v3lipsync_tts_async_form_request_selected_model.py b/src/gooey/types/post_v3lipsync_tts_async_form_request_selected_model.py new file mode 100644 index 0000000..bcfe20e --- /dev/null +++ b/src/gooey/types/post_v3lipsync_tts_async_form_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3LipsyncTtsAsyncFormRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/post_v3lipsync_tts_async_form_request_tts_provider.py b/src/gooey/types/post_v3lipsync_tts_async_form_request_tts_provider.py new file mode 100644 index 0000000..25be098 --- /dev/null +++ b/src/gooey/types/post_v3lipsync_tts_async_form_request_tts_provider.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3LipsyncTtsAsyncFormRequestTtsProvider = typing.Union[ + typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any +] diff --git a/src/gooey/types/post_v3object_inpainting_async_form_request_selected_model.py b/src/gooey/types/post_v3object_inpainting_async_form_request_selected_model.py new file mode 100644 index 0000000..8dec227 --- /dev/null +++ b/src/gooey/types/post_v3object_inpainting_async_form_request_selected_model.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3ObjectInpaintingAsyncFormRequestSelectedModel = typing.Union[ + typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any +] diff --git a/src/gooey/types/post_v3related_qna_maker_async_form_request_embedding_model.py b/src/gooey/types/post_v3related_qna_maker_async_form_request_embedding_model.py new file mode 100644 index 0000000..9390765 --- /dev/null +++ b/src/gooey/types/post_v3related_qna_maker_async_form_request_embedding_model.py @@ -0,0 +1,18 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3RelatedQnaMakerAsyncFormRequestEmbeddingModel = typing.Union[ + typing.Literal[ + "openai_3_large", + "openai_3_small", + "openai_ada_2", + "e5_large_v2", + "e5_base_v2", + "multilingual_e5_base", + "multilingual_e5_large", + "gte_large", + "gte_base", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3related_qna_maker_async_form_request_response_format_type.py b/src/gooey/types/post_v3related_qna_maker_async_form_request_response_format_type.py new file mode 100644 index 0000000..9e6ca22 --- /dev/null +++ b/src/gooey/types/post_v3related_qna_maker_async_form_request_response_format_type.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3RelatedQnaMakerAsyncFormRequestResponseFormatType = typing.Union[ + typing.Literal["text", "json_object"], typing.Any +] diff --git a/src/gooey/types/post_v3related_qna_maker_async_form_request_selected_model.py b/src/gooey/types/post_v3related_qna_maker_async_form_request_selected_model.py new file mode 100644 index 0000000..1000455 --- /dev/null +++ b/src/gooey/types/post_v3related_qna_maker_async_form_request_selected_model.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3RelatedQnaMakerAsyncFormRequestSelectedModel = typing.Union[ + typing.Literal[ + "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", + "gpt_4_turbo_vision", + "gpt_4_vision", + "gpt_4_turbo", + "gpt_4", + "gpt_4_32k", + "gpt_3_5_turbo", + "gpt_3_5_turbo_16k", + "gpt_3_5_turbo_instruct", + "llama3_70b", + "llama_3_groq_70b_tool_use", + "llama3_8b", + "llama_3_groq_8b_tool_use", + "llama2_70b_chat", + "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", + "gemma_7b_it", + "gemini_1_5_flash", + "gemini_1_5_pro", + "gemini_1_pro_vision", + "gemini_1_pro", + "palm2_chat", + "palm2_text", + "claude_3_5_sonnet", + "claude_3_opus", + "claude_3_sonnet", + "claude_3_haiku", + "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", + "text_davinci_003", + "text_davinci_002", + "code_davinci_002", + "text_curie_001", + "text_babbage_001", + "text_ada_001", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_citation_style.py b/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_citation_style.py new file mode 100644 index 0000000..f391080 --- /dev/null +++ b/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_citation_style.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3RelatedQnaMakerDocAsyncFormRequestCitationStyle = typing.Union[ + typing.Literal[ + "number", + "title", + "url", + "symbol", + "markdown", + "html", + "slack_mrkdwn", + "plaintext", + "number_markdown", + "number_html", + "number_slack_mrkdwn", + "number_plaintext", + "symbol_markdown", + "symbol_html", + "symbol_slack_mrkdwn", + "symbol_plaintext", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_embedding_model.py b/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_embedding_model.py new file mode 100644 index 0000000..3af393d --- /dev/null +++ b/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_embedding_model.py @@ -0,0 +1,18 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3RelatedQnaMakerDocAsyncFormRequestEmbeddingModel = typing.Union[ + typing.Literal[ + "openai_3_large", + "openai_3_small", + "openai_ada_2", + "e5_large_v2", + "e5_base_v2", + "multilingual_e5_base", + "multilingual_e5_large", + "gte_large", + "gte_base", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_keyword_query.py b/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_keyword_query.py new file mode 100644 index 0000000..3268b32 --- /dev/null +++ b/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_keyword_query.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3RelatedQnaMakerDocAsyncFormRequestKeywordQuery = typing.Union[str, typing.List[str]] diff --git a/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_response_format_type.py b/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_response_format_type.py new file mode 100644 index 0000000..732cda0 --- /dev/null +++ b/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_response_format_type.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3RelatedQnaMakerDocAsyncFormRequestResponseFormatType = typing.Union[ + typing.Literal["text", "json_object"], typing.Any +] diff --git a/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_selected_model.py b/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_selected_model.py new file mode 100644 index 0000000..7fbdfaf --- /dev/null +++ b/src/gooey/types/post_v3related_qna_maker_doc_async_form_request_selected_model.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3RelatedQnaMakerDocAsyncFormRequestSelectedModel = typing.Union[ + typing.Literal[ + "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", + "gpt_4_turbo_vision", + "gpt_4_vision", + "gpt_4_turbo", + "gpt_4", + "gpt_4_32k", + "gpt_3_5_turbo", + "gpt_3_5_turbo_16k", + "gpt_3_5_turbo_instruct", + "llama3_70b", + "llama_3_groq_70b_tool_use", + "llama3_8b", + "llama_3_groq_8b_tool_use", + "llama2_70b_chat", + "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", + "gemma_7b_it", + "gemini_1_5_flash", + "gemini_1_5_pro", + "gemini_1_pro_vision", + "gemini_1_pro", + "palm2_chat", + "palm2_text", + "claude_3_5_sonnet", + "claude_3_opus", + "claude_3_sonnet", + "claude_3_haiku", + "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", + "text_davinci_003", + "text_davinci_002", + "code_davinci_002", + "text_curie_001", + "text_babbage_001", + "text_ada_001", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3seo_summary_async_form_request_response_format_type.py b/src/gooey/types/post_v3seo_summary_async_form_request_response_format_type.py new file mode 100644 index 0000000..a16607e --- /dev/null +++ b/src/gooey/types/post_v3seo_summary_async_form_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3SeoSummaryAsyncFormRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/post_v3seo_summary_async_form_request_selected_model.py b/src/gooey/types/post_v3seo_summary_async_form_request_selected_model.py new file mode 100644 index 0000000..eb67839 --- /dev/null +++ b/src/gooey/types/post_v3seo_summary_async_form_request_selected_model.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3SeoSummaryAsyncFormRequestSelectedModel = typing.Union[ + typing.Literal[ + "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", + "gpt_4_turbo_vision", + "gpt_4_vision", + "gpt_4_turbo", + "gpt_4", + "gpt_4_32k", + "gpt_3_5_turbo", + "gpt_3_5_turbo_16k", + "gpt_3_5_turbo_instruct", + "llama3_70b", + "llama_3_groq_70b_tool_use", + "llama3_8b", + "llama_3_groq_8b_tool_use", + "llama2_70b_chat", + "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", + "gemma_7b_it", + "gemini_1_5_flash", + "gemini_1_5_pro", + "gemini_1_pro_vision", + "gemini_1_pro", + "palm2_chat", + "palm2_text", + "claude_3_5_sonnet", + "claude_3_opus", + "claude_3_sonnet", + "claude_3_haiku", + "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", + "text_davinci_003", + "text_davinci_002", + "code_davinci_002", + "text_curie_001", + "text_babbage_001", + "text_ada_001", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3smart_gpt_async_form_request_response_format_type.py b/src/gooey/types/post_v3smart_gpt_async_form_request_response_format_type.py new file mode 100644 index 0000000..2ec153e --- /dev/null +++ b/src/gooey/types/post_v3smart_gpt_async_form_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3SmartGptAsyncFormRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/post_v3smart_gpt_async_form_request_selected_model.py b/src/gooey/types/post_v3smart_gpt_async_form_request_selected_model.py new file mode 100644 index 0000000..70d34bc --- /dev/null +++ b/src/gooey/types/post_v3smart_gpt_async_form_request_selected_model.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3SmartGptAsyncFormRequestSelectedModel = typing.Union[ + typing.Literal[ + "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", + "gpt_4_turbo_vision", + "gpt_4_vision", + "gpt_4_turbo", + "gpt_4", + "gpt_4_32k", + "gpt_3_5_turbo", + "gpt_3_5_turbo_16k", + "gpt_3_5_turbo_instruct", + "llama3_70b", + "llama_3_groq_70b_tool_use", + "llama3_8b", + "llama_3_groq_8b_tool_use", + "llama2_70b_chat", + "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", + "gemma_7b_it", + "gemini_1_5_flash", + "gemini_1_5_pro", + "gemini_1_pro_vision", + "gemini_1_pro", + "palm2_chat", + "palm2_text", + "claude_3_5_sonnet", + "claude_3_opus", + "claude_3_sonnet", + "claude_3_haiku", + "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", + "text_davinci_003", + "text_davinci_002", + "code_davinci_002", + "text_curie_001", + "text_babbage_001", + "text_ada_001", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3social_lookup_email_async_form_request_response_format_type.py b/src/gooey/types/post_v3social_lookup_email_async_form_request_response_format_type.py new file mode 100644 index 0000000..d65902e --- /dev/null +++ b/src/gooey/types/post_v3social_lookup_email_async_form_request_response_format_type.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3SocialLookupEmailAsyncFormRequestResponseFormatType = typing.Union[ + typing.Literal["text", "json_object"], typing.Any +] diff --git a/src/gooey/types/post_v3social_lookup_email_async_form_request_selected_model.py b/src/gooey/types/post_v3social_lookup_email_async_form_request_selected_model.py new file mode 100644 index 0000000..9defab1 --- /dev/null +++ b/src/gooey/types/post_v3social_lookup_email_async_form_request_selected_model.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3SocialLookupEmailAsyncFormRequestSelectedModel = typing.Union[ + typing.Literal[ + "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", + "gpt_4_turbo_vision", + "gpt_4_vision", + "gpt_4_turbo", + "gpt_4", + "gpt_4_32k", + "gpt_3_5_turbo", + "gpt_3_5_turbo_16k", + "gpt_3_5_turbo_instruct", + "llama3_70b", + "llama_3_groq_70b_tool_use", + "llama3_8b", + "llama_3_groq_8b_tool_use", + "llama2_70b_chat", + "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", + "gemma_7b_it", + "gemini_1_5_flash", + "gemini_1_5_pro", + "gemini_1_pro_vision", + "gemini_1_pro", + "palm2_chat", + "palm2_text", + "claude_3_5_sonnet", + "claude_3_opus", + "claude_3_sonnet", + "claude_3_haiku", + "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", + "text_davinci_003", + "text_davinci_002", + "code_davinci_002", + "text_curie_001", + "text_babbage_001", + "text_ada_001", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3text_to_speech_async_form_request_openai_tts_model.py b/src/gooey/types/post_v3text_to_speech_async_form_request_openai_tts_model.py new file mode 100644 index 0000000..5b996d2 --- /dev/null +++ b/src/gooey/types/post_v3text_to_speech_async_form_request_openai_tts_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3TextToSpeechAsyncFormRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/types/post_v3text_to_speech_async_form_request_openai_voice_name.py b/src/gooey/types/post_v3text_to_speech_async_form_request_openai_voice_name.py new file mode 100644 index 0000000..5e87d41 --- /dev/null +++ b/src/gooey/types/post_v3text_to_speech_async_form_request_openai_voice_name.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3TextToSpeechAsyncFormRequestOpenaiVoiceName = typing.Union[ + typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any +] diff --git a/src/gooey/types/post_v3text_to_speech_async_form_request_tts_provider.py b/src/gooey/types/post_v3text_to_speech_async_form_request_tts_provider.py new file mode 100644 index 0000000..066bcc0 --- /dev/null +++ b/src/gooey/types/post_v3text_to_speech_async_form_request_tts_provider.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3TextToSpeechAsyncFormRequestTtsProvider = typing.Union[ + typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any +] diff --git a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_translation_model.py b/src/gooey/types/post_v3translate_async_form_request_selected_model.py similarity index 66% rename from src/gooey/copilot_integrations/types/video_bots_stream_create_request_translation_model.py rename to src/gooey/types/post_v3translate_async_form_request_selected_model.py index db21082..8314363 100644 --- a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_translation_model.py +++ b/src/gooey/types/post_v3translate_async_form_request_selected_model.py @@ -2,4 +2,4 @@ import typing -VideoBotsStreamCreateRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] +PostV3TranslateAsyncFormRequestSelectedModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/post_v3video_bots_async_form_request_asr_model.py b/src/gooey/types/post_v3video_bots_async_form_request_asr_model.py new file mode 100644 index 0000000..bc1c1e4 --- /dev/null +++ b/src/gooey/types/post_v3video_bots_async_form_request_asr_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3VideoBotsAsyncFormRequestAsrModel = typing.Union[ + typing.Literal[ + "whisper_large_v2", + "whisper_large_v3", + "whisper_hindi_large_v2", + "whisper_telugu_large_v2", + "nemo_english", + "nemo_hindi", + "vakyansh_bhojpuri", + "gcp_v1", + "usm", + "deepgram", + "azure", + "seamless_m4t_v2", + "mms_1b_all", + "seamless_m4t", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3video_bots_async_form_request_citation_style.py b/src/gooey/types/post_v3video_bots_async_form_request_citation_style.py new file mode 100644 index 0000000..b98a7c6 --- /dev/null +++ b/src/gooey/types/post_v3video_bots_async_form_request_citation_style.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3VideoBotsAsyncFormRequestCitationStyle = typing.Union[ + typing.Literal[ + "number", + "title", + "url", + "symbol", + "markdown", + "html", + "slack_mrkdwn", + "plaintext", + "number_markdown", + "number_html", + "number_slack_mrkdwn", + "number_plaintext", + "symbol_markdown", + "symbol_html", + "symbol_slack_mrkdwn", + "symbol_plaintext", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3video_bots_async_form_request_embedding_model.py b/src/gooey/types/post_v3video_bots_async_form_request_embedding_model.py new file mode 100644 index 0000000..bd68603 --- /dev/null +++ b/src/gooey/types/post_v3video_bots_async_form_request_embedding_model.py @@ -0,0 +1,18 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3VideoBotsAsyncFormRequestEmbeddingModel = typing.Union[ + typing.Literal[ + "openai_3_large", + "openai_3_small", + "openai_ada_2", + "e5_large_v2", + "e5_base_v2", + "multilingual_e5_base", + "multilingual_e5_large", + "gte_large", + "gte_base", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3video_bots_async_form_request_lipsync_model.py b/src/gooey/types/post_v3video_bots_async_form_request_lipsync_model.py new file mode 100644 index 0000000..13db430 --- /dev/null +++ b/src/gooey/types/post_v3video_bots_async_form_request_lipsync_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3VideoBotsAsyncFormRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/post_v3video_bots_async_form_request_openai_tts_model.py b/src/gooey/types/post_v3video_bots_async_form_request_openai_tts_model.py new file mode 100644 index 0000000..5a921f4 --- /dev/null +++ b/src/gooey/types/post_v3video_bots_async_form_request_openai_tts_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3VideoBotsAsyncFormRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/types/post_v3video_bots_async_form_request_openai_voice_name.py b/src/gooey/types/post_v3video_bots_async_form_request_openai_voice_name.py new file mode 100644 index 0000000..b945a73 --- /dev/null +++ b/src/gooey/types/post_v3video_bots_async_form_request_openai_voice_name.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3VideoBotsAsyncFormRequestOpenaiVoiceName = typing.Union[ + typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any +] diff --git a/src/gooey/types/post_v3video_bots_async_form_request_response_format_type.py b/src/gooey/types/post_v3video_bots_async_form_request_response_format_type.py new file mode 100644 index 0000000..8b486eb --- /dev/null +++ b/src/gooey/types/post_v3video_bots_async_form_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3VideoBotsAsyncFormRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/post_v3video_bots_async_form_request_selected_model.py b/src/gooey/types/post_v3video_bots_async_form_request_selected_model.py new file mode 100644 index 0000000..8448c26 --- /dev/null +++ b/src/gooey/types/post_v3video_bots_async_form_request_selected_model.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3VideoBotsAsyncFormRequestSelectedModel = typing.Union[ + typing.Literal[ + "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", + "gpt_4_turbo_vision", + "gpt_4_vision", + "gpt_4_turbo", + "gpt_4", + "gpt_4_32k", + "gpt_3_5_turbo", + "gpt_3_5_turbo_16k", + "gpt_3_5_turbo_instruct", + "llama3_70b", + "llama_3_groq_70b_tool_use", + "llama3_8b", + "llama_3_groq_8b_tool_use", + "llama2_70b_chat", + "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", + "gemma_7b_it", + "gemini_1_5_flash", + "gemini_1_5_pro", + "gemini_1_pro_vision", + "gemini_1_pro", + "palm2_chat", + "palm2_text", + "claude_3_5_sonnet", + "claude_3_opus", + "claude_3_sonnet", + "claude_3_haiku", + "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", + "text_davinci_003", + "text_davinci_002", + "code_davinci_002", + "text_curie_001", + "text_babbage_001", + "text_ada_001", + ], + typing.Any, +] diff --git a/src/gooey/types/post_v3video_bots_async_form_request_translation_model.py b/src/gooey/types/post_v3video_bots_async_form_request_translation_model.py new file mode 100644 index 0000000..3c7d0ae --- /dev/null +++ b/src/gooey/types/post_v3video_bots_async_form_request_translation_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostV3VideoBotsAsyncFormRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/post_v3video_bots_async_form_request_tts_provider.py b/src/gooey/types/post_v3video_bots_async_form_request_tts_provider.py new file mode 100644 index 0000000..c223beb --- /dev/null +++ b/src/gooey/types/post_v3video_bots_async_form_request_tts_provider.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostV3VideoBotsAsyncFormRequestTtsProvider = typing.Union[ + typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any +] diff --git a/src/gooey/types/product_image_request_selected_model.py b/src/gooey/types/product_image_request_selected_model.py deleted file mode 100644 index f1ce039..0000000 --- a/src/gooey/types/product_image_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -ProductImageRequestSelectedModel = typing.Union[typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any] diff --git a/src/gooey/types/prompt_tree_node.py b/src/gooey/types/prompt_tree_node.py index de44ce4..a678201 100644 --- a/src/gooey/types/prompt_tree_node.py +++ b/src/gooey/types/prompt_tree_node.py @@ -1,18 +1,17 @@ # This file was auto-generated by Fern from our API Definition. from __future__ import annotations - +from ..core.pydantic_utilities import UniversalBaseModel +from .prompt_tree_node_prompt import PromptTreeNodePrompt import typing - +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel, update_forward_refs -from .prompt_tree_node_prompt import PromptTreeNodePrompt +from ..core.pydantic_utilities import update_forward_refs class PromptTreeNode(UniversalBaseModel): prompt: PromptTreeNodePrompt - children: typing.List[PromptTreeNode] + children: typing.List["PromptTreeNode"] if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/gooey/types/prompt_tree_node_prompt.py b/src/gooey/types/prompt_tree_node_prompt.py index d27c578..7000a96 100644 --- a/src/gooey/types/prompt_tree_node_prompt.py +++ b/src/gooey/types/prompt_tree_node_prompt.py @@ -1,7 +1,6 @@ # This file was auto-generated by Fern from our API 
Definition. import typing - from .conversation_entry import ConversationEntry PromptTreeNodePrompt = typing.Union[str, typing.List[ConversationEntry]] diff --git a/src/gooey/types/qr_code_generator_page_output.py b/src/gooey/types/qr_code_generator_page_output.py index f33cf31..8d947da 100644 --- a/src/gooey/types/qr_code_generator_page_output.py +++ b/src/gooey/types/qr_code_generator_page_output.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class QrCodeGeneratorPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/qr_code_generator_page_request_image_prompt_controlnet_models_item.py b/src/gooey/types/qr_code_generator_page_request_image_prompt_controlnet_models_item.py new file mode 100644 index 0000000..508e7e9 --- /dev/null +++ b/src/gooey/types/qr_code_generator_page_request_image_prompt_controlnet_models_item.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +QrCodeGeneratorPageRequestImagePromptControlnetModelsItem = typing.Union[ + typing.Literal[ + "sd_controlnet_canny", + "sd_controlnet_depth", + "sd_controlnet_hed", + "sd_controlnet_mlsd", + "sd_controlnet_normal", + "sd_controlnet_openpose", + "sd_controlnet_scribble", + "sd_controlnet_seg", + "sd_controlnet_tile", + "sd_controlnet_brightness", + "control_v1p_sd15_qrcode_monster_v2", + ], + typing.Any, +] diff --git a/src/gooey/types/text_to_image_request_scheduler.py b/src/gooey/types/qr_code_generator_page_request_scheduler.py similarity index 89% rename from src/gooey/types/text_to_image_request_scheduler.py rename to src/gooey/types/qr_code_generator_page_request_scheduler.py index 4283b8c..e30308a 100644 --- a/src/gooey/types/text_to_image_request_scheduler.py +++ b/src/gooey/types/qr_code_generator_page_request_scheduler.py @@ -2,7 +2,7 @@ import typing -TextToImageRequestScheduler = typing.Union[ +QrCodeGeneratorPageRequestScheduler = typing.Union[ typing.Literal[ "singlestep_dpm_solver", "multistep_dpm_solver", diff --git a/src/gooey/types/qr_code_request_selected_controlnet_model_item.py b/src/gooey/types/qr_code_generator_page_request_selected_controlnet_model_item.py similarity index 87% rename from src/gooey/types/qr_code_request_selected_controlnet_model_item.py rename to src/gooey/types/qr_code_generator_page_request_selected_controlnet_model_item.py index c5cdc8d..c6f1967 100644 --- a/src/gooey/types/qr_code_request_selected_controlnet_model_item.py +++ b/src/gooey/types/qr_code_generator_page_request_selected_controlnet_model_item.py @@ -2,7 +2,7 @@ import typing -QrCodeRequestSelectedControlnetModelItem = typing.Union[ +QrCodeGeneratorPageRequestSelectedControlnetModelItem = typing.Union[ typing.Literal[ "sd_controlnet_canny", "sd_controlnet_depth", diff --git a/src/gooey/types/qr_code_request_selected_model.py b/src/gooey/types/qr_code_generator_page_request_selected_model.py similarity index 88% rename from 
src/gooey/types/qr_code_request_selected_model.py rename to src/gooey/types/qr_code_generator_page_request_selected_model.py index 7ea963c..97282cb 100644 --- a/src/gooey/types/qr_code_request_selected_model.py +++ b/src/gooey/types/qr_code_generator_page_request_selected_model.py @@ -2,7 +2,7 @@ import typing -QrCodeRequestSelectedModel = typing.Union[ +QrCodeGeneratorPageRequestSelectedModel = typing.Union[ typing.Literal[ "dream_shaper", "dreamlike_2", diff --git a/src/gooey/types/qr_code_generator_page_status_response.py b/src/gooey/types/qr_code_generator_page_status_response.py index 6d89074..215f583 100644 --- a/src/gooey/types/qr_code_generator_page_status_response.py +++ b/src/gooey/types/qr_code_generator_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .qr_code_generator_page_output import QrCodeGeneratorPageOutput from .recipe_run_state import RecipeRunState +import typing +from .qr_code_generator_page_output import QrCodeGeneratorPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class QrCodeGeneratorPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/rag_request_response_format_type.py b/src/gooey/types/rag_request_response_format_type.py deleted file mode 100644 index 76eae86..0000000 --- a/src/gooey/types/rag_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -RagRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/recipe_function.py b/src/gooey/types/recipe_function.py index 82f6a86..ed79772 100644 --- a/src/gooey/types/recipe_function.py +++ b/src/gooey/types/recipe_function.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .recipe_function_trigger import RecipeFunctionTrigger +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing class RecipeFunction(UniversalBaseModel): diff --git a/src/gooey/types/related_doc_search_response.py b/src/gooey/types/related_doc_search_response.py index 2c783ac..3964afb 100644 --- a/src/gooey/types/related_doc_search_response.py +++ b/src/gooey/types/related_doc_search_response.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .search_reference import SearchReference +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class RelatedDocSearchResponse(UniversalBaseModel): diff --git a/src/gooey/types/related_google_gpt_response.py b/src/gooey/types/related_google_gpt_response.py index 4575325..58eeb8a 100644 --- a/src/gooey/types/related_google_gpt_response.py +++ b/src/gooey/types/related_google_gpt_response.py @@ -1,16 +1,15 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .search_reference import SearchReference +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class RelatedGoogleGptResponse(UniversalBaseModel): output_text: typing.List[str] - serp_results: typing.Dict[str, typing.Any] + serp_results: typing.Dict[str, typing.Optional[typing.Any]] references: typing.List[SearchReference] final_prompt: str final_search_query: typing.Optional[str] = None diff --git a/src/gooey/types/related_qn_a_doc_page_output.py b/src/gooey/types/related_qn_a_doc_page_output.py index 06f51d8..5f10fb6 100644 --- a/src/gooey/types/related_qn_a_doc_page_output.py +++ b/src/gooey/types/related_qn_a_doc_page_output.py @@ -1,17 +1,16 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .called_function_response import CalledFunctionResponse from .related_doc_search_response import RelatedDocSearchResponse +from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class RelatedQnADocPageOutput(UniversalBaseModel): output_queries: typing.List[RelatedDocSearchResponse] - serp_results: typing.Dict[str, typing.Any] + serp_results: typing.Dict[str, typing.Optional[typing.Any]] called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None if IS_PYDANTIC_V2: diff --git a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_citation_style.py b/src/gooey/types/related_qn_a_doc_page_request_citation_style.py similarity index 89% rename from src/gooey/copilot_integrations/types/video_bots_stream_create_request_citation_style.py rename to 
src/gooey/types/related_qn_a_doc_page_request_citation_style.py index eb80dca..b98f002 100644 --- a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_citation_style.py +++ b/src/gooey/types/related_qn_a_doc_page_request_citation_style.py @@ -2,7 +2,7 @@ import typing -VideoBotsStreamCreateRequestCitationStyle = typing.Union[ +RelatedQnADocPageRequestCitationStyle = typing.Union[ typing.Literal[ "number", "title", diff --git a/src/gooey/types/related_qn_a_doc_page_request_embedding_model.py b/src/gooey/types/related_qn_a_doc_page_request_embedding_model.py new file mode 100644 index 0000000..680bbb5 --- /dev/null +++ b/src/gooey/types/related_qn_a_doc_page_request_embedding_model.py @@ -0,0 +1,18 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +RelatedQnADocPageRequestEmbeddingModel = typing.Union[ + typing.Literal[ + "openai_3_large", + "openai_3_small", + "openai_ada_2", + "e5_large_v2", + "e5_base_v2", + "multilingual_e5_base", + "multilingual_e5_large", + "gte_large", + "gte_base", + ], + typing.Any, +] diff --git a/src/gooey/types/seo_people_also_ask_doc_request_keyword_query.py b/src/gooey/types/related_qn_a_doc_page_request_keyword_query.py similarity index 50% rename from src/gooey/types/seo_people_also_ask_doc_request_keyword_query.py rename to src/gooey/types/related_qn_a_doc_page_request_keyword_query.py index 8ba6efb..4f35322 100644 --- a/src/gooey/types/seo_people_also_ask_doc_request_keyword_query.py +++ b/src/gooey/types/related_qn_a_doc_page_request_keyword_query.py @@ -2,4 +2,4 @@ import typing -SeoPeopleAlsoAskDocRequestKeywordQuery = typing.Union[str, typing.List[str]] +RelatedQnADocPageRequestKeywordQuery = typing.Union[str, typing.List[str]] diff --git a/src/gooey/evaluator/types/async_form_bulk_eval_request_response_format_type.py b/src/gooey/types/related_qn_a_doc_page_request_response_format_type.py similarity index 66% rename from 
src/gooey/evaluator/types/async_form_bulk_eval_request_response_format_type.py rename to src/gooey/types/related_qn_a_doc_page_request_response_format_type.py index a4489a8..c65a896 100644 --- a/src/gooey/evaluator/types/async_form_bulk_eval_request_response_format_type.py +++ b/src/gooey/types/related_qn_a_doc_page_request_response_format_type.py @@ -2,4 +2,4 @@ import typing -AsyncFormBulkEvalRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] +RelatedQnADocPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/evaluator/types/async_form_bulk_eval_request_selected_model.py b/src/gooey/types/related_qn_a_doc_page_request_selected_model.py similarity index 95% rename from src/gooey/evaluator/types/async_form_bulk_eval_request_selected_model.py rename to src/gooey/types/related_qn_a_doc_page_request_selected_model.py index 52046e6..2591cf1 100644 --- a/src/gooey/evaluator/types/async_form_bulk_eval_request_selected_model.py +++ b/src/gooey/types/related_qn_a_doc_page_request_selected_model.py @@ -2,7 +2,7 @@ import typing -AsyncFormBulkEvalRequestSelectedModel = typing.Union[ +RelatedQnADocPageRequestSelectedModel = typing.Union[ typing.Literal[ "gpt_4_o", "gpt_4_o_mini", diff --git a/src/gooey/types/related_qn_a_doc_page_status_response.py b/src/gooey/types/related_qn_a_doc_page_status_response.py index 9a269fa..29b89e0 100644 --- a/src/gooey/types/related_qn_a_doc_page_status_response.py +++ b/src/gooey/types/related_qn_a_doc_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. 
-import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .recipe_run_state import RecipeRunState +import typing from .related_qn_a_doc_page_output import RelatedQnADocPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class RelatedQnADocPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/related_qn_a_page_output.py b/src/gooey/types/related_qn_a_page_output.py index 42b106b..272ca1e 100644 --- a/src/gooey/types/related_qn_a_page_output.py +++ b/src/gooey/types/related_qn_a_page_output.py @@ -1,17 +1,16 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .called_function_response import CalledFunctionResponse from .related_google_gpt_response import RelatedGoogleGptResponse +from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class RelatedQnAPageOutput(UniversalBaseModel): output_queries: typing.List[RelatedGoogleGptResponse] - serp_results: typing.Dict[str, typing.Any] + serp_results: typing.Dict[str, typing.Optional[typing.Any]] called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None if IS_PYDANTIC_V2: diff --git a/src/gooey/types/related_qn_a_page_request_embedding_model.py b/src/gooey/types/related_qn_a_page_request_embedding_model.py new file mode 100644 index 0000000..a591920 --- /dev/null +++ b/src/gooey/types/related_qn_a_page_request_embedding_model.py @@ -0,0 +1,18 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +RelatedQnAPageRequestEmbeddingModel = typing.Union[ + typing.Literal[ + "openai_3_large", + "openai_3_small", + "openai_ada_2", + "e5_large_v2", + "e5_base_v2", + "multilingual_e5_base", + "multilingual_e5_large", + "gte_large", + "gte_base", + ], + typing.Any, +] diff --git a/src/gooey/types/related_qn_a_page_request_response_format_type.py b/src/gooey/types/related_qn_a_page_request_response_format_type.py new file mode 100644 index 0000000..7bada87 --- /dev/null +++ b/src/gooey/types/related_qn_a_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +RelatedQnAPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/related_qn_a_page_request_selected_model.py b/src/gooey/types/related_qn_a_page_request_selected_model.py new file mode 100644 index 0000000..211bdbc --- /dev/null +++ b/src/gooey/types/related_qn_a_page_request_selected_model.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +RelatedQnAPageRequestSelectedModel = typing.Union[ + typing.Literal[ + "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", + "gpt_4_turbo_vision", + "gpt_4_vision", + "gpt_4_turbo", + "gpt_4", + "gpt_4_32k", + "gpt_3_5_turbo", + "gpt_3_5_turbo_16k", + "gpt_3_5_turbo_instruct", + "llama3_70b", + "llama_3_groq_70b_tool_use", + "llama3_8b", + "llama_3_groq_8b_tool_use", + "llama2_70b_chat", + "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", + "gemma_7b_it", + "gemini_1_5_flash", + "gemini_1_5_pro", + "gemini_1_pro_vision", + "gemini_1_pro", + "palm2_chat", + "palm2_text", + "claude_3_5_sonnet", + "claude_3_opus", + "claude_3_sonnet", + "claude_3_haiku", + "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", + "text_davinci_003", + "text_davinci_002", + "code_davinci_002", + "text_curie_001", + "text_babbage_001", + "text_ada_001", + ], + typing.Any, +] diff --git a/src/gooey/types/related_qn_a_page_status_response.py b/src/gooey/types/related_qn_a_page_status_response.py index f6a20eb..41280b1 100644 --- a/src/gooey/types/related_qn_a_page_status_response.py +++ b/src/gooey/types/related_qn_a_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .recipe_run_state import RecipeRunState +import typing from .related_qn_a_page_output import RelatedQnAPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class RelatedQnAPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/remove_background_request_selected_model.py b/src/gooey/types/remove_background_request_selected_model.py deleted file mode 100644 index c84f0e7..0000000 --- a/src/gooey/types/remove_background_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -RemoveBackgroundRequestSelectedModel = typing.Union[typing.Literal["dis", "u2net"], typing.Any] diff --git a/src/gooey/types/reply_button.py b/src/gooey/types/reply_button.py index f226ec6..4ec8881 100644 --- a/src/gooey/types/reply_button.py +++ b/src/gooey/types/reply_button.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing - import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel - class ReplyButton(UniversalBaseModel): id: str diff --git a/src/gooey/types/response_model.py b/src/gooey/types/response_model.py index cacc115..94dcd3d 100644 --- a/src/gooey/types/response_model.py +++ b/src/gooey/types/response_model.py @@ -1,14 +1,13 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .reply_button import ReplyButton -from .response_model_final_keyword_query import ResponseModelFinalKeywordQuery from .response_model_final_prompt import ResponseModelFinalPrompt from .search_reference import SearchReference +from .response_model_final_keyword_query import ResponseModelFinalKeywordQuery +from .reply_button import ReplyButton +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class ResponseModel(UniversalBaseModel): diff --git a/src/gooey/types/response_model_final_prompt.py b/src/gooey/types/response_model_final_prompt.py index 99bf39f..be7dff1 100644 --- a/src/gooey/types/response_model_final_prompt.py +++ b/src/gooey/types/response_model_final_prompt.py @@ -1,7 +1,6 @@ # This file was auto-generated by Fern from our API Definition. 
import typing - from .conversation_entry import ConversationEntry ResponseModelFinalPrompt = typing.Union[str, typing.List[ConversationEntry]] diff --git a/src/gooey/types/run_settings.py b/src/gooey/types/run_settings.py index 8c07363..232e22b 100644 --- a/src/gooey/types/run_settings.py +++ b/src/gooey/types/run_settings.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .run_settings_retention_policy import RunSettingsRetentionPolicy +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class RunSettings(UniversalBaseModel): diff --git a/src/gooey/types/run_start.py b/src/gooey/types/run_start.py index 23616cc..bfa1a70 100644 --- a/src/gooey/types/run_start.py +++ b/src/gooey/types/run_start.py @@ -1,10 +1,9 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class RunStart(UniversalBaseModel): diff --git a/src/gooey/types/sad_talker_settings.py b/src/gooey/types/sad_talker_settings.py index a0c40e3..85464e7 100644 --- a/src/gooey/types/sad_talker_settings.py +++ b/src/gooey/types/sad_talker_settings.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .sad_talker_settings_preprocess import SadTalkerSettingsPreprocess +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class SadTalkerSettings(UniversalBaseModel): diff --git a/src/gooey/types/search_reference.py b/src/gooey/types/search_reference.py index 7403258..22dde4e 100644 --- a/src/gooey/types/search_reference.py +++ b/src/gooey/types/search_reference.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing - import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel - class SearchReference(UniversalBaseModel): url: str diff --git a/src/gooey/types/seo_content_request_response_format_type.py b/src/gooey/types/seo_content_request_response_format_type.py deleted file mode 100644 index 8511b19..0000000 --- a/src/gooey/types/seo_content_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -SeoContentRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/seo_people_also_ask_doc_request_embedding_model.py b/src/gooey/types/seo_people_also_ask_doc_request_embedding_model.py deleted file mode 100644 index 0628779..0000000 --- a/src/gooey/types/seo_people_also_ask_doc_request_embedding_model.py +++ /dev/null @@ -1,18 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -SeoPeopleAlsoAskDocRequestEmbeddingModel = typing.Union[ - typing.Literal[ - "openai_3_large", - "openai_3_small", - "openai_ada_2", - "e5_large_v2", - "e5_base_v2", - "multilingual_e5_base", - "multilingual_e5_large", - "gte_large", - "gte_base", - ], - typing.Any, -] diff --git a/src/gooey/types/seo_people_also_ask_doc_request_response_format_type.py b/src/gooey/types/seo_people_also_ask_doc_request_response_format_type.py deleted file mode 100644 index c205eff..0000000 --- a/src/gooey/types/seo_people_also_ask_doc_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -SeoPeopleAlsoAskDocRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/seo_people_also_ask_doc_request_selected_model.py b/src/gooey/types/seo_people_also_ask_doc_request_selected_model.py deleted file mode 100644 index 1877420..0000000 --- a/src/gooey/types/seo_people_also_ask_doc_request_selected_model.py +++ /dev/null @@ -1,47 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -SeoPeopleAlsoAskDocRequestSelectedModel = typing.Union[ - typing.Literal[ - "gpt_4_o", - "gpt_4_o_mini", - "chatgpt_4_o", - "gpt_4_turbo_vision", - "gpt_4_vision", - "gpt_4_turbo", - "gpt_4", - "gpt_4_32k", - "gpt_3_5_turbo", - "gpt_3_5_turbo_16k", - "gpt_3_5_turbo_instruct", - "llama3_70b", - "llama_3_groq_70b_tool_use", - "llama3_8b", - "llama_3_groq_8b_tool_use", - "llama2_70b_chat", - "mixtral_8x7b_instruct_0_1", - "gemma_2_9b_it", - "gemma_7b_it", - "gemini_1_5_flash", - "gemini_1_5_pro", - "gemini_1_pro_vision", - "gemini_1_pro", - "palm2_chat", - "palm2_text", - "claude_3_5_sonnet", - "claude_3_opus", - "claude_3_sonnet", - "claude_3_haiku", - "sea_lion_7b_instruct", - "llama3_8b_cpt_sea_lion_v2_instruct", - "sarvam_2b", - "text_davinci_003", - "text_davinci_002", - "code_davinci_002", - "text_curie_001", - "text_babbage_001", - "text_ada_001", - ], - typing.Any, -] diff --git a/src/gooey/types/seo_people_also_ask_request_response_format_type.py b/src/gooey/types/seo_people_also_ask_request_response_format_type.py deleted file mode 100644 index 5a67007..0000000 --- a/src/gooey/types/seo_people_also_ask_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -SeoPeopleAlsoAskRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/seo_people_also_ask_request_selected_model.py b/src/gooey/types/seo_people_also_ask_request_selected_model.py deleted file mode 100644 index e315d12..0000000 --- a/src/gooey/types/seo_people_also_ask_request_selected_model.py +++ /dev/null @@ -1,47 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -SeoPeopleAlsoAskRequestSelectedModel = typing.Union[ - typing.Literal[ - "gpt_4_o", - "gpt_4_o_mini", - "chatgpt_4_o", - "gpt_4_turbo_vision", - "gpt_4_vision", - "gpt_4_turbo", - "gpt_4", - "gpt_4_32k", - "gpt_3_5_turbo", - "gpt_3_5_turbo_16k", - "gpt_3_5_turbo_instruct", - "llama3_70b", - "llama_3_groq_70b_tool_use", - "llama3_8b", - "llama_3_groq_8b_tool_use", - "llama2_70b_chat", - "mixtral_8x7b_instruct_0_1", - "gemma_2_9b_it", - "gemma_7b_it", - "gemini_1_5_flash", - "gemini_1_5_pro", - "gemini_1_pro_vision", - "gemini_1_pro", - "palm2_chat", - "palm2_text", - "claude_3_5_sonnet", - "claude_3_opus", - "claude_3_sonnet", - "claude_3_haiku", - "sea_lion_7b_instruct", - "llama3_8b_cpt_sea_lion_v2_instruct", - "sarvam_2b", - "text_davinci_003", - "text_davinci_002", - "code_davinci_002", - "text_curie_001", - "text_babbage_001", - "text_ada_001", - ], - typing.Any, -] diff --git a/src/gooey/types/seo_summary_page_output.py b/src/gooey/types/seo_summary_page_output.py index 9043028..22c116d 100644 --- a/src/gooey/types/seo_summary_page_output.py +++ b/src/gooey/types/seo_summary_page_output.py @@ -1,18 +1,17 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class SeoSummaryPageOutput(UniversalBaseModel): output_content: typing.List[str] - serp_results: typing.Dict[str, typing.Any] + serp_results: typing.Dict[str, typing.Optional[typing.Any]] search_urls: typing.List[str] - summarized_urls: typing.List[typing.Dict[str, typing.Any]] + summarized_urls: typing.List[typing.Dict[str, typing.Optional[typing.Any]]] final_prompt: str called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None diff --git a/src/gooey/types/seo_summary_page_request_response_format_type.py b/src/gooey/types/seo_summary_page_request_response_format_type.py new file mode 100644 index 0000000..26f948b --- /dev/null +++ b/src/gooey/types/seo_summary_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +SeoSummaryPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/seo_summary_page_request_selected_model.py b/src/gooey/types/seo_summary_page_request_selected_model.py new file mode 100644 index 0000000..7030bfd --- /dev/null +++ b/src/gooey/types/seo_summary_page_request_selected_model.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +SeoSummaryPageRequestSelectedModel = typing.Union[ + typing.Literal[ + "gpt_4_o", + "gpt_4_o_mini", + "chatgpt_4_o", + "gpt_4_turbo_vision", + "gpt_4_vision", + "gpt_4_turbo", + "gpt_4", + "gpt_4_32k", + "gpt_3_5_turbo", + "gpt_3_5_turbo_16k", + "gpt_3_5_turbo_instruct", + "llama3_70b", + "llama_3_groq_70b_tool_use", + "llama3_8b", + "llama_3_groq_8b_tool_use", + "llama2_70b_chat", + "mixtral_8x7b_instruct_0_1", + "gemma_2_9b_it", + "gemma_7b_it", + "gemini_1_5_flash", + "gemini_1_5_pro", + "gemini_1_pro_vision", + "gemini_1_pro", + "palm2_chat", + "palm2_text", + "claude_3_5_sonnet", + "claude_3_opus", + "claude_3_sonnet", + "claude_3_haiku", + "sea_lion_7b_instruct", + "llama3_8b_cpt_sea_lion_v2_instruct", + "sarvam_2b", + "text_davinci_003", + "text_davinci_002", + "code_davinci_002", + "text_curie_001", + "text_babbage_001", + "text_ada_001", + ], + typing.Any, +] diff --git a/src/gooey/types/seo_summary_page_status_response.py b/src/gooey/types/seo_summary_page_status_response.py index e12a38a..15e6a00 100644 --- a/src/gooey/types/seo_summary_page_status_response.py +++ b/src/gooey/types/seo_summary_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .recipe_run_state import RecipeRunState +import typing from .seo_summary_page_output import SeoSummaryPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class SeoSummaryPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/smart_gpt_page_output.py b/src/gooey/types/smart_gpt_page_output.py index 6fce6c9..f082630 100644 --- a/src/gooey/types/smart_gpt_page_output.py +++ b/src/gooey/types/smart_gpt_page_output.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .called_function_response import CalledFunctionResponse from .prompt_tree_node import PromptTreeNode +from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class SmartGptPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/smart_gpt_page_status_response.py b/src/gooey/types/smart_gpt_page_status_response.py index 51366a2..f5b3811 100644 --- a/src/gooey/types/smart_gpt_page_status_response.py +++ b/src/gooey/types/smart_gpt_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .recipe_run_state import RecipeRunState +import typing from .smart_gpt_page_output import SmartGptPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class SmartGptPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/social_lookup_email_page_output.py b/src/gooey/types/social_lookup_email_page_output.py index c054d92..93ce982 100644 --- a/src/gooey/types/social_lookup_email_page_output.py +++ b/src/gooey/types/social_lookup_email_page_output.py @@ -1,15 +1,14 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class SocialLookupEmailPageOutput(UniversalBaseModel): - person_data: typing.Dict[str, typing.Any] + person_data: typing.Dict[str, typing.Optional[typing.Any]] final_prompt: str output_text: typing.List[str] called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None diff --git a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_response_format_type.py b/src/gooey/types/social_lookup_email_page_request_response_format_type.py similarity index 66% rename from src/gooey/copilot_integrations/types/video_bots_stream_create_request_response_format_type.py rename to src/gooey/types/social_lookup_email_page_request_response_format_type.py index fbf245e..46c50db 100644 --- a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_response_format_type.py +++ b/src/gooey/types/social_lookup_email_page_request_response_format_type.py @@ -2,4 +2,4 @@ import typing -VideoBotsStreamCreateRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] +SocialLookupEmailPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_selected_model.py b/src/gooey/types/social_lookup_email_page_request_selected_model.py similarity index 95% rename from src/gooey/copilot_integrations/types/video_bots_stream_create_request_selected_model.py rename to src/gooey/types/social_lookup_email_page_request_selected_model.py index 52c9f20..1a0cba7 100644 --- a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_selected_model.py +++ 
b/src/gooey/types/social_lookup_email_page_request_selected_model.py @@ -2,7 +2,7 @@ import typing -VideoBotsStreamCreateRequestSelectedModel = typing.Union[ +SocialLookupEmailPageRequestSelectedModel = typing.Union[ typing.Literal[ "gpt_4_o", "gpt_4_o_mini", diff --git a/src/gooey/types/social_lookup_email_page_status_response.py b/src/gooey/types/social_lookup_email_page_status_response.py index 45899a5..4aab5c7 100644 --- a/src/gooey/types/social_lookup_email_page_status_response.py +++ b/src/gooey/types/social_lookup_email_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .recipe_run_state import RecipeRunState +import typing from .social_lookup_email_page_output import SocialLookupEmailPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class SocialLookupEmailPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/speech_recognition_request_output_format.py b/src/gooey/types/speech_recognition_request_output_format.py deleted file mode 100644 index 4d2cf2b..0000000 --- a/src/gooey/types/speech_recognition_request_output_format.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -SpeechRecognitionRequestOutputFormat = typing.Union[typing.Literal["text", "json", "srt", "vtt"], typing.Any] diff --git a/src/gooey/types/speech_recognition_request_translation_model.py b/src/gooey/types/speech_recognition_request_translation_model.py deleted file mode 100644 index 886ab92..0000000 --- a/src/gooey/types/speech_recognition_request_translation_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -SpeechRecognitionRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/stream_error.py b/src/gooey/types/stream_error.py index eb9758c..1476f8a 100644 --- a/src/gooey/types/stream_error.py +++ b/src/gooey/types/stream_error.py @@ -1,10 +1,9 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class StreamError(UniversalBaseModel): diff --git a/src/gooey/types/synthesize_data_request_selected_asr_model.py b/src/gooey/types/synthesize_data_request_selected_asr_model.py deleted file mode 100644 index 6c1bc21..0000000 --- a/src/gooey/types/synthesize_data_request_selected_asr_model.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -SynthesizeDataRequestSelectedAsrModel = typing.Union[ - typing.Literal[ - "whisper_large_v2", - "whisper_large_v3", - "whisper_hindi_large_v2", - "whisper_telugu_large_v2", - "nemo_english", - "nemo_hindi", - "vakyansh_bhojpuri", - "gcp_v1", - "usm", - "deepgram", - "azure", - "seamless_m4t_v2", - "mms_1b_all", - "seamless_m4t", - ], - typing.Any, -] diff --git a/src/gooey/types/synthesize_data_request_selected_model.py b/src/gooey/types/synthesize_data_request_selected_model.py deleted file mode 100644 index 42bde95..0000000 --- a/src/gooey/types/synthesize_data_request_selected_model.py +++ /dev/null @@ -1,47 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -SynthesizeDataRequestSelectedModel = typing.Union[ - typing.Literal[ - "gpt_4_o", - "gpt_4_o_mini", - "chatgpt_4_o", - "gpt_4_turbo_vision", - "gpt_4_vision", - "gpt_4_turbo", - "gpt_4", - "gpt_4_32k", - "gpt_3_5_turbo", - "gpt_3_5_turbo_16k", - "gpt_3_5_turbo_instruct", - "llama3_70b", - "llama_3_groq_70b_tool_use", - "llama3_8b", - "llama_3_groq_8b_tool_use", - "llama2_70b_chat", - "mixtral_8x7b_instruct_0_1", - "gemma_2_9b_it", - "gemma_7b_it", - "gemini_1_5_flash", - "gemini_1_5_pro", - "gemini_1_pro_vision", - "gemini_1_pro", - "palm2_chat", - "palm2_text", - "claude_3_5_sonnet", - "claude_3_opus", - "claude_3_sonnet", - "claude_3_haiku", - "sea_lion_7b_instruct", - "llama3_8b_cpt_sea_lion_v2_instruct", - "sarvam_2b", - "text_davinci_003", - "text_davinci_002", - "code_davinci_002", - "text_curie_001", - "text_babbage_001", - "text_ada_001", - ], - typing.Any, -] diff --git a/src/gooey/types/text2audio_page_output.py b/src/gooey/types/text2audio_page_output.py index dd6e48f..535dfa4 100644 --- a/src/gooey/types/text2audio_page_output.py +++ b/src/gooey/types/text2audio_page_output.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class Text2AudioPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/text2audio_page_status_response.py b/src/gooey/types/text2audio_page_status_response.py index e7fb600..22af025 100644 --- a/src/gooey/types/text2audio_page_status_response.py +++ b/src/gooey/types/text2audio_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. 
-import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .recipe_run_state import RecipeRunState +import typing from .text2audio_page_output import Text2AudioPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class Text2AudioPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/text_to_speech_page_output.py b/src/gooey/types/text_to_speech_page_output.py index b27e4cc..e47c942 100644 --- a/src/gooey/types/text_to_speech_page_output.py +++ b/src/gooey/types/text_to_speech_page_output.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class TextToSpeechPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/text_to_speech_page_request_openai_tts_model.py b/src/gooey/types/text_to_speech_page_request_openai_tts_model.py new file mode 100644 index 0000000..685dfff --- /dev/null +++ b/src/gooey/types/text_to_speech_page_request_openai_tts_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +TextToSpeechPageRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_openai_voice_name.py b/src/gooey/types/text_to_speech_page_request_openai_voice_name.py similarity index 73% rename from src/gooey/copilot_integrations/types/video_bots_stream_create_request_openai_voice_name.py rename to src/gooey/types/text_to_speech_page_request_openai_voice_name.py index 60a9be7..efd862f 100644 --- a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_openai_voice_name.py +++ b/src/gooey/types/text_to_speech_page_request_openai_voice_name.py @@ -2,6 +2,6 @@ import typing -VideoBotsStreamCreateRequestOpenaiVoiceName = typing.Union[ +TextToSpeechPageRequestOpenaiVoiceName = typing.Union[ typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any ] diff --git a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_tts_provider.py b/src/gooey/types/text_to_speech_page_request_tts_provider.py similarity index 76% rename from src/gooey/copilot_integrations/types/video_bots_stream_create_request_tts_provider.py rename to src/gooey/types/text_to_speech_page_request_tts_provider.py index 581f80a..a6b8938 100644 --- a/src/gooey/copilot_integrations/types/video_bots_stream_create_request_tts_provider.py +++ b/src/gooey/types/text_to_speech_page_request_tts_provider.py @@ -2,6 +2,6 @@ import typing -VideoBotsStreamCreateRequestTtsProvider = typing.Union[ +TextToSpeechPageRequestTtsProvider = typing.Union[ typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any ] diff --git a/src/gooey/types/text_to_speech_page_status_response.py b/src/gooey/types/text_to_speech_page_status_response.py index a8daeba..20070f3 100644 --- a/src/gooey/types/text_to_speech_page_status_response.py +++ b/src/gooey/types/text_to_speech_page_status_response.py @@ -1,12 +1,11 @@ # This 
file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .recipe_run_state import RecipeRunState +import typing from .text_to_speech_page_output import TextToSpeechPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class TextToSpeechPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/training_data_model.py b/src/gooey/types/training_data_model.py index 9aa9596..9ad85c5 100644 --- a/src/gooey/types/training_data_model.py +++ b/src/gooey/types/training_data_model.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing - import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel - class TrainingDataModel(UniversalBaseModel): prompt: str diff --git a/src/gooey/types/translate_request_selected_model.py b/src/gooey/types/translate_request_selected_model.py deleted file mode 100644 index b774b56..0000000 --- a/src/gooey/types/translate_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -TranslateRequestSelectedModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/translation_page_output.py b/src/gooey/types/translation_page_output.py index f79244f..00df5d7 100644 --- a/src/gooey/types/translation_page_output.py +++ b/src/gooey/types/translation_page_output.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class TranslationPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/translation_page_request_selected_model.py b/src/gooey/types/translation_page_request_selected_model.py new file mode 100644 index 0000000..62ae9ab --- /dev/null +++ b/src/gooey/types/translation_page_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +TranslationPageRequestSelectedModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/translation_page_status_response.py b/src/gooey/types/translation_page_status_response.py index 0f94d1b..ef6c530 100644 --- a/src/gooey/types/translation_page_status_response.py +++ b/src/gooey/types/translation_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. -import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .recipe_run_state import RecipeRunState +import typing from .translation_page_output import TranslationPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class TranslationPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/validation_error.py b/src/gooey/types/validation_error.py index 732944a..93a2d80 100644 --- a/src/gooey/types/validation_error.py +++ b/src/gooey/types/validation_error.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .validation_error_loc_item import ValidationErrorLocItem +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class ValidationError(UniversalBaseModel): diff --git a/src/gooey/types/vcard.py b/src/gooey/types/vcard.py index 352009d..a9e97e5 100644 --- a/src/gooey/types/vcard.py +++ b/src/gooey/types/vcard.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. +from ..core.pydantic_utilities import UniversalBaseModel import typing - +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel - class Vcard(UniversalBaseModel): format_name: str diff --git a/src/gooey/types/video_bots_page_output.py b/src/gooey/types/video_bots_page_output.py index 5fb0c12..84c9428 100644 --- a/src/gooey/types/video_bots_page_output.py +++ b/src/gooey/types/video_bots_page_output.py @@ -1,15 +1,14 @@ # This file was auto-generated by Fern from our API Definition. 
+from ..core.pydantic_utilities import UniversalBaseModel import typing - -import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .called_function_response import CalledFunctionResponse -from .reply_button import ReplyButton +from .video_bots_page_output_final_prompt import VideoBotsPageOutputFinalPrompt from .search_reference import SearchReference from .video_bots_page_output_final_keyword_query import VideoBotsPageOutputFinalKeywordQuery -from .video_bots_page_output_final_prompt import VideoBotsPageOutputFinalPrompt +from .reply_button import ReplyButton +from .called_function_response import CalledFunctionResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic class VideoBotsPageOutput(UniversalBaseModel): diff --git a/src/gooey/types/video_bots_page_output_final_prompt.py b/src/gooey/types/video_bots_page_output_final_prompt.py index 59fcaab..eefc1ca 100644 --- a/src/gooey/types/video_bots_page_output_final_prompt.py +++ b/src/gooey/types/video_bots_page_output_final_prompt.py @@ -1,7 +1,6 @@ # This file was auto-generated by Fern from our API Definition. import typing - from .conversation_entry import ConversationEntry VideoBotsPageOutputFinalPrompt = typing.Union[str, typing.List[ConversationEntry]] diff --git a/src/gooey/types/video_bots_page_status_response.py b/src/gooey/types/video_bots_page_status_response.py index d6ce8f2..d5d385f 100644 --- a/src/gooey/types/video_bots_page_status_response.py +++ b/src/gooey/types/video_bots_page_status_response.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. 
-import typing - +from ..core.pydantic_utilities import UniversalBaseModel import pydantic - -from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .recipe_run_state import RecipeRunState +import typing from .video_bots_page_output import VideoBotsPageOutput +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class VideoBotsPageStatusResponse(UniversalBaseModel): diff --git a/src/gooey/types/web_search_llm_request_selected_model.py b/src/gooey/types/web_search_llm_request_selected_model.py deleted file mode 100644 index d43a330..0000000 --- a/src/gooey/types/web_search_llm_request_selected_model.py +++ /dev/null @@ -1,47 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -WebSearchLlmRequestSelectedModel = typing.Union[ - typing.Literal[ - "gpt_4_o", - "gpt_4_o_mini", - "chatgpt_4_o", - "gpt_4_turbo_vision", - "gpt_4_vision", - "gpt_4_turbo", - "gpt_4", - "gpt_4_32k", - "gpt_3_5_turbo", - "gpt_3_5_turbo_16k", - "gpt_3_5_turbo_instruct", - "llama3_70b", - "llama_3_groq_70b_tool_use", - "llama3_8b", - "llama_3_groq_8b_tool_use", - "llama2_70b_chat", - "mixtral_8x7b_instruct_0_1", - "gemma_2_9b_it", - "gemma_7b_it", - "gemini_1_5_flash", - "gemini_1_5_pro", - "gemini_1_pro_vision", - "gemini_1_pro", - "palm2_chat", - "palm2_text", - "claude_3_5_sonnet", - "claude_3_opus", - "claude_3_sonnet", - "claude_3_haiku", - "sea_lion_7b_instruct", - "llama3_8b_cpt_sea_lion_v2_instruct", - "sarvam_2b", - "text_davinci_003", - "text_davinci_002", - "code_davinci_002", - "text_curie_001", - "text_babbage_001", - "text_ada_001", - ], - typing.Any, -] diff --git a/src/gooey/version.py b/src/gooey/version.py index e16ae65..ebbf8d2 100644 --- a/src/gooey/version.py +++ b/src/gooey/version.py @@ -1,4 +1,3 @@ - from importlib import metadata __version__ = metadata.version("gooeyai") diff --git a/src/gooey/web_search_gpt3/__init__.py b/src/gooey/web_search_gpt3/__init__.py deleted file mode 100644 index f3ea265..0000000 
--- a/src/gooey/web_search_gpt3/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/web_search_gpt3/client.py b/src/gooey/web_search_gpt3/client.py deleted file mode 100644 index 5b2b824..0000000 --- a/src/gooey/web_search_gpt3/client.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pydantic_utilities import parse_obj_as -from ..core.request_options import RequestOptions -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.generic_error_response import GenericErrorResponse -from ..types.google_gpt_page_status_response import GoogleGptPageStatusResponse -from ..types.http_validation_error import HttpValidationError - - -class WebSearchGpt3Client: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def status_google_gpt( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> GoogleGptPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - GoogleGptPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.web_search_gpt3.status_google_gpt( - run_id="run_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/google-gpt/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(GoogleGptPageStatusResponse, parse_obj_as(type_=GoogleGptPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncWebSearchGpt3Client: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def status_google_gpt( - self, *, run_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> GoogleGptPageStatusResponse: - """ - Parameters - ---------- - run_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - GoogleGptPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.web_search_gpt3.status_google_gpt( - run_id="run_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/google-gpt/status", method="GET", params={"run_id": run_id}, request_options=request_options - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast(GoogleGptPageStatusResponse, parse_obj_as(type_=GoogleGptPageStatusResponse, object_=_response.json())) # type: ignore - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast(typing.Any, parse_obj_as(type_=typing.Any, object_=_response.json())) # type: ignore - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast(HttpValidationError, parse_obj_as(type_=HttpValidationError, object_=_response.json())) # type: ignore - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast(GenericErrorResponse, parse_obj_as(type_=GenericErrorResponse, object_=_response.json())) # type: ignore - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/tests/custom/test_client.py b/tests/custom/test_client.py index 60a58e6..73f811f 100644 --- a/tests/custom/test_client.py +++ b/tests/custom/test_client.py @@ -1,5 +1,6 @@ import pytest + # Get started with writing tests with pytest at https://docs.pytest.org @pytest.mark.skip(reason="Unimplemented") def test_client() -> None: diff --git a/tests/utils/assets/models/__init__.py b/tests/utils/assets/models/__init__.py index 2cf0126..3a1c852 100644 --- a/tests/utils/assets/models/__init__.py +++ b/tests/utils/assets/models/__init__.py 
@@ -5,7 +5,7 @@ from .circle import CircleParams from .object_with_defaults import ObjectWithDefaultsParams from .object_with_optional_field import ObjectWithOptionalFieldParams -from .shape import Shape_CircleParams, Shape_SquareParams, ShapeParams +from .shape import ShapeParams, Shape_CircleParams, Shape_SquareParams from .square import SquareParams from .undiscriminated_shape import UndiscriminatedShapeParams diff --git a/tests/utils/assets/models/circle.py b/tests/utils/assets/models/circle.py index ad286fd..2f5b4b6 100644 --- a/tests/utils/assets/models/circle.py +++ b/tests/utils/assets/models/circle.py @@ -2,6 +2,7 @@ # This file was auto-generated by Fern from our API Definition. +import typing_extensions import typing_extensions from gooey.core.serialization import FieldMetadata diff --git a/tests/utils/assets/models/object_with_defaults.py b/tests/utils/assets/models/object_with_defaults.py index a977b1d..ef14f7b 100644 --- a/tests/utils/assets/models/object_with_defaults.py +++ b/tests/utils/assets/models/object_with_defaults.py @@ -3,6 +3,7 @@ # This file was auto-generated by Fern from our API Definition. import typing_extensions +import typing_extensions class ObjectWithDefaultsParams(typing_extensions.TypedDict): diff --git a/tests/utils/assets/models/object_with_optional_field.py b/tests/utils/assets/models/object_with_optional_field.py index e85eaca..fc1c218 100644 --- a/tests/utils/assets/models/object_with_optional_field.py +++ b/tests/utils/assets/models/object_with_optional_field.py @@ -2,13 +2,12 @@ # This file was auto-generated by Fern from our API Definition. 
-import datetime as dt +import typing_extensions import typing -import uuid - import typing_extensions from gooey.core.serialization import FieldMetadata - +import datetime as dt +import uuid from .color import Color from .shape import ShapeParams from .undiscriminated_shape import UndiscriminatedShapeParams @@ -32,4 +31,4 @@ class ObjectWithOptionalFieldParams(typing_extensions.TypedDict): union: typing_extensions.NotRequired[ShapeParams] second_union: typing_extensions.NotRequired[ShapeParams] undiscriminated_union: typing_extensions.NotRequired[UndiscriminatedShapeParams] - any: typing.Any + any: typing.Optional[typing.Any] diff --git a/tests/utils/assets/models/shape.py b/tests/utils/assets/models/shape.py index a8d62e6..17bda01 100644 --- a/tests/utils/assets/models/shape.py +++ b/tests/utils/assets/models/shape.py @@ -3,10 +3,9 @@ # This file was auto-generated by Fern from our API Definition. from __future__ import annotations - -import typing - import typing_extensions +import typing_extensions +import typing from gooey.core.serialization import FieldMetadata diff --git a/tests/utils/assets/models/square.py b/tests/utils/assets/models/square.py index 86f923f..f8482a9 100644 --- a/tests/utils/assets/models/square.py +++ b/tests/utils/assets/models/square.py @@ -2,6 +2,7 @@ # This file was auto-generated by Fern from our API Definition. +import typing_extensions import typing_extensions from gooey.core.serialization import FieldMetadata diff --git a/tests/utils/assets/models/undiscriminated_shape.py b/tests/utils/assets/models/undiscriminated_shape.py index 99f12b3..68876a2 100644 --- a/tests/utils/assets/models/undiscriminated_shape.py +++ b/tests/utils/assets/models/undiscriminated_shape.py @@ -3,7 +3,6 @@ # This file was auto-generated by Fern from our API Definition. 
import typing - from .circle import CircleParams from .square import SquareParams diff --git a/tests/utils/test_http_client.py b/tests/utils/test_http_client.py index b57dffd..50c69bd 100644 --- a/tests/utils/test_http_client.py +++ b/tests/utils/test_http_client.py @@ -45,3 +45,17 @@ def test_get_none_request_body() -> None: assert json_body_extras == {"see you": "later"} assert data_body_extras is None + + +def test_get_empty_json_request_body() -> None: + unrelated_request_options: RequestOptions = {"max_retries": 3} + json_body, data_body = get_request_body(json=None, data=None, request_options=unrelated_request_options, omit=None) + assert json_body is None + assert data_body is None + + json_body_extras, data_body_extras = get_request_body( + json={}, data=None, request_options=unrelated_request_options, omit=None + ) + + assert json_body_extras is None + assert data_body_extras is None diff --git a/tests/utils/test_query_encoding.py b/tests/utils/test_query_encoding.py index c41d8d8..57e34fb 100644 --- a/tests/utils/test_query_encoding.py +++ b/tests/utils/test_query_encoding.py @@ -1,16 +1,35 @@ # This file was auto-generated by Fern from our API Definition. 
+ from gooey.core.query_encoder import encode_query -def test_query_encoding() -> None: - assert encode_query({"hello world": "hello world"}) == {"hello world": "hello world"} - assert encode_query({"hello_world": {"hello": "world"}}) == {"hello_world[hello]": "world"} - assert encode_query({"hello_world": {"hello": {"world": "today"}, "test": "this"}, "hi": "there"}) == { - "hello_world[hello][world]": "today", - "hello_world[test]": "this", - "hi": "there", - } +def test_query_encoding_deep_objects() -> None: + assert encode_query({"hello world": "hello world"}) == [("hello world", "hello world")] + assert encode_query({"hello_world": {"hello": "world"}}) == [("hello_world[hello]", "world")] + assert encode_query({"hello_world": {"hello": {"world": "today"}, "test": "this"}, "hi": "there"}) == [ + ("hello_world[hello][world]", "today"), + ("hello_world[test]", "this"), + ("hi", "there"), + ] + + +def test_query_encoding_deep_object_arrays() -> None: + assert encode_query({"objects": [{"key": "hello", "value": "world"}, {"key": "foo", "value": "bar"}]}) == [ + ("objects[key]", "hello"), + ("objects[value]", "world"), + ("objects[key]", "foo"), + ("objects[value]", "bar"), + ] + assert encode_query( + {"users": [{"name": "string", "tags": ["string"]}, {"name": "string2", "tags": ["string2", "string3"]}]} + ) == [ + ("users[name]", "string"), + ("users[tags]", "string"), + ("users[name]", "string2"), + ("users[tags]", "string2"), + ("users[tags]", "string3"), + ] def test_encode_query_with_none() -> None: diff --git a/tests/utils/test_serialization.py b/tests/utils/test_serialization.py index 512cc18..fdb5c1a 100644 --- a/tests/utils/test_serialization.py +++ b/tests/utils/test_serialization.py @@ -1,10 +1,10 @@ # This file was auto-generated by Fern from our API Definition. 
-from typing import Any, List +from typing import List, Any from gooey.core.serialization import convert_and_respect_annotation_metadata +from .assets.models import ShapeParams, ObjectWithOptionalFieldParams -from .assets.models import ObjectWithOptionalFieldParams, ShapeParams UNION_TEST: ShapeParams = {"radius_measurement": 1.0, "shape_type": "circle", "id": "1"} UNION_TEST_CONVERTED = {"shapeType": "circle", "radiusMeasurement": 1.0, "id": "1"}