From 3d7b416327aae18e8625c251ddf918ca1a3cdf4d Mon Sep 17 00:00:00 2001 From: Alexander Metzger Date: Tue, 7 Nov 2023 10:52:04 -0800 Subject: [PATCH 001/138] include calendar url in urls --- recipes/QRCodeGenerator.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/recipes/QRCodeGenerator.py b/recipes/QRCodeGenerator.py index ed504bbbc..a6e69e6b3 100644 --- a/recipes/QRCodeGenerator.py +++ b/recipes/QRCodeGenerator.py @@ -560,6 +560,8 @@ def generate_and_upload_qr_code( user: AppUser, ) -> tuple[str, str, bool]: if request.qr_code_vcard: + if request.qr_code_vcard.urls and request.qr_code_vcard.calendar_url: + request.qr_code_vcard.urls += [request.qr_code_vcard.calendar_url] vcf_str = request.qr_code_vcard.to_vcf_str() qr_code_data = ShortenedURL.objects.get_or_create_for_workflow( content=vcf_str, From 33fe11aa6312af6a4c6c4e982108f64a762289df Mon Sep 17 00:00:00 2001 From: Alexander Metzger Date: Tue, 7 Nov 2023 11:06:57 -0800 Subject: [PATCH 002/138] pretty replicate buttons with radio interface --- gooey_ui/components.py | 39 ++++++++++++++++++++++++++++++++++++++ recipes/QRCodeGenerator.py | 2 +- 2 files changed, 40 insertions(+), 1 deletion(-) diff --git a/gooey_ui/components.py b/gooey_ui/components.py index bc44880be..cb129bc04 100644 --- a/gooey_ui/components.py +++ b/gooey_ui/components.py @@ -553,6 +553,45 @@ def table(df: "pd.DataFrame"): ).mount() +def horizontal_radio( + label: str, + options: typing.Sequence[T], + format_func: typing.Callable[[T], typing.Any] = _default_format, + key: str = None, + help: str = None, + *, + disabled: bool = False, + label_visibility: LabelVisibility = "visible", +) -> T | None: + if not options: + return None + options = list(options) + if not key: + key = md5_values("radio", label, options, help, label_visibility) + value = state.session_state.get(key) + if key not in state.session_state or value not in options: + value = options[0] + state.session_state.setdefault(key, value) + if label_visibility != "visible": + label = None + markdown(label) + for option in options: + if button( + format_func(option), + key=f"tab-{key}-{option}", + type="primary", + className="replicate-nav", + style={ + "background": "black" if value == option else "white", + "color": "white" if value == option else "black", + }, + disabled=disabled, + ): + state.session_state[key] = value = option + state.experimental_rerun() + return value + + def radio( label: str, options: typing.Sequence[T], diff --git a/recipes/QRCodeGenerator.py b/recipes/QRCodeGenerator.py index a6e69e6b3..6e7c75bde 100644 --- a/recipes/QRCodeGenerator.py +++ b/recipes/QRCodeGenerator.py @@ -136,7 +136,7 @@ def render_form_v2(self): if st.session_state.get(key): st.session_state[qr_code_source_key] = key break - source = st.radio( + source = st.horizontal_radio( "", options=QrSources._member_names_, key=qr_code_source_key, From bc4509346dfeab27c86abc0d71939ee84ff5df03 Mon Sep 17 00:00:00 2001 From: Alexander Metzger Date: Tue, 7 Nov 2023 11:07:48 -0800 Subject: [PATCH 003/138] emoji update --- recipes/QRCodeGenerator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/recipes/QRCodeGenerator.py b/recipes/QRCodeGenerator.py index 6e7c75bde..bfa82eaa1 100644 --- a/recipes/QRCodeGenerator.py +++ b/recipes/QRCodeGenerator.py @@ -44,9 +44,9 @@ class QrSources(Enum): qr_code_data = "🔗 URL or Text" - qr_code_vcard = "👩‍🦰 Contact Info" + qr_code_vcard = "📇 Contact Card" qr_code_file = "📄 Upload File" - qr_code_input_image = "📷 Existing QR Code" + qr_code_input_image = "🏁 Existing 
QR Code" class QRCodeGeneratorPage(BasePage): From edb9a0ac6a50d99c5ec9913099fa955d53fd2e7e Mon Sep 17 00:00:00 2001 From: Alexander Metzger Date: Tue, 7 Nov 2023 11:15:08 -0800 Subject: [PATCH 004/138] use active class instead of hardcoded styles --- gooey_ui/components.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/gooey_ui/components.py b/gooey_ui/components.py index cb129bc04..e4caef355 100644 --- a/gooey_ui/components.py +++ b/gooey_ui/components.py @@ -580,11 +580,7 @@ def horizontal_radio( format_func(option), key=f"tab-{key}-{option}", type="primary", - className="replicate-nav", - style={ - "background": "black" if value == option else "white", - "color": "white" if value == option else "black", - }, + className="replicate-nav " + ("active" if value == option else ""), disabled=disabled, ): state.session_state[key] = value = option From 86ecc1567ed3be3a32e0032709ca6573027de8f0 Mon Sep 17 00:00:00 2001 From: Alexander Metzger Date: Tue, 7 Nov 2023 11:20:39 -0800 Subject: [PATCH 005/138] fix meta preview image --- recipes/QRCodeGenerator.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/recipes/QRCodeGenerator.py b/recipes/QRCodeGenerator.py index bfa82eaa1..572dc0bd2 100644 --- a/recipes/QRCodeGenerator.py +++ b/recipes/QRCodeGenerator.py @@ -104,6 +104,8 @@ class ResponseModel(BaseModel): cleaned_qr_code: str def preview_image(self, state: dict) -> str | None: + if len(state.get("output_images") or []) > 0: + return state["output_images"][0] return DEFAULT_QR_CODE_META_IMG def related_workflows(self) -> list: From 15c61f10de022497060d423df8183ddd4adc3e3f Mon Sep 17 00:00:00 2001 From: Alexander Metzger Date: Tue, 14 Nov 2023 01:50:52 -0800 Subject: [PATCH 006/138] added non square resolutions for dalle3 --- daras_ai_v2/img_model_settings_widgets.py | 20 ++++++++++++++++++-- daras_ai_v2/stable_diffusion.py | 14 +++++++++++++- 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/daras_ai_v2/img_model_settings_widgets.py b/daras_ai_v2/img_model_settings_widgets.py index fea120051..17f5ea1c1 100644 --- a/daras_ai_v2/img_model_settings_widgets.py +++ b/daras_ai_v2/img_model_settings_widgets.py @@ -223,7 +223,10 @@ def quality_setting(selected_model=None): ) -RESOLUTIONS = { +RESOLUTIONS: dict[int, dict[str, str]] = { + 256: { + "256, 256": "square", + }, 512: { "512, 512": "square", "576, 448": "A4", @@ -247,6 +250,7 @@ def quality_setting(selected_model=None): "1536, 512": "smartphone", "1792, 512": "cinema", "2048, 512": "panorama", + "1792, 1024": "wide", }, } LANDSCAPE = "Landscape" @@ -281,12 +285,19 @@ def output_resolution_setting(): st.session_state.get("selected_model", st.session_state.get("selected_models")) or "" ) + allowed_shapes = RESOLUTIONS[st.session_state["__pixels"]].values() if not isinstance(selected_models, list): selected_models = [selected_models] if "jack_qiao" in selected_models or "sd_1_4" in selected_models: pixel_options = [512] elif selected_models == ["deepfloyd_if"]: pixel_options = [1024] + elif selected_models == ["dall_e"]: + pixel_options = [256, 512, 1024] + allowed_shapes = ["square"] + elif selected_models == ["dall_e_3"]: + pixel_options = [1024] + allowed_shapes = ["square", "wide"] else: pixel_options = [512, 768] @@ -298,11 +309,16 @@ def output_resolution_setting(): options=pixel_options, ) with col2: + res_options = [ + key + for key, val in RESOLUTIONS[pixels or pixel_options[0]].items() + if val in allowed_shapes + ] res = st.selectbox( "##### Resolution", key="__res", format_func=lambda r: 
f"{r.split(', ')[0]} x {r.split(', ')[1]} ({RESOLUTIONS[pixels][r]})", - options=list(RESOLUTIONS[pixels].keys()), + options=res_options, ) res = tuple(map(int, res.split(", "))) diff --git a/daras_ai_v2/stable_diffusion.py b/daras_ai_v2/stable_diffusion.py index 4e5b09b0d..ee3ce21de 100644 --- a/daras_ai_v2/stable_diffusion.py +++ b/daras_ai_v2/stable_diffusion.py @@ -268,18 +268,21 @@ def text2img( negative_prompt: str = None, scheduler: str = None, ): - _resolution_check(width, height, max_size=(1024, 1024)) + if selected_model != Text2ImgModels.dall_e_3.name: + _resolution_check(width, height, max_size=(1024, 1024)) match selected_model: case Text2ImgModels.dall_e_3.name: from openai import OpenAI client = OpenAI() + width, height = _get_dalle_3_img_size(width, height) response = client.images.generate( model=text2img_model_ids[Text2ImgModels[selected_model]], n=num_outputs, prompt=prompt, response_format="b64_json", + size=f"{width}x{height}", ) out_imgs = [b64_img_decode(part.b64_json) for part in response.data] case Text2ImgModels.dall_e.name: @@ -332,6 +335,15 @@ def _get_dalle_img_size(width: int, height: int) -> int: return edge +def _get_dalle_3_img_size(width: int, height: int) -> tuple[int, int]: + if height == width: + return 1024, 1024 + elif width < height: + return 1024, 1792 + else: + return 1792, 1024 + + def img2img( *, selected_model: str, From 33ef6c0326957d9080c355e860c6cfcccc21ed11 Mon Sep 17 00:00:00 2001 From: Alexander Metzger Date: Tue, 14 Nov 2023 02:38:53 -0800 Subject: [PATCH 007/138] add sd and hd settings --- daras_ai_v2/img_model_settings_widgets.py | 31 ++++++++++++++++++++--- daras_ai_v2/stable_diffusion.py | 6 ++++- recipes/CompareText2Img.py | 5 +++- 3 files changed, 36 insertions(+), 6 deletions(-) diff --git a/daras_ai_v2/img_model_settings_widgets.py b/daras_ai_v2/img_model_settings_widgets.py index 17f5ea1c1..7e0e35d22 100644 --- a/daras_ai_v2/img_model_settings_widgets.py +++ b/daras_ai_v2/img_model_settings_widgets.py @@ -181,7 +181,7 @@ def controlnet_weight_setting( ) -def num_outputs_setting(selected_model: str = None): +def num_outputs_setting(selected_models: str | list[str] = None): col1, col2 = st.columns(2, gap="medium") with col1: st.slider( @@ -200,12 +200,35 @@ def num_outputs_setting(selected_model: str = None): """ ) with col2: - quality_setting(selected_model) + quality_setting(selected_models) -def quality_setting(selected_model=None): - if selected_model in [InpaintingModels.dall_e.name]: +def quality_setting(selected_models=None): + if not isinstance(selected_models, list): + selected_models = [selected_models] + if any( + [ + selected_model in [InpaintingModels.dall_e.name] + for selected_model in selected_models + ] + ): return + if any( + [ + selected_model in [Text2ImgModels.dall_e_3.name] + for selected_model in selected_models + ] + ): + st.selectbox( + """##### Quality""", + options=[ + "standard, natural", + "hd, natural", + "standard, vivid", + "hd, vivid", + ], + key="dalle_3_quality", + ) st.slider( label=""" ##### Quality diff --git a/daras_ai_v2/stable_diffusion.py b/daras_ai_v2/stable_diffusion.py index ee3ce21de..7fd640c2b 100644 --- a/daras_ai_v2/stable_diffusion.py +++ b/daras_ai_v2/stable_diffusion.py @@ -261,6 +261,7 @@ def text2img( prompt: str, num_outputs: int, num_inference_steps: int, + dalle_3_quality: str, width: int, height: int, seed: int = 42, @@ -277,11 +278,14 @@ def text2img( client = OpenAI() width, height = _get_dalle_3_img_size(width, height) + quality, style = dalle_3_quality.split(", ") 
response = client.images.generate( model=text2img_model_ids[Text2ImgModels[selected_model]], - n=num_outputs, + n=1, # num_outputs, not supported yet prompt=prompt, response_format="b64_json", + quality=quality, + style=style, size=f"{width}x{height}", ) out_imgs = [b64_img_decode(part.b64_json) for part in response.data] diff --git a/recipes/CompareText2Img.py b/recipes/CompareText2Img.py index 05fd37057..9c3ec4124 100644 --- a/recipes/CompareText2Img.py +++ b/recipes/CompareText2Img.py @@ -40,6 +40,7 @@ class CompareText2ImgPage(BasePage): "seed": 42, "sd_2_upscaling": False, "image_guidance_scale": 1.2, + "dalle_3_quality": "standard, vivid", } class RequestModel(BaseModel): @@ -51,6 +52,7 @@ class RequestModel(BaseModel): num_outputs: int | None quality: int | None + dalle_3_quality: str | None guidance_scale: float | None seed: int | None @@ -152,7 +154,7 @@ def render_settings(self): negative_prompt_setting() output_resolution_setting() - num_outputs_setting() + num_outputs_setting(st.session_state.get("selected_models", [])) sd_2_upscaling_setting() col1, col2 = st.columns(2) with col1: @@ -178,6 +180,7 @@ def run(self, state: dict) -> typing.Iterator[str | None]: prompt=request.text_prompt, num_outputs=request.num_outputs, num_inference_steps=request.quality, + dalle_3_quality=request.dalle_3_quality, width=request.output_width, height=request.output_height, guidance_scale=request.guidance_scale, From 060805c4a9ae9a6d52f3db4e6c3817ef70b3395b Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 14 Nov 2023 16:32:42 +0530 Subject: [PATCH 008/138] Add countdown timer component Additionally, also refactor base.py for readability and clear run states (waiting / success / error / recipe_root). This PR also adds a render_extra_waiting_output method on base.py, that can be updated separately for each recipe. 
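For context, a recipe can hook into the new waiting state roughly like this — a minimal sketch (not part of this commit) that assumes the `countdown_timer` component and `render_extra_waiting_output` hook introduced here, plus a hypothetical `estimate_run_duration` helper on the page:

```python
from datetime import datetime, timedelta

import gooey_ui as st
from daras_ai_v2.base import BasePage


class MyRecipePage(BasePage):
    def render_extra_waiting_output(self):
        # called by base.py while the run is in the "waiting" state
        created_at = st.session_state.get("created_at")
        if not created_at:
            return
        start_time = datetime.fromisoformat(created_at)
        with st.countdown_timer(
            end_time=start_time + timedelta(seconds=self.estimate_run_duration()),
            delay_text="Sorry for the wait. Your run is taking longer than we expected.",
        ):
            st.write("We'll show the output here as soon as the run finishes.")

    def estimate_run_duration(self) -> int:
        # hypothetical flat estimate, in seconds
        return 180
```

The DeforumSD change in this patch follows the same pattern, deriving the estimate from `max_frames` instead of a flat number.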
--- bots/models.py | 4 +-- daras_ai_v2/base.py | 81 ++++++++++++++++++++++++++++++++---------- gooey_ui/components.py | 12 +++++++ recipes/DeforumSD.py | 22 ++++++++++++ 4 files changed, 98 insertions(+), 21 deletions(-) diff --git a/bots/models.py b/bots/models.py index b527cd998..155a1c40a 100644 --- a/bots/models.py +++ b/bots/models.py @@ -189,9 +189,9 @@ def to_dict(self) -> dict: ret = self.state.copy() if self.updated_at: - ret[StateKeys.updated_at] = self.updated_at + ret[StateKeys.updated_at] = self.updated_at.isoformat() if self.created_at: - ret[StateKeys.created_at] = self.created_at + ret[StateKeys.created_at] = self.created_at.isoformat() if self.error_msg: ret[StateKeys.error_msg] = self.error_msg if self.run_time: diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index f8899b172..58f6ae427 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -146,6 +146,15 @@ def api_url(self, example_id=None, run_id=None, uid=None) -> furl: def endpoint(self) -> str: return f"/v2/{self.slug_versions[0]}/" + def get_tab_url(self, tab: str) -> str: + example_id, run_id, uid = extract_query_params(gooey_get_query_params()) + return self.app_url( + example_id=example_id, + run_id=run_id, + uid=uid, + tab_name=MenuTabs.paths[tab], + ) + def render(self): with sentry_sdk.configure_scope() as scope: scope.set_extra("base_url", self.app_url()) @@ -191,10 +200,7 @@ def render(self): with st.nav_tabs(): tab_names = self.get_tabs() for name in tab_names: - url = self.app_url( - *extract_query_params(gooey_get_query_params()), - tab_name=MenuTabs.paths[name], - ) + url = self.get_tab_url(name) with st.nav_item(url, active=name == selected_tab): st.html(name) with st.nav_tab_content(): @@ -634,6 +640,19 @@ def _render_input_col(self): submitted = self.render_submit_button() return submitted + def get_run_state( + self, + ) -> typing.Literal["success", "error", "waiting", "recipe_root"]: + if st.session_state.get(StateKeys.run_status): + return "waiting" + elif st.session_state.get(StateKeys.error_msg): + return "error" + elif st.session_state.get(StateKeys.run_time): + return "success" + else: + # when user is at a recipe root, and not running anything + return "recipe_root" + def _render_output_col(self, submitted: bool): assert inspect.isgeneratorfunction(self.run) @@ -647,27 +666,40 @@ def _render_output_col(self, submitted: bool): self._render_before_output() - run_status = st.session_state.get(StateKeys.run_status) - if run_status: - st.caption("Your changes are saved in the above URL. Save it for later!") - html_spinner(run_status) - else: - err_msg = st.session_state.get(StateKeys.error_msg) - run_time = st.session_state.get(StateKeys.run_time, 0) - - # render errors - if err_msg is not None: - st.error(err_msg) - # render run time - elif run_time: - st.success(f"Success! Run Time: `{run_time:.2f}` seconds.") + run_state = self.get_run_state() + match run_state: + case "success": + self._render_success_output() + case "error": + self._render_error_output() + case "waiting": + self._render_waiting_output() + case "recipe_root": + pass # render outputs self.render_output() - if not run_status: + if run_state != "waiting": self._render_after_output() + def _render_success_output(self): + run_time = st.session_state.get(StateKeys.run_time, 0) + st.success(f"Success! 
Run Time: `{run_time:.2f}` seconds.") + + def _render_error_output(self): + err_msg = st.session_state.get(StateKeys.error_msg) + st.error(err_msg) + + def _render_waiting_output(self): + run_status = st.session_state.get(StateKeys.run_status) + st.caption("Your changes are saved in the above URL. Save it for later!") + html_spinner(run_status) + self.render_extra_waiting_output() + + def render_extra_waiting_output(self): + pass + def on_submit(self): example_id, run_id, uid = self.create_new_run() if settings.CREDITS_TO_DEDUCT_PER_RUN and not self.check_credits(): @@ -1150,6 +1182,17 @@ def is_current_user_admin(self) -> bool: def is_current_user_paying(self) -> bool: return bool(self.request and self.request.user and self.request.user.is_paying) + def is_current_user_owner(self) -> bool: + """ + Did the current user create this run? + """ + return bool( + self.request + and self.request.user + and self.run_user + and self.request.user.uid == self.run_user.uid + ) + def get_example_request_body( request_model: typing.Type[BaseModel], diff --git a/gooey_ui/components.py b/gooey_ui/components.py index b6919a487..297c753aa 100644 --- a/gooey_ui/components.py +++ b/gooey_ui/components.py @@ -2,6 +2,7 @@ import math import textwrap import typing +from datetime import datetime, timezone import numpy as np @@ -32,6 +33,17 @@ def dummy(*args, **kwargs): dataframe = dummy +def countdown_timer( + end_time: datetime, + delay_text: str, +) -> state.NestingCtx: + return _node( + "countdown-timer", + endTime=end_time.astimezone(timezone.utc).isoformat(), + delayText=delay_text, + ) + + def nav_tabs(): return _node("nav-tabs") diff --git a/recipes/DeforumSD.py b/recipes/DeforumSD.py index c2ebd231c..c045f2966 100644 --- a/recipes/DeforumSD.py +++ b/recipes/DeforumSD.py @@ -1,5 +1,6 @@ import typing import uuid +from datetime import datetime, timedelta from django.db.models import TextChoices from pydantic import BaseModel @@ -12,6 +13,7 @@ from daras_ai_v2.gpu_server import call_celery_task_outfile from daras_ai_v2.loom_video_widget import youtube_video from daras_ai_v2.safety_checker import safety_checker +from daras_ai_v2.tabs_widget import MenuTabs class AnimationModels(TextChoices): @@ -27,6 +29,7 @@ class _AnimationPrompt(TypedDict): AnimationPrompts = list[_AnimationPrompt] CREDITS_PER_FRAME = 1.5 +MODEL_ESTIMATED_TIME_PER_FRAME = 2.4 # seconds def input_prompt_to_animation_prompts(input_prompt: str): @@ -417,6 +420,25 @@ def render_output(self): st.write("Output Video") st.video(output_video, autoplay=True) + def render_extra_waiting_output(self): + if created_at := st.session_state.get("created_at"): + start_time = datetime.fromisoformat(created_at) + with st.countdown_timer( + end_time=start_time + timedelta(seconds=self.estimate_run_duration()), + delay_text="Sorry for the wait. 
Your run is taking longer than we expected.", + ): + if self.is_current_user_owner() and self.request.user.email: + st.write( + f"""We'll email **{self.request.user.email}** when your workflow is done.""" + ) + st.write( + f"""In the meantime, check out [🚀 Examples]({self.get_tab_url(MenuTabs.examples)}) for inspiration.""" + ) + + def estimate_run_duration(self): + # in seconds + return st.session_state.get("max_frames", 100) * MODEL_ESTIMATED_TIME_PER_FRAME + def render_example(self, state: dict): display = self.preview_input(state) st.markdown("```lua\n" + display + "\n```") From f2bec729ce54457328e63b79f709caacf563f9a4 Mon Sep 17 00:00:00 2001 From: Alexander Metzger Date: Tue, 21 Nov 2023 11:47:29 -0800 Subject: [PATCH 009/138] update deepgram supported lang list --- daras_ai_v2/asr.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/daras_ai_v2/asr.py b/daras_ai_v2/asr.py index bcbfa495b..d43e37ad0 100644 --- a/daras_ai_v2/asr.py +++ b/daras_ai_v2/asr.py @@ -41,6 +41,11 @@ AZURE_SUPPORTED = {"af-ZA", "am-ET", "ar-AE", "ar-BH", "ar-DZ", "ar-EG", "ar-IL", "ar-IQ", "ar-JO", "ar-KW", "ar-LB", "ar-LY", "ar-MA", "ar-OM", "ar-PS", "ar-QA", "ar-SA", "ar-SY", "ar-TN", "ar-YE", "az-AZ", "bg-BG", "bn-IN", "bs-BA", "ca-ES", "cs-CZ", "cy-GB", "da-DK", "de-AT", "de-CH", "de-DE", "el-GR", "en-AU", "en-CA", "en-GB", "en-GH", "en-HK", "en-IE", "en-IN", "en-KE", "en-NG", "en-NZ", "en-PH", "en-SG", "en-TZ", "en-US", "en-ZA", "es-AR", "es-BO", "es-CL", "es-CO", "es-CR", "es-CU", "es-DO", "es-EC", "es-ES", "es-GQ", "es-GT", "es-HN", "es-MX", "es-NI", "es-PA", "es-PE", "es-PR", "es-PY", "es-SV", "es-US", "es-UY", "es-VE", "et-EE", "eu-ES", "fa-IR", "fi-FI", "fil-PH", "fr-BE", "fr-CA", "fr-CH", "fr-FR", "ga-IE", "gl-ES", "gu-IN", "he-IL", "hi-IN", "hr-HR", "hu-HU", "hy-AM", "id-ID", "is-IS", "it-CH", "it-IT", "ja-JP", "jv-ID", "ka-GE", "kk-KZ", "km-KH", "kn-IN", "ko-KR", "lo-LA", "lt-LT", "lv-LV", "mk-MK", "ml-IN", "mn-MN", "mr-IN", "ms-MY", "mt-MT", "my-MM", "nb-NO", "ne-NP", "nl-BE", "nl-NL", "pa-IN", "pl-PL", "ps-AF", "pt-BR", "pt-PT", "ro-RO", "ru-RU", "si-LK", "sk-SK", "sl-SI", "so-SO", "sq-AL", "sr-RS", "sv-SE", "sw-KE", "sw-TZ", "ta-IN", "te-IN", "th-TH", "tr-TR", "uk-UA", "ur-IN", "uz-UZ", "vi-VN", "wuu-CN", "yue-CN", "zh-CN", "zh-CN-shandong", "zh-CN-sichuan", "zh-HK", "zh-TW", "zu-ZA"} # fmt: skip MAX_POLLS = 100 +# https://deepgram.com/product/languages for the "general" model: +# DEEPGRAM_SUPPORTED = {"nl","en","en-AU","en-US","en-GB","en-NZ","en-IN","fr","fr-CA","de","hi","hi-Latn","id","it","ja","ko","cmn-Hans-CN","cmn-Hant-TW","no","pl","pt","pt-PT","pt-BR","ru","es","es-419","sv","tr","uk"} # fmt: skip +# but we only have the Nova tier so these are our languages (https://developers.deepgram.com/docs/models-languages-overview): +DEEPGRAM_SUPPORTED = {"en", "en-US", "en-AU", "en-GB", "en-NZ", "en-IN", "es", "es-419"} # fmt: skip + class AsrModels(Enum): whisper_large_v2 = "Whisper Large v2 (openai)" @@ -76,7 +81,7 @@ class AsrModels(Enum): asr_supported_languages = { AsrModels.whisper_large_v2: WHISPER_SUPPORTED, AsrModels.usm: CHIRP_SUPPORTED, - AsrModels.deepgram: WHISPER_SUPPORTED, + AsrModels.deepgram: DEEPGRAM_SUPPORTED, AsrModels.seamless_m4t: SEAMLESS_SUPPORTED, AsrModels.azure: AZURE_SUPPORTED, } From 2344a9f3b310f76943f19eda161d182c486498c9 Mon Sep 17 00:00:00 2001 From: Alexander Metzger Date: Tue, 21 Nov 2023 11:54:26 -0800 Subject: [PATCH 010/138] remove azure asr autodetect since it is extremely slow and does not provide accurate results --- 
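A note on the resulting behaviour (not part of the patch itself): with Azure listed in `does_not_support_auto_detect`, the language selector stops offering the auto-detect (`None`) choice for it, so Azure transcriptions need an explicit locale such as "en-US". A rough sketch of the option-building logic, reusing the names from `daras_ai_v2/asr.py`:

```python
from daras_ai_v2.asr import AsrModels, asr_supported_languages, does_not_support_auto_detect


def language_options(model: AsrModels) -> list[str | None]:
    # illustrative helper, not in the codebase: mirrors what asr_language_selector() offers
    options: list[str | None] = list(asr_supported_languages.get(model, []))
    if model not in does_not_support_auto_detect:
        options.insert(0, None)  # None means "auto-detect"
    return options


assert None not in language_options(AsrModels.azure)  # Azure: a locale must be chosen
assert None in language_options(AsrModels.whisper_large_v2)  # Whisper keeps auto-detect
```

Only models listed in `does_not_support_auto_detect` lose the auto-detect entry; Whisper, Chirp and the others keep it.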
daras_ai_v2/asr.py | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/daras_ai_v2/asr.py b/daras_ai_v2/asr.py index d43e37ad0..081bdebcc 100644 --- a/daras_ai_v2/asr.py +++ b/daras_ai_v2/asr.py @@ -86,6 +86,10 @@ class AsrModels(Enum): AsrModels.azure: AZURE_SUPPORTED, } +does_not_support_auto_detect = { + AsrModels.azure, +} + class AsrChunk(typing_extensions.TypedDict): timestamp: tuple[float, float] @@ -160,7 +164,9 @@ def asr_language_selector( st.session_state[key] = forced_lang return forced_lang - options = [None, *asr_supported_languages.get(selected_model, [])] + options = ([] if selected_model in does_not_support_auto_detect else [None]) + [ + *asr_supported_languages.get(selected_model, []) + ] # handle non-canonical language codes old_val = st.session_state.get(key) @@ -555,19 +561,6 @@ def azure_asr(audio_url: str, language: str): }, "locale": language or "en-US", } - if not language: - payload["properties"]["languageIdentification"] = { - "candidateLocales": [ - "en-US", - "en-IN", - "hi-IN", - "te-IN", - "ta-IN", - "kn-IN", - "es-ES", - "de-DE", - ] - } r = requests.post( str(furl(settings.AZURE_SPEECH_ENDPOINT) / "speechtotext/v3.1/transcriptions"), headers={ From 4b62dabd78225fdc8fd5df9d418548a14a4cc1c9 Mon Sep 17 00:00:00 2001 From: Alexander Metzger Date: Tue, 21 Nov 2023 11:55:27 -0800 Subject: [PATCH 011/138] longer polling wait --- daras_ai_v2/asr.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daras_ai_v2/asr.py b/daras_ai_v2/asr.py index 081bdebcc..beda96274 100644 --- a/daras_ai_v2/asr.py +++ b/daras_ai_v2/asr.py @@ -581,7 +581,7 @@ def azure_asr(audio_url: str, language: str): }, ) if not r.ok or not r.json()["status"] == "Succeeded": - sleep(1) + sleep(5) continue r = requests.get( r.json()["links"]["files"], From 6acb7b9136f43e4ddc7ba7b4b13f1801d7f3036e Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Fri, 24 Nov 2023 19:49:58 +0530 Subject: [PATCH 012/138] Add UI component for save menu --- daras_ai_v2/base.py | 72 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 68 insertions(+), 4 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 8a10f06de..a1331a1b2 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -176,8 +176,12 @@ def render(self): StateKeys.page_notes, self.preview_description(st.session_state) ) - self._render_page_title_with_breadcrumbs(example_id, run_id, uid) - st.write(st.session_state.get(StateKeys.page_notes)) + with st.div(className="d-lg-flex d-md-block justify-content-between"): + with st.div(): + self._render_page_title_with_breadcrumbs(example_id, run_id, uid) + st.write(st.session_state.get(StateKeys.page_notes)) + with st.div(): + self._render_save_menu() try: selected_tab = MenuTabs.paths_reverse[self.tab] @@ -197,6 +201,66 @@ def render(self): with st.nav_tab_content(): self.render_selected_tab(selected_tab) + def _render_save_menu(self): + if not self.is_current_user_owner(): + return + + with st.div(className="d-flex justify-content-end"): + save_button_space, cancel_button_space = st.tag("span"), st.tag("span") + with save_button_space: + save_button = st.button("💾 Save", className="mb-0") + with cancel_button_space: + cancel_button = st.button("❌ Cancel", className="mb-0") + if save_button or cancel_button: + st.session_state["__save_mode"] = not st.session_state.get( + "__save_mode", False + ) + + is_save_mode = st.session_state.get("__save_mode") + if not is_save_mode: + 
cancel_button_space.empty() + else: + save_button_space.empty() + with st.div(className="d-flex justify-content-end"): + st.html( + """ + + """ + ) + with st.div(className="bg-light border p-4 save-button-menu"): + st.radio( + "Publish to", + options=[ + "Only me + people with a link", + "Public", + ], + ) + st.radio( + "", + options=[ + "Anyone at my org (coming soon)", + ], + disabled=True, + checked_by_default=False, + ) + with st.div(className="mt-4"): + st.text_input( + "Title", + key="published_run_title", + value=st.session_state[StateKeys.page_title], + ) + + with st.div(className="mt-4 d-flex justify-content-center"): + publish_button = st.button("🌻 Publish") + def _render_page_title_with_breadcrumbs( self, example_id: str, run_id: str, uid: str ): @@ -538,7 +602,7 @@ def render_submit_button(self, key="--submit-1"): cost_note = f"({cost_note.strip()})" st.caption( f""" -Run cost = {self.get_price_roundoff(st.session_state)} credits {cost_note} +Run cost = {self.get_price_roundoff(st.session_state)} credits {cost_note} {self.additional_notes() or ""} """, unsafe_allow_html=True, @@ -811,7 +875,7 @@ def generate_credit_error_message(self, example_id, run_id, uid) -> str: Doh! Please login to run more Gooey.AI workflows.

-You’ll receive {settings.LOGIN_USER_FREE_CREDITS} Credits when you sign up via your phone #, Google, Apple or GitHub account +You’ll receive {settings.LOGIN_USER_FREE_CREDITS} Credits when you sign up via your phone #, Google, Apple or GitHub account and can purchase more for $1/100 Credits. """ else: From 8ae5705259c36c7d31c29d53de0a3cdfb089f84b Mon Sep 17 00:00:00 2001 From: Alexander Metzger Date: Mon, 27 Nov 2023 08:48:33 -0800 Subject: [PATCH 013/138] cleaned up auto_detect support logic --- daras_ai_v2/asr.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/daras_ai_v2/asr.py b/daras_ai_v2/asr.py index beda96274..6552310bd 100644 --- a/daras_ai_v2/asr.py +++ b/daras_ai_v2/asr.py @@ -164,9 +164,9 @@ def asr_language_selector( st.session_state[key] = forced_lang return forced_lang - options = ([] if selected_model in does_not_support_auto_detect else [None]) + [ - *asr_supported_languages.get(selected_model, []) - ] + options = list(asr_supported_languages.get(selected_model, [])) + if selected_model not in does_not_support_auto_detect: + options.insert(0, None) # handle non-canonical language codes old_val = st.session_state.get(key) From 9afe1914fce0650f3b4e7bb9ed0c4e5c33258594 Mon Sep 17 00:00:00 2001 From: Alexander Metzger Date: Mon, 27 Nov 2023 09:21:01 -0800 Subject: [PATCH 014/138] split dalle_3 style and quality inputs --- daras_ai_v2/img_model_settings_widgets.py | 16 +++++++++++----- daras_ai_v2/stable_diffusion.py | 8 ++++---- recipes/CompareText2Img.py | 5 ++++- 3 files changed, 19 insertions(+), 10 deletions(-) diff --git a/daras_ai_v2/img_model_settings_widgets.py b/daras_ai_v2/img_model_settings_widgets.py index 7e0e35d22..cae8d053a 100644 --- a/daras_ai_v2/img_model_settings_widgets.py +++ b/daras_ai_v2/img_model_settings_widgets.py @@ -220,15 +220,21 @@ def quality_setting(selected_models=None): ] ): st.selectbox( - """##### Quality""", + """##### Dalle 3 Quality""", options=[ - "standard, natural", - "hd, natural", - "standard, vivid", - "hd, vivid", + "standard", + "hd", ], key="dalle_3_quality", ) + st.selectbox( + """##### Dalle 3 Style""", + options=[ + "natural", + "vivid", + ], + key="dalle_3_style", + ) st.slider( label=""" ##### Quality diff --git a/daras_ai_v2/stable_diffusion.py b/daras_ai_v2/stable_diffusion.py index 7fd640c2b..9a439b648 100644 --- a/daras_ai_v2/stable_diffusion.py +++ b/daras_ai_v2/stable_diffusion.py @@ -261,13 +261,14 @@ def text2img( prompt: str, num_outputs: int, num_inference_steps: int, - dalle_3_quality: str, width: int, height: int, seed: int = 42, guidance_scale: float = None, negative_prompt: str = None, scheduler: str = None, + dalle_3_quality: str | None = None, + dalle_3_style: str | None = None, ): if selected_model != Text2ImgModels.dall_e_3.name: _resolution_check(width, height, max_size=(1024, 1024)) @@ -278,14 +279,13 @@ def text2img( client = OpenAI() width, height = _get_dalle_3_img_size(width, height) - quality, style = dalle_3_quality.split(", ") response = client.images.generate( model=text2img_model_ids[Text2ImgModels[selected_model]], n=1, # num_outputs, not supported yet prompt=prompt, response_format="b64_json", - quality=quality, - style=style, + quality=dalle_3_quality, + style=dalle_3_style, size=f"{width}x{height}", ) out_imgs = [b64_img_decode(part.b64_json) for part in response.data] diff --git a/recipes/CompareText2Img.py b/recipes/CompareText2Img.py index 9c3ec4124..bf71fc0b4 100644 --- a/recipes/CompareText2Img.py +++ b/recipes/CompareText2Img.py @@ -40,7 +40,8 @@ class 
CompareText2ImgPage(BasePage): "seed": 42, "sd_2_upscaling": False, "image_guidance_scale": 1.2, - "dalle_3_quality": "standard, vivid", + "dalle_3_quality": "standard", + "dalle_3_style": "vivid", } class RequestModel(BaseModel): @@ -53,6 +54,7 @@ class RequestModel(BaseModel): num_outputs: int | None quality: int | None dalle_3_quality: str | None + dalle_3_style: str | None guidance_scale: float | None seed: int | None @@ -181,6 +183,7 @@ def run(self, state: dict) -> typing.Iterator[str | None]: num_outputs=request.num_outputs, num_inference_steps=request.quality, dalle_3_quality=request.dalle_3_quality, + dalle_3_style=request.dalle_3_style, width=request.output_width, height=request.output_height, guidance_scale=request.guidance_scale, From cb348d7946e27c677f4372f8e6a2ab95b4c85df0 Mon Sep 17 00:00:00 2001 From: Alexander Metzger Date: Mon, 27 Nov 2023 11:02:00 -0800 Subject: [PATCH 015/138] fix merge --- gooey_ui/components.py | 5 ++++- recipes/QRCodeGenerator.py | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/gooey_ui/components.py b/gooey_ui/components.py index 375e0133e..9bc9cb45d 100644 --- a/gooey_ui/components.py +++ b/gooey_ui/components.py @@ -418,6 +418,9 @@ def button( """ if not key: key = md5_values("button", label, help, type, props) + className = "btn-" + type + if "className" in props: + className += " " + props.pop("className") state.RenderTreeNode( name="gui-button", props=dict( @@ -427,7 +430,7 @@ def button( label=dedent(label), help=help, disabled=disabled, - className="btn-" + type, + className=className, **props, ), ).mount() diff --git a/recipes/QRCodeGenerator.py b/recipes/QRCodeGenerator.py index 82b4fb5fa..558328a82 100644 --- a/recipes/QRCodeGenerator.py +++ b/recipes/QRCodeGenerator.py @@ -438,7 +438,7 @@ def vcard_form(*, key: str) -> VCARD: ) if vcard.email and st.button( - "Import other contact info from my email - magic!", + "Import other contact info from my email - magic!", type="link", ): imported_vcard = get_vcard_from_email(vcard.email) From c7581a6fb3c8da0ab89d4bb73a475770a979a040 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 28 Nov 2023 12:22:16 +0530 Subject: [PATCH 016/138] Fix allow additional class names for buttons --- gooey_ui/components.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/gooey_ui/components.py b/gooey_ui/components.py index a1496dc45..19d8b42b1 100644 --- a/gooey_ui/components.py +++ b/gooey_ui/components.py @@ -418,6 +418,7 @@ def button( """ if not key: key = md5_values("button", label, help, type, props) + className = f"btn-{type} " + props.pop("className", "") state.RenderTreeNode( name="gui-button", props=dict( @@ -427,7 +428,7 @@ def button( label=dedent(label), help=help, disabled=disabled, - className="btn-" + type, + className=className, **props, ), ).mount() From 549d0587175b96e5c513266d7b589367cdd177f5 Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Tue, 28 Nov 2023 12:57:36 +0530 Subject: [PATCH 017/138] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index beb3748f9..a0a41c537 100644 --- a/README.md +++ b/README.md @@ -87,7 +87,7 @@ gooey.ai (dev) App ID: 228027632918921 ``` -Create a [meta developer account](https://developers.facebook.com/docs/development/register/) & ask someone to add you to the test app [here](https://developers.facebook.com/apps/228027632918921/roles/roles/?business_id=549319917267066) +Create a [meta developer 
account](https://developers.facebook.com/docs/development/register/) & send admin your **facebook ID** to add you to the test app [here](https://developers.facebook.com/apps/228027632918921/roles/roles/?business_id=549319917267066) 1. start ngrok From ddbbfc5df937f3b5a89607a21b352d91bc777644 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 28 Nov 2023 20:54:02 +0530 Subject: [PATCH 018/138] gooey-ui: Allow unchecked radio buttons with flag --- gooey_ui/components.py | 3 +- gooey_ui/components/__init__.py | 852 ++++++++++++++++++++++++++++++++ gooey_ui/components/modal.py | 169 +++++++ 3 files changed, 1023 insertions(+), 1 deletion(-) create mode 100644 gooey_ui/components/__init__.py create mode 100644 gooey_ui/components/modal.py diff --git a/gooey_ui/components.py b/gooey_ui/components.py index 19d8b42b1..bf5c1ef20 100644 --- a/gooey_ui/components.py +++ b/gooey_ui/components.py @@ -571,6 +571,7 @@ def radio( help: str = None, *, disabled: bool = False, + checked_by_default: bool = True, label_visibility: LabelVisibility = "visible", ) -> T | None: if not options: @@ -579,7 +580,7 @@ def radio( if not key: key = md5_values("radio", label, options, help, label_visibility) value = state.session_state.get(key) - if key not in state.session_state or value not in options: + if (key not in state.session_state or value not in options) and checked_by_default: value = options[0] state.session_state.setdefault(key, value) if label_visibility != "visible": diff --git a/gooey_ui/components/__init__.py b/gooey_ui/components/__init__.py new file mode 100644 index 000000000..ce801dbce --- /dev/null +++ b/gooey_ui/components/__init__.py @@ -0,0 +1,852 @@ +import base64 +import math +import textwrap +import typing +from dataclasses import asdict, dataclass + +import numpy as np + +from furl import furl + +from daras_ai.image_input import resize_img_scale +from gooey_ui import state +from gooey_ui.pubsub import md5_values + +T = typing.TypeVar("T") +LabelVisibility = typing.Literal["visible", "collapsed"] + + +def _default_format(value: typing.Any) -> str: + if value is None: + return "---" + return str(value) + + +def dummy(*args, **kwargs): + return state.NestingCtx() + + +spinner = dummy +set_page_config = dummy +form = dummy +plotly_chart = dummy +dataframe = dummy + + +def nav_tabs(): + return _node("nav-tabs") + + +def nav_item(href: str, *, active: bool): + return _node("nav-item", to=href, active="true" if active else None) + + +def nav_tab_content(): + return _node("nav-tab-content") + + +def div(**props) -> state.NestingCtx: + return tag("div", **props) + + +def link(*, to: str, **props) -> state.NestingCtx: + return _node("Link", to=to, **props) + + +def tag(tag_name: str, **props) -> state.NestingCtx: + props["__reactjsxelement"] = tag_name + return _node("tag", **props) + + +def html(body: str, **props): + props["className"] = props.get("className", "") + " gui-html-container" + return _node("html", body=body, **props) + + +def write(*objs: typing.Any, unsafe_allow_html=False, **props): + for obj in objs: + markdown( + obj if isinstance(obj, str) else repr(obj), + unsafe_allow_html=unsafe_allow_html, + **props, + ) + + +def markdown(body: str, *, unsafe_allow_html=False, **props): + if body is None: + return _node("markdown", body="", **props) + props["className"] = ( + props.get("className", "") + " gui-html-container gui-md-container" + ) + return _node("markdown", body=dedent(body).strip(), **props) + + +def _node(name: str, 
**props): + node = state.RenderTreeNode(name=name, props=props) + node.mount() + return state.NestingCtx(node) + + +def text(body: str, *, unsafe_allow_html=False, **props): + state.RenderTreeNode( + name="pre", + props=dict(body=dedent(body), **props), + ).mount() + + +def error(body: str, icon: str = "🔥", *, unsafe_allow_html=False): + if not isinstance(body, str): + body = repr(body) + with div( + style=dict( + backgroundColor="rgba(255, 108, 108, 0.2)", + padding="1rem", + paddingBottom="0", + marginBottom="0.5rem", + borderRadius="0.25rem", + display="flex", + gap="0.5rem", + ) + ): + markdown(icon) + with div(): + markdown(dedent(body), unsafe_allow_html=unsafe_allow_html) + + +def success(body: str, icon: str = "✅", *, unsafe_allow_html=False): + if not isinstance(body, str): + body = repr(body) + with div( + style=dict( + backgroundColor="rgba(108, 255, 108, 0.2)", + padding="1rem", + paddingBottom="0", + marginBottom="0.5rem", + borderRadius="0.25rem", + display="flex", + gap="0.5rem", + ) + ): + markdown(icon) + markdown(dedent(body), unsafe_allow_html=unsafe_allow_html) + + +def caption(body: str, **props): + style = props.setdefault("style", {"fontSize": "0.9rem"}) + markdown(body, className="text-muted", **props) + + +def option_menu(*args, options, **kwargs): + return tabs(options) + + +def tabs(labels: list[str]) -> list[state.NestingCtx]: + parent = state.RenderTreeNode( + name="tabs", + children=[ + state.RenderTreeNode( + name="tab", + props=dict(label=dedent(label)), + ) + for label in labels + ], + ).mount() + return [state.NestingCtx(tab) for tab in parent.children] + + +def controllable_tabs( + labels: list[str], key: str +) -> tuple[list[state.NestingCtx], int]: + index = state.session_state.get(key, 0) + for i, label in enumerate(labels): + if button( + label, + key=f"tab-{i}", + type="primary", + className="replicate-nav", + style={ + "background": "black" if i == index else "white", + "color": "white" if i == index else "black", + }, + ): + state.session_state[key] = index = i + state.experimental_rerun() + ctxs = [] + for i, label in enumerate(labels): + if i == index: + ctxs += [div(className="tab-content")] + else: + ctxs += [div(className="tab-content", style={"display": "none"})] + return ctxs, index + + +def columns( + spec, + *, + gap: str = None, + responsive: bool = True, + **props, +) -> tuple[state.NestingCtx, ...]: + if isinstance(spec, int): + spec = [1] * spec + total_weight = sum(spec) + props.setdefault("className", "row") + with div(**props): + return tuple( + div(className=f"col-lg-{p} {'col-12' if responsive else f'col-{p}'}") + for w in spec + if (p := f"{round(w / total_weight * 12)}") + ) + + +def image( + src: str | np.ndarray, + caption: str = None, + alt: str = None, + **props, +): + if isinstance(src, np.ndarray): + from daras_ai.image_input import cv2_img_to_bytes + + if not src.shape: + return + # ensure image is not too large + data = resize_img_scale(cv2_img_to_bytes(src), (128, 128)) + # convert to base64 + b64 = base64.b64encode(data).decode("utf-8") + src = "data:image/png;base64," + b64 + if not src: + return + state.RenderTreeNode( + name="img", + props=dict( + src=src, + caption=dedent(caption), + alt=alt or caption, + **props, + ), + ).mount() + + +def video(src: str, caption: str = None, autoplay: bool = False): + autoplay_props = {} + if autoplay: + autoplay_props = { + "preload": "auto", + "controls": True, + "autoPlay": True, + "loop": True, + "muted": True, + "playsInline": True, + } + + if not src: + return + if 
isinstance(src, str): + # https://muffinman.io/blog/hack-for-ios-safari-to-display-html-video-thumbnail/ + f = furl(src) + f.fragment.args["t"] = "0.001" + src = f.url + state.RenderTreeNode( + name="video", + props=dict(src=src, caption=dedent(caption), **autoplay_props), + ).mount() + + +def audio(src: str, caption: str = None): + if not src: + return + state.RenderTreeNode( + name="audio", + props=dict(src=src, caption=dedent(caption)), + ).mount() + + +def text_area( + label: str, + value: str = "", + height: int = 100, + key: str = None, + help: str = None, + placeholder: str = None, + disabled: bool = False, + label_visibility: LabelVisibility = "visible", + **props, +) -> str: + style = props.setdefault("style", {}) + if key: + assert not value, "only one of value or key can be provided" + else: + key = md5_values( + "textarea", label, height, help, value, placeholder, label_visibility + ) + value = str(state.session_state.setdefault(key, value)) + if label_visibility != "visible": + label = None + if disabled: + max_height = f"{height}px" + rows = nrows_for_text(value, height, min_rows=1) + else: + max_height = "90vh" + rows = nrows_for_text(value, height) + style.setdefault("maxHeight", max_height) + props.setdefault("rows", rows) + state.RenderTreeNode( + name="textarea", + props=dict( + name=key, + label=dedent(label), + defaultValue=value, + help=help, + placeholder=placeholder, + disabled=disabled, + **props, + ), + ).mount() + return value or "" + + +def nrows_for_text( + text: str, + max_height_px: int, + min_rows: int = 2, + row_height_px: int = 30, + row_width_px: int = 80, +) -> int: + max_rows = max_height_px // row_height_px + nrows = math.ceil( + sum(len(line) / row_width_px for line in (text or "").strip().splitlines()) + ) + nrows = min(max(nrows, min_rows), max_rows) + return nrows + + +def multiselect( + label: str, + options: typing.Sequence[T], + format_func: typing.Callable[[T], typing.Any] = _default_format, + key: str = None, + help: str = None, + allow_none: bool = False, + *, + disabled: bool = False, +) -> list[T]: + if not options: + return [] + options = list(options) + if not key: + key = md5_values("multiselect", label, options, help) + value = state.session_state.get(key) or [] + if not isinstance(value, list): + value = [value] + value = [o if o in options else options[0] for o in value] + if not allow_none and not value: + value = [options[0]] + state.session_state[key] = value + state.RenderTreeNode( + name="select", + props=dict( + name=key, + label=dedent(label), + help=help, + isDisabled=disabled, + isMulti=True, + defaultValue=value, + allow_none=allow_none, + options=[ + {"value": option, "label": str(format_func(option))} + for option in options + ], + ), + ).mount() + return value + + +def selectbox( + label: str, + options: typing.Sequence[T], + format_func: typing.Callable[[T], typing.Any] = _default_format, + key: str = None, + help: str = None, + *, + disabled: bool = False, + label_visibility: LabelVisibility = "visible", + default_value: T = None, +) -> T | None: + if not options: + return None + if label_visibility != "visible": + label = None + options = list(options) + if not key: + key = md5_values("select", label, options, help, label_visibility) + value = state.session_state.get(key) + if key not in state.session_state or value not in options: + value = default_value or options[0] + state.session_state.setdefault(key, value) + state.RenderTreeNode( + name="select", + props=dict( + name=key, + label=dedent(label), + help=help, + 
isDisabled=disabled, + defaultValue=value, + options=[ + {"value": option, "label": str(format_func(option))} + for option in options + ], + ), + ).mount() + return value + + +@dataclass +class Option: + label: str + value: typing.Any = None + isDisabled: bool = False + + def __post_init__(self): + if self.value is None: + self.value = self.label + + +def rich_selectbox( + label: str, + options: typing.Sequence[Option], + key: str | None = None, + help: str | None = None, + *, + disabled: bool = False, + label_visibility: LabelVisibility = "visible", + default_value: T | None = None, +) -> T | None: + if label_visibility != "visible": + label = None + options = list(options) + if not key: + key = md5_values("rich_select", label, options, help, label_visibility) + value = state.session_state.get(key) + if key not in state.session_state or value not in options: + value = default_value or options[0] + state.session_state.setdefault(key, value) + state.RenderTreeNode( + name="select", + props=dict( + name=key, + label=dedent(label), + help=help, + isDisabled=disabled, + defaultValue=value, + options=[asdict(option) for option in options], + ), + ).mount() + return value + + +def button( + label: str, + key: str = None, + help: str = None, + *, + type: typing.Literal["primary", "secondary", "tertiary", "link"] = "secondary", + disabled: bool = False, + **props, +) -> bool: + """ + Example: + st.button("Primary", key="test0", type="primary") + st.button("Secondary", key="test1") + st.button("Tertiary", key="test3", type="tertiary") + st.button("Link Button", key="test3", type="link") + """ + if not key: + key = md5_values("button", label, help, type, props) + className = f"btn-{type} " + props.pop("className", "") + state.RenderTreeNode( + name="gui-button", + props=dict( + type="submit", + value="yes", + name=key, + label=dedent(label), + help=help, + disabled=disabled, + className=className, + **props, + ), + ).mount() + return bool(state.session_state.pop(key, False)) + + +form_submit_button = button + + +def expander(label: str, *, expanded: bool = False, **props): + node = state.RenderTreeNode( + name="expander", + props=dict( + label=dedent(label), + open=expanded, + **props, + ), + ) + node.mount() + return state.NestingCtx(node) + + +def file_uploader( + label: str, + accept: list[str] = None, + accept_multiple_files=False, + key: str = None, + upload_key: str = None, + help: str = None, + *, + disabled: bool = False, + label_visibility: LabelVisibility = "visible", + upload_meta: dict = None, +): + if label_visibility != "visible": + label = None + key = upload_key or key + if not key: + key = md5_values( + "file_uploader", + label, + accept, + accept_multiple_files, + help, + label_visibility, + ) + value = state.session_state.get(key) + if not value: + if accept_multiple_files: + value = [] + else: + value = "" + state.session_state[key] = value + state.RenderTreeNode( + name="input", + props=dict( + type="file", + name=key, + label=dedent(label), + help=help, + disabled=disabled, + accept=accept, + multiple=accept_multiple_files, + defaultValue=value, + uploadMeta=upload_meta, + ), + ).mount() + return value or "" + + +def json(value: typing.Any, expanded: bool = False, depth: int = 1): + state.RenderTreeNode( + name="json", + props=dict( + value=value, + expanded=expanded, + defaultInspectDepth=3 if expanded else depth, + ), + ).mount() + + +def data_table(file_url: str): + return _node("data-table", fileUrl=file_url) + + +def table(df: "pd.DataFrame"): + state.RenderTreeNode( + 
name="table", + children=[ + state.RenderTreeNode( + name="thead", + children=[ + state.RenderTreeNode( + name="tr", + children=[ + state.RenderTreeNode( + name="th", + children=[ + state.RenderTreeNode( + name="markdown", + props=dict(body=dedent(col)), + ), + ], + ) + for col in df.columns + ], + ), + ], + ), + state.RenderTreeNode( + name="tbody", + children=[ + state.RenderTreeNode( + name="tr", + children=[ + state.RenderTreeNode( + name="td", + children=[ + state.RenderTreeNode( + name="markdown", + props=dict(body=dedent(str(value))), + ), + ], + ) + for value in row + ], + ) + for row in df.itertuples(index=False) + ], + ), + ], + ).mount() + + +def radio( + label: str, + options: typing.Sequence[T], + format_func: typing.Callable[[T], typing.Any] = _default_format, + key: str = None, + help: str = None, + *, + disabled: bool = False, + checked_by_default: bool = True, + label_visibility: LabelVisibility = "visible", +) -> T | None: + if not options: + return None + options = list(options) + if not key: + key = md5_values("radio", label, options, help, label_visibility) + value = state.session_state.get(key) + if (key not in state.session_state or value not in options) and checked_by_default: + value = options[0] + state.session_state.setdefault(key, value) + if label_visibility != "visible": + label = None + markdown(label) + for option in options: + state.RenderTreeNode( + name="input", + props=dict( + type="radio", + name=key, + label=dedent(str(format_func(option))), + value=option, + defaultChecked=bool(value == option), + help=help, + disabled=disabled, + ), + ).mount() + return value + + +def text_input( + label: str, + value: str = "", + max_chars: str = None, + key: str = None, + help: str = None, + *, + placeholder: str = None, + disabled: bool = False, + label_visibility: LabelVisibility = "visible", + **props, +) -> str: + value = _input_widget( + input_type="text", + label=label, + value=value, + key=key, + help=help, + disabled=disabled, + label_visibility=label_visibility, + maxLength=max_chars, + placeholder=placeholder, + **props, + ) + return value or "" + + +def password_input( + label: str, + value: str = "", + max_chars: str = None, + key: str = None, + help: str = None, + *, + placeholder: str = None, + disabled: bool = False, + label_visibility: LabelVisibility = "visible", + **props, +) -> str: + value = _input_widget( + input_type="password", + label=label, + value=value, + key=key, + help=help, + disabled=disabled, + label_visibility=label_visibility, + maxLength=max_chars, + placeholder=placeholder, + **props, + ) + return value or "" + + +def slider( + label: str, + min_value: float = None, + max_value: float = None, + value: float = None, + step: float = None, + key: str = None, + help: str = None, + *, + disabled: bool = False, +) -> float: + value = _input_widget( + input_type="range", + label=label, + value=value, + key=key, + help=help, + disabled=disabled, + min=min_value, + max=max_value, + step=_step_value(min_value, max_value, step), + ) + return value or 0 + + +def number_input( + label: str, + min_value: float = None, + max_value: float = None, + value: float = None, + step: float = None, + key: str = None, + help: str = None, + *, + disabled: bool = False, +) -> float: + value = _input_widget( + input_type="number", + inputMode="decimal", + label=label, + value=value, + key=key, + help=help, + disabled=disabled, + min=min_value, + max=max_value, + step=_step_value(min_value, max_value, step), + ) + return value or 0 + + +def _step_value( + 
min_value: float | None, max_value: float | None, step: float | None +) -> float: + if step: + return step + elif isinstance(min_value, float) or isinstance(max_value, float): + return 0.1 + else: + return 1 + + +def checkbox( + label: str, + value: bool = False, + key: str = None, + help: str = None, + *, + disabled: bool = False, + label_visibility: LabelVisibility = "visible", +) -> bool: + value = _input_widget( + input_type="checkbox", + label=label, + value=value, + key=key, + help=help, + disabled=disabled, + label_visibility=label_visibility, + default_value_attr="defaultChecked", + ) + return bool(value) + + +def _input_widget( + *, + input_type: str, + label: str, + value: typing.Any = None, + key: str = None, + help: str = None, + disabled: bool = False, + label_visibility: LabelVisibility = "visible", + default_value_attr: str = "defaultValue", + **kwargs, +) -> typing.Any: + # if key: + # assert not value, "only one of value or key can be provided" + # else: + if not key: + key = md5_values("input", input_type, label, help, label_visibility) + value = state.session_state.setdefault(key, value) + if label_visibility != "visible": + label = None + state.RenderTreeNode( + name="input", + props={ + "type": input_type, + "name": key, + "label": dedent(label), + default_value_attr: value, + "help": help, + "disabled": disabled, + **kwargs, + }, + ).mount() + return value + + +def breadcrumbs(divider: str = "/", **props) -> state.NestingCtx: + style = props.pop("style", {}) | {"--bs-breadcrumb-divider": f"'{divider}'"} + with tag("nav", style=style, **props): + return tag("ol", className="breadcrumb mb-0") + + +def breadcrumb_item(inner_html: str, link_to: str | None = None, **props): + className = "breadcrumb-item lead " + props.pop("className", "") + with tag("li", className=className, **props): + if link_to: + with tag("a", href=link_to): + html(inner_html) + else: + html(inner_html) + + +def dedent(text: str | None) -> str | None: + if not text: + return text + return textwrap.dedent(text) + + +def js(src: str, **kwargs): + state.RenderTreeNode( + name="script", + props=dict( + src=src, + args=kwargs, + ), + ).mount() diff --git a/gooey_ui/components/modal.py b/gooey_ui/components/modal.py new file mode 100644 index 000000000..fa9b905a6 --- /dev/null +++ b/gooey_ui/components/modal.py @@ -0,0 +1,169 @@ +from contextlib import contextmanager + +import gooey_ui as st +from gooey_ui import experimental_rerun as rerun + + +class Modal: + def __init__(self, title, key, padding=20, max_width=744): + """ + :param title: title of the Modal shown in the h1 + :param key: unique key identifying this modal instance + :param padding: padding of the content within the modal + :param max_width: maximum width this modal should use + """ + self.title = title + self.padding = padding + self.max_width = str(max_width) + "px" + self.key = key + + def is_open(self): + return st.session_state.get(f"{self.key}-opened", False) + + def open(self): + st.session_state[f"{self.key}-opened"] = True + rerun() + + def close(self, rerun_condition=True): + st.session_state[f"{self.key}-opened"] = False + if rerun_condition: + rerun() + + @contextmanager + def container(self, **props): + st.html( + f""" + + """ + ) + + with st.div(className="blur-background"): + with st.div(className="modal-parent"): + container_class = "modal-container " + props.pop("className", "") + container = st.div(className=container_class, **props) + + with container: + with st.div(className="d-flex justify-content-between 
align-items-center"): + st.markdown(f"## {self.title or ''}") + + close_ = st.button( + "✖", + key=f"{self.key}-close", + style={"padding": "0.375rem 0.75rem"}, + ) + if close_: + self.close() + yield + + return + + st.markdown( + f""" + + """, + unsafe_allow_html=True, + ) + with st.div(className="container"): + _container = st.div(className="container") + if self.title: + with _container: + st.markdown(f"

<h2>{self.title}</h2>

", unsafe_allow_html=True) + + close_ = st.button("✖", key=f"{self.key}-close") + if close_: + self.close() + + with _container: + yield _container From 6e96a0a81c24bf28cdbee259364f1ff3361da88b Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 28 Nov 2023 21:01:21 +0530 Subject: [PATCH 019/138] Add published run UX --- ...y_savedrun_is_approved_example_and_more.py | 54 +++++++ bots/models.py | 36 +++++ daras_ai_v2/base.py | 149 +++++++++++++----- 3 files changed, 201 insertions(+), 38 deletions(-) create mode 100644 bots/migrations/0047_savedrun_created_by_savedrun_is_approved_example_and_more.py diff --git a/bots/migrations/0047_savedrun_created_by_savedrun_is_approved_example_and_more.py b/bots/migrations/0047_savedrun_created_by_savedrun_is_approved_example_and_more.py new file mode 100644 index 000000000..df1f94b17 --- /dev/null +++ b/bots/migrations/0047_savedrun_created_by_savedrun_is_approved_example_and_more.py @@ -0,0 +1,54 @@ +# Generated by Django 4.2.5 on 2023-11-27 15:26 + +from django.db import migrations, models +import django.db.models.deletion + +from bots.models import PublishedRunVisibility + + +def set_defaults_for_gooey_examples(apps, schema_editor): + # bots->SavedRun + # if example_id is not null, + # set is_approved_example to True and visibility to Public + model = apps.get_model("bots", "SavedRun") + db_alias = schema_editor.connection.alias + + model.objects.using(db_alias).filter(example_id__isnull=False).update( + is_approved_example=True, + visibility=PublishedRunVisibility.PUBLIC, + ) + + +class Migration(migrations.Migration): + dependencies = [ + ("app_users", "0010_alter_appuser_balance_alter_appuser_created_at_and_more"), + ("bots", "0046_savedrun_bots_savedr_created_cb8e09_idx_and_more"), + ] + + operations = [ + migrations.AddField( + model_name="savedrun", + name="created_by", + field=models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="examples_created", + to="app_users.appuser", + ), + ), + migrations.AddField( + model_name="savedrun", + name="is_approved_example", + field=models.BooleanField(default=False), + ), + migrations.AddField( + model_name="savedrun", + name="visibility", + field=models.IntegerField( + choices=[(1, "Unlisted"), (2, "Public")], default=1 + ), + ), + migrations.RunPython( + set_defaults_for_gooey_examples, + ), + ] diff --git a/bots/models.py b/bots/models.py index 937f26e70..afe41d270 100644 --- a/bots/models.py +++ b/bots/models.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import datetime import typing from multiprocessing.pool import ThreadPool @@ -27,6 +29,20 @@ EPOCH = datetime.datetime.utcfromtimestamp(0) +class PublishedRunVisibility(models.IntegerChoices): + UNLISTED = 1 + PUBLIC = 2 + + def help_text(self): + match self: + case PublishedRunVisibility.UNLISTED: + return "Only me + people with a link" + case PublishedRunVisibility.PUBLIC: + return "Public" + case _: + return self.label + + class Platform(models.IntegerChoices): FACEBOOK = 1 INSTAGRAM = (2, "Instagram & FB") @@ -130,6 +146,13 @@ class SavedRun(models.Model): run_id = models.CharField(max_length=128, default=None, null=True, blank=True) uid = models.CharField(max_length=128, default=None, null=True, blank=True) + created_by = models.ForeignKey( + "app_users.AppUser", + on_delete=models.SET_NULL, + null=True, + related_name="examples_created", + ) + state = models.JSONField(default=dict, blank=True, encoder=PostgresJSONEncoder) error_msg = 
models.TextField(default="", blank=True) @@ -140,6 +163,11 @@ class SavedRun(models.Model): hidden = models.BooleanField(default=False) is_flagged = models.BooleanField(default=False) + visibility = models.IntegerField( + choices=PublishedRunVisibility.choices, + default=PublishedRunVisibility.UNLISTED, + ) + is_approved_example = models.BooleanField(default=False) price = models.IntegerField(default=0) transaction = models.ForeignKey( @@ -265,6 +293,14 @@ def submit_api_call( ) return result, page.run_doc_sr(run_id, uid) + def get_creator(self) -> AppUser | None: + if self.created_by: + return self.created_by + elif self.uid: + return AppUser.objects.filter(uid=self.uid).first() + else: + return None + @admin.display(description="Open in Gooey") def open_in_gooey(self): return open_in_new_tab(self.get_app_url(), label=self.get_app_url()) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 7f20c36a8..feb8305c2 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -25,7 +25,7 @@ import gooey_ui as st from app_users.models import AppUser, AppUserTransaction -from bots.models import SavedRun, Workflow +from bots.models import SavedRun, PublishedRunVisibility, Workflow from daras_ai.image_input import truncate_text_words from daras_ai_v2 import settings from daras_ai_v2.api_examples_widget import api_example_generator @@ -56,6 +56,7 @@ from daras_ai_v2.user_date_widgets import render_js_dynamic_dates, js_dynamic_date from gooey_ui import realtime_clear_subs from gooey_ui.pubsub import realtime_pull +from gooey_ui.components.modal import Modal DEFAULT_META_IMG = ( # Small @@ -181,7 +182,7 @@ def render(self): self._render_page_title_with_breadcrumbs(example_id, run_id, uid) st.write(st.session_state.get(StateKeys.page_notes)) with st.div(): - self._render_save_menu() + self._render_publish_menu() try: selected_tab = MenuTabs.paths_reverse[self.tab] @@ -201,47 +202,81 @@ def render(self): with st.nav_tab_content(): self.render_selected_tab(selected_tab) - def _render_save_menu(self): - if not self.is_current_user_owner(): + def _render_publish_menu(self): + if not self.request or not self.request.user: return - with st.div(className="d-flex justify-content-end"): - save_button_space, cancel_button_space = st.tag("span"), st.tag("span") - with save_button_space: - save_button = st.button("💾 Save", className="mb-0") - with cancel_button_space: - cancel_button = st.button("❌ Cancel", className="mb-0") - if save_button or cancel_button: - st.session_state["__save_mode"] = not st.session_state.get( - "__save_mode", False - ) + example_id, run_id, uid = extract_query_params(gooey_get_query_params()) + current_run = self.get_sr_from_query_params(example_id, run_id, uid) - is_save_mode = st.session_state.get("__save_mode") - if not is_save_mode: - cancel_button_space.empty() - else: - save_button_space.empty() + if current_run.get_creator() != self.request.user: + return + + published_run = self.example_doc_sr(example_id) if example_id else None + is_update_mode = bool( + published_run + and ( + published_run.get_creator() == self.request.user + or self.is_current_user_admin() + ) + ) + + with st.div(): with st.div(className="d-flex justify-content-end"): + # if published_run and is_update_mode and current_run != published_run: + # st.caption("Unpublished changes") st.html( """ """ ) - with st.div(className="bg-light border p-4 save-button-menu"): - st.radio( + + save_text = "📝 Update" if is_update_mode else "💾 Save" + save_button = st.button( + save_text, + className="mb-0", + 
type="primary", + ) + publish_modal = Modal("", key="publish-modal") + if save_button: + publish_modal.open() + + published_run_options_button = ( + st.button("⋮", className="mb-0", type="secondary") + if is_update_mode + else None + ) + if published_run_options_button: + st.session_state[ + "__published_run_options" + ] = not st.session_state.get( + "__published_run_options", + False, + ) + + show_published_run_options = st.session_state.get("__published_run_options") + if show_published_run_options: + # with st.div(className="d-flex justify-content-end"): + with st.div( + className="bg-white border p-4 published-options-menu w-100" + ): + st.button("Delete") + + if publish_modal.is_open(): + with publish_modal.container(style={"min-width": "min(500px, 100vw)"}): + with st.div(className="visibility-radio"): + published_run_visibility = st.radio( "Publish to", - options=[ - "Only me + people with a link", - "Public", - ], + options=PublishedRunVisibility.values, + format_func=lambda x: PublishedRunVisibility(x).help_text(), ) st.radio( "", @@ -251,15 +286,51 @@ def _render_save_menu(self): disabled=True, checked_by_default=False, ) - with st.div(className="mt-4"): - st.text_input( - "Title", - key="published_run_title", - value=st.session_state[StateKeys.page_title], - ) - with st.div(className="mt-4 d-flex justify-content-center"): - publish_button = st.button("🌻 Publish") + with st.div(className="mt-4"): + recipe_title = ( + self.recipe_doc_sr().to_dict().get(StateKeys.page_title) + or self.title + ) + default_title = ( + published_run.page_title + if is_update_mode + else st.session_state[StateKeys.page_title] + ) + if default_title == recipe_title: + default_title = "" + published_run_title = st.text_input( + "Title", + key="published_run_title", + value=default_title, + ) + published_run_notes = st.text_area( + "Notes", + value=published_run.page_notes + if is_update_mode + else st.session_state[StateKeys.page_notes].strip(), + ) + + with st.div(className="mt-4 d-flex justify-content-center"): + publish_button = st.button("🌻 Publish", type="primary") + + if publish_button: + doc = current_run.to_dict() + doc[StateKeys.page_title] = published_run_title + doc[StateKeys.page_notes] = published_run_notes + if not is_update_mode: + published_run = self.example_doc_sr( + get_random_doc_id(), create=True + ) + published_run.created_by = self.request.user + published_run.set(doc) + if current_run != published_run: + published_run.parent = current_run + published_run.visibility = published_run_visibility + published_run.save() + raise QueryParamsRedirectException( + query_params=dict(example_id=published_run.example_id), + ) def _render_page_title_with_breadcrumbs( self, example_id: str, run_id: str, uid: str @@ -1014,6 +1085,8 @@ def _render(sr: SavedRun): example_runs = SavedRun.objects.filter( workflow=self.workflow, hidden=False, + is_approved_example=True, + visibility=PublishedRunVisibility.PUBLIC, example_id__isnull=False, )[:50] From 8926c16cfcf979844ec06614095814c4b00c3ca7 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 28 Nov 2023 21:25:03 +0530 Subject: [PATCH 020/138] Return created_at/updated_at as datetime from db models --- bots/models.py | 4 ++-- daras_ai_v2/base.py | 22 ++++++++++++++++++++++ recipes/DeforumSD.py | 15 --------------- 3 files changed, 24 insertions(+), 17 deletions(-) diff --git a/bots/models.py b/bots/models.py index 155a1c40a..b527cd998 100644 --- a/bots/models.py +++ b/bots/models.py @@ -189,9 
+189,9 @@ def to_dict(self) -> dict: ret = self.state.copy() if self.updated_at: - ret[StateKeys.updated_at] = self.updated_at.isoformat() + ret[StateKeys.updated_at] = self.updated_at if self.created_at: - ret[StateKeys.created_at] = self.created_at.isoformat() + ret[StateKeys.created_at] = self.created_at if self.error_msg: ret[StateKeys.error_msg] = self.error_msg if self.run_time: diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 58f6ae427..3c80f6f57 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -698,6 +698,28 @@ def _render_waiting_output(self): self.render_extra_waiting_output() def render_extra_waiting_output(self): + estimated_run_time = self.estimate_run_duration() + if not estimated_run_time: + return + if created_at := st.session_state.get("created_at"): + if isinstance(created_at, datetime.datetime): + start_time = created_at + else: + start_time = datetime.fromisoformat(created_at) + with st.countdown_timer( + end_time=start_time + datetime.timedelta(seconds=estimated_run_time), + delay_text="Sorry for the wait. Your run is taking longer than we expected.", + ): + if self.is_current_user_owner() and self.request.user.email: + st.write( + f"""We'll email **{self.request.user.email}** when your workflow is done.""" + ) + st.write( + f"""In the meantime, check out [🚀 Examples]({self.get_tab_url(MenuTabs.examples)}) + for inspiration.""" + ) + + def estimate_run_duration(self) -> int | None: pass def on_submit(self): diff --git a/recipes/DeforumSD.py b/recipes/DeforumSD.py index c045f2966..5e4ae2c88 100644 --- a/recipes/DeforumSD.py +++ b/recipes/DeforumSD.py @@ -420,21 +420,6 @@ def render_output(self): st.write("Output Video") st.video(output_video, autoplay=True) - def render_extra_waiting_output(self): - if created_at := st.session_state.get("created_at"): - start_time = datetime.fromisoformat(created_at) - with st.countdown_timer( - end_time=start_time + timedelta(seconds=self.estimate_run_duration()), - delay_text="Sorry for the wait. Your run is taking longer than we expected.", - ): - if self.is_current_user_owner() and self.request.user.email: - st.write( - f"""We'll email **{self.request.user.email}** when your workflow is done.""" - ) - st.write( - f"""In the meantime, check out [🚀 Examples]({self.get_tab_url(MenuTabs.examples)}) for inspiration.""" - ) - def estimate_run_duration(self): # in seconds return st.session_state.get("max_frames", 100) * MODEL_ESTIMATED_TIME_PER_FRAME From 8a09bd6e3bde9452d358788d35dec4102bb13b66 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 28 Nov 2023 21:54:34 +0530 Subject: [PATCH 021/138] Fix datetime usage in bsae.py --- daras_ai_v2/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 3c80f6f57..e0e9effba 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -705,7 +705,7 @@ def render_extra_waiting_output(self): if isinstance(created_at, datetime.datetime): start_time = created_at else: - start_time = datetime.fromisoformat(created_at) + start_time = datetime.datetime.fromisoformat(created_at) with st.countdown_timer( end_time=start_time + datetime.timedelta(seconds=estimated_run_time), delay_text="Sorry for the wait. 
Your run is taking longer than we expected.", From 2a9c2cbd7f71861c08f40a802a0472c9c35230d7 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 28 Nov 2023 22:01:22 +0530 Subject: [PATCH 022/138] Use recipe run state enum --- daras_ai_v2/base.py | 38 +++++++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index e0e9effba..a76ff1a6f 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -14,6 +14,7 @@ import requests import sentry_sdk from django.utils import timezone +from enum import Enum from fastapi import HTTPException from firebase_admin import auth from furl import furl @@ -70,6 +71,13 @@ SUBMIT_AFTER_LOGIN_Q = "submitafterlogin" +class RecipeRunState(Enum): + idle = 1 + running = 2 + completed = 3 + failed = 4 + + class StateKeys: page_title = "__title" page_notes = "__notes" @@ -642,16 +650,16 @@ def _render_input_col(self): def get_run_state( self, - ) -> typing.Literal["success", "error", "waiting", "recipe_root"]: + ) -> RecipeRunState: if st.session_state.get(StateKeys.run_status): - return "waiting" + return RecipeRunState.running elif st.session_state.get(StateKeys.error_msg): - return "error" + return RecipeRunState.failed elif st.session_state.get(StateKeys.run_time): - return "success" + return RecipeRunState.completed else: # when user is at a recipe root, and not running anything - return "recipe_root" + return RecipeRunState.idle def _render_output_col(self, submitted: bool): assert inspect.isgeneratorfunction(self.run) @@ -668,13 +676,13 @@ def _render_output_col(self, submitted: bool): run_state = self.get_run_state() match run_state: - case "success": - self._render_success_output() - case "error": - self._render_error_output() - case "waiting": - self._render_waiting_output() - case "recipe_root": + case RecipeRunState.completed: + self._render_completed_output() + case RecipeRunState.failed: + self._render_failed_output() + case RecipeRunState.running: + self._render_running_output() + case RecipeRunState.idle: pass # render outputs @@ -683,15 +691,15 @@ def _render_output_col(self, submitted: bool): if run_state != "waiting": self._render_after_output() - def _render_success_output(self): + def _render_completed_output(self): run_time = st.session_state.get(StateKeys.run_time, 0) st.success(f"Success! Run Time: `{run_time:.2f}` seconds.") - def _render_error_output(self): + def _render_failed_output(self): err_msg = st.session_state.get(StateKeys.error_msg) st.error(err_msg) - def _render_waiting_output(self): + def _render_running_output(self): run_status = st.session_state.get(StateKeys.run_status) st.caption("Your changes are saved in the above URL. 
Save it for later!") html_spinner(run_status) From 4d46ed121143c44fdf0e44ec85c89d6ff397d698 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 28 Nov 2023 22:34:51 +0530 Subject: [PATCH 023/138] Format base.py better --- daras_ai_v2/base.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 62f0ef28a..76733eff6 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -706,9 +706,7 @@ def _render_input_col(self): ) return submitted - def get_run_state( - self, - ) -> RecipeRunState: + def get_run_state(self) -> RecipeRunState: if st.session_state.get(StateKeys.run_status): return RecipeRunState.running elif st.session_state.get(StateKeys.error_msg): From 2afa4e2df9a315102946fa0a38705620b5932253 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Wed, 29 Nov 2023 16:36:40 +0530 Subject: [PATCH 024/138] Fix modal component: close button and padding --- gooey_ui/components/modal.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/gooey_ui/components/modal.py b/gooey_ui/components/modal.py index fa9b905a6..fee4094c0 100644 --- a/gooey_ui/components/modal.py +++ b/gooey_ui/components/modal.py @@ -57,7 +57,7 @@ def container(self, **props): }} .modal-container {{ overflow-y: scroll; - padding: 3rem; + padding: 1.5rem; margin: auto; background: white; z-index: 3000; @@ -74,10 +74,11 @@ def container(self, **props): with container: with st.div(className="d-flex justify-content-between align-items-center"): - st.markdown(f"## {self.title or ''}") + st.markdown(f"### {self.title or ''}") close_ = st.button( "✖", + type="tertiary", key=f"{self.key}-close", style={"padding": "0.375rem 0.75rem"}, ) From 04c57132a66807f6d730bfe916cf0b52a5e2f213 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Wed, 29 Nov 2023 16:38:54 +0530 Subject: [PATCH 025/138] Fix default title for published run --- daras_ai_v2/base.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index feb8305c2..0868f827f 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -273,8 +273,10 @@ def _render_publish_menu(self): if publish_modal.is_open(): with publish_modal.container(style={"min-width": "min(500px, 100vw)"}): with st.div(className="visibility-radio"): + st.write("### Publish to") published_run_visibility = st.radio( - "Publish to", + "", + key="published_run_visibility", options=PublishedRunVisibility.values, format_func=lambda x: PublishedRunVisibility(x).help_text(), ) @@ -295,10 +297,8 @@ def _render_publish_menu(self): default_title = ( published_run.page_title if is_update_mode - else st.session_state[StateKeys.page_title] + else f"{self.request.user.display_name}'s {recipe_title}" ) - if default_title == recipe_title: - default_title = "" published_run_title = st.text_input( "Title", key="published_run_title", From c671a35c4cecd851d776ce90fd17e6e2679a10de Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Wed, 29 Nov 2023 16:40:26 +0530 Subject: [PATCH 026/138] Fix type-casting for published-run visibility --- daras_ai_v2/base.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 0868f827f..51b986fe4 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -274,6 +274,13 @@ def 
_render_publish_menu(self): with publish_modal.container(style={"min-width": "min(500px, 100vw)"}): with st.div(className="visibility-radio"): st.write("### Publish to") + convert_state_type( + st.session_state, "published_run_visibility", int + ) + if is_update_mode: + st.session_state.setdefault( + "published_run_visibility", published_run.visibility + ) published_run_visibility = st.radio( "", key="published_run_visibility", @@ -1415,3 +1422,8 @@ def __init__(self, query_params: dict, status_code=303): query_params = {k: v for k, v in query_params.items() if v is not None} url = "?" + urllib.parse.urlencode(query_params) super().__init__(url, status_code) + + +def convert_state_type(state, key, fn): + if key in state: + state[key] = fn(state[key]) From 240c0df9068d04ca77a49f6240af51ec847fb57b Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Wed, 29 Nov 2023 16:56:32 +0530 Subject: [PATCH 027/138] Reorder update/three-dots UI --- daras_ai_v2/base.py | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 51b986fe4..9f1dd7310 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -239,6 +239,16 @@ def _render_publish_menu(self): """ ) + published_run_options_button = ( + st.button("⋮", className="mb-0", type="secondary") + if is_update_mode + else None + ) + if published_run_options_button: + st.session_state[ + "__published_run_options" + ] = not st.session_state.get("__published_run_options", False) + save_text = "📝 Update" if is_update_mode else "💾 Save" save_button = st.button( save_text, @@ -249,19 +259,6 @@ def _render_publish_menu(self): if save_button: publish_modal.open() - published_run_options_button = ( - st.button("⋮", className="mb-0", type="secondary") - if is_update_mode - else None - ) - if published_run_options_button: - st.session_state[ - "__published_run_options" - ] = not st.session_state.get( - "__published_run_options", - False, - ) - show_published_run_options = st.session_state.get("__published_run_options") if show_published_run_options: # with st.div(className="d-flex justify-content-end"): @@ -290,7 +287,7 @@ def _render_publish_menu(self): st.radio( "", options=[ - "Anyone at my org (coming soon)", + 'Anyone at my org (coming soon)', ], disabled=True, checked_by_default=False, From f4a9172a18dda66889b3d12755aa72872856264a Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Tue, 21 Nov 2023 20:17:16 +0530 Subject: [PATCH 028/138] azure form recognizer & gpt-4-v support --- README.md | 1 + bots/admin.py | 9 +- ...achment_alter_feedback_options_and_more.py | 64 +++ .../0048_alter_messageattachment_url.py | 18 + bots/models.py | 45 +- daras_ai_v2/azure_doc_extract.py | 18 +- daras_ai_v2/bots.py | 108 +++-- daras_ai_v2/doc_search_settings_widgets.py | 46 +- daras_ai_v2/facebook_bots.py | 35 +- daras_ai_v2/field_render.py | 13 + daras_ai_v2/language_model.py | 58 ++- .../language_model_settings_widgets.py | 16 +- daras_ai_v2/serp_search_locations.py | 5 +- recipes/BulkRunner.py | 5 +- recipes/VideoBots.py | 448 +++++++++++------- 15 files changed, 623 insertions(+), 266 deletions(-) create mode 100644 bots/migrations/0047_messageattachment_alter_feedback_options_and_more.py create mode 100644 bots/migrations/0048_alter_messageattachment_url.py create mode 100644 daras_ai_v2/field_render.py diff --git a/README.md b/README.md index a0a41c537..6398e12ac 100644 --- a/README.md +++ b/README.md @@ -166,3 +166,4 @@ echo 
$PWD/fixture.json createdb -T template0 $PGDATABASE pg_dump $SOURCE_DATABASE | psql -q $PGDATABASE ``` + diff --git a/bots/admin.py b/bots/admin.py index f1d924434..c3ba3cfe2 100644 --- a/bots/admin.py +++ b/bots/admin.py @@ -21,6 +21,7 @@ Feedback, Conversation, BotIntegration, + MessageAttachment, ) from app_users.models import AppUser from bots.tasks import create_personal_channels_for_all_members @@ -360,6 +361,12 @@ class FeedbackInline(admin.TabularInline): readonly_fields = ["created_at"] +class MessageAttachmentInline(admin.TabularInline): + model = MessageAttachment + extra = 0 + readonly_fields = ["url", "metadata", "created_at"] + + class AnalysisResultFilter(admin.SimpleListFilter): title = "analysis_result" parameter_name = "analysis_result" @@ -419,7 +426,7 @@ class MessageAdmin(admin.ModelAdmin): ordering = ["created_at"] actions = [export_to_csv, export_to_excel] - inlines = [FeedbackInline] + inlines = [MessageAttachmentInline, FeedbackInline] formfield_overrides = { django.db.models.JSONField: {"widget": JSONEditorWidget}, diff --git a/bots/migrations/0047_messageattachment_alter_feedback_options_and_more.py b/bots/migrations/0047_messageattachment_alter_feedback_options_and_more.py new file mode 100644 index 000000000..f8e9d8910 --- /dev/null +++ b/bots/migrations/0047_messageattachment_alter_feedback_options_and_more.py @@ -0,0 +1,64 @@ +# Generated by Django 4.2.5 on 2023-11-22 13:45 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + dependencies = [ + ("files", "0001_initial"), + ("bots", "0046_savedrun_bots_savedr_created_cb8e09_idx_and_more"), + ] + + operations = [ + migrations.CreateModel( + name="MessageAttachment", + fields=[ + ( + "id", + models.BigAutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("url", models.TextField()), + ("created_at", models.DateTimeField(auto_now_add=True, db_index=True)), + ], + options={ + "ordering": ["created_at"], + }, + ), + migrations.AlterModelOptions( + name="feedback", + options={"get_latest_by": "created_at", "ordering": ["-created_at"]}, + ), + migrations.AddIndex( + model_name="feedback", + index=models.Index( + fields=["-created_at"], name="bots_feedba_created_fbd16a_idx" + ), + ), + migrations.AddField( + model_name="messageattachment", + name="message", + field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="attachments", + to="bots.message", + ), + ), + migrations.AddField( + model_name="messageattachment", + name="metadata", + field=models.ForeignKey( + blank=True, + default=None, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="message_attachments", + to="files.filemetadata", + ), + ), + ] diff --git a/bots/migrations/0048_alter_messageattachment_url.py b/bots/migrations/0048_alter_messageattachment_url.py new file mode 100644 index 000000000..8b5774643 --- /dev/null +++ b/bots/migrations/0048_alter_messageattachment_url.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.5 on 2023-11-25 12:38 + +import bots.custom_fields +from django.db import migrations + + +class Migration(migrations.Migration): + dependencies = [ + ("bots", "0047_messageattachment_alter_feedback_options_and_more"), + ] + + operations = [ + migrations.AlterField( + model_name="messageattachment", + name="url", + field=bots.custom_fields.CustomURLField(max_length=2048), + ), + ] diff --git a/bots/models.py b/bots/models.py index 937f26e70..07d52c4d3 100644 --- 
a/bots/models.py +++ b/bots/models.py @@ -14,7 +14,8 @@ from app_users.models import AppUser from bots.admin_links import open_in_new_tab -from bots.custom_fields import PostgresJSONEncoder +from bots.custom_fields import PostgresJSONEncoder, CustomURLField +from daras_ai_v2.language_model import format_chat_entry if typing.TYPE_CHECKING: from daras_ai_v2.base import BasePage @@ -701,6 +702,17 @@ def to_df(self, tz=pytz.timezone(settings.TIME_ZONE)) -> "pd.DataFrame": df = pd.DataFrame.from_records(rows) return df + def as_llm_context(self, limit: int = 100) -> list["ConversationEntry"]: + msgs = self.order_by("-created_at").prefetch_related("attachments")[:limit] + entries = [None] * len(msgs) + for i, msg in enumerate(reversed(msgs)): + entries[i] = format_chat_entry( + role=msg.role, + content=msg.content, + images=msg.attachments.values_list("url", flat=True), + ) + return entries + class Message(models.Model): conversation = models.ForeignKey( @@ -783,6 +795,32 @@ def local_lang(self): return Truncator(self.display_content).words(30) +class MessageAttachment(models.Model): + message = models.ForeignKey( + "bots.Message", + on_delete=models.CASCADE, + related_name="attachments", + ) + url = CustomURLField() + metadata = models.ForeignKey( + "files.FileMetadata", + on_delete=models.SET_NULL, + null=True, + blank=True, + default=None, + related_name="message_attachments", + ) + created_at = models.DateTimeField(auto_now_add=True, db_index=True) + + class Meta: + ordering = ["created_at"] + + def __str__(self): + if self.metadata_id: + return f"{self.metadata.name} ({self.url})" + return self.url + + class FeedbackQuerySet(models.QuerySet): def to_df(self, tz=pytz.timezone(settings.TIME_ZONE)) -> "pd.DataFrame": import pandas as pd @@ -874,7 +912,10 @@ class Status(models.IntegerChoices): objects = FeedbackQuerySet.as_manager() class Meta: - ordering = ("-created_at",) + indexes = [ + models.Index(fields=["-created_at"]), + ] + ordering = ["-created_at"] get_latest_by = "created_at" def __str__(self): diff --git a/daras_ai_v2/azure_doc_extract.py b/daras_ai_v2/azure_doc_extract.py index 484fc86ce..b8647dfbb 100644 --- a/daras_ai_v2/azure_doc_extract.py +++ b/daras_ai_v2/azure_doc_extract.py @@ -24,7 +24,21 @@ def azure_doc_extract_pages(pdf_url: str, model_id: str = "prebuilt-layout"): @redis_cache_decorator -def azure_form_recognizer(pdf_url: str, model_id: str): +def azure_form_recognizer_models() -> dict[str, str]: + r = requests.get( + str( + furl(settings.AZURE_FORM_RECOGNIZER_ENDPOINT) + / "formrecognizer/documentModels" + ), + params={"api-version": "2023-07-31"}, + headers=auth_headers, + ) + r.raise_for_status() + return {value["modelId"]: value["description"] for value in r.json()["value"]} + + +@redis_cache_decorator +def azure_form_recognizer(url: str, model_id: str): r = requests.post( str( furl(settings.AZURE_FORM_RECOGNIZER_ENDPOINT) @@ -32,7 +46,7 @@ def azure_form_recognizer(pdf_url: str, model_id: str): ), params={"api-version": "2023-07-31"}, headers=auth_headers, - json={"urlSource": pdf_url}, + json={"urlSource": url}, ) r.raise_for_status() location = r.headers["Operation-Location"] diff --git a/daras_ai_v2/bots.py b/daras_ai_v2/bots.py index 6a4d6fd4e..3d19d99b3 100644 --- a/daras_ai_v2/bots.py +++ b/daras_ai_v2/bots.py @@ -3,6 +3,7 @@ import typing from urllib.parse import parse_qs +from django.db import transaction from fastapi import HTTPException, Request from furl import furl from sentry_sdk import capture_exception @@ -17,10 +18,12 @@ SavedRun, ConvoState, 
Workflow, + MessageAttachment, ) from daras_ai_v2.asr import AsrModels, run_google_translate from daras_ai_v2.base import BasePage from daras_ai_v2.language_model import CHATML_ROLE_USER, CHATML_ROLE_ASSISTANT +from daras_ai_v2.vector_search import doc_url_to_file_metadata from gooeysite.bg_db_conn import db_middleware @@ -68,6 +71,9 @@ def get_input_text(self) -> str | None: def get_input_audio(self) -> str | None: raise NotImplementedError + def get_input_images(self) -> list[str] | None: + raise NotImplementedError + def nice_filename(self, mime_type: str) -> str: ext = mimetypes.guess_extension(mime_type) or "" return f"{self.platform}_{self.input_type}_from_{self.user_id}_to_{self.bot_id}{ext}" @@ -159,6 +165,7 @@ def _mock_api_output(input_text): @db_middleware def _on_msg(bot: BotInterface): speech_run = None + input_images = None if not bot.page_cls: bot.send_msg(text=PAGE_NOT_CONNECTED_ERROR) return @@ -194,6 +201,13 @@ def _on_msg(bot: BotInterface): return # send confirmation of asr bot.send_msg(text=AUDIO_ASR_CONFIRMATION.format(input_text)) + case "image": + input_images = bot.get_input_images() + if not input_images: + raise HTTPException( + status_code=400, detail="No image found in request." + ) + input_text = (bot.get_input_text() or "").strip() case "text": input_text = (bot.get_input_text() or "").strip() if not input_text: @@ -221,6 +235,7 @@ def _on_msg(bot: BotInterface): _process_and_send_msg( billing_account_user=billing_account_user, bot=bot, + input_images=input_images, input_text=input_text, speech_run=speech_run, ) @@ -258,6 +273,7 @@ def _process_and_send_msg( *, billing_account_user: AppUser, bot: BotInterface, + input_images: list[str] | None, input_text: str, speech_run: str | None, ): @@ -267,7 +283,13 @@ def _process_and_send_msg( # bot, input_text # ) # make API call to gooey bots to get the response - response_text, response_audio, response_video, msgs_to_save = _process_msg( + ( + response_text, + response_audio, + response_video, + user_msg, + assistant_msg, + ) = _process_msg( page_cls=bot.page_cls, api_user=billing_account_user, query_params=bot.query_params, @@ -275,6 +297,7 @@ def _process_and_send_msg( input_text=input_text, user_language=bot.language, speech_run=speech_run, + input_images=input_images, ) except HTTPException as e: traceback.print_exc() @@ -285,24 +308,35 @@ def _process_and_send_msg( # this really shouldn't happen, but just in case it does, we should have a nice message response_text = response_text or DEFAULT_RESPONSE # send the response to the user - print(bot.show_feedback_buttons) msg_id = bot.send_msg( text=response_text, audio=response_audio, video=response_video, buttons=_feedback_start_buttons() if bot.show_feedback_buttons else None, ) - if not msgs_to_save: + if not (user_msg and assistant_msg): return - # save the message id for the sent message - if msg_id: - msgs_to_save[-1].platform_msg_id = msg_id # save the message id for the received message if bot.recieved_msg_id: - msgs_to_save[0].platform_msg_id = bot.recieved_msg_id - # save the messages - for msg in msgs_to_save: - msg.save() + user_msg.platform_msg_id = bot.recieved_msg_id + # save the message id for the sent message + if msg_id: + assistant_msg.platform_msg_id = msg_id + + # get the attachments + attachments = [] + for img in input_images or []: + metadata = doc_url_to_file_metadata(img) + attachments.append( + MessageAttachment(message=user_msg, url=img, metadata=metadata) + ) + # save the messages & attachments + with transaction.atomic(): + 
user_msg.save() + assistant_msg.save() + for attachment in attachments: + attachment.metadata.save() + attachment.save() def _handle_interactive_msg(bot: BotInterface): @@ -438,18 +472,15 @@ def _process_msg( api_user: AppUser, query_params: dict, convo: Conversation, + input_images: list[str] | None, input_text: str, user_language: str, speech_run: str | None, -) -> tuple[str, str | None, str | None, list[Message]]: +) -> tuple[str, str | None, str | None, Message, Message]: from routers.api import call_api # get latest messages for context (upto 100) - saved_msgs = list( - reversed( - convo.messages.order_by("-created_at").values("role", "content")[:100], - ), - ) + saved_msgs = convo.messages.all().as_llm_context() # # mock testing # result = _mock_api_output(input_text) @@ -460,6 +491,7 @@ def _process_msg( user=api_user, request_body={ "input_prompt": input_text, + "input_images": input_images, "messages": saved_msgs, "user_language": user_language, }, @@ -480,26 +512,24 @@ def _process_msg( raw_output_text = result["output"]["raw_output_text"][0] response_text = result["output"]["output_text"][0] # save new messages for future context - msgs_to_save = [ - Message( - conversation=convo, - role=CHATML_ROLE_USER, - content=raw_input_text, - display_content=input_text, - saved_run=SavedRun.objects.get_or_create( - workflow=Workflow.ASR, **furl(speech_run).query.params - )[0] - if speech_run - else None, - ), - Message( - conversation=convo, - role=CHATML_ROLE_ASSISTANT, - content=raw_output_text, - display_content=output_text, - saved_run=SavedRun.objects.get_or_create( - workflow=Workflow.VIDEO_BOTS, **furl(result.get("url", "")).query.params - )[0], - ), - ] - return response_text, response_audio, response_video, msgs_to_save + user_msg = Message( + conversation=convo, + role=CHATML_ROLE_USER, + content=raw_input_text, + display_content=input_text, + saved_run=SavedRun.objects.get_or_create( + workflow=Workflow.ASR, **furl(speech_run).query.params + )[0] + if speech_run + else None, + ) + assistant_msg = Message( + conversation=convo, + role=CHATML_ROLE_ASSISTANT, + content=raw_output_text, + display_content=output_text, + saved_run=SavedRun.objects.get_or_create( + workflow=Workflow.VIDEO_BOTS, **furl(result.get("url", "")).query.params + )[0], + ) + return response_text, response_audio, response_video, user_msg, assistant_msg diff --git a/daras_ai_v2/doc_search_settings_widgets.py b/daras_ai_v2/doc_search_settings_widgets.py index 4351fca26..06be1c12d 100644 --- a/daras_ai_v2/doc_search_settings_widgets.py +++ b/daras_ai_v2/doc_search_settings_widgets.py @@ -2,7 +2,6 @@ import typing import gooey_ui as st - from daras_ai_v2 import settings from daras_ai_v2.asr import AsrModels, google_translate_language_selector from daras_ai_v2.enum_selector_widget import enum_selector @@ -80,7 +79,8 @@ def document_uploader( def doc_search_settings( - asr_allowed: bool = True, keyword_instructions_allowed: bool = False + asr_allowed: bool = False, + keyword_instructions_allowed: bool = False, ): from daras_ai_v2.vector_search import DocSearchRequest @@ -95,6 +95,24 @@ def doc_search_settings( allow_none=True, ) + st.text_area( + """ +###### 👁‍🗨 Summarization Instructions +Prompt to transform the conversation history into a vector search query. +These instructions run before the workflow performs a search of the knowledge base documents and should summarize the conversation into a VectorDB query most relevant to the user's last message. In general, you shouldn't need to adjust these instructions. 
+ """, + key="query_instructions", + height=300, + ) + if keyword_instructions_allowed: + st.text_area( + """ +###### 🔑 Keyword Extraction + """, + key="keyword_instructions", + height=300, + ) + dense_weight_ = DocSearchRequest.__fields__["dense_weight"] st.slider( label=f"###### {dense_weight_.field_info.title}\n{dense_weight_.field_info.description}", @@ -135,33 +153,15 @@ def doc_search_settings( max_value=50, ) - st.text_area( - """ -###### 👁‍🗨 Summarization Instructions -Prompt to transform the conversation history into a vector search query. -These instructions run before the workflow performs a search of the knowledge base documents and should summarize the conversation into a VectorDB query most relevant to the user's last message. In general, you shouldn't need to adjust these instructions. - """, - key="query_instructions", - height=300, - ) - if keyword_instructions_allowed: - st.text_area( - """ -###### 🔑 Keyword Extraction - """, - key="keyword_instructions", - height=300, - ) - if not asr_allowed: return st.write("---") st.write( """ - ##### 🎤 Knowledge Base Speech Recognition - If your knowledge base documents contain audio or video files, we'll transcribe and optionally translate them to English, given we've found most vectorDBs and LLMs perform best in English (even if their final answers are translated into another language). - """ + ##### 🎤 Knowledge Base Speech Recognition + If your knowledge base documents contain audio or video files, we'll transcribe and optionally translate them to English, given we've found most vectorDBs and LLMs perform best in English (even if their final answers are translated into another language). + """ ) enum_selector( diff --git a/daras_ai_v2/facebook_bots.py b/daras_ai_v2/facebook_bots.py index c2a785867..33bdd7872 100644 --- a/daras_ai_v2/facebook_bots.py +++ b/daras_ai_v2/facebook_bots.py @@ -24,12 +24,6 @@ def __init__(self, message: dict, metadata: dict): self.input_type = message["type"] - # if the message has a caption, treat it as text - caption = self._get_caption() - if caption: - self.input_type = "text" - self.input_message["text"] = {"body": caption} - bi = BotIntegration.objects.get(wa_phone_number_id=self.bot_id) self.convo = Conversation.objects.get_or_create( bot_integration=bi, @@ -41,10 +35,11 @@ def get_input_text(self) -> str | None: try: return self.input_message["text"]["body"] except KeyError: - return None - - def _get_caption(self): - return self.input_message.get(self.input_type, {}).get("caption") + pass + try: + return self.input_message[self.input_type]["caption"] + except KeyError: + pass def get_input_audio(self) -> str | None: try: @@ -54,9 +49,6 @@ def get_input_audio(self) -> str | None: media_id = self.input_message["video"]["id"] except KeyError: return None - return self._download_wa_media(media_id) - - def _download_wa_media(self, media_id: str) -> str: # download file from whatsapp data, mime_type = retrieve_wa_media_by_id(media_id) data, _ = audio_bytes_to_wav(data) @@ -68,6 +60,23 @@ def _download_wa_media(self, media_id: str) -> str: content_type=mime_type, ) + def get_input_images(self) -> list[str] | None: + try: + media_id = self.input_message["image"]["id"] + except KeyError: + return None + return [self._download_wa_media(media_id)] + + def _download_wa_media(self, media_id: str) -> str: + # download file from whatsapp + data, mime_type = retrieve_wa_media_by_id(media_id) + # upload file to firebase + return upload_file_from_bytes( + filename=self.nice_filename(mime_type), + data=data, + 
content_type=mime_type, + ) + def get_interactive_msg_info(self) -> tuple[str, str]: button_id = self.input_message["interactive"]["button_reply"]["id"] context_msg_id = self.input_message["context"]["id"] diff --git a/daras_ai_v2/field_render.py b/daras_ai_v2/field_render.py new file mode 100644 index 000000000..1b79c1673 --- /dev/null +++ b/daras_ai_v2/field_render.py @@ -0,0 +1,13 @@ +import typing + +from pydantic import BaseModel + + +def field_title_desc(model: typing.Type[BaseModel], name: str) -> str: + field = model.__fields__[name] + return "\n".join( + filter( + None, + [field.field_info.title, field.field_info.description or ""], + ) + ) diff --git a/daras_ai_v2/language_model.py b/daras_ai_v2/language_model.py index 0bc994b26..8a4614449 100644 --- a/daras_ai_v2/language_model.py +++ b/daras_ai_v2/language_model.py @@ -1,6 +1,7 @@ import hashlib import io import re +import typing from enum import Enum from functools import partial @@ -16,6 +17,7 @@ from django.conf import settings from jinja2.lexer import whitespace_re from loguru import logger +from openai.types.chat import ChatCompletionContentPartParam from daras_ai_v2.asr import get_google_auth_session from daras_ai_v2.functional import map_parallel @@ -41,6 +43,7 @@ class LLMApis(Enum): class LargeLanguageModels(Enum): + gpt_4_vision = "GPT-4 Vision (openai)" gpt_4_turbo = "GPT-4 Turbo (openai)" gpt_4 = "GPT-4 (openai)" gpt_4_32k = "GPT-4 32K (openai)" @@ -64,6 +67,11 @@ class LargeLanguageModels(Enum): def _deprecated(cls): return {cls.code_davinci_002} + def is_vision_model(self) -> bool: + return self in { + self.gpt_4_vision, + } + def is_chat_model(self) -> bool: return self not in { self.palm2_text, @@ -79,6 +87,7 @@ def is_chat_model(self) -> bool: AZURE_OPENAI_MODEL_PREFIX = "openai-" llm_model_names = { + LargeLanguageModels.gpt_4_vision: "gpt-4-vision-preview", LargeLanguageModels.gpt_4_turbo: ( "openai-gpt-4-turbo-prod-ca-1", "gpt-4-1106-preview", @@ -108,6 +117,7 @@ def is_chat_model(self) -> bool: } llm_api = { + LargeLanguageModels.gpt_4_vision: LLMApis.openai, LargeLanguageModels.gpt_4_turbo: LLMApis.openai, LargeLanguageModels.gpt_4: LLMApis.openai, LargeLanguageModels.gpt_4_32k: LLMApis.openai, @@ -127,6 +137,8 @@ def is_chat_model(self) -> bool: EMBEDDING_MODEL_MAX_TOKENS = 8191 model_max_tokens = { + # https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo + LargeLanguageModels.gpt_4_vision: 128_000, # https://help.openai.com/en/articles/8555510-gpt-4-turbo LargeLanguageModels.gpt_4_turbo: 128_000, # https://platform.openai.com/docs/models/gpt-4 @@ -150,6 +162,7 @@ def is_chat_model(self) -> bool: } llm_price = { + LargeLanguageModels.gpt_4_vision: 6, LargeLanguageModels.gpt_4_turbo: 5, LargeLanguageModels.gpt_4: 10, LargeLanguageModels.gpt_4_32k: 20, @@ -274,9 +287,27 @@ def _run_openai_embedding( class ConversationEntry(typing_extensions.TypedDict): - role: str + role: typing.Literal["user", "system", "assistant"] + content: str | list[ChatCompletionContentPartParam] display_name: typing_extensions.NotRequired[str] - content: str + + +def get_entry_images(entry: ConversationEntry) -> list[str]: + contents = entry.get("content") or "" + if isinstance(contents, str): + return [] + return list( + filter(None, (part.get("image_url", {}).get("url") for part in contents)), + ) + + +def get_entry_text(entry: ConversationEntry) -> str: + contents = entry.get("content") or "" + if isinstance(contents, str): + return contents + return "\n".join( + filter(None, (part.get("text") for part in contents)), + ) 
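# Minimal usage sketch of the two accessors added above (values are hypothetical,
# not part of the patch). ConversationEntry content may be a plain string or a list
# of OpenAI-style content parts; these helpers recover the text and image URLs from
# either form.
entry = {
    "role": "user",
    "content": [
        {"type": "image_url", "image_url": {"url": "https://example.com/receipt.png"}},
        {"type": "text", "text": "What is the total on this receipt?"},
    ],
}
assert get_entry_images(entry) == ["https://example.com/receipt.png"]
assert get_entry_text(entry) == "What is the total on this receipt?"
# Plain-string entries pass through unchanged and yield no images.
assert get_entry_text({"role": "user", "content": "plain text"}) == "plain text"
assert get_entry_images({"role": "user", "content": "plain text"}) == []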
def run_language_model( @@ -306,6 +337,11 @@ def run_language_model( is_chatml, messages = parse_chatml(prompt) # type: ignore messages = messages or [] logger.info(f"{model_name=}, {len(messages)=}, {max_tokens=}, {temperature=}") + if not model.is_vision_model(): + messages = [ + format_chat_entry(role=entry["role"], content=get_entry_text(entry)) + for entry in messages + ] result = _run_chat_model( api=api, model=model_name, @@ -429,6 +465,8 @@ def _run_openai_chat( stop: list[str] | None, avoid_repetition: bool, ) -> list[ConversationEntry]: + from openai._types import NOT_GIVEN + if avoid_repetition: frequency_penalty = 0.1 presence_penalty = 0.25 @@ -444,7 +482,7 @@ def _run_openai_chat( model=model_str, messages=messages, max_tokens=max_tokens, - stop=stop, + stop=stop or NOT_GIVEN, n=num_outputs, temperature=temperature, frequency_penalty=frequency_penalty, @@ -648,7 +686,7 @@ def _run_palm_text( def format_chatml_message(entry: ConversationEntry) -> str: msg = CHATML_START_TOKEN + entry.get("role", "") - content = entry.get("content").strip() + content = get_entry_text(entry).strip() if content: msg += "\n" + content + CHATML_END_TOKEN return msg @@ -740,3 +778,15 @@ def build_llama_prompt(messages: list[ConversationEntry]): ret += f"{B_INST} {messages[-1].get('content').strip()} {E_INST}" return ret + + +def format_chat_entry( + *, role: str, content: str, images: list[str] = None +) -> ConversationEntry: + if images: + content = [ + {"type": "image_url", "image_url": {"url": url}} for url in images + ] + [ + {"type": "text", "text": content}, + ] + return {"role": role, "content": content} diff --git a/daras_ai_v2/language_model_settings_widgets.py b/daras_ai_v2/language_model_settings_widgets.py index 4083ece31..e5ab27a59 100644 --- a/daras_ai_v2/language_model_settings_widgets.py +++ b/daras_ai_v2/language_model_settings_widgets.py @@ -1,10 +1,14 @@ import gooey_ui as st +from daras_ai_v2.azure_doc_extract import azure_form_recognizer_models from daras_ai_v2.enum_selector_widget import enum_selector +from daras_ai_v2.field_render import field_title_desc from daras_ai_v2.language_model import LargeLanguageModels -def language_model_settings(show_selector=True): +def language_model_settings(show_selector=True, show_document_model=False): + from recipes.VideoBots import VideoBotsPage + st.write("##### 🔠 Language Model Settings") if show_selector: @@ -14,6 +18,16 @@ def language_model_settings(show_selector=True): key="selected_model", use_selectbox=True, ) + if show_document_model: + doc_model_descriptions = azure_form_recognizer_models() + st.selectbox( + f"###### {field_title_desc(VideoBotsPage.RequestModel, 'document_model')}", + key="document_model", + options=[None, *doc_model_descriptions], + format_func=lambda x: f"{doc_model_descriptions[x]} ({x})" + if x + else "———", + ) st.checkbox("Avoid Repetition", key="avoid_repetition") diff --git a/daras_ai_v2/serp_search_locations.py b/daras_ai_v2/serp_search_locations.py index b56184ab3..2631ed8f5 100644 --- a/daras_ai_v2/serp_search_locations.py +++ b/daras_ai_v2/serp_search_locations.py @@ -3,6 +3,7 @@ from pydantic import Field import gooey_ui as st +from daras_ai_v2.field_render import field_title_desc def serp_search_settings(): @@ -26,7 +27,7 @@ def serp_search_settings(): def serp_search_type_selectbox(key="serp_search_type"): st.selectbox( - f"###### {GoogleSearchMixin.__fields__[key].field_info.title}\n{GoogleSearchMixin.__fields__[key].field_info.description or ''}", + f"###### {field_title_desc(GoogleSearchMixin, 
key)}", options=SerpSearchType, format_func=lambda x: x.label, key=key, @@ -35,7 +36,7 @@ def serp_search_type_selectbox(key="serp_search_type"): def serp_search_location_selectbox(key="serp_search_location"): st.selectbox( - f"###### {GoogleSearchMixin.__fields__[key].field_info.title}\n{GoogleSearchMixin.__fields__[key].field_info.description or ''}", + f"###### {field_title_desc(GoogleSearchMixin, key)}", options=SerpSearchLocation, format_func=lambda x: f"{x.label} ({x.value})", key=key, diff --git a/recipes/BulkRunner.py b/recipes/BulkRunner.py index d36f780a3..588dd6f2a 100644 --- a/recipes/BulkRunner.py +++ b/recipes/BulkRunner.py @@ -11,6 +11,7 @@ from daras_ai.image_input import upload_file_from_bytes from daras_ai_v2.base import BasePage from daras_ai_v2.doc_search_settings_widgets import document_uploader +from daras_ai_v2.field_render import field_title_desc from daras_ai_v2.functional import map_parallel from daras_ai_v2.query_params_util import extract_query_params from daras_ai_v2.vector_search import ( @@ -65,7 +66,7 @@ def render_form_v2(self): st.session_state.setdefault("__run_urls", "\n".join(run_urls)) run_urls = ( st.text_area( - f"##### {self.RequestModel.__fields__['run_urls'].field_info.title}\n{self.RequestModel.__fields__['run_urls'].field_info.description or ''}", + f"##### {field_title_desc(self.RequestModel, 'run_urls')}", key="__run_urls", ) .strip() @@ -74,7 +75,7 @@ def render_form_v2(self): st.session_state["run_urls"] = run_urls files = document_uploader( - f"##### {self.RequestModel.__fields__['documents'].field_info.title}\n{self.RequestModel.__fields__['documents'].field_info.description or ''}", + f"##### {field_title_desc(self.RequestModel, 'documents')}", accept=(".csv", ".xlsx", ".xls", ".json", ".tsv", ".xml"), ) diff --git a/recipes/VideoBots.py b/recipes/VideoBots.py index 6822f89a0..ce2b4bc60 100644 --- a/recipes/VideoBots.py +++ b/recipes/VideoBots.py @@ -17,11 +17,16 @@ run_google_translate, google_translate_language_selector, ) +from daras_ai_v2.azure_doc_extract import ( + azure_form_recognizer, + azure_form_recognizer_models, +) from daras_ai_v2.base import BasePage, MenuTabs, StateKeys from daras_ai_v2.doc_search_settings_widgets import ( doc_search_settings, document_uploader, ) +from daras_ai_v2.field_render import field_title_desc from daras_ai_v2.glossary import glossary_input from daras_ai_v2.language_model import ( run_language_model, @@ -35,6 +40,9 @@ CHATML_ROLE_USER, CHATML_ROLE_SYSTEM, model_max_tokens, + get_entry_images, + get_entry_text, + format_chat_entry, ) from daras_ai_v2.language_model_settings_widgets import language_model_settings from daras_ai_v2.lipsync_settings_widgets import lipsync_settings @@ -73,80 +81,6 @@ SAFETY_BUFFER = 100 -def show_landbot_widget(): - landbot_url = st.session_state.get("landbot_url") - if not landbot_url: - st.html("", **{"data-landbot-config-url": ""}) - return - - f = furl(landbot_url) - config_path = os.path.join(f.host, *f.path.segments[:2]) - config_url = f"https://storage.googleapis.com/{config_path}/index.json" - - st.html( - # language=HTML - """ - - """, - **{"data-landbot-config-url": config_url}, - ) - - -def parse_script(bot_script: str) -> (str, list[ConversationEntry]): - # run regex to find scripted messages in script text - script_matches = list(BOT_SCRIPT_RE.finditer(bot_script)) - # extract system message from script - system_message = bot_script - if script_matches: - system_message = system_message[: script_matches[0].start()] - system_message = system_message.strip() - 
# extract pre-scripted messages from script - scripted_msgs: list[ConversationEntry] = [] - for idx in range(len(script_matches)): - match = script_matches[idx] - try: - next_match = script_matches[idx + 1] - except IndexError: - next_match_start = None - else: - next_match_start = next_match.start() - if (len(script_matches) - idx) % 2 == 0: - role = CHATML_ROLE_USER - else: - role = CHATML_ROLE_ASSISTANT - scripted_msgs.append( - { - "role": role, - "display_name": match.group(1).strip(), - "content": bot_script[match.end() : next_match_start].strip(), - } - ) - return system_message, scripted_msgs - - class VideoBotsPage(BasePage): title = "Copilot for your Enterprise" # "Create Interactive Video Bots" workflow = Workflow.VIDEO_BOTS @@ -191,9 +125,14 @@ class VideoBotsPage(BasePage): } class RequestModel(BaseModel): - input_prompt: str bot_script: str | None + input_prompt: str + input_images: list[str] | None + + # conversation history/context + messages: list[ConversationEntry] | None + # tts settings tts_provider: typing.Literal[ tuple(e.name for e in TextToSpeechProviders) @@ -215,6 +154,11 @@ class RequestModel(BaseModel): selected_model: typing.Literal[ tuple(e.name for e in LargeLanguageModels) ] | None + document_model: str | None = Field( + title="🩻 Photo / Document Intelligence", + description="When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? " + "(via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))", + ) avoid_repetition: bool | None num_outputs: int | None quality: float | None @@ -228,9 +172,6 @@ class RequestModel(BaseModel): face_padding_left: int | None face_padding_right: int | None - # conversation history/context - messages: list[ConversationEntry] | None - # doc search task_instructions: str | None query_instructions: str | None @@ -264,7 +205,7 @@ class RequestModel(BaseModel): variables: dict[str, typing.Any] | None class ResponseModel(BaseModel): - final_prompt: str + final_prompt: str | list[ConversationEntry] output_text: list[str] @@ -375,7 +316,8 @@ def render_settings(self): st.write("---") doc_search_settings(keyword_instructions_allowed=True) st.write("---") - language_model_settings() + + language_model_settings(show_document_model=True) st.write("---") google_translate_language_selector( @@ -393,11 +335,11 @@ def render_settings(self): """ ) glossary_input( - f"##### {self.RequestModel.__fields__['input_glossary_document'].field_info.title}\n{self.RequestModel.__fields__['input_glossary_document'].field_info.description or ''}", + f"##### {field_title_desc(self.RequestModel, 'input_glossary_document')}", key="input_glossary_document", ) glossary_input( - f"##### {self.RequestModel.__fields__['output_glossary_document'].field_info.title}\n{self.RequestModel.__fields__['output_glossary_document'].field_info.description or ''}", + f"##### {field_title_desc(self.RequestModel, 'output_glossary_document')}", key="output_glossary_document", ) st.write("---") @@ -458,81 +400,24 @@ def render_example(self, state: dict): st.write(truncate_text_words(output_text[0], maxlen=200)) def render_output(self): + # chat window with st.div(className="pb-3"): - with st.div( - className="pb-1", - style=dict( - maxHeight="80vh", - overflowY="scroll", - display="flex", - flexDirection="column-reverse", - border="1px solid #c9c9c9", - ), - ): - with msg_container_widget(CHATML_ROLE_ASSISTANT): 
- output_text = st.session_state.get("output_text", []) - output_video = st.session_state.get("output_video", []) - output_audio = st.session_state.get("output_audio", []) - if output_text: - st.write(f"**Assistant**") - for idx, text in enumerate(output_text): - st.write(text) - try: - st.video(output_video[idx], autoplay=True) - except IndexError: - try: - st.audio(output_audio[idx]) - except IndexError: - pass - - input_prompt = st.session_state.get("input_prompt") - if input_prompt: - with msg_container_widget(CHATML_ROLE_USER): - st.write(f"**User** \\\n{input_prompt}") - - for entry in reversed(st.session_state.get("messages", [])): - with msg_container_widget(entry["role"]): - display_name = entry.get("display_name") or entry["role"] - display_name = display_name.capitalize() - st.write(f'**{display_name}** \\\n{entry["content"]}') - - with st.div( - className="px-3 pt-3 d-flex gap-1", - style=dict(background="rgba(239, 239, 239, 0.6)"), - ): - with st.div(className="flex-grow-1"): - new_input = st.text_area( - "", placeholder="Send a message", height=50 - ) + chat_list_view() + pressed_send, new_input, new_input_images = chat_input_view() - if st.button("✈ Send", style=dict(height="3.2rem")): - messsages = st.session_state.get("messages", []) - raw_input_text = st.session_state.get("raw_input_text") or "" - raw_output_text = (st.session_state.get("raw_output_text") or [""])[ - 0 - ] - if raw_input_text and raw_output_text: - messsages += [ - { - "role": CHATML_ROLE_USER, - "content": raw_input_text, - }, - { - "role": CHATML_ROLE_ASSISTANT, - "content": raw_output_text, - }, - ] - st.session_state["messages"] = messsages - st.session_state["input_prompt"] = new_input - self.on_submit() + if pressed_send: + self.on_send(new_input, new_input_images) + # clear chat inputs if st.button("🗑️ Clear"): st.session_state["messages"] = [] st.session_state["input_prompt"] = "" + st.session_state["input_images"] = None st.session_state["raw_input_text"] = "" self.clear_outputs() st.experimental_rerun() + # render sources references = st.session_state.get("references", []) if not references: return @@ -545,6 +430,30 @@ def render_output(self): label_visibility="collapsed", ) + def on_send(self, new_input: str, new_input_images: list[str]): + prev_input = st.session_state.get("raw_input_text") or "" + prev_output = (st.session_state.get("raw_output_text") or [""])[0] + + if prev_input and prev_output: + # append previous input to the history + st.session_state["messsages"] = st.session_state.get("messages", []) + [ + format_chat_entry( + role=CHATML_ROLE_USER, + content=prev_input, + images=(st.session_state.pop("input_images", None)), + ), + format_chat_entry( + role=CHATML_ROLE_ASSISTANT, + content=prev_output, + ), + ] + + # add new input to the state + st.session_state["input_prompt"] = new_input + st.session_state["input_images"] = new_input_images or None + + self.on_submit() + def render_steps(self): if st.session_state.get("tts_provider"): st.video(st.session_state.get("input_face"), caption="Input Face") @@ -568,11 +477,10 @@ def render_steps(self): final_prompt = st.session_state.get("final_prompt") if final_prompt: - text_output( - "**Final Prompt**", - value=final_prompt, - height=300, - ) + if isinstance(final_prompt, str): + text_output("**Final Prompt**", value=final_prompt, height=300) + else: + st.json(final_prompt) for idx, text in enumerate(st.session_state.get("raw_output_text", [])): st.text_area( @@ -629,22 +537,38 @@ def run(self, state: dict) -> typing.Iterator[str | 
None]: """ user_input = request.input_prompt.strip() - if not user_input: + if not (user_input or request.input_images): return model = LargeLanguageModels[request.selected_model] is_chat_model = model.is_chat_model() saved_msgs = request.messages.copy() bot_script = request.bot_script + ocr_texts = [] + if request.input_images: + yield "Running Azure Form Recognizer..." + for img in request.input_images: + ocr_text = ( + azure_form_recognizer( + img, model_id=request.document_model or "prebuilt-read" + ) + .get("content", "") + .strip() + ) + if not ocr_text: + continue + ocr_texts.append(ocr_text) + # translate input text - if request.user_language and request.user_language != "en": - yield f"Translating input to english..." - user_input = run_google_translate( - texts=[user_input], - source_language=request.user_language, - target_language="en", - glossary_url=request.input_glossary_document, - )[0] + yield f"Translating input to english..." + user_input, *ocr_texts = run_google_translate( + texts=[user_input, *ocr_texts], + target_language="en", + glossary_url=request.input_glossary_document, + ) + + for text in ocr_texts: + user_input = f"Image: {text!r}\n{user_input}" # parse the bot script system_message, scripted_msgs = parse_script(bot_script) @@ -686,7 +610,8 @@ def run(self, state: dict) -> typing.Iterator[str | None]: query_msgs = query_msgs[clip_idx:] chat_history = "\n".join( - f'{msg["role"]}: """{msg["content"]}"""' for msg in query_msgs + f'{entry["role"]}: """{get_entry_text(entry)}"""' + for entry in query_msgs ) query_instructions = (request.query_instructions or "").strip() @@ -700,7 +625,7 @@ def run(self, state: dict) -> typing.Iterator[str | None]: else: query_msgs.reverse() state["final_search_query"] = "\n---\n".join( - msg["content"] for msg in query_msgs + get_entry_text(entry) for entry in query_msgs ) keyword_instructions = (request.keyword_instructions or "").strip() @@ -765,9 +690,7 @@ def run(self, state: dict) -> typing.Iterator[str | None]: } ) - # final prompt to display - prompt = "\n".join(format_chatml_message(entry) for entry in prompt_messages) - state["final_prompt"] = prompt + state["final_prompt"] = prompt_messages # ensure input script is not too big max_allowed_tokens = model_max_tokens[model] - calc_gpt_tokens( @@ -791,6 +714,9 @@ def run(self, state: dict) -> typing.Iterator[str | None]: avoid_repetition=request.avoid_repetition, ) else: + prompt = "\n".join( + format_chatml_message(entry) for entry in prompt_messages + ) output_text = run_language_model( model=request.selected_model, prompt=prompt, @@ -1021,6 +947,10 @@ def messenger_bot_integration(self): if is_connected: bi.saved_run = None else: + # set bot language from state + bi.user_language = ( + st.session_state.get("user_language") or bi.user_language + ) bi.saved_run = current_sr if bi.platform == Platform.SLACK: from daras_ai_v2.slack_bot import send_confirmation_msg @@ -1032,6 +962,181 @@ def messenger_bot_integration(self): st.write("---") +def show_landbot_widget(): + landbot_url = st.session_state.get("landbot_url") + if not landbot_url: + st.html("", **{"data-landbot-config-url": ""}) + return + + f = furl(landbot_url) + config_path = os.path.join(f.host, *f.path.segments[:2]) + config_url = f"https://storage.googleapis.com/{config_path}/index.json" + + st.html( + # language=HTML + """ + + """, + **{"data-landbot-config-url": config_url}, + ) + + +def parse_script(bot_script: str) -> (str, list[ConversationEntry]): + # run regex to find scripted messages in script text + 
script_matches = list(BOT_SCRIPT_RE.finditer(bot_script)) + # extract system message from script + system_message = bot_script + if script_matches: + system_message = system_message[: script_matches[0].start()] + system_message = system_message.strip() + # extract pre-scripted messages from script + scripted_msgs: list[ConversationEntry] = [] + for idx in range(len(script_matches)): + match = script_matches[idx] + try: + next_match = script_matches[idx + 1] + except IndexError: + next_match_start = None + else: + next_match_start = next_match.start() + if (len(script_matches) - idx) % 2 == 0: + role = CHATML_ROLE_USER + else: + role = CHATML_ROLE_ASSISTANT + scripted_msgs.append( + { + "role": role, + "display_name": match.group(1).strip(), + "content": bot_script[match.end() : next_match_start].strip(), + } + ) + return system_message, scripted_msgs + + +def chat_list_view(): + # render a reversed list view + with st.div( + className="pb-1", + style=dict( + maxHeight="80vh", + overflowY="scroll", + display="flex", + flexDirection="column-reverse", + border="1px solid #c9c9c9", + ), + ): + with st.div(className="px-3"): + show_raw_msgs = st.checkbox("_Show Raw Output_") + # render the last output + with msg_container_widget(CHATML_ROLE_ASSISTANT): + if show_raw_msgs: + output_text = st.session_state.get("raw_output_text", []) + else: + output_text = st.session_state.get("output_text", []) + output_video = st.session_state.get("output_video", []) + output_audio = st.session_state.get("output_audio", []) + if output_text: + st.write(f"**Assistant**") + for idx, text in enumerate(output_text): + st.write(text) + try: + st.video(output_video[idx], autoplay=True) + except IndexError: + try: + st.audio(output_audio[idx]) + except IndexError: + pass + messages = st.session_state.get("messages", []).copy() + # add last input to history if present + if show_raw_msgs: + input_prompt = st.session_state.get("raw_input_text") + else: + input_prompt = st.session_state.get("input_prompt") + input_images = st.session_state.get("input_images") + if input_prompt or input_images: + messages += [ + format_chat_entry( + role=CHATML_ROLE_USER, content=input_prompt, images=input_images + ), + ] + # render history + for entry in reversed(messages): + with msg_container_widget(entry["role"]): + display_name = entry.get("display_name") or entry["role"] + display_name = display_name.capitalize() + images = get_entry_images(entry) + text = get_entry_text(entry) + if text or images: + st.write(f"**{display_name}** \n{text}") + if images: + for im in images: + st.image(im, style={"maxHeight": "200px"}) + + +def chat_input_view() -> tuple[bool, str, list[str]]: + with st.div( + className="px-3 pt-3 d-flex gap-1", + style=dict(background="rgba(239, 239, 239, 0.6)"), + ): + show_uploader_key = "--show-file-uploader" + show_uploader = st.session_state.setdefault(show_uploader_key, False) + if st.button( + "📎", + style=dict(height="3.2rem", backgroundColor="white"), + ): + show_uploader = not show_uploader + st.session_state[show_uploader_key] = show_uploader + + with st.div(className="flex-grow-1"): + new_input = st.text_area("", placeholder="Send a message", height=50) + + pressed_send = st.button("✈ Send", style=dict(height="3.2rem")) + + if show_uploader: + new_input_images = st.file_uploader( + "", + accept_multiple_files=True, + ) + else: + new_input_images = None + + return pressed_send, new_input, new_input_images + + +def msg_container_widget(role: str): + return st.div( + className="px-3 py-1 pt-2", + style=dict( 
+ background="rgba(239, 239, 239, 0.6)" + if role == CHATML_ROLE_USER + else "#fff", + ), + ) + + def convo_window_clipper( window: list[ConversationEntry], max_tokens, @@ -1047,14 +1152,3 @@ def convo_window_clipper( ): return i + step return 0 - - -def msg_container_widget(role: str): - return st.div( - className="px-3 py-1 pt-2", - style=dict( - background="rgba(239, 239, 239, 0.6)" - if role == CHATML_ROLE_USER - else "#fff", - ), - ) From b5e83e52e36ef945bd26ac3eda6dcb5ccca81cfa Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Wed, 29 Nov 2023 21:58:22 +0530 Subject: [PATCH 029/138] fix msg history in copilot when sending images --- recipes/VideoBots.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/recipes/VideoBots.py b/recipes/VideoBots.py index ce2b4bc60..ff7af6fdb 100644 --- a/recipes/VideoBots.py +++ b/recipes/VideoBots.py @@ -432,15 +432,16 @@ def render_output(self): def on_send(self, new_input: str, new_input_images: list[str]): prev_input = st.session_state.get("raw_input_text") or "" + prev_input_images = st.session_state.pop("input_images", None) prev_output = (st.session_state.get("raw_output_text") or [""])[0] - if prev_input and prev_output: + if (prev_input or prev_input_images) and prev_output: # append previous input to the history st.session_state["messsages"] = st.session_state.get("messages", []) + [ format_chat_entry( role=CHATML_ROLE_USER, content=prev_input, - images=(st.session_state.pop("input_images", None)), + images=prev_input_images, ), format_chat_entry( role=CHATML_ROLE_ASSISTANT, From 505006d59d5752e29f2d35c6d53d4bd022d41f57 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Thu, 30 Nov 2023 13:13:36 +0530 Subject: [PATCH 030/138] copy_to_clipboard_button now takes a button type as argument --- daras_ai_v2/copy_to_clipboard_button_widget.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/daras_ai_v2/copy_to_clipboard_button_widget.py b/daras_ai_v2/copy_to_clipboard_button_widget.py index c3efbe47a..a43da7547 100644 --- a/daras_ai_v2/copy_to_clipboard_button_widget.py +++ b/daras_ai_v2/copy_to_clipboard_button_widget.py @@ -1,3 +1,4 @@ +import typing import gooey_ui as gui # language="html" @@ -20,16 +21,18 @@ def copy_to_clipboard_button( *, value: str, style: str = "", + className: str = "", + type: typing.Literal["primary", "secondary", "tertiary", "link"] = "primary", ): return gui.html( # language="html" f""" - """, From 4b8d2cb3d1da8458592ebc4996c739dfa168a011 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Thu, 30 Nov 2023 15:06:05 +0530 Subject: [PATCH 031/138] move copy-to-clipboard button to ... 
actions menu --- daras_ai_v2/base.py | 227 ++++++++++++++++++++++++-------------------- 1 file changed, 122 insertions(+), 105 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 9f1dd7310..de71214e7 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -182,7 +182,21 @@ def render(self): self._render_page_title_with_breadcrumbs(example_id, run_id, uid) st.write(st.session_state.get(StateKeys.page_notes)) with st.div(): - self._render_publish_menu() + example_id, run_id, uid = extract_query_params(gooey_get_query_params()) + current_run = self.get_sr_from_query_params(example_id, run_id, uid) + if ( + self.request + and self.request.user + and current_run.get_creator() == self.request.user + ): + self._render_run_action_buttons( + current_run=current_run, + published_run=self.example_doc_sr(example_id) + if example_id + else None, + ) + else: + self._render_social_buttons() try: selected_tab = MenuTabs.paths_reverse[self.tab] @@ -202,17 +216,20 @@ def render(self): with st.nav_tab_content(): self.render_selected_tab(selected_tab) - def _render_publish_menu(self): - if not self.request or not self.request.user: - return - - example_id, run_id, uid = extract_query_params(gooey_get_query_params()) - current_run = self.get_sr_from_query_params(example_id, run_id, uid) - - if current_run.get_creator() != self.request.user: - return + def _render_social_buttons(self): + copy_to_clipboard_button( + "🔗 Copy URL", + value=self._get_current_app_url(), + type="secondary", + className="mb-0", + ) - published_run = self.example_doc_sr(example_id) if example_id else None + def _render_run_action_buttons( + self, + *, + current_run: SavedRun, + published_run: SavedRun | None, + ): is_update_mode = bool( published_run and ( @@ -223,8 +240,6 @@ def _render_publish_menu(self): with st.div(): with st.div(className="d-flex justify-content-end"): - # if published_run and is_update_mode and current_run != published_run: - # st.caption("Unpublished changes") st.html( """ + """ + ) + render_item1 = items and items[0] render_item2 = items[1:] and items[1] if render_item1 or render_item2: # avoids empty space @@ -764,9 +784,28 @@ def render_author(self, user: AppUser): html = "
" if user.photo_url: + st.html( + """ + + """ + ) html += f""" - -
+ """ if user.display_name: html += f"
{user.display_name}
" diff --git a/gooey_ui/components.py b/gooey_ui/components.py index bf5c1ef20..25a5db508 100644 --- a/gooey_ui/components.py +++ b/gooey_ui/components.py @@ -782,7 +782,7 @@ def breadcrumbs(divider: str = "/", **props) -> state.NestingCtx: def breadcrumb_item(inner_html: str, link_to: str | None = None, **props): - className = "breadcrumb-item lead " + props.pop("className", "") + className = "breadcrumb-item " + props.pop("className", "") with tag("li", className=className, **props): if link_to: with tag("a", href=link_to): From e353b9bb50b966b2ddfcf61033258feafe05ec87 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Mon, 4 Dec 2023 15:27:26 +0530 Subject: [PATCH 050/138] Change ellipsis button and hide unpublished changes in mobile view --- daras_ai_v2/base.py | 36 +++++++++++++++++++++++++----------- 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index ea5ae44f2..62a5f6f43 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -196,19 +196,26 @@ def render(self): self.render_author(author) with st.div(className="d-flex align-items-start"): - self._render_social_buttons() - if ( + is_current_user_creator = ( self.request and self.request.user and not self.request.user.is_anonymous and current_run.get_creator() == self.request.user - ): - if ( - published_run - and published_run.saved_run != current_run - and published_run.is_editor(self.request.user) - ): - self._render_unpublished_changes_indicator() + ) + has_unpublished_changes = ( + published_run + and published_run.saved_run != current_run + and self.request + and self.request.user + and published_run.is_editor(self.request.user) + ) + + if is_current_user_creator and has_unpublished_changes: + self._render_unpublished_changes_indicator() + + self._render_social_buttons() + + if is_current_user_creator: self._render_published_run_buttons( current_run=current_run, published_run=published_run, @@ -245,7 +252,10 @@ def _render_title(self, title: str): st.write(f"# {title}") def _render_unpublished_changes_indicator(self): - st.html('Unpublished changes') + with st.div( + className="d-none d-lg-flex h-100 align-items-center text-muted me-3" + ): + st.html("Unpublished changes") def _render_social_buttons(self): copy_to_clipboard_button( @@ -286,7 +296,11 @@ def _render_published_run_buttons( ) run_actions_button = ( - st.button("⋮", className="mb-0", type="secondary") + st.button( + '', + className="mb-0", + type="tertiary", + ) if is_update_mode else None ) From 873cfe1120d894f989772441973aa9bb6eb39e41 Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Mon, 4 Dec 2023 16:39:48 +0530 Subject: [PATCH 051/138] fix api for unset body indicate which image models are slow --- daras_ai_v2/stable_diffusion.py | 26 +++++++++++++++----------- routers/api.py | 11 ++++------- 2 files changed, 19 insertions(+), 18 deletions(-) diff --git a/daras_ai_v2/stable_diffusion.py b/daras_ai_v2/stable_diffusion.py index f325de57e..9701e2554 100644 --- a/daras_ai_v2/stable_diffusion.py +++ b/daras_ai_v2/stable_diffusion.py @@ -42,20 +42,22 @@ def _deprecated(cls): class Text2ImgModels(Enum): # sd_1_4 = "SD v1.4 (RunwayML)" # Host this too? 
- sd_2 = "Stable Diffusion v2.1 (stability.ai)" - sd_1_5 = "Stable Diffusion v1.5 (RunwayML)" dream_shaper = "DreamShaper (Lykon)" - openjourney = "Open Journey (PromptHero)" - openjourney_2 = "Open Journey v2 beta (PromptHero)" - analog_diffusion = "Analog Diffusion (wavymulder)" - protogen_5_3 = "Protogen v5.3 (darkstorm2150)" dreamlike_2 = "Dreamlike Photoreal 2.0 (dreamlike.art)" + sd_2 = "Stable Diffusion v2.1 (stability.ai)" + sd_1_5 = "Stable Diffusion v1.5 (RunwayML)" + dall_e = "DALL·E 2 (OpenAI)" dall_e_3 = "DALL·E 3 (OpenAI)" + openjourney_2 = "Open Journey v2 beta (PromptHero) 🐢" + openjourney = "Open Journey (PromptHero) 🐢" + analog_diffusion = "Analog Diffusion (wavymulder) 🐢" + protogen_5_3 = "Protogen v5.3 (darkstorm2150) 🐢" + jack_qiao = "Stable Diffusion v1.4 [Deprecated] (Jack Qiao)" - deepfloyd_if = "DeepFloyd IF [Deprecated] (stability.ai)" rodent_diffusion_1_5 = "Rodent Diffusion 1.5 [Deprecated] (NerdyRodent)" + deepfloyd_if = "DeepFloyd IF [Deprecated] (stability.ai)" @classmethod def _deprecated(cls): @@ -81,12 +83,14 @@ class Img2ImgModels(Enum): dreamlike_2 = "Dreamlike Photoreal 2.0 (dreamlike.art)" sd_2 = "Stable Diffusion v2.1 (stability.ai)" sd_1_5 = "Stable Diffusion v1.5 (RunwayML)" + dall_e = "Dall-E (OpenAI)" + instruct_pix2pix = "✨ InstructPix2Pix (Tim Brooks)" - openjourney_2 = "Open Journey v2 beta (PromptHero)" - openjourney = "Open Journey (PromptHero)" - analog_diffusion = "Analog Diffusion (wavymulder)" - protogen_5_3 = "Protogen v5.3 (darkstorm2150)" + openjourney_2 = "Open Journey v2 beta (PromptHero) 🐢" + openjourney = "Open Journey (PromptHero) 🐢" + analog_diffusion = "Analog Diffusion (wavymulder) 🐢" + protogen_5_3 = "Protogen v5.3 (darkstorm2150) 🐢" jack_qiao = "Stable Diffusion v1.4 [Deprecated] (Jack Qiao)" rodent_diffusion_1_5 = "Rodent Diffusion 1.5 [Deprecated] (NerdyRodent)" diff --git a/routers/api.py b/routers/api.py index 245cc048b..c57d948a6 100644 --- a/routers/api.py +++ b/routers/api.py @@ -121,7 +121,7 @@ def run_api_json( return call_api( page_cls=page_cls, user=user, - request_body=page_request.dict(), + request_body=page_request.dict(exclude_unset=True), query_params=dict(request.query_params), ) @@ -175,7 +175,7 @@ def run_api_json_async( ret = call_api( page_cls=page_cls, user=user, - request_body=page_request.dict(), + request_body=page_request.dict(exclude_unset=True), query_params=dict(request.query_params), run_async=True, ) @@ -332,14 +332,11 @@ def submit_api_call( state = self.get_sr_from_query_params_dict(query_params).to_dict() if state is None: raise HTTPException(status_code=404) - # set sane defaults for k, v in self.sane_defaults.items(): state.setdefault(k, v) - - # remove None values & insert request data - request_dict = {k: v for k, v in request_body.items() if v is not None} - state.update(request_dict) + # insert request data + state.update(request_body) # set streamlit session state st.set_session_state(state) From 986e2037ce15cbb60cc9713a65bbfba4d7f93230 Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Mon, 4 Dec 2023 16:45:54 +0530 Subject: [PATCH 052/138] fix typo --- recipes/VideoBots.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/recipes/VideoBots.py b/recipes/VideoBots.py index ff7af6fdb..97a8d0cf5 100644 --- a/recipes/VideoBots.py +++ b/recipes/VideoBots.py @@ -432,12 +432,12 @@ def render_output(self): def on_send(self, new_input: str, new_input_images: list[str]): prev_input = st.session_state.get("raw_input_text") or "" - prev_input_images = 
st.session_state.pop("input_images", None) prev_output = (st.session_state.get("raw_output_text") or [""])[0] + prev_input_images = st.session_state.get("input_images") if (prev_input or prev_input_images) and prev_output: # append previous input to the history - st.session_state["messsages"] = st.session_state.get("messages", []) + [ + st.session_state["messages"] = st.session_state.get("messages", []) + [ format_chat_entry( role=CHATML_ROLE_USER, content=prev_input, From a5fe8ea8c2112613973b50fcc99a782c2fc3c8d5 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Mon, 4 Dec 2023 17:20:06 +0530 Subject: [PATCH 053/138] Add changes for better responsiveness --- daras_ai_v2/base.py | 45 ++++++++++++++++++++++++++++++++------------- 1 file changed, 32 insertions(+), 13 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 62a5f6f43..9a2e02bfc 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -183,19 +183,19 @@ def render(self): published_run=published_run, ) with st.div(className="d-flex justify-content-between mt-4"): - with st.div(className="d-md-flex d-block align-items-center"): + with st.div(className="d-lg-flex d-block align-items-center"): if not breadcrumbs and not self.run_user: self._render_title(title) if breadcrumbs: - with st.tag("div", className="me-3 mb-1 mb-md-0 py-2 py-md-0"): + with st.tag("div", className="me-3 mb-1 mb-lg-0 py-2 py-lg-0"): self._render_breadcrumbs(breadcrumbs) author = self.run_user or current_run.get_creator() if not is_root_example: self.render_author(author) - with st.div(className="d-flex align-items-start"): + with st.div(className="d-flex align-items-center"): is_current_user_creator = ( self.request and self.request.user @@ -213,14 +213,32 @@ def render(self): if is_current_user_creator and has_unpublished_changes: self._render_unpublished_changes_indicator() - self._render_social_buttons() + with st.div(className="d-flex align-items-center right-action-icons"): + st.html( + """ + + """ ) + self._render_social_buttons() + + if is_current_user_creator: + self._render_published_run_buttons( + current_run=current_run, + published_run=published_run, + ) + with st.div(): if breadcrumbs or self.run_user: # only render title here if the above row was not empty @@ -255,7 +273,8 @@ def _render_unpublished_changes_indicator(self): with st.div( className="d-none d-lg-flex h-100 align-items-center text-muted me-3" ): - st.html("Unpublished changes") + with st.tag("span", className="d-inline-block"): + st.html("Unpublished changes") def _render_social_buttons(self): copy_to_clipboard_button( @@ -471,15 +490,15 @@ def _render_breadcrumbs(self, items: list[tuple[str, str | None]]): st.html( """ - """, - unsafe_allow_html=True, - ) - with st.div(className="container"): - _container = st.div(className="container") - if self.title: - with _container: - st.markdown(f"

<h2>{self.title}</h2>
", unsafe_allow_html=True) - - close_ = st.button("✖", key=f"{self.key}-close") - if close_: - self.close() - - with _container: - yield _container + yield container From 65a024821ace9020d54657f0dd683e3e43dda60e Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Mon, 4 Dec 2023 22:18:08 +0530 Subject: [PATCH 056/138] Add support for showing version history --- daras_ai_v2/base.py | 55 +++++++++++++++++++++++++++++++++++---------- 1 file changed, 43 insertions(+), 12 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index b8d46fb2f..cf5896204 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -347,6 +347,7 @@ def _render_published_run_buttons( current_run=current_run, published_run=published_run, is_update_mode=is_update_mode, + modal=publish_modal, ) if run_actions_modal.is_open(): @@ -359,6 +360,7 @@ def _render_publish_modal( self, *, current_run: SavedRun, + modal: Modal, published_run: PublishedRun | None, is_update_mode: bool = False, ): @@ -429,6 +431,9 @@ def _render_publish_modal( title=published_run_title.strip(), notes=published_run_notes.strip(), ) + modal.close() + st.experimental_rerun() + raise QueryParamsRedirectException( query_params=dict(example_id=published_run.published_run_id), ) @@ -582,6 +587,7 @@ def render_selected_tab(self, selected_tab: str): with col1: self._render_help() with col2: + self._render_versions() self._render_save_options() self.render_related_workflows() @@ -599,6 +605,35 @@ def render_selected_tab(self, selected_tab: str): case MenuTabs.published: self._published_tab() + render_js_dynamic_dates() + + def _render_versions(self): + example_id, run_id, uid = extract_query_params(gooey_get_query_params()) + published_run = self.get_published_run_from_query_params( + example_id, run_id, uid + ) + + if published_run: + st.write("## Versions") + col1, col2, col3 = st.columns([1, 3, 2], responsive=False) + versions = published_run.versions.all() + for i, version in reverse_enumerate(len(versions) - 1, versions): + url = self.app_url( + example_id=published_run.published_run_id, + run_id=version.saved_run.run_id, + uid=version.saved_run.uid, + ) + with col1: + st.write(f"{i}") + with col2: + with st.link(to=url): + st.write(version.title) + with col3: + if isinstance(version.created_at, datetime.datetime): + timestamp = version.created_at + else: + timestamp = datetime.datetime.fromisoformat(version.created_at) + st.write(format_timestamp(timestamp)) def render_related_workflows(self): page_clses = self.related_workflows() @@ -1257,18 +1292,6 @@ def _render_save_options(self): "Note: To approve a run as an example, it must be published publicly first." 
) - if published_run: - st.write("#### Versions") - versions = published_run.versions.all() - for i, version in reverse_enumerate(len(versions) - 1, versions): - url = self.app_url( - example_id=published_run.published_run_id, - run_id=version.saved_run.run_id, - uid=version.saved_run.uid, - ) - with st.link(to=url): - st.write(f"{i}: {version.title}") - def state_to_doc(self, state: dict): ret = { field_name: deepcopy(state[field_name]) @@ -1658,3 +1681,11 @@ def convert_state_type(state, key, fn): def reverse_enumerate(start, iterator): return zip(range(start, -1, -1), iterator) + + +def format_timestamp(timestamp: datetime.datetime): + current_year = datetime.datetime.now().year + if timestamp.year == current_year: + return timestamp.strftime("%a, %d %b, %I:%M %p") + else: + return timestamp.strftime("%a, %d %b %Y, %I:%M %p") From e5aad33df04f8bb2d2c5042645bdb77aa22a067a Mon Sep 17 00:00:00 2001 From: clr-li <111320104+clr-li@users.noreply.github.com> Date: Tue, 21 Nov 2023 18:04:57 -0800 Subject: [PATCH 057/138] Added new photos --- daras_ai_v2/base.py | 13 +++++++++++-- explore.py | 3 +++ recipes/BulkRunner.py | 1 + recipes/ChyronPlant.py | 1 + recipes/CompareLLM.py | 1 + recipes/CompareText2Img.py | 1 + recipes/CompareUpscaler.py | 1 + recipes/DeforumSD.py | 1 + recipes/DocExtract.py | 1 + recipes/DocSearch.py | 1 + recipes/DocSummary.py | 1 + recipes/EmailFaceInpainting.py | 1 + recipes/FaceInpainting.py | 1 + recipes/GoogleGPT.py | 1 + recipes/GoogleImageGen.py | 1 + recipes/ImageSegmentation.py | 1 + recipes/Img2Img.py | 1 + recipes/LetterWriter.py | 1 + recipes/Lipsync.py | 1 + recipes/LipsyncTTS.py | 1 + recipes/ObjectInpainting.py | 1 + recipes/QRCodeGenerator.py | 1 + recipes/RelatedQnA.py | 1 + recipes/RelatedQnADoc.py | 1 + recipes/SEOSummary.py | 1 + recipes/SmartGPT.py | 1 + recipes/SocialLookupEmail.py | 1 + recipes/Text2Audio.py | 1 + recipes/TextToSpeech.py | 1 + recipes/VideoBots.py | 1 + recipes/asr.py | 1 + recipes/embeddings_page.py | 1 + 32 files changed, 44 insertions(+), 2 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 69fbc6c60..513ba7301 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -81,6 +81,7 @@ class RecipeRunState(Enum): class StateKeys: page_title = "__title" + page_image = "__image" page_notes = "__notes" created_at = "created_at" @@ -96,6 +97,7 @@ class StateKeys: class BasePage: title: str + image: str workflow: Workflow slug_versions: list[str] @@ -189,6 +191,10 @@ def render(self): return st.session_state.setdefault(StateKeys.page_title, self.title) + st.session_state.setdefault( + StateKeys.page_image, + self.image, + ) st.session_state.setdefault( StateKeys.page_notes, self.preview_description(st.session_state) ) @@ -263,6 +269,9 @@ def _render_page_title_with_breadcrumbs( def get_recipe_title(self, state: dict) -> str: return state.get(StateKeys.page_title) or self.title or "" + def get_recipe_image(self, state: dict) -> str: + return state.get(StateKeys.page_image) or self.image or "" + def _user_disabled_check(self): if self.run_user and self.run_user.is_disabled: msg = ( @@ -918,8 +927,8 @@ def _render_after_output(self): self._render_report_button() def _render_save_options(self): - if not self.is_current_user_admin(): - return + # if not self.is_current_user_admin(): + # return parent_example_id, parent_run_id, parent_uid = extract_query_params( gooey_get_query_params() diff --git a/explore.py b/explore.py index 05cddac15..9d8c7dcf4 100644 --- a/explore.py +++ b/explore.py @@ -19,6 +19,9 @@ def 
_render(page_cls): with gui.link(to=page.app_url()): gui.markdown(f"### {page.get_recipe_title(state)}") + gui.html( + f"" + ) preview = page.preview_description(state) if preview: gui.write(truncate_text_words(preview, 150)) diff --git a/recipes/BulkRunner.py b/recipes/BulkRunner.py index 588dd6f2a..195f906d8 100644 --- a/recipes/BulkRunner.py +++ b/recipes/BulkRunner.py @@ -23,6 +23,7 @@ class BulkRunnerPage(BasePage): title = "Bulk Runner & Evaluator" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/87f35df4-88d7-11ee-aac9-02420a00016b/Bulk%20Runner.png.png" workflow = Workflow.BULK_RUNNER slug_versions = ["bulk-runner", "bulk"] diff --git a/recipes/ChyronPlant.py b/recipes/ChyronPlant.py index b4d3af5c0..4c371abf3 100644 --- a/recipes/ChyronPlant.py +++ b/recipes/ChyronPlant.py @@ -10,6 +10,7 @@ class ChyronPlantPage(BasePage): title = "Chyron Plant Bot" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png" workflow = Workflow.CHYRON_PLANT slug_versions = ["ChyronPlant"] diff --git a/recipes/CompareLLM.py b/recipes/CompareLLM.py index b7361b20f..867fc31ef 100644 --- a/recipes/CompareLLM.py +++ b/recipes/CompareLLM.py @@ -22,6 +22,7 @@ class CompareLLMPage(BasePage): title = "Large Language Models: GPT-3" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/ae42015e-88d7-11ee-aac9-02420a00016b/Compare%20LLMs.png.png" workflow = Workflow.COMPARE_LLM slug_versions = ["CompareLLM", "llm", "compare-large-language-models"] diff --git a/recipes/CompareText2Img.py b/recipes/CompareText2Img.py index f7cc29eb6..2b6757020 100644 --- a/recipes/CompareText2Img.py +++ b/recipes/CompareText2Img.py @@ -28,6 +28,7 @@ class CompareText2ImgPage(BasePage): title = "Compare AI Image Generators" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/d127484e-88d9-11ee-b549-02420a000167/Compare%20AI%20Image%20generators.png.png" workflow = Workflow.COMPARE_TEXT2IMG slug_versions = [ "CompareText2Img", diff --git a/recipes/CompareUpscaler.py b/recipes/CompareUpscaler.py index e68ac65e1..428d80322 100644 --- a/recipes/CompareUpscaler.py +++ b/recipes/CompareUpscaler.py @@ -14,6 +14,7 @@ class CompareUpscalerPage(BasePage): title = "Compare AI Image Upscalers" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/64393e0c-88db-11ee-b428-02420a000168/AI%20Image%20Upscaler.png.png" workflow = Workflow.COMPARE_UPSCALER slug_versions = ["compare-ai-upscalers"] diff --git a/recipes/DeforumSD.py b/recipes/DeforumSD.py index d8c348d07..a8b87ac6e 100644 --- a/recipes/DeforumSD.py +++ b/recipes/DeforumSD.py @@ -161,6 +161,7 @@ def get_last_frame(prompt_list: list) -> int: class DeforumSDPage(BasePage): title = "AI Animation Generator" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/media/users/kxmNIYAOJbfOURxHBKNCWeUSKiP2/dd88c110-88d6-11ee-9b4f-2b58bd50e819/animation.gif" workflow = Workflow.DEFORUM_SD slug_versions = ["DeforumSD", "animation-generator"] diff --git a/recipes/DocExtract.py b/recipes/DocExtract.py index cca9848f7..bbb0b2277 100644 --- a/recipes/DocExtract.py +++ b/recipes/DocExtract.py @@ -56,6 +56,7 @@ class Columns(IntegerChoices): class DocExtractPage(BasePage): title = "Youtube Transcripts + GPT extraction to Google Sheets" + image = 
"https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png" workflow = Workflow.DOC_EXTRACT slug_versions = [ "doc-extract", diff --git a/recipes/DocSearch.py b/recipes/DocSearch.py index 22a6dddb9..70a3c43b5 100644 --- a/recipes/DocSearch.py +++ b/recipes/DocSearch.py @@ -38,6 +38,7 @@ class DocSearchPage(BasePage): title = "Search your Docs with GPT" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/cbbb4dc6-88d7-11ee-bf6c-02420a000166/Search%20your%20docs%20with%20gpt.png.png" workflow = Workflow.DOC_SEARCH slug_versions = ["doc-search"] diff --git a/recipes/DocSummary.py b/recipes/DocSummary.py index 1e4873573..8e099e947 100644 --- a/recipes/DocSummary.py +++ b/recipes/DocSummary.py @@ -38,6 +38,7 @@ class CombineDocumentsChains(Enum): class DocSummaryPage(BasePage): title = "Summarize your Docs with GPT" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/1f858a7a-88d8-11ee-a658-02420a000163/Summarize%20your%20docs%20with%20gpt.png.png" workflow = Workflow.DOC_SUMMARY slug_versions = ["doc-summary"] diff --git a/recipes/EmailFaceInpainting.py b/recipes/EmailFaceInpainting.py index c4c7d6ba0..8550cb46b 100644 --- a/recipes/EmailFaceInpainting.py +++ b/recipes/EmailFaceInpainting.py @@ -20,6 +20,7 @@ class EmailFaceInpaintingPage(FaceInpaintingPage): title = "AI Generated Photo from Email Profile Lookup" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/2affefa6-88da-11ee-aa86-02420a000165/AI%20generated%20photo%20with%20email%20profile%20lookup.png.png" workflow = Workflow.EMAIL_FACE_INPAINTING slug_versions = ["EmailFaceInpainting", "ai-image-from-email-lookup"] diff --git a/recipes/FaceInpainting.py b/recipes/FaceInpainting.py index daddcb54a..3702652ae 100644 --- a/recipes/FaceInpainting.py +++ b/recipes/FaceInpainting.py @@ -26,6 +26,7 @@ class FaceInpaintingPage(BasePage): title = "AI Image with a Face" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/10c2ce06-88da-11ee-b428-02420a000168/ai%20image%20with%20a%20face.png.png" workflow = Workflow.FACE_INPAINTING slug_versions = ["FaceInpainting", "face-in-ai-generated-photo"] diff --git a/recipes/GoogleGPT.py b/recipes/GoogleGPT.py index 636798b71..dde12482e 100644 --- a/recipes/GoogleGPT.py +++ b/recipes/GoogleGPT.py @@ -42,6 +42,7 @@ class GoogleGPTPage(BasePage): title = "Web Search + GPT3" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/1de97d80-88d7-11ee-ad97-02420a00016c/Websearch%20GPT.png.png" workflow = Workflow.GOOGLE_GPT slug_versions = ["google-gpt"] diff --git a/recipes/GoogleImageGen.py b/recipes/GoogleImageGen.py index 795e716a6..e02833063 100644 --- a/recipes/GoogleImageGen.py +++ b/recipes/GoogleImageGen.py @@ -32,6 +32,7 @@ class GoogleImageGenPage(BasePage): title = "Render Image Search Results with AI" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/eb23c078-88da-11ee-aa86-02420a000165/web%20search%20render.png.png" workflow = Workflow.GOOGLE_IMAGE_GEN slug_versions = ["GoogleImageGen", "render-images-with-ai"] diff --git a/recipes/ImageSegmentation.py b/recipes/ImageSegmentation.py index 80a8bd677..72b6ec193 100644 --- a/recipes/ImageSegmentation.py +++ b/recipes/ImageSegmentation.py @@ -31,6 +31,7 @@ class ImageSegmentationPage(BasePage): title = "AI Background Changer" + image = 
"https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/06fc595e-88db-11ee-b428-02420a000168/AI%20Background%20Remover.png.png" workflow = Workflow.IMAGE_SEGMENTATION slug_versions = ["ImageSegmentation", "remove-image-background-with-ai"] diff --git a/recipes/Img2Img.py b/recipes/Img2Img.py index 03d4dd668..228c3ed73 100644 --- a/recipes/Img2Img.py +++ b/recipes/Img2Img.py @@ -22,6 +22,7 @@ class Img2ImgPage(BasePage): title = "Edit An Image with AI prompt" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/bcc9351a-88d9-11ee-bf6c-02420a000166/Edit%20an%20image%20with%20AI%201.png.png" workflow = Workflow.IMG_2_IMG slug_versions = ["Img2Img", "ai-photo-editor"] diff --git a/recipes/LetterWriter.py b/recipes/LetterWriter.py index 23265c4c2..d4cd34fce 100644 --- a/recipes/LetterWriter.py +++ b/recipes/LetterWriter.py @@ -14,6 +14,7 @@ class LetterWriterPage(BasePage): title = "Letter Writer" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png" workflow = Workflow.LETTER_WRITER slug_versions = ["LetterWriter"] diff --git a/recipes/Lipsync.py b/recipes/Lipsync.py index 68ac496a0..74c4671b8 100644 --- a/recipes/Lipsync.py +++ b/recipes/Lipsync.py @@ -20,6 +20,7 @@ class LipsyncPage(BasePage): title = "Lip Syncing" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/f33e6332-88d8-11ee-89f9-02420a000169/Lipsync%20TTS.png.png" workflow = Workflow.LIPSYNC slug_versions = ["Lipsync"] diff --git a/recipes/LipsyncTTS.py b/recipes/LipsyncTTS.py index 5a1eb10d4..5ec6d7518 100644 --- a/recipes/LipsyncTTS.py +++ b/recipes/LipsyncTTS.py @@ -14,6 +14,7 @@ class LipsyncTTSPage(LipsyncPage, TextToSpeechPage): title = "Lipsync Video with Any Text" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/1acfa370-88d9-11ee-bf6c-02420a000166/Lipsync%20with%20audio%201.png.png" workflow = Workflow.LIPSYNC_TTS slug_versions = ["LipsyncTTS", "lipsync-maker"] diff --git a/recipes/ObjectInpainting.py b/recipes/ObjectInpainting.py index ff4c81fde..be04355dd 100644 --- a/recipes/ObjectInpainting.py +++ b/recipes/ObjectInpainting.py @@ -27,6 +27,7 @@ class ObjectInpaintingPage(BasePage): title = "Generate Product Photo Backgrounds" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/f07b731e-88d9-11ee-a658-02420a000163/W.I.3.png.png" workflow = Workflow.OBJECT_INPAINTING slug_versions = ["ObjectInpainting", "product-photo-background-generator"] diff --git a/recipes/QRCodeGenerator.py b/recipes/QRCodeGenerator.py index f64566499..693d60537 100644 --- a/recipes/QRCodeGenerator.py +++ b/recipes/QRCodeGenerator.py @@ -51,6 +51,7 @@ class QrSources(Enum): class QRCodeGeneratorPage(BasePage): title = "AI Art QR Code" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/03d6538e-88d5-11ee-ad97-02420a00016c/W.I.2.png.png" workflow = Workflow.QR_CODE slug_versions = ["art-qr-code", "qr", "qr-code"] diff --git a/recipes/RelatedQnA.py b/recipes/RelatedQnA.py index 7e2e71271..67467b5eb 100644 --- a/recipes/RelatedQnA.py +++ b/recipes/RelatedQnA.py @@ -25,6 +25,7 @@ class RelatedGoogleGPTResponse(GoogleGPTPage.ResponseModel): class RelatedQnAPage(BasePage): title = "Generate “People Also Ask” SEO Content " + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/37b0ba22-88d6-11ee-b549-02420a000167/People%20also%20ask.png.png" workflow = 
Workflow.RELATED_QNA_MAKER slug_versions = ["related-qna-maker"] diff --git a/recipes/RelatedQnADoc.py b/recipes/RelatedQnADoc.py index 9f3f3d11e..93a68b963 100644 --- a/recipes/RelatedQnADoc.py +++ b/recipes/RelatedQnADoc.py @@ -24,6 +24,7 @@ class RelatedDocSearchResponse(DocSearchPage.ResponseModel): class RelatedQnADocPage(BasePage): title = '"People Also Ask" Answers from a Doc' + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png" workflow = Workflow.RELATED_QNA_MAKER_DOC slug_versions = ["related-qna-maker-doc"] diff --git a/recipes/SEOSummary.py b/recipes/SEOSummary.py index 674760eb8..1227e52e0 100644 --- a/recipes/SEOSummary.py +++ b/recipes/SEOSummary.py @@ -56,6 +56,7 @@ class SEOSummaryPage(BasePage): title = "Create a perfect SEO-optimized Title & Paragraph" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/85f38b42-88d6-11ee-ad97-02420a00016c/Create%20SEO%20optimized%20content%20option%202.png.png" workflow = Workflow.SEO_SUMMARY slug_versions = ["SEOSummary", "seo-paragraph-generator"] diff --git a/recipes/SmartGPT.py b/recipes/SmartGPT.py index 55e2a9f5d..56dd886c5 100644 --- a/recipes/SmartGPT.py +++ b/recipes/SmartGPT.py @@ -22,6 +22,7 @@ class SmartGPTPage(BasePage): title = "SmartGPT" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/ffd24ad8-88d7-11ee-a658-02420a000163/SmartGPT.png.png" workflow = Workflow.SMART_GPT slug_versions = ["SmartGPT"] price = 20 diff --git a/recipes/SocialLookupEmail.py b/recipes/SocialLookupEmail.py index 243c9fa2d..23a455c03 100644 --- a/recipes/SocialLookupEmail.py +++ b/recipes/SocialLookupEmail.py @@ -19,6 +19,7 @@ class SocialLookupEmailPage(BasePage): title = "Profile Lookup + GPT3 for AI-Personalized Emails" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/5fbd475a-88d7-11ee-aac9-02420a00016b/personalized%20email.png.png" workflow = Workflow.SOCIAL_LOOKUP_EMAIL slug_versions = ["SocialLookupEmail", "email-writer-with-profile-lookup"] diff --git a/recipes/Text2Audio.py b/recipes/Text2Audio.py index 074462257..589800d92 100644 --- a/recipes/Text2Audio.py +++ b/recipes/Text2Audio.py @@ -28,6 +28,7 @@ class Text2AudioModels(Enum): class Text2AudioPage(BasePage): title = "Text guided audio generator" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/a4481d58-88d9-11ee-aa86-02420a000165/Text%20guided%20audio%20generator.png.png" workflow = Workflow.TEXT_2_AUDIO slug_versions = ["text2audio"] diff --git a/recipes/TextToSpeech.py b/recipes/TextToSpeech.py index 0df5d1d3d..b4c3702b9 100644 --- a/recipes/TextToSpeech.py +++ b/recipes/TextToSpeech.py @@ -27,6 +27,7 @@ class TextToSpeechPage(BasePage): title = "Compare AI Voice Generators" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/3621e11a-88d9-11ee-b549-02420a000167/Compare%20AI%20voice%20generators.png.png" workflow = Workflow.TEXT_TO_SPEECH slug_versions = [ "TextToSpeech", diff --git a/recipes/VideoBots.py b/recipes/VideoBots.py index 97a8d0cf5..f57712dad 100644 --- a/recipes/VideoBots.py +++ b/recipes/VideoBots.py @@ -83,6 +83,7 @@ class VideoBotsPage(BasePage): title = "Copilot for your Enterprise" # "Create Interactive Video Bots" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/8c014530-88d4-11ee-aac9-02420a00016b/Copilot.png.png" workflow = Workflow.VIDEO_BOTS slug_versions = 
["video-bots", "bots", "copilot"] diff --git a/recipes/asr.py b/recipes/asr.py index 172f57a7c..603b31c59 100644 --- a/recipes/asr.py +++ b/recipes/asr.py @@ -30,6 +30,7 @@ class AsrPage(BasePage): title = "Speech Recognition & Translation" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/5fb7e5f6-88d9-11ee-aa86-02420a000165/Speech.png.png" workflow = Workflow.ASR slug_versions = ["asr", "speech"] diff --git a/recipes/embeddings_page.py b/recipes/embeddings_page.py index f2474f7ea..8c2214a2d 100644 --- a/recipes/embeddings_page.py +++ b/recipes/embeddings_page.py @@ -39,6 +39,7 @@ class EmbeddingModels(models.TextChoices): class EmbeddingsPage(BasePage): title = "Embeddings" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png" workflow = Workflow.EMBEDDINGS slug_versions = ["embeddings", "embed", "text-embedings"] price = 1 From 75d35e5829be347d9d2d6225de1ce105d25c40a7 Mon Sep 17 00:00:00 2001 From: clr-li <111320104+clr-li@users.noreply.github.com> Date: Wed, 22 Nov 2023 01:03:47 -0800 Subject: [PATCH 058/138] Fixed css of images --- explore.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/explore.py b/explore.py index 9d8c7dcf4..5f8242006 100644 --- a/explore.py +++ b/explore.py @@ -16,24 +16,21 @@ def _render(page_cls): page = page_cls() state = page.recipe_doc_sr().to_dict() - with gui.link(to=page.app_url()): - gui.markdown(f"### {page.get_recipe_title(state)}") + gui.image(page.get_recipe_image(state), "", "", style={"border-radius": 5}) - gui.html( - f"" - ) + with gui.link(to=page.app_url()): + gui.markdown(f"#### {page.get_recipe_title(state)}") preview = page.preview_description(state) if preview: gui.write(truncate_text_words(preview, 150)) else: page.render_description() - page.render_example(state) - heading(title=TITLE, description=DESCRIPTION) for category, pages in all_home_pages_by_category.items(): gui.write("---") - section_heading(category) + if category != "Featured": + section_heading(category) grid_layout(3, pages, _render, separator=False) From 5763c74a83eed534d51f7eaf344a40eb99742ac7 Mon Sep 17 00:00:00 2001 From: clr-li <111320104+clr-li@users.noreply.github.com> Date: Wed, 22 Nov 2023 13:37:54 -0800 Subject: [PATCH 059/138] Split featured and non featured --- explore.py | 30 +++++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/explore.py b/explore.py index 5f8242006..d8a25695d 100644 --- a/explore.py +++ b/explore.py @@ -12,11 +12,33 @@ def render(): - def _render(page_cls): + def _render_non_featured(page_cls): page = page_cls() state = page.recipe_doc_sr().to_dict() - gui.image(page.get_recipe_image(state), "", "", style={"border-radius": 5}) + col1, col2 = gui.columns([1, 2]) + with col1: + gui.image( + page.get_recipe_image(state), + style={"border-radius": 5}, + ) + + with col2: + with gui.link(to=page.app_url()): + gui.markdown(f"#### {page.get_recipe_title(state)}") + preview = page.preview_description(state) + if preview: + gui.write(truncate_text_words(preview, 150)) + else: + page.render_description() + + def _render_as_featured(page_cls): + page = page_cls() + state = page.recipe_doc_sr().to_dict() + gui.image( + page.get_recipe_image(state), + style={"border-radius": 5}, + ) with gui.link(to=page.app_url()): gui.markdown(f"#### {page.get_recipe_title(state)}") @@ -31,7 +53,9 @@ def _render(page_cls): gui.write("---") if category != 
"Featured": section_heading(category) - grid_layout(3, pages, _render, separator=False) + grid_layout(2, pages, _render_non_featured, separator=False) + else: + grid_layout(3, pages, _render_as_featured, separator=False) def heading( From 86e511e4d2d077842b5c03b7806c40ce828de7ca Mon Sep 17 00:00:00 2001 From: clr-li <111320104+clr-li@users.noreply.github.com> Date: Mon, 27 Nov 2023 09:12:15 -0800 Subject: [PATCH 060/138] Fixes --- daras_ai_v2/base.py | 10 +++------- explore.py | 19 ++++++++----------- 2 files changed, 11 insertions(+), 18 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 513ba7301..e6f58dcf8 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -191,10 +191,6 @@ def render(self): return st.session_state.setdefault(StateKeys.page_title, self.title) - st.session_state.setdefault( - StateKeys.page_image, - self.image, - ) st.session_state.setdefault( StateKeys.page_notes, self.preview_description(st.session_state) ) @@ -329,7 +325,7 @@ def _render(page_cls): page = page_cls() state = page_cls().recipe_doc_sr().to_dict() preview_image = meta_preview_url( - page_cls().preview_image(state), page_cls().fallback_preivew_image() + page.get_recipe_image(state), page.fallback_preivew_image() ) with st.link(to=page.app_url()): @@ -927,8 +923,8 @@ def _render_after_output(self): self._render_report_button() def _render_save_options(self): - # if not self.is_current_user_admin(): - # return + if not self.is_current_user_admin(): + return parent_example_id, parent_run_id, parent_uid = extract_query_params( gooey_get_query_params() diff --git a/explore.py b/explore.py index d8a25695d..61f2a62e1 100644 --- a/explore.py +++ b/explore.py @@ -18,28 +18,25 @@ def _render_non_featured(page_cls): col1, col2 = gui.columns([1, 2]) with col1: - gui.image( - page.get_recipe_image(state), - style={"border-radius": 5}, - ) + render_image(page, state) with col2: - with gui.link(to=page.app_url()): - gui.markdown(f"#### {page.get_recipe_title(state)}") - preview = page.preview_description(state) - if preview: - gui.write(truncate_text_words(preview, 150)) - else: - page.render_description() + render_description(page, state) def _render_as_featured(page_cls): page = page_cls() state = page.recipe_doc_sr().to_dict() + render_image(page, state) + + render_description(page, state) + + def render_image(page, state): gui.image( page.get_recipe_image(state), style={"border-radius": 5}, ) + def render_description(page, state): with gui.link(to=page.app_url()): gui.markdown(f"#### {page.get_recipe_title(state)}") preview = page.preview_description(state) From 4e8aa1cd25738428747aa870bbde9e46c806ae36 Mon Sep 17 00:00:00 2001 From: clr-li <111320104+clr-li@users.noreply.github.com> Date: Mon, 27 Nov 2023 09:38:52 -0800 Subject: [PATCH 061/138] Made Images section 3 columns --- explore.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/explore.py b/explore.py index 61f2a62e1..cd4ee46a4 100644 --- a/explore.py +++ b/explore.py @@ -50,9 +50,10 @@ def render_description(page, state): gui.write("---") if category != "Featured": section_heading(category) - grid_layout(2, pages, _render_non_featured, separator=False) - else: + if category == "Images" or category == "Featured": grid_layout(3, pages, _render_as_featured, separator=False) + else: + grid_layout(2, pages, _render_non_featured, separator=False) def heading( From 4b69025e71e5e18328f6a519c2cfb3fb6436d88a Mon Sep 17 00:00:00 2001 From: clr-li <111320104+clr-li@users.noreply.github.com> Date: Wed, 29 Nov 2023 
11:41:05 -0800 Subject: [PATCH 062/138] Show number of runs on each workflow --- daras_ai_v2/base.py | 4 ++++ explore.py | 21 +++++++++++++++++---- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index e6f58dcf8..1d1a36fc5 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -458,6 +458,10 @@ def get_sr_from_query_params( except SavedRun.DoesNotExist: raise HTTPException(status_code=404) + @classmethod + def get_total_runs(cls) -> int: + return SavedRun.objects.filter(workflow=cls.workflow).count() + @classmethod def recipe_doc_sr(cls) -> SavedRun: return SavedRun.objects.get_or_create( diff --git a/explore.py b/explore.py index cd4ee46a4..89affad46 100644 --- a/explore.py +++ b/explore.py @@ -2,6 +2,7 @@ from daras_ai.image_input import truncate_text_words from daras_ai_v2.all_pages import all_home_pages_by_category from daras_ai_v2.grid_layout_widget import grid_layout +import fontawesome as fa META_TITLE = "Explore AI workflows" @@ -15,6 +16,7 @@ def render(): def _render_non_featured(page_cls): page = page_cls() state = page.recipe_doc_sr().to_dict() + total_runs = page.get_total_runs() col1, col2 = gui.columns([1, 2]) with col1: @@ -26,9 +28,20 @@ def _render_non_featured(page_cls): def _render_as_featured(page_cls): page = page_cls() state = page.recipe_doc_sr().to_dict() + total_runs = page.get_total_runs() render_image(page, state) - - render_description(page, state) + render_description(page, state, total_runs) + with gui.tag( + "p", + style={ + "color": "grey", + "font-size": "14px", + "float": "right", + }, + ): + gui.html( + f" {total_runs} runs" + ) def render_image(page, state): gui.image( @@ -36,12 +49,12 @@ def render_image(page, state): style={"border-radius": 5}, ) - def render_description(page, state): + def render_description(page, state, total_runs=0): with gui.link(to=page.app_url()): gui.markdown(f"#### {page.get_recipe_title(state)}") preview = page.preview_description(state) if preview: - gui.write(truncate_text_words(preview, 150)) + gui.html(truncate_text_words(preview, 150) + "
") else: page.render_description() From 418c9c155b2ff9fe2f28a5de7522efb2ed95ff29 Mon Sep 17 00:00:00 2001 From: clr-li <111320104+clr-li@users.noreply.github.com> Date: Wed, 29 Nov 2023 12:24:50 -0800 Subject: [PATCH 063/138] Deleted unnecessary code --- explore.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/explore.py b/explore.py index 89affad46..9c3ba06f5 100644 --- a/explore.py +++ b/explore.py @@ -2,7 +2,6 @@ from daras_ai.image_input import truncate_text_words from daras_ai_v2.all_pages import all_home_pages_by_category from daras_ai_v2.grid_layout_widget import grid_layout -import fontawesome as fa META_TITLE = "Explore AI workflows" @@ -16,7 +15,6 @@ def render(): def _render_non_featured(page_cls): page = page_cls() state = page.recipe_doc_sr().to_dict() - total_runs = page.get_total_runs() col1, col2 = gui.columns([1, 2]) with col1: @@ -30,14 +28,14 @@ def _render_as_featured(page_cls): state = page.recipe_doc_sr().to_dict() total_runs = page.get_total_runs() render_image(page, state) - render_description(page, state, total_runs) + render_description(page, state) with gui.tag( "p", style={ - "color": "grey", "font-size": "14px", "float": "right", }, + className="text-muted", ): gui.html( f" {total_runs} runs" @@ -49,12 +47,13 @@ def render_image(page, state): style={"border-radius": 5}, ) - def render_description(page, state, total_runs=0): + def render_description(page, state): with gui.link(to=page.app_url()): gui.markdown(f"#### {page.get_recipe_title(state)}") preview = page.preview_description(state) if preview: - gui.html(truncate_text_words(preview, 150) + "
") + with gui.tag("p", style={"margin-bottom": "2px"}): + gui.html(truncate_text_words(preview, 150)) else: page.render_description() From 4b2f68d690cf9d45a983917ababaa30e202ca02c Mon Sep 17 00:00:00 2001 From: clr-li <111320104+clr-li@users.noreply.github.com> Date: Thu, 30 Nov 2023 08:40:52 -0800 Subject: [PATCH 064/138] Pro Font Awesome icons --- explore.py | 6 ++++-- templates/base.html | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/explore.py b/explore.py index 9c3ba06f5..7ba969b1d 100644 --- a/explore.py +++ b/explore.py @@ -38,7 +38,7 @@ def _render_as_featured(page_cls): className="text-muted", ): gui.html( - f" {total_runs} runs" + f' {total_runs} runs' ) def render_image(page, state): @@ -53,7 +53,9 @@ def render_description(page, state): preview = page.preview_description(state) if preview: with gui.tag("p", style={"margin-bottom": "2px"}): - gui.html(truncate_text_words(preview, 150)) + gui.html( + truncate_text_words(preview, 150), + ) else: page.render_description() diff --git a/templates/base.html b/templates/base.html index 4a668e876..405df24f4 100644 --- a/templates/base.html +++ b/templates/base.html @@ -12,6 +12,7 @@ {% block head %}{% endblock head %} + From efa8cc94a2b2a0d5816ac683b5887ba00b2e8daa Mon Sep 17 00:00:00 2001 From: clr-li <111320104+clr-li@users.noreply.github.com> Date: Thu, 30 Nov 2023 17:17:53 -0800 Subject: [PATCH 065/138] Show runs on all workflows --- explore.py | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/explore.py b/explore.py index 7ba969b1d..d5a47bed8 100644 --- a/explore.py +++ b/explore.py @@ -15,6 +15,7 @@ def render(): def _render_non_featured(page_cls): page = page_cls() state = page.recipe_doc_sr().to_dict() + total_runs = page.get_total_runs() col1, col2 = gui.columns([1, 2]) with col1: @@ -22,6 +23,7 @@ def _render_non_featured(page_cls): with col2: render_description(page, state) + render_run_count(total_runs) def _render_as_featured(page_cls): page = page_cls() @@ -29,17 +31,7 @@ def _render_as_featured(page_cls): total_runs = page.get_total_runs() render_image(page, state) render_description(page, state) - with gui.tag( - "p", - style={ - "font-size": "14px", - "float": "right", - }, - className="text-muted", - ): - gui.html( - f' {total_runs} runs' - ) + render_run_count(total_runs) def render_image(page, state): gui.image( @@ -59,6 +51,19 @@ def render_description(page, state): else: page.render_description() + def render_run_count(total_runs): + with gui.tag( + "p", + style={ + "font-size": "14px", + "float": "right", + }, + className="text-muted", + ): + gui.html( + f' {total_runs} runs' + ) + heading(title=TITLE, description=DESCRIPTION) for category, pages in all_home_pages_by_category.items(): gui.write("---") From b31dfaa7e79958e14c790c72935a7db4921cda2e Mon Sep 17 00:00:00 2001 From: clr-li <111320104+clr-li@users.noreply.github.com> Date: Thu, 30 Nov 2023 17:19:16 -0800 Subject: [PATCH 066/138] Simplified --- explore.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/explore.py b/explore.py index d5a47bed8..f8d2e1c99 100644 --- a/explore.py +++ b/explore.py @@ -22,16 +22,14 @@ def _render_non_featured(page_cls): render_image(page, state) with col2: - render_description(page, state) - render_run_count(total_runs) + render_description(page, state, total_runs) def _render_as_featured(page_cls): page = page_cls() state = page.recipe_doc_sr().to_dict() total_runs = page.get_total_runs() render_image(page, state) - 
render_description(page, state) - render_run_count(total_runs) + render_description(page, state, total_runs) def render_image(page, state): gui.image( @@ -39,7 +37,7 @@ def render_image(page, state): style={"border-radius": 5}, ) - def render_description(page, state): + def render_description(page, state, total_runs): with gui.link(to=page.app_url()): gui.markdown(f"#### {page.get_recipe_title(state)}") preview = page.preview_description(state) @@ -50,8 +48,6 @@ def render_description(page, state): ) else: page.render_description() - - def render_run_count(total_runs): with gui.tag( "p", style={ From 356c3cd31ba7feed0f38c16a8061b53fb7dbbce9 Mon Sep 17 00:00:00 2001 From: clr-li <111320104+clr-li@users.noreply.github.com> Date: Mon, 4 Dec 2023 08:53:15 -0800 Subject: [PATCH 067/138] Commented out run count for now --- explore.py | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/explore.py b/explore.py index f8d2e1c99..107317fc3 100644 --- a/explore.py +++ b/explore.py @@ -15,21 +15,23 @@ def render(): def _render_non_featured(page_cls): page = page_cls() state = page.recipe_doc_sr().to_dict() - total_runs = page.get_total_runs() + # total_runs = page.get_total_runs() col1, col2 = gui.columns([1, 2]) with col1: render_image(page, state) with col2: - render_description(page, state, total_runs) + # render_description(page, state, total_runs) + render_description(page, state) def _render_as_featured(page_cls): page = page_cls() state = page.recipe_doc_sr().to_dict() - total_runs = page.get_total_runs() + # total_runs = page.get_total_runs() render_image(page, state) - render_description(page, state, total_runs) + # render_description(page, state, total_runs) + render_description(page, state) def render_image(page, state): gui.image( @@ -37,7 +39,7 @@ def render_image(page, state): style={"border-radius": 5}, ) - def render_description(page, state, total_runs): + def render_description(page, state): with gui.link(to=page.app_url()): gui.markdown(f"#### {page.get_recipe_title(state)}") preview = page.preview_description(state) @@ -48,17 +50,17 @@ def render_description(page, state, total_runs): ) else: page.render_description() - with gui.tag( - "p", - style={ - "font-size": "14px", - "float": "right", - }, - className="text-muted", - ): - gui.html( - f' {total_runs} runs' - ) + # with gui.tag( + # "p", + # style={ + # "font-size": "14px", + # "float": "right", + # }, + # className="text-muted", + # ): + # gui.html( + # f' {total_runs} runs' + # ) heading(title=TITLE, description=DESCRIPTION) for category, pages in all_home_pages_by_category.items(): From e6d6ec4a085f795226d0d62f02b833b08d6981eb Mon Sep 17 00:00:00 2001 From: clr-li <111320104+clr-li@users.noreply.github.com> Date: Mon, 4 Dec 2023 09:11:14 -0800 Subject: [PATCH 068/138] Commented out --- templates/base.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/templates/base.html b/templates/base.html index 405df24f4..e2c180de7 100644 --- a/templates/base.html +++ b/templates/base.html @@ -12,7 +12,7 @@ {% block head %}{% endblock head %} - + From 2948774a031fd6b624f93378170b461ed55c0f2b Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Mon, 4 Dec 2023 22:42:37 +0530 Subject: [PATCH 069/138] fix type error on bulk runner --- recipes/BulkRunner.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/recipes/BulkRunner.py b/recipes/BulkRunner.py index 195f906d8..70b3c94a8 100644 --- a/recipes/BulkRunner.py +++ b/recipes/BulkRunner.py @@ -117,12 
+117,12 @@ def render_form_v2(self): except KeyError: try: keys = {k: k for k in sr.state[field][0].keys()} - except (KeyError, IndexError, AttributeError): + except (KeyError, IndexError, AttributeError, TypeError): pass elif field_props.get("type") == "object": try: keys = {k: k for k in sr.state[field].keys()} - except (KeyError, AttributeError): + except (KeyError, AttributeError, TypeError): pass if keys: for k, ktitle in keys.items(): From 1f0adf277e189203f03c7b3b04d8339d4d81996f Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Mon, 4 Dec 2023 23:03:32 +0530 Subject: [PATCH 070/138] fix image links on /explore --- explore.py | 1 + gooey_ui/components.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/explore.py b/explore.py index 107317fc3..22d8fcc63 100644 --- a/explore.py +++ b/explore.py @@ -36,6 +36,7 @@ def _render_as_featured(page_cls): def render_image(page, state): gui.image( page.get_recipe_image(state), + href=page.app_url(), style={"border-radius": 5}, ) diff --git a/gooey_ui/components.py b/gooey_ui/components.py index e5488e424..1172d59fb 100644 --- a/gooey_ui/components.py +++ b/gooey_ui/components.py @@ -214,6 +214,7 @@ def image( src: str | np.ndarray, caption: str = None, alt: str = None, + href: str = None, **props, ): if isinstance(src, np.ndarray): @@ -234,6 +235,7 @@ def image( src=src, caption=dedent(caption), alt=alt or caption, + href=href, **props, ), ).mount() From b51281c8f128bbb95ee05cbd7f2c0ef319709fec Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Mon, 4 Dec 2023 23:27:29 +0530 Subject: [PATCH 071/138] Force redirect upon workflow publish --- daras_ai_v2/base.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index cf5896204..8f9857102 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -431,12 +431,8 @@ def _render_publish_modal( title=published_run_title.strip(), notes=published_run_notes.strip(), ) - modal.close() - st.experimental_rerun() - raise QueryParamsRedirectException( - query_params=dict(example_id=published_run.published_run_id), - ) + force_redirect(published_run.get_app_url()) def _render_run_actions_modal( self, @@ -1674,6 +1670,17 @@ def __init__(self, query_params: dict, status_code=303): super().__init__(url, status_code) +def force_redirect(url: str): + # note: assumes sanitized URLs + st.html( + f""" + + """ + ) + + def convert_state_type(state, key, fn): if key in state: state[key] = fn(state[key]) From 4084f3893dc55202265f870cc567b61688957b7b Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Mon, 4 Dec 2023 23:29:13 +0530 Subject: [PATCH 072/138] Fix version history and timestamp view --- daras_ai_v2/base.py | 38 ++++++++++++++++---------------- daras_ai_v2/user_date_widgets.py | 13 ++++++++++- 2 files changed, 31 insertions(+), 20 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 8f9857102..313c8ebef 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -53,7 +53,11 @@ ) from daras_ai_v2.send_email import send_reported_run_email from daras_ai_v2.tabs_widget import MenuTabs -from daras_ai_v2.user_date_widgets import render_js_dynamic_dates, js_dynamic_date +from daras_ai_v2.user_date_widgets import ( + render_js_dynamic_dates, + re_render_js_dynamic_dates, + js_dynamic_date, +) from gooey_ui import realtime_clear_subs from gooey_ui.pubsub import realtime_pull from gooey_ui.components.modal import 
Modal @@ -467,6 +471,10 @@ def _render_run_actions_modal( published_run.delete() raise QueryParamsRedirectException(query_params={}) + with st.div(className="mt-3"): + st.write("#### Version History") + self._render_versions() + def _get_title_and_breadcrumbs( self, current_run: SavedRun, @@ -583,10 +591,10 @@ def render_selected_tab(self, selected_tab: str): with col1: self._render_help() with col2: - self._render_versions() self._render_save_options() self.render_related_workflows() + render_js_dynamic_dates() case MenuTabs.examples: self._examples_tab() @@ -610,18 +618,17 @@ def _render_versions(self): ) if published_run: - st.write("## Versions") - col1, col2, col3 = st.columns([1, 3, 2], responsive=False) versions = published_run.versions.all() - for i, version in reverse_enumerate(len(versions) - 1, versions): - url = self.app_url( - example_id=published_run.published_run_id, - run_id=version.saved_run.run_id, - uid=version.saved_run.uid, - ) + for i, version in reverse_enumerate(len(versions), versions): + col1, col2, col3 = st.columns([1, 4, 4], responsive=False) with col1: st.write(f"{i}") with col2: + url = self.app_url( + example_id=published_run.published_run_id, + run_id=version.saved_run.run_id, + uid=version.saved_run.uid, + ) with st.link(to=url): st.write(version.title) with col3: @@ -629,7 +636,8 @@ def _render_versions(self): timestamp = version.created_at else: timestamp = datetime.datetime.fromisoformat(version.created_at) - st.write(format_timestamp(timestamp)) + js_dynamic_date(timestamp) + re_render_js_dynamic_dates() def render_related_workflows(self): page_clses = self.related_workflows() @@ -1688,11 +1696,3 @@ def convert_state_type(state, key, fn): def reverse_enumerate(start, iterator): return zip(range(start, -1, -1), iterator) - - -def format_timestamp(timestamp: datetime.datetime): - current_year = datetime.datetime.now().year - if timestamp.year == current_year: - return timestamp.strftime("%a, %d %b, %I:%M %p") - else: - return timestamp.strftime("%a, %d %b %Y, %I:%M %p") diff --git a/daras_ai_v2/user_date_widgets.py b/daras_ai_v2/user_date_widgets.py index 91d6c001e..936b50608 100644 --- a/daras_ai_v2/user_date_widgets.py +++ b/daras_ai_v2/user_date_widgets.py @@ -29,7 +29,7 @@ def render_js_dynamic_dates(): let yearToShow = ""; if (date.getFullYear() != new Date().getFullYear()) { yearToShow = " " + date.getFullYear().toString(); - } + } elem.children[0].innerHTML = ` ${date.toLocaleDateString("en-IN", dateOptions)}${yearToShow}, ${date.toLocaleTimeString("en-IN", timeOptions).toUpperCase()} @@ -41,3 +41,14 @@ def render_js_dynamic_dates(): """, ) + + +def re_render_js_dynamic_dates(): + gui.html( + # language=HTML + """ + + """, + ) From cc497afe792b401a9e803e74f716e2467af7fe9e Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Mon, 4 Dec 2023 23:29:44 +0530 Subject: [PATCH 073/138] Rename 'actions' to 'options' --- daras_ai_v2/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 313c8ebef..bbf437f15 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -327,7 +327,7 @@ def _render_published_run_buttons( if is_update_mode else None ) - run_actions_modal = Modal("Actions", key="published-run-options-modal") + run_actions_modal = Modal("Options", key="published-run-options-modal") if run_actions_button: run_actions_modal.open() From d869cd2f78fb84e25706484067599b091152be1a Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil 
<37668193+nikochiko@users.noreply.github.com> Date: Tue, 5 Dec 2023 00:45:27 +0530 Subject: [PATCH 074/138] Fix date rendering in version history --- daras_ai_v2/base.py | 6 ++-- daras_ai_v2/user_date_widgets.py | 52 ++++++++++++++++++++++++-------- 2 files changed, 43 insertions(+), 15 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index bbf437f15..1e2cffe70 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -620,7 +620,7 @@ def _render_versions(self): if published_run: versions = published_run.versions.all() for i, version in reverse_enumerate(len(versions), versions): - col1, col2, col3 = st.columns([1, 4, 4], responsive=False) + col1, col2, col3 = st.columns([1, 4, 3], responsive=False) with col1: st.write(f"{i}") with col2: @@ -636,7 +636,9 @@ def _render_versions(self): timestamp = version.created_at else: timestamp = datetime.datetime.fromisoformat(version.created_at) - js_dynamic_date(timestamp) + js_dynamic_date( + timestamp, date_options={"day": "numeric", "month": "short"} + ) re_render_js_dynamic_dates() def render_related_workflows(self): diff --git a/daras_ai_v2/user_date_widgets.py b/daras_ai_v2/user_date_widgets.py index 936b50608..42e2ed065 100644 --- a/daras_ai_v2/user_date_widgets.py +++ b/daras_ai_v2/user_date_widgets.py @@ -1,30 +1,52 @@ import datetime +import json +from typing import Any import gooey_ui as gui -def js_dynamic_date(dt: datetime.datetime): +def js_dynamic_date( + dt: datetime.datetime, + *, + date_options: dict[str, Any] | None = None, + time_options: dict[str, Any] | None = None, +): timestamp_ms = dt.timestamp() * 1000 - gui.caption("Loading...", **{"data-id-dynamic-date": str(timestamp_ms)}) + attrs = {"data-id-dynamic-date": str(timestamp_ms)} + if date_options: + attrs["data-id-date-options"] = json.dumps(date_options) + if time_options: + attrs["data-id-time-options"] = json.dumps(time_options) + gui.caption("Loading...", **attrs) def render_js_dynamic_dates(): + default_date_options = { + "weekday": "short", + "day": "numeric", + "month": "short", + } + default_time_options = { + "hour": "numeric", + "hour12": True, + "minute": "numeric", + } gui.html( # language=HTML """ - """, + """ + % { + "date_options_json": json.dumps(default_date_options), + "time_options_json": json.dumps(default_time_options), + }, ) From 7dae5374a24ab25dcb5b78ece5c121d02db66930 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 5 Dec 2023 01:43:05 +0530 Subject: [PATCH 075/138] Add confirmation modal after delete is pressed --- daras_ai_v2/base.py | 48 +++++++++++++++++++++++++++++++++--- gooey_ui/components/modal.py | 12 ++++++--- 2 files changed, 54 insertions(+), 6 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 1e2cffe70..3bf569698 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -358,7 +358,10 @@ def _render_published_run_buttons( with run_actions_modal.container( style={"min-width": "min(300px, 100vw)"} ): - self._render_run_actions_modal(published_run=published_run) + self._render_run_actions_modal( + published_run=published_run, + modal=run_actions_modal, + ) def _render_publish_modal( self, @@ -442,6 +445,7 @@ def _render_run_actions_modal( self, *, published_run: PublishedRun, + modal: Modal, ): with st.div(className="mt-4"): duplicate_icon = '' @@ -464,17 +468,55 @@ def _render_run_actions_modal( query_params=dict(example_id=duplicate_pr.published_run_id), ) + confirm_delete_modal = Modal("Confirm Delete", 
key="confirm-delete-modal") if delete_button: if not published_run.published_run_id: st.error("Cannot delete root example") return - published_run.delete() - raise QueryParamsRedirectException(query_params={}) + confirm_delete_modal.open() with st.div(className="mt-3"): st.write("#### Version History") self._render_versions() + if confirm_delete_modal.is_open(): + modal.empty() + with confirm_delete_modal.container(): + self._render_confirm_delete_modal( + published_run=published_run, + modal=confirm_delete_modal, + ) + + def _render_confirm_delete_modal( + self, + *, + published_run: PublishedRun, + modal: Modal, + ): + st.write( + "Are you sure you want to delete this published run? " + f"({published_run.title})" + ) + st.caption("This will also delete all the associated versions.") + with st.div(className="d-flex"): + confirm_button = st.button( + 'Confirm', + type="secondary", + className="w-100", + ) + cancel_button = st.button( + "Cancel", + type="secondary", + className="w-100", + ) + + if confirm_button: + published_run.delete() + raise QueryParamsRedirectException(query_params={}) + + if cancel_button: + modal.close() + def _get_title_and_breadcrumbs( self, current_run: SavedRun, diff --git a/gooey_ui/components/modal.py b/gooey_ui/components/modal.py index ea03d60c3..149187a4b 100644 --- a/gooey_ui/components/modal.py +++ b/gooey_ui/components/modal.py @@ -17,6 +17,8 @@ def __init__(self, title, key, padding=20, max_width=744): self.max_width = str(max_width) + "px" self.key = key + self._container = None + def is_open(self): return st.session_state.get(f"{self.key}-opened", False) @@ -29,6 +31,10 @@ def close(self, rerun_condition=True): if rerun_condition: rerun() + def empty(self): + if self._container: + self._container.empty() + @contextmanager def container(self, **props): st.html( @@ -70,9 +76,9 @@ def container(self, **props): with st.div(className="blur-background"): with st.div(className="modal-parent"): container_class = "modal-container " + props.pop("className", "") - container = st.div(className=container_class, **props) + self._container = st.div(className=container_class, **props) - with container: + with self._container: with st.div(className="d-flex justify-content-between align-items-center"): st.markdown(f"### {self.title or ''}") @@ -84,4 +90,4 @@ def container(self, **props): ) if close_: self.close() - yield container + yield self._container From 773773f9431adfa3a3e346a8879e0611a24fa118 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 5 Dec 2023 15:42:28 +0530 Subject: [PATCH 076/138] Redo version history view --- daras_ai_v2/base.py | 182 +++++++++++++++++++++++-------- daras_ai_v2/user_date_widgets.py | 5 +- 2 files changed, 139 insertions(+), 48 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 3bf569698..bdd4f851b 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -6,6 +6,7 @@ import urllib.parse import uuid from copy import deepcopy +from itertools import pairwise from random import Random from time import sleep from types import SimpleNamespace @@ -25,7 +26,13 @@ import gooey_ui as st from app_users.models import AppUser, AppUserTransaction -from bots.models import SavedRun, PublishedRun, PublishedRunVisibility, Workflow +from bots.models import ( + SavedRun, + PublishedRun, + PublishedRunVersion, + PublishedRunVisibility, + Workflow, +) from daras_ai.image_input import truncate_text_words from daras_ai_v2 import settings from daras_ai_v2.api_examples_widget 
import api_example_generator @@ -197,7 +204,10 @@ def render(self): author = self.run_user or current_run.get_creator() if not is_root_example: - self.render_author(author) + self.render_author( + author, + show_as_link=self.is_current_user_admin(), + ) with st.div(className="d-flex align-items-center"): is_current_user_creator = ( @@ -431,16 +441,41 @@ def _render_publish_modal( visibility=PublishedRunVisibility(published_run_visibility), ) else: - published_run.add_version( - user=self.request.user, + updates = dict( saved_run=current_run, - visibility=PublishedRunVisibility(published_run_visibility), title=published_run_title.strip(), notes=published_run_notes.strip(), + visibility=PublishedRunVisibility(published_run_visibility), ) + if self._has_published_run_changed( + published_run=published_run, **updates + ): + published_run.add_version( + user=self.request.user, + **updates, + ) + else: + st.error("No changes to publish") + return force_redirect(published_run.get_app_url()) + def _has_published_run_changed( + self, + *, + published_run: PublishedRun, + saved_run: SavedRun, + title: str, + notes: str, + visibility: PublishedRunVisibility, + ): + return ( + published_run.title != title + or published_run.notes != notes + or published_run.visibility != visibility + or published_run.saved_run != saved_run + ) + def _render_run_actions_modal( self, *, @@ -475,9 +510,9 @@ def _render_run_actions_modal( return confirm_delete_modal.open() - with st.div(className="mt-3"): - st.write("#### Version History") - self._render_versions() + with st.div(className="mt-4"): + st.write("#### Version History", className="mb-4") + self._render_version_history() if confirm_delete_modal.is_open(): modal.empty() @@ -653,7 +688,7 @@ def render_selected_tab(self, selected_tab: str): self._published_tab() render_js_dynamic_dates() - def _render_versions(self): + def _render_version_history(self): example_id, run_id, uid = extract_query_params(gooey_get_query_params()) published_run = self.get_published_run_from_query_params( example_id, run_id, uid @@ -661,27 +696,66 @@ def _render_versions(self): if published_run: versions = published_run.versions.all() - for i, version in reverse_enumerate(len(versions), versions): - col1, col2, col3 = st.columns([1, 4, 3], responsive=False) - with col1: - st.write(f"{i}") - with col2: - url = self.app_url( - example_id=published_run.published_run_id, - run_id=version.saved_run.run_id, - uid=version.saved_run.uid, - ) - with st.link(to=url): - st.write(version.title) - with col3: - if isinstance(version.created_at, datetime.datetime): - timestamp = version.created_at - else: - timestamp = datetime.datetime.fromisoformat(version.created_at) - js_dynamic_date( - timestamp, date_options={"day": "numeric", "month": "short"} - ) - re_render_js_dynamic_dates() + first_version = versions[0] + for version, older_version in pairwise(versions): + first_version = older_version + self._render_version_row(version, older_version) + self._render_version_row(first_version, None) + re_render_js_dynamic_dates() + + def _render_version_row( + self, + version: PublishedRunVersion, + older_version: PublishedRunVersion | None, + ): + st.html( + """ + + """ + ) + url = self.app_url( + example_id=version.published_run.published_run_id, + run_id=version.saved_run.run_id, + uid=version.saved_run.uid, + ) + with st.link(to=url, className="text-decoration-none"): + with st.div( + className="d-flex mb-4 disable-p-margin", + style={"min-width": "min(100vw, 500px)"}, + ): + col1 = 
st.div(className="me-4") + col2 = st.div() + with col1: + with st.div(className="fs-5 mt-1"): + st.html('') + with col2: + is_first_version = not older_version + with st.div(className="fs-5 d-flex align-items-center"): + js_dynamic_date( + version.created_at, + container=self._render_version_history_date, + date_options={"month": "short", "day": "numeric"}, + ) + if is_first_version: + with st.tag("span", className="badge bg-secondary px-3 ms-2"): + st.write("FIRST VERSION") + with st.div(className="text-muted"): + if older_version and older_version.title != version.title: + st.write(f"Renamed: {version.title}") + elif not older_version: + st.write(version.title) + with st.div(className="mt-1", style={"font-size": "0.85rem"}): + self.render_author( + version.changed_by, image_size="18px", responsive=False + ) + + def _render_version_history_date(self, text, **props): + with st.tag("span", **props): + st.html(text) def render_related_workflows(self): page_clses = self.related_workflows() @@ -925,40 +999,56 @@ def render_form_v2(self): def validate_form_v2(self): pass - def render_author(self, user: AppUser): + def render_author( + self, + user: AppUser, + *, + image_size: str = "30px", + responsive: bool = True, + show_as_link: bool = False, + ): if not user or (not user.photo_url and not user.display_name): return + responsive_image_size = ( + f"calc({image_size} * 0.67)" if responsive else image_size + ) + + # new class name so that different ones don't conflict + class_name = f"author-image-{image_size}" + if responsive: + class_name += "-responsive" + html = "
" if user.photo_url: st.html( - """ + f""" """ ) html += f""" - + """ if user.display_name: - html += f"
{user.display_name}
" + html += f"{user.display_name}" html += "
" - if self.is_current_user_admin(): + if show_as_link: linkto = lambda: st.link( to=self.app_url( tab_name=MenuTabs.paths[MenuTabs.history], diff --git a/daras_ai_v2/user_date_widgets.py b/daras_ai_v2/user_date_widgets.py index 42e2ed065..a4c510b56 100644 --- a/daras_ai_v2/user_date_widgets.py +++ b/daras_ai_v2/user_date_widgets.py @@ -1,6 +1,6 @@ import datetime import json -from typing import Any +from typing import Any, Callable import gooey_ui as gui @@ -8,6 +8,7 @@ def js_dynamic_date( dt: datetime.datetime, *, + container: Callable = gui.caption, date_options: dict[str, Any] | None = None, time_options: dict[str, Any] | None = None, ): @@ -17,7 +18,7 @@ def js_dynamic_date( attrs["data-id-date-options"] = json.dumps(date_options) if time_options: attrs["data-id-time-options"] = json.dumps(time_options) - gui.caption("Loading...", **attrs) + container("Loading...", **attrs) def render_js_dynamic_dates(): From 01d406e04e03b1fa2da3821314fd3d0e02cba3d6 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 5 Dec 2023 16:30:06 +0530 Subject: [PATCH 077/138] copy_to_clipboard_button: fix bug where icons are lost after clicking --- daras_ai_v2/copy_to_clipboard_button_widget.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/daras_ai_v2/copy_to_clipboard_button_widget.py b/daras_ai_v2/copy_to_clipboard_button_widget.py index a43da7547..55222edf2 100644 --- a/daras_ai_v2/copy_to_clipboard_button_widget.py +++ b/daras_ai_v2/copy_to_clipboard_button_widget.py @@ -6,10 +6,10 @@ From df11e946fb6d265ff1e152d26e0b9844eabbb6f2 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 5 Dec 2023 16:31:14 +0530 Subject: [PATCH 078/138] Reorder buttons and set button styles --- daras_ai_v2/base.py | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index bdd4f851b..851f88bb3 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -227,32 +227,27 @@ def render(self): if is_current_user_creator and has_unpublished_changes: self._render_unpublished_changes_indicator() - with st.div(className="d-flex align-items-center right-action-icons"): + with st.div(className="d-flex align-items-start right-action-icons"): st.html( """ """ ) - self._render_social_buttons() - if is_current_user_creator: self._render_published_run_buttons( current_run=current_run, published_run=published_run, ) + self._render_social_buttons( + show_button_text=not is_current_user_creator + ) + with st.div(): if breadcrumbs or self.run_user: # only render title here if the above row was not empty @@ -285,16 +280,22 @@ def _render_title(self, title: str): def _render_unpublished_changes_indicator(self): with st.div( - className="d-none d-lg-flex h-100 align-items-center text-muted me-3" + className="d-none d-lg-flex h-100 align-items-center text-muted ms-2" ): with st.tag("span", className="d-inline-block"): st.html("Unpublished changes") - def _render_social_buttons(self): + def _render_social_buttons(self, show_button_text: bool = False): + button_text = ( + ' Copy Link' + if show_button_text + else "" + ) + copy_to_clipboard_button( - '', + f'{button_text}', value=self._get_current_app_url(), - type="tertiary", + type="secondary", className="mb-0", ) @@ -331,7 +332,7 @@ def _render_published_run_buttons( run_actions_button = ( st.button( '', - className="mb-0", + className="mb-0 ms-lg-2", 
type="tertiary", ) if is_update_mode @@ -342,12 +343,11 @@ def _render_published_run_buttons( run_actions_modal.open() save_icon = '' - # save_text = f"{save_icon} Update" if is_update_mode else f"{save_icon} Save" - save_text = save_icon + save_text = "Update" if is_update_mode else "Save" save_button = st.button( - save_text, - className="mb-0", - type="tertiary", + f'{save_icon} {save_text}', + className="mb-0 px-lg-4", + type="primary", ) publish_modal = Modal("", key="publish-modal") if save_button: From fc8eef0bcd2eee24413f7bf120b9f35f94543135 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 5 Dec 2023 17:10:00 +0530 Subject: [PATCH 079/138] Cleanup migrations to only include relevant changes --- .../0047_publishedrun_publishedrunversion.py | 265 ++++++++++++++++++ ...y_savedrun_is_approved_example_and_more.py | 54 ---- bots/migrations/0048_publishedrun.py | 155 ---------- ...049_remove_savedrun_created_by_and_more.py | 96 ------- 4 files changed, 265 insertions(+), 305 deletions(-) create mode 100644 bots/migrations/0047_publishedrun_publishedrunversion.py delete mode 100644 bots/migrations/0047_savedrun_created_by_savedrun_is_approved_example_and_more.py delete mode 100644 bots/migrations/0048_publishedrun.py delete mode 100644 bots/migrations/0049_remove_savedrun_created_by_and_more.py diff --git a/bots/migrations/0047_publishedrun_publishedrunversion.py b/bots/migrations/0047_publishedrun_publishedrunversion.py new file mode 100644 index 000000000..4fda8ec7e --- /dev/null +++ b/bots/migrations/0047_publishedrun_publishedrunversion.py @@ -0,0 +1,265 @@ +# Generated by Django 4.2.7 on 2023-12-05 11:04 + +from django.db import migrations, models +import django.db.models.deletion + +from bots.models import PublishedRunVisibility +from daras_ai_v2.crypto import get_random_doc_id + + +def set_field_attribute(instance, field_name, **attrs): + for field in instance._meta.local_fields: + if field.name == field_name: + for attr, value in attrs.items(): + setattr(field, attr, value) + + +def create_published_run_from_example( + *, + published_run_model, + published_run_version_model, + saved_run, + user, + published_run_id, +): + published_run = published_run_model( + workflow=saved_run.workflow, + published_run_id=published_run_id, + created_by=user, + last_edited_by=user, + saved_run=saved_run, + title=saved_run.page_title, + notes=saved_run.page_notes, + visibility=PublishedRunVisibility.PUBLIC, + is_approved_example=True, + ) + set_field_attribute(published_run, "created_at", auto_now_add=False) + set_field_attribute(published_run, "updated_at", auto_now=False) + published_run.created_at = saved_run.created_at + published_run.updated_at = saved_run.updated_at + published_run.save() + set_field_attribute(published_run, "created_at", auto_now_add=True) + set_field_attribute(published_run, "updated_at", auto_now=True) + + version = published_run_version_model( + published_run=published_run, + version_id=get_random_doc_id(), + saved_run=saved_run, + changed_by=user, + title=saved_run.page_title, + notes=saved_run.page_notes, + visibility=PublishedRunVisibility.PUBLIC, + ) + set_field_attribute(published_run, "created_at", auto_now_add=False) + version.created_at = saved_run.updated_at + version.save() + set_field_attribute(published_run, "created_at", auto_now_add=True) + + return published_run + + +def forwards_func(apps, schema_editor): + # if example_id is not null, create published run with + # is_approved_example to True and 
visibility to Public + saved_run_model = apps.get_model("bots", "SavedRun") + published_run_model = apps.get_model("bots", "PublishedRun") + published_run_version_model = apps.get_model("bots", "PublishedRunVersion") + db_alias = schema_editor.connection.alias + + # all examples + for saved_run in saved_run_model.objects.using(db_alias).filter( + example_id__isnull=False, + ): + create_published_run_from_example( + published_run_model=published_run_model, + published_run_version_model=published_run_version_model, + saved_run=saved_run, + user=None, # TODO: use gooey-support user instead? + published_run_id=saved_run.example_id, + ) + + # recipe root examples + for saved_run in saved_run_model.objects.using(db_alias).filter( + example_id__isnull=True, + run_id__isnull=True, + uid__isnull=True, + ): + create_published_run_from_example( + published_run_model=published_run_model, + published_run_version_model=published_run_version_model, + saved_run=saved_run, + user=None, + published_run_id="", + ) + + +def backwards_func(apps, schema_editor): + pass + + +class Migration(migrations.Migration): + dependencies = [ + ("app_users", "0010_alter_appuser_balance_alter_appuser_created_at_and_more"), + ("bots", "0046_savedrun_bots_savedr_created_cb8e09_idx_and_more"), + ] + + operations = [ + migrations.CreateModel( + name="PublishedRun", + fields=[ + ( + "id", + models.BigAutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("published_run_id", models.CharField(blank=True, max_length=128)), + ( + "workflow", + models.IntegerField( + choices=[ + (1, "Doc Search"), + (2, "Doc Summary"), + (3, "Google GPT"), + (4, "Copilot"), + (5, "Lipysnc + TTS"), + (6, "Text to Speech"), + (7, "Speech Recognition"), + (8, "Lipsync"), + (9, "Deforum Animation"), + (10, "Compare Text2Img"), + (11, "Text2Audio"), + (12, "Img2Img"), + (13, "Face Inpainting"), + (14, "Google Image Gen"), + (15, "Compare AI Upscalers"), + (16, "SEO Summary"), + (17, "Email Face Inpainting"), + (18, "Social Lookup Email"), + (19, "Object Inpainting"), + (20, "Image Segmentation"), + (21, "Compare LLM"), + (22, "Chyron Plant"), + (23, "Letter Writer"), + (24, "Smart GPT"), + (25, "AI QR Code"), + (26, "Doc Extract"), + (27, "Related QnA Maker"), + (28, "Related QnA Maker Doc"), + (29, "Embeddings"), + (30, "Bulk Runner"), + ] + ), + ), + ("title", models.TextField(blank=True, default="")), + ("notes", models.TextField(blank=True, default="")), + ( + "visibility", + models.IntegerField( + choices=[(1, "Unlisted"), (2, "Public")], default=1 + ), + ), + ("is_approved_example", models.BooleanField(default=False)), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("updated_at", models.DateTimeField(auto_now=True)), + ( + "created_by", + models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="published_runs", + to="app_users.appuser", + ), + ), + ( + "last_edited_by", + models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.SET_NULL, + to="app_users.appuser", + ), + ), + ( + "saved_run", + models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.PROTECT, + related_name="published_runs", + to="bots.savedrun", + ), + ), + ], + options={ + "ordering": ["-updated_at"], + "unique_together": {("workflow", "published_run_id")}, + }, + ), + migrations.CreateModel( + name="PublishedRunVersion", + fields=[ + ( + "id", + models.BigAutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + 
), + ("version_id", models.CharField(max_length=128, unique=True)), + ("title", models.TextField(blank=True, default="")), + ("notes", models.TextField(blank=True, default="")), + ( + "visibility", + models.IntegerField( + choices=[(1, "Unlisted"), (2, "Public")], default=1 + ), + ), + ("created_at", models.DateTimeField(auto_now_add=True)), + ( + "changed_by", + models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.SET_NULL, + to="app_users.appuser", + ), + ), + ( + "published_run", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="versions", + to="bots.publishedrun", + ), + ), + ( + "saved_run", + models.ForeignKey( + on_delete=django.db.models.deletion.PROTECT, + related_name="published_run_versions", + to="bots.savedrun", + ), + ), + ], + options={ + "ordering": ["-created_at"], + "get_latest_by": "created_at", + "indexes": [ + models.Index( + fields=["published_run", "-created_at"], + name="bots_publis_publish_9cd246_idx", + ), + models.Index( + fields=["version_id"], name="bots_publis_version_c121d4_idx" + ), + ], + }, + ), + migrations.RunPython( + forwards_func, + backwards_func, + ), + ] diff --git a/bots/migrations/0047_savedrun_created_by_savedrun_is_approved_example_and_more.py b/bots/migrations/0047_savedrun_created_by_savedrun_is_approved_example_and_more.py deleted file mode 100644 index df1f94b17..000000000 --- a/bots/migrations/0047_savedrun_created_by_savedrun_is_approved_example_and_more.py +++ /dev/null @@ -1,54 +0,0 @@ -# Generated by Django 4.2.5 on 2023-11-27 15:26 - -from django.db import migrations, models -import django.db.models.deletion - -from bots.models import PublishedRunVisibility - - -def set_defaults_for_gooey_examples(apps, schema_editor): - # bots->SavedRun - # if example_id is not null, - # set is_approved_example to True and visibility to Public - model = apps.get_model("bots", "SavedRun") - db_alias = schema_editor.connection.alias - - model.objects.using(db_alias).filter(example_id__isnull=False).update( - is_approved_example=True, - visibility=PublishedRunVisibility.PUBLIC, - ) - - -class Migration(migrations.Migration): - dependencies = [ - ("app_users", "0010_alter_appuser_balance_alter_appuser_created_at_and_more"), - ("bots", "0046_savedrun_bots_savedr_created_cb8e09_idx_and_more"), - ] - - operations = [ - migrations.AddField( - model_name="savedrun", - name="created_by", - field=models.ForeignKey( - null=True, - on_delete=django.db.models.deletion.SET_NULL, - related_name="examples_created", - to="app_users.appuser", - ), - ), - migrations.AddField( - model_name="savedrun", - name="is_approved_example", - field=models.BooleanField(default=False), - ), - migrations.AddField( - model_name="savedrun", - name="visibility", - field=models.IntegerField( - choices=[(1, "Unlisted"), (2, "Public")], default=1 - ), - ), - migrations.RunPython( - set_defaults_for_gooey_examples, - ), - ] diff --git a/bots/migrations/0048_publishedrun.py b/bots/migrations/0048_publishedrun.py deleted file mode 100644 index 21f23aa24..000000000 --- a/bots/migrations/0048_publishedrun.py +++ /dev/null @@ -1,155 +0,0 @@ -# Generated by Django 4.2.7 on 2023-11-30 21:02 - -from django.db import migrations, models -import django.db.models.deletion - -from bots.models import PublishedRunVisibility - - -def forwards_func(apps, schema_editor): - # if example_id is not null, create published run with - # is_approved_example to True and visibility to Public - savedrun_model = apps.get_model("bots", "SavedRun") - publishedrun_model = 
apps.get_model("bots", "PublishedRun") - db_alias = schema_editor.connection.alias - - for savedrun in savedrun_model.objects.using(db_alias).filter( - example_id__isnull=False, - ): - publishedrun_model.objects.create( - workflow=savedrun.workflow, - published_run_id=savedrun.example_id, - title=savedrun.page_title, - notes=savedrun.page_notes, - saved_run=savedrun, - created_by=None, # TODO: use gooey-support user - last_edited_by=None, # TODO: use gooey-support user - visibility=PublishedRunVisibility.PUBLIC, - is_approved_example=True, - ) - for savedrun in savedrun_model.objects.using(db_alias).filter( - example_id__isnull=True, - run_id__isnull=True, - uid__isnull=True, - ): - publishedrun_model.objects.create( - workflow=savedrun.workflow, - published_run_id="", - title=savedrun.page_title, - notes=savedrun.page_notes, - saved_run=savedrun, - created_by=None, # TODO: use gooey-support user - last_edited_by=None, # TODO: use gooey-support user - visibility=PublishedRunVisibility.PUBLIC, - is_approved_example=True, - ) - - -def backwards_func(apps, schema_editor): - pass - - -class Migration(migrations.Migration): - dependencies = [ - ("app_users", "0010_alter_appuser_balance_alter_appuser_created_at_and_more"), - ("bots", "0047_savedrun_created_by_savedrun_is_approved_example_and_more"), - ] - - operations = [ - migrations.CreateModel( - name="PublishedRun", - fields=[ - ( - "id", - models.BigAutoField( - auto_created=True, - primary_key=True, - serialize=False, - verbose_name="ID", - ), - ), - ( - "published_run_id", - models.CharField(blank=True, max_length=128), - ), - ( - "workflow", - models.IntegerField( - choices=[ - (1, "Doc Search"), - (2, "Doc Summary"), - (3, "Google GPT"), - (4, "Copilot"), - (5, "Lipysnc + TTS"), - (6, "Text to Speech"), - (7, "Speech Recognition"), - (8, "Lipsync"), - (9, "Deforum Animation"), - (10, "Compare Text2Img"), - (11, "Text2Audio"), - (12, "Img2Img"), - (13, "Face Inpainting"), - (14, "Google Image Gen"), - (15, "Compare AI Upscalers"), - (16, "SEO Summary"), - (17, "Email Face Inpainting"), - (18, "Social Lookup Email"), - (19, "Object Inpainting"), - (20, "Image Segmentation"), - (21, "Compare LLM"), - (22, "Chyron Plant"), - (23, "Letter Writer"), - (24, "Smart GPT"), - (25, "AI QR Code"), - (26, "Doc Extract"), - (27, "Related QnA Maker"), - (28, "Related QnA Maker Doc"), - (29, "Embeddings"), - (30, "Bulk Runner"), - ] - ), - ), - ("title", models.TextField()), - ("notes", models.TextField(blank=True, default="")), - ( - "visibility", - models.IntegerField( - choices=[(1, "Unlisted"), (2, "Public")], default=1 - ), - ), - ("is_approved_example", models.BooleanField(default=False)), - ("created_at", models.DateTimeField(auto_now_add=True)), - ("updated_at", models.DateTimeField(auto_now=True)), - ( - "created_by", - models.ForeignKey( - null=True, - on_delete=django.db.models.deletion.SET_NULL, - related_name="published_runs", - to="app_users.appuser", - ), - ), - ( - "last_edited_by", - models.ForeignKey( - null=True, - on_delete=django.db.models.deletion.SET_NULL, - to="app_users.appuser", - ), - ), - ( - "saved_run", - models.ForeignKey( - null=True, - on_delete=django.db.models.deletion.PROTECT, - related_name="published_runs", - to="bots.savedrun", - ), - ), - ], - ), - migrations.RunPython( - code=forwards_func, - reverse_code=backwards_func, - ), - ] diff --git a/bots/migrations/0049_remove_savedrun_created_by_and_more.py b/bots/migrations/0049_remove_savedrun_created_by_and_more.py deleted file mode 100644 index 
a4f4b1894..000000000 --- a/bots/migrations/0049_remove_savedrun_created_by_and_more.py +++ /dev/null @@ -1,96 +0,0 @@ -# Generated by Django 4.2.7 on 2023-12-01 13:28 - -from django.db import migrations, models -import django.db.models.deletion - - -class Migration(migrations.Migration): - dependencies = [ - ("app_users", "0010_alter_appuser_balance_alter_appuser_created_at_and_more"), - ("bots", "0048_publishedrun"), - ] - - operations = [ - migrations.RemoveField( - model_name="savedrun", - name="created_by", - ), - migrations.RemoveField( - model_name="savedrun", - name="is_approved_example", - ), - migrations.RemoveField( - model_name="savedrun", - name="visibility", - ), - migrations.AlterField( - model_name="publishedrun", - name="title", - field=models.TextField(blank=True, default=""), - ), - migrations.AlterUniqueTogether( - name="publishedrun", - unique_together={("workflow", "published_run_id")}, - ), - migrations.CreateModel( - name="PublishedRunVersion", - fields=[ - ( - "id", - models.BigAutoField( - auto_created=True, - primary_key=True, - serialize=False, - verbose_name="ID", - ), - ), - ("version_id", models.CharField(max_length=128, unique=True)), - ("title", models.TextField(blank=True, default="")), - ("notes", models.TextField(blank=True, default="")), - ( - "visibility", - models.IntegerField( - choices=[(1, "Unlisted"), (2, "Public")], default=1 - ), - ), - ("created_at", models.DateTimeField(auto_now_add=True)), - ( - "changed_by", - models.ForeignKey( - null=True, - on_delete=django.db.models.deletion.SET_NULL, - to="app_users.appuser", - ), - ), - ( - "published_run", - models.ForeignKey( - on_delete=django.db.models.deletion.CASCADE, - related_name="versions", - to="bots.publishedrun", - ), - ), - ( - "saved_run", - models.ForeignKey( - on_delete=django.db.models.deletion.PROTECT, - related_name="published_run_versions", - to="bots.savedrun", - ), - ), - ], - options={ - "ordering": ["-created_at"], - "get_latest_by": "created_at", - "indexes": [ - models.Index( - fields=["published_run", "-created_at"], - name="bots_publis_publish_9cd246_idx", - ), - models.Index( - fields=["version_id"], name="bots_publis_version_c121d4_idx" - ), - ], - }, - ), - ] From 5d523da77c6134d5d895f4d27e65fb1e0a1cf5f4 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 5 Dec 2023 17:18:36 +0530 Subject: [PATCH 080/138] Remove StateKeys.page_title and StateKeys.page_notes --- bots/models.py | 7 ------- daras_ai_v2/base.py | 12 +++++------ recipes/VideoBots.py | 47 ++++++++++++++++++++++---------------------- 3 files changed, 28 insertions(+), 38 deletions(-) diff --git a/bots/models.py b/bots/models.py index 21cd182a9..de3cafb61 100644 --- a/bots/models.py +++ b/bots/models.py @@ -217,10 +217,6 @@ def to_dict(self) -> dict: ret[StateKeys.run_time] = self.run_time.total_seconds() if self.run_status: ret[StateKeys.run_status] = self.run_status - if self.page_title: - ret[StateKeys.page_title] = self.page_title - if self.page_notes: - ret[StateKeys.page_notes] = self.page_notes if self.hidden: ret[StateKeys.hidden] = self.hidden if self.is_flagged: @@ -251,9 +247,6 @@ def copy_from_firebase_state(self, state: dict) -> "SavedRun": seconds=state.pop(StateKeys.run_time, None) or 0 ) self.run_status = state.pop(StateKeys.run_status, None) or "" - self.page_title = state.pop(StateKeys.page_title, None) or "" - self.page_notes = state.pop(StateKeys.page_notes, None) or "" - # self.hidden = state.pop(StateKeys.hidden, False) self.is_flagged 
= state.pop("is_flagged", False) self.state = state diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 851f88bb3..347bf6828 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -84,9 +84,6 @@ class StateKeys: - page_title = "__title" - page_notes = "__notes" - created_at = "created_at" updated_at = "updated_at" @@ -689,10 +686,7 @@ def render_selected_tab(self, selected_tab: str): render_js_dynamic_dates() def _render_version_history(self): - example_id, run_id, uid = extract_query_params(gooey_get_query_params()) - published_run = self.get_published_run_from_query_params( - example_id, run_id, uid - ) + published_run = self.get_current_published_run() if published_run: versions = published_run.versions.all() @@ -888,6 +882,10 @@ def get_sr_from_query_params_dict(self, query_params) -> SavedRun: example_id, run_id, uid = extract_query_params(query_params) return self.get_sr_from_query_params(example_id, run_id, uid) + def get_current_published_run(self) -> PublishedRun | None: + example_id, run_id, uid = extract_query_params(gooey_get_query_params()) + return self.get_published_run_from_query_params(example_id, run_id, uid) + @classmethod def get_sr_from_query_params( cls, example_id: str, run_id: str, uid: str diff --git a/recipes/VideoBots.py b/recipes/VideoBots.py index 6822f89a0..78a5bd528 100644 --- a/recipes/VideoBots.py +++ b/recipes/VideoBots.py @@ -17,7 +17,7 @@ run_google_translate, google_translate_language_selector, ) -from daras_ai_v2.base import BasePage, MenuTabs, StateKeys +from daras_ai_v2.base import BasePage, MenuTabs from daras_ai_v2.doc_search_settings_widgets import ( doc_search_settings, document_uploader, @@ -251,13 +251,13 @@ class RequestModel(BaseModel): input_glossary_document: str | None = Field( title="Input Glossary", description=""" -Translation Glossary for User Langauge -> LLM Language (English) +Translation Glossary for User Langauge -> LLM Language (English) """, ) output_glossary_document: str | None = Field( title="Output Glossary", description=""" -Translation Glossary for LLM Language (English) -> User Langauge +Translation Glossary for LLM Language (English) -> User Langauge """, ) @@ -310,20 +310,20 @@ def get_submit_container_props(self): def render_description(self): st.write( """ -Have you ever wanted to create a bot that you could talk to about anything? Ever wanted to create your own https://dara.network/RadBots or https://Farmer.CHAT? This is how. +Have you ever wanted to create a bot that you could talk to about anything? Ever wanted to create your own https://dara.network/RadBots or https://Farmer.CHAT? This is how. -This workflow takes a dialog LLM prompt describing your character, a collection of docs & links and optional an video clip of your bot’s face and voice settings. - -We use all these to build a bot that anyone can speak to about anything and you can host directly in your own site or app, or simply connect to your Facebook, WhatsApp or Instagram page. +This workflow takes a dialog LLM prompt describing your character, a collection of docs & links and optional an video clip of your bot’s face and voice settings. + +We use all these to build a bot that anyone can speak to about anything and you can host directly in your own site or app, or simply connect to your Facebook, WhatsApp or Instagram page. How It Works: -1. Appends the user's question to the bottom of your dialog script. +1. Appends the user's question to the bottom of your dialog script. 2. 
Sends the appended script to OpenAI’s GPT3 asking it to respond to the question in the style of your character 3. Synthesizes your character's response as audio using your voice settings (using Google Text-To-Speech or Uberduck) 4. Lip syncs the face video clip to the voice clip 5. Shows the resulting video to the user -PS. This is the workflow that we used to create RadBots - a collection of Turing-test videobots, authored by leading international writers, singers and playwrights - and really inspired us to create Gooey.AI so that every person and organization could create their own fantastic characters, in any personality of their choosing. It's also the workflow that powers https://Farmer.CHAT and was demo'd at the UN General Assembly in April 2023 as a multi-lingual WhatsApp bot for Indian, Ethiopian and Kenyan farmers. +PS. This is the workflow that we used to create RadBots - a collection of Turing-test videobots, authored by leading international writers, singers and playwrights - and really inspired us to create Gooey.AI so that every person and organization could create their own fantastic characters, in any personality of their choosing. It's also the workflow that powers https://Farmer.CHAT and was demo'd at the UN General Assembly in April 2023 as a multi-lingual WhatsApp bot for Indian, Ethiopian and Kenyan farmers. """ ) @@ -389,7 +389,7 @@ def render_settings(self): """ ###### 📖 Customize with Glossary Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). + If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). """ ) glossary_input( @@ -425,8 +425,8 @@ def render_settings(self): st.file_uploader( """ #### 👩‍🦰 Input Face - Upload a video/image that contains faces to use - *Recommended - mp4 / mov / png / jpg / gif* + Upload a video/image that contains faces to use + *Recommended - mp4 / mov / png / jpg / gif* """, key="input_face", ) @@ -891,7 +891,7 @@ def render_selected_tab(self, selected_tab): with col2: st.write( """ - + #### Part 2: [Interactive Chatbots for your Content - Part 2: Make your Chatbot - How to use Gooey.AI Workflows ](https://youtu.be/h817RolPjq4) """ @@ -900,7 +900,7 @@ def render_selected_tab(self, selected_tab): """
+
""", unsafe_allow_html=True, @@ -920,7 +920,7 @@ def messenger_bot_integration(self): st.markdown( # language=html f""" -

Connect this bot to your Website, Instagram, Whatsapp & More

+

Connect this bot to your Website, Instagram, Whatsapp & More

You can connect your FB Messenger account and Slack Workspace here directly.
If you ping us at support@gooey.ai, we'll add your other accounts too! @@ -929,26 +929,26 @@ def messenger_bot_integration(self): --> @@ -1003,9 +1003,8 @@ def messenger_bot_integration(self): placeholder=bi.name, ) if st.button("Reset to Default"): - bi.name = st.session_state.get( - StateKeys.page_title, bi.name - ) + title = self.get_current_published_run().title + bi.name = title or bi.name bi.slack_read_receipt_msg = BotIntegration._meta.get_field( "slack_read_receipt_msg" ).default From f1ced7a5599d315a11eec47ffeb20d74f24ed618 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 5 Dec 2023 18:06:41 +0530 Subject: [PATCH 081/138] Move components.py to components/__init__.py --- gooey_ui/components.py | 892 -------------------------------- gooey_ui/components/__init__.py | 132 +++-- 2 files changed, 86 insertions(+), 938 deletions(-) delete mode 100644 gooey_ui/components.py diff --git a/gooey_ui/components.py b/gooey_ui/components.py deleted file mode 100644 index 20d66ae4c..000000000 --- a/gooey_ui/components.py +++ /dev/null @@ -1,892 +0,0 @@ -import base64 -import math -import textwrap -import typing -from datetime import datetime, timezone - -import numpy as np - -from furl import furl - -from daras_ai.image_input import resize_img_scale -from gooey_ui import state -from gooey_ui.pubsub import md5_values - -T = typing.TypeVar("T") -LabelVisibility = typing.Literal["visible", "collapsed"] - - -def _default_format(value: typing.Any) -> str: - if value is None: - return "---" - return str(value) - - -def dummy(*args, **kwargs): - return state.NestingCtx() - - -spinner = dummy -set_page_config = dummy -form = dummy -plotly_chart = dummy -dataframe = dummy - - -def countdown_timer( - end_time: datetime, - delay_text: str, -) -> state.NestingCtx: - return _node( - "countdown-timer", - endTime=end_time.astimezone(timezone.utc).isoformat(), - delayText=delay_text, - ) - - -def nav_tabs(): - return _node("nav-tabs") - - -def nav_item(href: str, *, active: bool): - return _node("nav-item", to=href, active="true" if active else None) - - -def nav_tab_content(): - return _node("nav-tab-content") - - -def div(**props) -> state.NestingCtx: - return tag("div", **props) - - -def link(*, to: str, **props) -> state.NestingCtx: - return _node("Link", to=to, **props) - - -def tag(tag_name: str, **props) -> state.NestingCtx: - props["__reactjsxelement"] = tag_name - return _node("tag", **props) - - -def html(body: str, **props): - props["className"] = props.get("className", "") + " gui-html-container" - return _node("html", body=body, **props) - - -def write(*objs: typing.Any, unsafe_allow_html=False, **props): - for obj in objs: - markdown( - obj if isinstance(obj, str) else repr(obj), - unsafe_allow_html=unsafe_allow_html, - **props, - ) - - -def markdown(body: str, *, unsafe_allow_html=False, **props): - if body is None: - return _node("markdown", body="", **props) - props["className"] = ( - props.get("className", "") + " gui-html-container gui-md-container" - ) - return _node("markdown", body=dedent(body).strip(), **props) - - -def _node(name: str, **props): - node = state.RenderTreeNode(name=name, props=props) - node.mount() - return state.NestingCtx(node) - - -def text(body: str, *, unsafe_allow_html=False, **props): - state.RenderTreeNode( - name="pre", - props=dict(body=dedent(body), **props), - ).mount() - - -def error(body: str, icon: str = "🔥", *, unsafe_allow_html=False): - if not isinstance(body, str): - body = repr(body) - with div( 
- style=dict( - backgroundColor="rgba(255, 108, 108, 0.2)", - padding="1rem", - paddingBottom="0", - marginBottom="0.5rem", - borderRadius="0.25rem", - display="flex", - gap="0.5rem", - ) - ): - markdown(icon) - with div(): - markdown(dedent(body), unsafe_allow_html=unsafe_allow_html) - - -def success(body: str, icon: str = "✅", *, unsafe_allow_html=False): - if not isinstance(body, str): - body = repr(body) - with div( - style=dict( - backgroundColor="rgba(108, 255, 108, 0.2)", - padding="1rem", - paddingBottom="0", - marginBottom="0.5rem", - borderRadius="0.25rem", - display="flex", - gap="0.5rem", - ) - ): - markdown(icon) - markdown(dedent(body), unsafe_allow_html=unsafe_allow_html) - - -def caption(body: str, **props): - style = props.setdefault("style", {"fontSize": "0.9rem"}) - markdown(body, className="text-muted", **props) - - -def option_menu(*args, options, **kwargs): - return tabs(options) - - -def tabs(labels: list[str]) -> list[state.NestingCtx]: - parent = state.RenderTreeNode( - name="tabs", - children=[ - state.RenderTreeNode( - name="tab", - props=dict(label=dedent(label)), - ) - for label in labels - ], - ).mount() - return [state.NestingCtx(tab) for tab in parent.children] - - -def controllable_tabs( - labels: list[str], key: str -) -> tuple[list[state.NestingCtx], int]: - index = state.session_state.get(key, 0) - for i, label in enumerate(labels): - if button( - label, - key=f"tab-{i}", - type="primary", - className="replicate-nav", - style={ - "background": "black" if i == index else "white", - "color": "white" if i == index else "black", - }, - ): - state.session_state[key] = index = i - state.experimental_rerun() - ctxs = [] - for i, label in enumerate(labels): - if i == index: - ctxs += [div(className="tab-content")] - else: - ctxs += [div(className="tab-content", style={"display": "none"})] - return ctxs, index - - -def columns( - spec, - *, - gap: str = None, - responsive: bool = True, - **props, -) -> tuple[state.NestingCtx, ...]: - if isinstance(spec, int): - spec = [1] * spec - total_weight = sum(spec) - props.setdefault("className", "row") - with div(**props): - return tuple( - div(className=f"col-lg-{p} {'col-12' if responsive else f'col-{p}'}") - for w in spec - if (p := f"{round(w / total_weight * 12)}") - ) - - -def image( - src: str | np.ndarray, - caption: str = None, - alt: str = None, - href: str = None, - **props, -): - if isinstance(src, np.ndarray): - from daras_ai.image_input import cv2_img_to_bytes - - if not src.shape: - return - # ensure image is not too large - data = resize_img_scale(cv2_img_to_bytes(src), (128, 128)) - # convert to base64 - b64 = base64.b64encode(data).decode("utf-8") - src = "data:image/png;base64," + b64 - if not src: - return - state.RenderTreeNode( - name="img", - props=dict( - src=src, - caption=dedent(caption), - alt=alt or caption, - href=href, - **props, - ), - ).mount() - - -def video(src: str, caption: str = None, autoplay: bool = False): - autoplay_props = {} - if autoplay: - autoplay_props = { - "preload": "auto", - "controls": True, - "autoPlay": True, - "loop": True, - "muted": True, - "playsInline": True, - } - - if not src: - return - if isinstance(src, str): - # https://muffinman.io/blog/hack-for-ios-safari-to-display-html-video-thumbnail/ - f = furl(src) - f.fragment.args["t"] = "0.001" - src = f.url - state.RenderTreeNode( - name="video", - props=dict(src=src, caption=dedent(caption), **autoplay_props), - ).mount() - - -def audio(src: str, caption: str = None): - if not src: - return - 
state.RenderTreeNode( - name="audio", - props=dict(src=src, caption=dedent(caption)), - ).mount() - - -def text_area( - label: str, - value: str = "", - height: int = 100, - key: str = None, - help: str = None, - placeholder: str = None, - disabled: bool = False, - label_visibility: LabelVisibility = "visible", - **props, -) -> str: - style = props.setdefault("style", {}) - if key: - assert not value, "only one of value or key can be provided" - else: - key = md5_values( - "textarea", label, height, help, value, placeholder, label_visibility - ) - value = str(state.session_state.setdefault(key, value)) - if label_visibility != "visible": - label = None - if disabled: - max_height = f"{height}px" - rows = nrows_for_text(value, height, min_rows=1) - else: - max_height = "90vh" - rows = nrows_for_text(value, height) - style.setdefault("maxHeight", max_height) - props.setdefault("rows", rows) - state.RenderTreeNode( - name="textarea", - props=dict( - name=key, - label=dedent(label), - defaultValue=value, - help=help, - placeholder=placeholder, - disabled=disabled, - **props, - ), - ).mount() - return value or "" - - -def nrows_for_text( - text: str, - max_height_px: int, - min_rows: int = 2, - row_height_px: int = 30, - row_width_px: int = 80, -) -> int: - max_rows = max_height_px // row_height_px - nrows = math.ceil( - sum(len(line) / row_width_px for line in (text or "").strip().splitlines()) - ) - nrows = min(max(nrows, min_rows), max_rows) - return nrows - - -def multiselect( - label: str, - options: typing.Sequence[T], - format_func: typing.Callable[[T], typing.Any] = _default_format, - key: str = None, - help: str = None, - allow_none: bool = False, - *, - disabled: bool = False, -) -> list[T]: - if not options: - return [] - options = list(options) - if not key: - key = md5_values("multiselect", label, options, help) - value = state.session_state.get(key) or [] - if not isinstance(value, list): - value = [value] - value = [o if o in options else options[0] for o in value] - if not allow_none and not value: - value = [options[0]] - state.session_state[key] = value - state.RenderTreeNode( - name="select", - props=dict( - name=key, - label=dedent(label), - help=help, - isDisabled=disabled, - isMulti=True, - defaultValue=value, - allow_none=allow_none, - options=[ - {"value": option, "label": str(format_func(option))} - for option in options - ], - ), - ).mount() - return value - - -def selectbox( - label: str, - options: typing.Sequence[T], - format_func: typing.Callable[[T], typing.Any] = _default_format, - key: str = None, - help: str = None, - *, - disabled: bool = False, - label_visibility: LabelVisibility = "visible", - default_value: T = None, -) -> T | None: - if not options: - return None - if label_visibility != "visible": - label = None - options = list(options) - if not key: - key = md5_values("select", label, options, help, label_visibility) - value = state.session_state.get(key) - if key not in state.session_state or value not in options: - value = default_value or options[0] - state.session_state.setdefault(key, value) - state.RenderTreeNode( - name="select", - props=dict( - name=key, - label=dedent(label), - help=help, - isDisabled=disabled, - defaultValue=value, - options=[ - {"value": option, "label": str(format_func(option))} - for option in options - ], - ), - ).mount() - return value - - -def button( - label: str, - key: str = None, - help: str = None, - *, - type: typing.Literal["primary", "secondary", "tertiary", "link"] = "secondary", - disabled: bool = False, - 
**props, -) -> bool: - """ - Example: - st.button("Primary", key="test0", type="primary") - st.button("Secondary", key="test1") - st.button("Tertiary", key="test3", type="tertiary") - st.button("Link Button", key="test3", type="link") - """ - if not key: - key = md5_values("button", label, help, type, props) - className = f"btn-{type} " + props.pop("className", "") - state.RenderTreeNode( - name="gui-button", - props=dict( - type="submit", - value="yes", - name=key, - label=dedent(label), - help=help, - disabled=disabled, - className=className, - **props, - ), - ).mount() - return bool(state.session_state.pop(key, False)) - - -form_submit_button = button - - -def expander(label: str, *, expanded: bool = False, **props): - node = state.RenderTreeNode( - name="expander", - props=dict( - label=dedent(label), - open=expanded, - **props, - ), - ) - node.mount() - return state.NestingCtx(node) - - -def file_uploader( - label: str, - accept: list[str] = None, - accept_multiple_files=False, - key: str = None, - upload_key: str = None, - help: str = None, - *, - disabled: bool = False, - label_visibility: LabelVisibility = "visible", - upload_meta: dict = None, -): - if label_visibility != "visible": - label = None - key = upload_key or key - if not key: - key = md5_values( - "file_uploader", - label, - accept, - accept_multiple_files, - help, - label_visibility, - ) - value = state.session_state.get(key) - if not value: - if accept_multiple_files: - value = [] - else: - value = "" - state.session_state[key] = value - state.RenderTreeNode( - name="input", - props=dict( - type="file", - name=key, - label=dedent(label), - help=help, - disabled=disabled, - accept=accept, - multiple=accept_multiple_files, - defaultValue=value, - uploadMeta=upload_meta, - ), - ).mount() - return value or "" - - -def json(value: typing.Any, expanded: bool = False, depth: int = 1): - state.RenderTreeNode( - name="json", - props=dict( - value=value, - expanded=expanded, - defaultInspectDepth=3 if expanded else depth, - ), - ).mount() - - -def data_table(file_url: str): - return _node("data-table", fileUrl=file_url) - - -def table(df: "pd.DataFrame"): - state.RenderTreeNode( - name="table", - children=[ - state.RenderTreeNode( - name="thead", - children=[ - state.RenderTreeNode( - name="tr", - children=[ - state.RenderTreeNode( - name="th", - children=[ - state.RenderTreeNode( - name="markdown", - props=dict(body=dedent(col)), - ), - ], - ) - for col in df.columns - ], - ), - ], - ), - state.RenderTreeNode( - name="tbody", - children=[ - state.RenderTreeNode( - name="tr", - children=[ - state.RenderTreeNode( - name="td", - children=[ - state.RenderTreeNode( - name="markdown", - props=dict(body=dedent(str(value))), - ), - ], - ) - for value in row - ], - ) - for row in df.itertuples(index=False) - ], - ), - ], - ).mount() - - -def horizontal_radio( - label: str, - options: typing.Sequence[T], - format_func: typing.Callable[[T], typing.Any] = _default_format, - key: str = None, - help: str = None, - *, - disabled: bool = False, - label_visibility: LabelVisibility = "visible", -) -> T | None: - if not options: - return None - options = list(options) - if not key: - key = md5_values("horizontal_radio", label, options, help, label_visibility) - value = state.session_state.get(key) - if key not in state.session_state or value not in options: - value = options[0] - state.session_state.setdefault(key, value) - if label_visibility != "visible": - label = None - markdown(label) - for option in options: - if button( - 
format_func(option), - key=f"tab-{key}-{option}", - type="primary", - className="replicate-nav " + ("active" if value == option else ""), - disabled=disabled, - ): - state.session_state[key] = value = option - state.experimental_rerun() - return value - - -def horizontal_radio( - label: str, - options: typing.Sequence[T], - format_func: typing.Callable[[T], typing.Any] = _default_format, - key: str = None, - help: str = None, - *, - disabled: bool = False, - checked_by_default: bool = True, - label_visibility: LabelVisibility = "visible", -) -> T | None: - if not options: - return None - options = list(options) - if not key: - key = md5_values("horizontal_radio", label, options, help, label_visibility) - value = state.session_state.get(key) - if (key not in state.session_state or value not in options) and checked_by_default: - value = options[0] - state.session_state.setdefault(key, value) - if label_visibility != "visible": - label = None - markdown(label) - for option in options: - if button( - format_func(option), - key=f"tab-{key}-{option}", - type="primary", - className="replicate-nav " + ("active" if value == option else ""), - disabled=disabled, - ): - state.session_state[key] = value = option - state.experimental_rerun() - return value - - -def radio( - label: str, - options: typing.Sequence[T], - format_func: typing.Callable[[T], typing.Any] = _default_format, - key: str = None, - help: str = None, - *, - disabled: bool = False, - checked_by_default: bool = True, - label_visibility: LabelVisibility = "visible", -) -> T | None: - if not options: - return None - options = list(options) - if not key: - key = md5_values("radio", label, options, help, label_visibility) - value = state.session_state.get(key) - if (key not in state.session_state or value not in options) and checked_by_default: - value = options[0] - state.session_state.setdefault(key, value) - if label_visibility != "visible": - label = None - markdown(label) - for option in options: - state.RenderTreeNode( - name="input", - props=dict( - type="radio", - name=key, - label=dedent(str(format_func(option))), - value=option, - defaultChecked=bool(value == option), - help=help, - disabled=disabled, - ), - ).mount() - return value - - -def text_input( - label: str, - value: str = "", - max_chars: str = None, - key: str = None, - help: str = None, - *, - placeholder: str = None, - disabled: bool = False, - label_visibility: LabelVisibility = "visible", - **props, -) -> str: - value = _input_widget( - input_type="text", - label=label, - value=value, - key=key, - help=help, - disabled=disabled, - label_visibility=label_visibility, - maxLength=max_chars, - placeholder=placeholder, - **props, - ) - return value or "" - - -def password_input( - label: str, - value: str = "", - max_chars: str = None, - key: str = None, - help: str = None, - *, - placeholder: str = None, - disabled: bool = False, - label_visibility: LabelVisibility = "visible", - **props, -) -> str: - value = _input_widget( - input_type="password", - label=label, - value=value, - key=key, - help=help, - disabled=disabled, - label_visibility=label_visibility, - maxLength=max_chars, - placeholder=placeholder, - **props, - ) - return value or "" - - -def slider( - label: str, - min_value: float = None, - max_value: float = None, - value: float = None, - step: float = None, - key: str = None, - help: str = None, - *, - disabled: bool = False, -) -> float: - value = _input_widget( - input_type="range", - label=label, - value=value, - key=key, - help=help, - 
disabled=disabled, - min=min_value, - max=max_value, - step=_step_value(min_value, max_value, step), - ) - return value or 0 - - -def number_input( - label: str, - min_value: float = None, - max_value: float = None, - value: float = None, - step: float = None, - key: str = None, - help: str = None, - *, - disabled: bool = False, -) -> float: - value = _input_widget( - input_type="number", - inputMode="decimal", - label=label, - value=value, - key=key, - help=help, - disabled=disabled, - min=min_value, - max=max_value, - step=_step_value(min_value, max_value, step), - ) - return value or 0 - - -def _step_value( - min_value: float | None, max_value: float | None, step: float | None -) -> float: - if step: - return step - elif isinstance(min_value, float) or isinstance(max_value, float): - return 0.1 - else: - return 1 - - -def checkbox( - label: str, - value: bool = False, - key: str = None, - help: str = None, - *, - disabled: bool = False, - label_visibility: LabelVisibility = "visible", -) -> bool: - value = _input_widget( - input_type="checkbox", - label=label, - value=value, - key=key, - help=help, - disabled=disabled, - label_visibility=label_visibility, - default_value_attr="defaultChecked", - ) - return bool(value) - - -def _input_widget( - *, - input_type: str, - label: str, - value: typing.Any = None, - key: str = None, - help: str = None, - disabled: bool = False, - label_visibility: LabelVisibility = "visible", - default_value_attr: str = "defaultValue", - **kwargs, -) -> typing.Any: - # if key: - # assert not value, "only one of value or key can be provided" - # else: - if not key: - key = md5_values("input", input_type, label, help, label_visibility) - value = state.session_state.setdefault(key, value) - if label_visibility != "visible": - label = None - state.RenderTreeNode( - name="input", - props={ - "type": input_type, - "name": key, - "label": dedent(label), - default_value_attr: value, - "help": help, - "disabled": disabled, - **kwargs, - }, - ).mount() - return value - - -def breadcrumbs(divider: str = "/", **props) -> state.NestingCtx: - style = props.pop("style", {}) | {"--bs-breadcrumb-divider": f"'{divider}'"} - with tag("nav", style=style, **props): - return tag("ol", className="breadcrumb mb-0") - - -def breadcrumb_item(inner_html: str, link_to: str | None = None, **props): - className = "breadcrumb-item " + props.pop("className", "") - with tag("li", className=className, **props): - if link_to: - with tag("a", href=link_to): - html(inner_html) - else: - html(inner_html) - - -def dedent(text: str | None) -> str | None: - if not text: - return text - return textwrap.dedent(text) - - -def js(src: str, **kwargs): - state.RenderTreeNode( - name="script", - props=dict( - src=src, - args=kwargs, - ), - ).mount() diff --git a/gooey_ui/components/__init__.py b/gooey_ui/components/__init__.py index ce801dbce..20d66ae4c 100644 --- a/gooey_ui/components/__init__.py +++ b/gooey_ui/components/__init__.py @@ -2,7 +2,7 @@ import math import textwrap import typing -from dataclasses import asdict, dataclass +from datetime import datetime, timezone import numpy as np @@ -33,6 +33,17 @@ def dummy(*args, **kwargs): dataframe = dummy +def countdown_timer( + end_time: datetime, + delay_text: str, +) -> state.NestingCtx: + return _node( + "countdown-timer", + endTime=end_time.astimezone(timezone.utc).isoformat(), + delayText=delay_text, + ) + + def nav_tabs(): return _node("nav-tabs") @@ -203,6 +214,7 @@ def image( src: str | np.ndarray, caption: str = None, alt: str = None, + href: str 
= None, **props, ): if isinstance(src, np.ndarray): @@ -223,6 +235,7 @@ def image( src=src, caption=dedent(caption), alt=alt or caption, + href=href, **props, ), ).mount() @@ -401,50 +414,6 @@ def selectbox( return value -@dataclass -class Option: - label: str - value: typing.Any = None - isDisabled: bool = False - - def __post_init__(self): - if self.value is None: - self.value = self.label - - -def rich_selectbox( - label: str, - options: typing.Sequence[Option], - key: str | None = None, - help: str | None = None, - *, - disabled: bool = False, - label_visibility: LabelVisibility = "visible", - default_value: T | None = None, -) -> T | None: - if label_visibility != "visible": - label = None - options = list(options) - if not key: - key = md5_values("rich_select", label, options, help, label_visibility) - value = state.session_state.get(key) - if key not in state.session_state or value not in options: - value = default_value or options[0] - state.session_state.setdefault(key, value) - state.RenderTreeNode( - name="select", - props=dict( - name=key, - label=dedent(label), - help=help, - isDisabled=disabled, - defaultValue=value, - options=[asdict(option) for option in options], - ), - ).mount() - return value - - def button( label: str, key: str = None, @@ -608,6 +577,77 @@ def table(df: "pd.DataFrame"): ).mount() +def horizontal_radio( + label: str, + options: typing.Sequence[T], + format_func: typing.Callable[[T], typing.Any] = _default_format, + key: str = None, + help: str = None, + *, + disabled: bool = False, + label_visibility: LabelVisibility = "visible", +) -> T | None: + if not options: + return None + options = list(options) + if not key: + key = md5_values("horizontal_radio", label, options, help, label_visibility) + value = state.session_state.get(key) + if key not in state.session_state or value not in options: + value = options[0] + state.session_state.setdefault(key, value) + if label_visibility != "visible": + label = None + markdown(label) + for option in options: + if button( + format_func(option), + key=f"tab-{key}-{option}", + type="primary", + className="replicate-nav " + ("active" if value == option else ""), + disabled=disabled, + ): + state.session_state[key] = value = option + state.experimental_rerun() + return value + + +def horizontal_radio( + label: str, + options: typing.Sequence[T], + format_func: typing.Callable[[T], typing.Any] = _default_format, + key: str = None, + help: str = None, + *, + disabled: bool = False, + checked_by_default: bool = True, + label_visibility: LabelVisibility = "visible", +) -> T | None: + if not options: + return None + options = list(options) + if not key: + key = md5_values("horizontal_radio", label, options, help, label_visibility) + value = state.session_state.get(key) + if (key not in state.session_state or value not in options) and checked_by_default: + value = options[0] + state.session_state.setdefault(key, value) + if label_visibility != "visible": + label = None + markdown(label) + for option in options: + if button( + format_func(option), + key=f"tab-{key}-{option}", + type="primary", + className="replicate-nav " + ("active" if value == option else ""), + disabled=disabled, + ): + state.session_state[key] = value = option + state.experimental_rerun() + return value + + def radio( label: str, options: typing.Sequence[T], @@ -827,7 +867,7 @@ def breadcrumbs(divider: str = "/", **props) -> state.NestingCtx: def breadcrumb_item(inner_html: str, link_to: str | None = None, **props): - className = "breadcrumb-item lead " + 
props.pop("className", "") + className = "breadcrumb-item " + props.pop("className", "") with tag("li", className=className, **props): if link_to: with tag("a", href=link_to): From f95a5ae485ae4992c8ef87ed413a8ac86f3842bc Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 5 Dec 2023 18:13:35 +0530 Subject: [PATCH 082/138] Add page_image to StateKeys --- daras_ai_v2/base.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index b0a3ffa5c..9c39ab568 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -84,6 +84,8 @@ class StateKeys: + page_image = "__image" + created_at = "created_at" updated_at = "updated_at" From 627346a090ac6a0c68997bbc9cb9bb3765291414 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 5 Dec 2023 18:29:08 +0530 Subject: [PATCH 083/138] Fix button spacing --- daras_ai_v2/base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 9c39ab568..eb9007bb4 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -295,7 +295,7 @@ def _render_social_buttons(self, show_button_text: bool = False): f'{button_text}', value=self._get_current_app_url(), type="secondary", - className="mb-0", + className="mb-0 ms-lg-2", ) def _render_published_run_buttons( @@ -345,7 +345,7 @@ def _render_published_run_buttons( save_text = "Update" if is_update_mode else "Save" save_button = st.button( f'{save_icon} {save_text}', - className="mb-0 px-lg-4", + className="mb-0 ms-lg-2 px-lg-4", type="primary", ) publish_modal = Modal("", key="publish-modal") From 415fda946ac89080d5c8975666894dc8fa27045e Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 5 Dec 2023 18:29:26 +0530 Subject: [PATCH 084/138] Add default value for published_run_notes to carry over notes between versions --- daras_ai_v2/base.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index eb9007bb4..c641032f6 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -417,6 +417,10 @@ def _render_publish_modal( key="published_run_title", value=default_title, ) + st.session_state.setdefault( + "published_run_notes", + published_run and published_run.notes or "", + ) published_run_notes = st.text_area( "Notes", key="published_run_notes", From 9a07c95093bdf10a9e1df46e4f8946ec5e2e9c93 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 5 Dec 2023 19:10:46 +0530 Subject: [PATCH 085/138] Reorder migrations after merge with conflicting migrations from master --- ... 
0049_publishedrun_publishedrunversion.py} | 101 +----------------- 1 file changed, 2 insertions(+), 99 deletions(-) rename bots/migrations/{0047_publishedrun_publishedrunversion.py => 0049_publishedrun_publishedrunversion.py} (65%) diff --git a/bots/migrations/0047_publishedrun_publishedrunversion.py b/bots/migrations/0049_publishedrun_publishedrunversion.py similarity index 65% rename from bots/migrations/0047_publishedrun_publishedrunversion.py rename to bots/migrations/0049_publishedrun_publishedrunversion.py index 4fda8ec7e..73b9484bc 100644 --- a/bots/migrations/0047_publishedrun_publishedrunversion.py +++ b/bots/migrations/0049_publishedrun_publishedrunversion.py @@ -1,106 +1,13 @@ -# Generated by Django 4.2.7 on 2023-12-05 11:04 +# Generated by Django 4.2.7 on 2023-12-05 13:39 from django.db import migrations, models import django.db.models.deletion -from bots.models import PublishedRunVisibility -from daras_ai_v2.crypto import get_random_doc_id - - -def set_field_attribute(instance, field_name, **attrs): - for field in instance._meta.local_fields: - if field.name == field_name: - for attr, value in attrs.items(): - setattr(field, attr, value) - - -def create_published_run_from_example( - *, - published_run_model, - published_run_version_model, - saved_run, - user, - published_run_id, -): - published_run = published_run_model( - workflow=saved_run.workflow, - published_run_id=published_run_id, - created_by=user, - last_edited_by=user, - saved_run=saved_run, - title=saved_run.page_title, - notes=saved_run.page_notes, - visibility=PublishedRunVisibility.PUBLIC, - is_approved_example=True, - ) - set_field_attribute(published_run, "created_at", auto_now_add=False) - set_field_attribute(published_run, "updated_at", auto_now=False) - published_run.created_at = saved_run.created_at - published_run.updated_at = saved_run.updated_at - published_run.save() - set_field_attribute(published_run, "created_at", auto_now_add=True) - set_field_attribute(published_run, "updated_at", auto_now=True) - - version = published_run_version_model( - published_run=published_run, - version_id=get_random_doc_id(), - saved_run=saved_run, - changed_by=user, - title=saved_run.page_title, - notes=saved_run.page_notes, - visibility=PublishedRunVisibility.PUBLIC, - ) - set_field_attribute(published_run, "created_at", auto_now_add=False) - version.created_at = saved_run.updated_at - version.save() - set_field_attribute(published_run, "created_at", auto_now_add=True) - - return published_run - - -def forwards_func(apps, schema_editor): - # if example_id is not null, create published run with - # is_approved_example to True and visibility to Public - saved_run_model = apps.get_model("bots", "SavedRun") - published_run_model = apps.get_model("bots", "PublishedRun") - published_run_version_model = apps.get_model("bots", "PublishedRunVersion") - db_alias = schema_editor.connection.alias - - # all examples - for saved_run in saved_run_model.objects.using(db_alias).filter( - example_id__isnull=False, - ): - create_published_run_from_example( - published_run_model=published_run_model, - published_run_version_model=published_run_version_model, - saved_run=saved_run, - user=None, # TODO: use gooey-support user instead? 
- published_run_id=saved_run.example_id, - ) - - # recipe root examples - for saved_run in saved_run_model.objects.using(db_alias).filter( - example_id__isnull=True, - run_id__isnull=True, - uid__isnull=True, - ): - create_published_run_from_example( - published_run_model=published_run_model, - published_run_version_model=published_run_version_model, - saved_run=saved_run, - user=None, - published_run_id="", - ) - - -def backwards_func(apps, schema_editor): - pass - class Migration(migrations.Migration): dependencies = [ ("app_users", "0010_alter_appuser_balance_alter_appuser_created_at_and_more"), - ("bots", "0046_savedrun_bots_savedr_created_cb8e09_idx_and_more"), + ("bots", "0048_alter_messageattachment_url"), ] operations = [ @@ -258,8 +165,4 @@ class Migration(migrations.Migration): ], }, ), - migrations.RunPython( - forwards_func, - backwards_func, - ), ] From 2101c6450fe2408b6516da6c1265dc146d0e938b Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 5 Dec 2023 19:11:39 +0530 Subject: [PATCH 086/138] Add published run to bot integrations --- bots/admin.py | 37 ++++++++++++++++++- .../0050_botintegration_published_run.py | 26 +++++++++++++ bots/models.py | 26 ++++++++++++- daras_ai_v2/bots.py | 10 ++++- 4 files changed, 96 insertions(+), 3 deletions(-) create mode 100644 bots/migrations/0050_botintegration_published_run.py diff --git a/bots/admin.py b/bots/admin.py index fda4a9321..be085a044 100644 --- a/bots/admin.py +++ b/bots/admin.py @@ -16,6 +16,7 @@ FeedbackComment, CHATML_ROLE_ASSISSTANT, SavedRun, + PublishedRun, Message, Platform, Feedback, @@ -94,13 +95,14 @@ class BotIntegrationAdmin(admin.ModelAdmin): "updated_at", "billing_account_uid", "saved_run", + "published_run", "analysis_run", ] list_filter = ["platform"] form = BotIntegrationAdminForm - autocomplete_fields = ["saved_run", "analysis_run"] + autocomplete_fields = ["saved_run", "published_run", "analysis_run"] readonly_fields = [ "fb_page_access_token", @@ -120,6 +122,7 @@ class BotIntegrationAdmin(admin.ModelAdmin): "fields": [ "name", "saved_run", + "published_run", "billing_account_uid", "user_language", ], @@ -206,6 +209,38 @@ def view_analysis_results(self, bi: BotIntegration): return html +@admin.register(PublishedRun) +class PublishedRunAdmin(admin.ModelAdmin): + list_display = [ + "__str__", + "published_run_id", + "view_user", + "view_saved_run", + "created_at", + "updated_at", + ] + list_filter = ["workflow"] + search_fields = ["workflow", "published_run_id"] + + readonly_fields = [ + "open_in_gooey", + "created_at", + "updated_at", + ] + + def view_user(self, published_run: PublishedRun): + if published_run.created_by is None: + return None + return change_obj_url(published_run.created_by) + + view_user.short_description = "View User" + + def view_saved_run(self, published_run: PublishedRun): + return change_obj_url(published_run.saved_run) + + view_saved_run.short_description = "View Saved Run" + + @admin.register(SavedRun) class SavedRunAdmin(admin.ModelAdmin): list_display = [ diff --git a/bots/migrations/0050_botintegration_published_run.py b/bots/migrations/0050_botintegration_published_run.py new file mode 100644 index 000000000..2b8e2035d --- /dev/null +++ b/bots/migrations/0050_botintegration_published_run.py @@ -0,0 +1,26 @@ +# Generated by Django 4.2.7 on 2023-12-05 13:39 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + dependencies = [ + ("bots", 
"0049_publishedrun_publishedrunversion"), + ] + + operations = [ + migrations.AddField( + model_name="botintegration", + name="published_run", + field=models.ForeignKey( + blank=True, + default=None, + help_text="The saved run that the bot is based on", + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="botintegrations", + to="bots.publishedrun", + ), + ), + ] diff --git a/bots/models.py b/bots/models.py index 2cb80287b..2453fbff1 100644 --- a/bots/models.py +++ b/bots/models.py @@ -299,7 +299,7 @@ class BotIntegrationQuerySet(models.QuerySet): @transaction.atomic() def reset_fb_pages_for_user( self, uid: str, fb_pages: list[dict] - ) -> list["BotIntegration"]: + ) -> list[BotIntegration]: saved = [] for fb_page in fb_pages: fb_page_id = fb_page["id"] @@ -354,6 +354,15 @@ class BotIntegration(models.Model): blank=True, help_text="The saved run that the bot is based on", ) + published_run = models.ForeignKey( + "bots.PublishedRun", + on_delete=models.SET_NULL, + related_name="botintegrations", + null=True, + default=None, + blank=True, + help_text="The saved run that the bot is based on", + ) billing_account_uid = models.TextField( help_text="The gooey account uid where the credits will be deducted from", db_index=True, @@ -494,6 +503,14 @@ def __str__(self): else: return self.name or platform_name + def get_active_saved_run(self) -> SavedRun | None: + if self.published_run: + return self.published_run.saved_run + elif self.saved_run: + return self.saved_run + else: + return None + def get_display_name(self): return ( (self.wa_phone_number and self.wa_phone_number.as_international) @@ -1001,6 +1018,13 @@ class Meta: ["workflow", "published_run_id"], ] + def __str__(self): + return self.get_app_url() + + @admin.display(description="Open in Gooey") + def open_in_gooey(self): + return open_in_new_tab(self.get_app_url(), label=self.get_app_url()) + @classmethod def create_published_run( cls, diff --git a/daras_ai_v2/bots.py b/daras_ai_v2/bots.py index 3d19d99b3..d5c5c8eff 100644 --- a/daras_ai_v2/bots.py +++ b/daras_ai_v2/bots.py @@ -80,7 +80,15 @@ def nice_filename(self, mime_type: str) -> str: def _unpack_bot_integration(self): bi = self.convo.bot_integration - if bi.saved_run: + if bi.published_run: + self.page_cls = Workflow(bi.published_run.workflow).page_cls + self.query_params = self.page_cls.clean_query_params( + example_id=bi.published_run.example_id, + ) + saved_run = bi.published_run.saved_run + self.input_glossary = saved_run.state.get("input_glossary_document") + self.output_glossary = saved_run.state.get("output_glossary_document") + elif bi.saved_run: self.page_cls = Workflow(bi.saved_run.workflow).page_cls self.query_params = self.page_cls.clean_query_params( example_id=bi.saved_run.example_id, From 7486caff5d5795ec4159f3479dbc6bb73faf9eff Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 5 Dec 2023 19:31:35 +0530 Subject: [PATCH 087/138] Fix migrations for published runs --- .../0049_publishedrun_publishedrunversion.py | 97 +++++++++++++++++++ 1 file changed, 97 insertions(+) diff --git a/bots/migrations/0049_publishedrun_publishedrunversion.py b/bots/migrations/0049_publishedrun_publishedrunversion.py index 73b9484bc..0eec76b80 100644 --- a/bots/migrations/0049_publishedrun_publishedrunversion.py +++ b/bots/migrations/0049_publishedrun_publishedrunversion.py @@ -3,6 +3,99 @@ from django.db import migrations, models import django.db.models.deletion +from bots.models import 
PublishedRunVisibility +from daras_ai_v2.crypto import get_random_doc_id + + +def set_field_attribute(instance, field_name, **attrs): + for field in instance._meta.local_fields: + if field.name == field_name: + for attr, value in attrs.items(): + setattr(field, attr, value) + + +def create_published_run_from_example( + *, + published_run_model, + published_run_version_model, + saved_run, + user, + published_run_id, +): + published_run = published_run_model( + workflow=saved_run.workflow, + published_run_id=published_run_id, + created_by=user, + last_edited_by=user, + saved_run=saved_run, + title=saved_run.page_title, + notes=saved_run.page_notes, + visibility=PublishedRunVisibility.PUBLIC, + is_approved_example=True, + ) + set_field_attribute(published_run, "created_at", auto_now_add=False) + set_field_attribute(published_run, "updated_at", auto_now=False) + published_run.created_at = saved_run.created_at + published_run.updated_at = saved_run.updated_at + published_run.save() + set_field_attribute(published_run, "created_at", auto_now_add=True) + set_field_attribute(published_run, "updated_at", auto_now=True) + + version = published_run_version_model( + published_run=published_run, + version_id=get_random_doc_id(), + saved_run=saved_run, + changed_by=user, + title=saved_run.page_title, + notes=saved_run.page_notes, + visibility=PublishedRunVisibility.PUBLIC, + ) + set_field_attribute(published_run, "created_at", auto_now_add=False) + version.created_at = saved_run.updated_at + version.save() + set_field_attribute(published_run, "created_at", auto_now_add=True) + + return published_run + + +def forwards_func(apps, schema_editor): + # if example_id is not null, create published run with + # is_approved_example to True and visibility to Public + saved_run_model = apps.get_model("bots", "SavedRun") + published_run_model = apps.get_model("bots", "PublishedRun") + published_run_version_model = apps.get_model("bots", "PublishedRunVersion") + db_alias = schema_editor.connection.alias + + # all examples + for saved_run in saved_run_model.objects.using(db_alias).filter( + example_id__isnull=False, + ): + create_published_run_from_example( + published_run_model=published_run_model, + published_run_version_model=published_run_version_model, + saved_run=saved_run, + user=None, # TODO: use gooey-support user instead? 
+ published_run_id=saved_run.example_id, + ) + + # recipe root examples + for saved_run in saved_run_model.objects.using(db_alias).filter( + example_id__isnull=True, + run_id__isnull=True, + uid__isnull=True, + ): + create_published_run_from_example( + published_run_model=published_run_model, + published_run_version_model=published_run_version_model, + saved_run=saved_run, + user=None, + published_run_id="", + ) + + +def backwards_func(apps, schema_editor): + pass + class Migration(migrations.Migration): dependencies = [ @@ -165,4 +258,8 @@ class Migration(migrations.Migration): ], }, ), + migrations.RunPython( + forwards_func, + backwards_func, + ), ] From 11d5ab58eff16ab1f0abff1a6b9d85ab6082824c Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 5 Dec 2023 19:34:45 +0530 Subject: [PATCH 088/138] migrations: reject hidden examples as unapproved --- bots/migrations/0049_publishedrun_publishedrunversion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bots/migrations/0049_publishedrun_publishedrunversion.py b/bots/migrations/0049_publishedrun_publishedrunversion.py index 0eec76b80..51c209825 100644 --- a/bots/migrations/0049_publishedrun_publishedrunversion.py +++ b/bots/migrations/0049_publishedrun_publishedrunversion.py @@ -31,7 +31,7 @@ def create_published_run_from_example( title=saved_run.page_title, notes=saved_run.page_notes, visibility=PublishedRunVisibility.PUBLIC, - is_approved_example=True, + is_approved_example=not saved_run.hidden, ) set_field_attribute(published_run, "created_at", auto_now_add=False) set_field_attribute(published_run, "updated_at", auto_now=False) From c70c39d651e266a56252835a1f4c5d6ecb29c940 Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Wed, 6 Dec 2023 18:49:03 +0530 Subject: [PATCH 089/138] show run/error in history add json to pdf tool to copilot ability to send documents to whatsapp --- Dockerfile | 2 + bots/admin.py | 2 +- daras_ai_v2/base.py | 4 + daras_ai_v2/bots.py | 299 ++++++++++++++-------------- daras_ai_v2/enum_selector_widget.py | 22 +- daras_ai_v2/facebook_bots.py | 162 +++++++++------ daras_ai_v2/functions.py | 70 +++++++ daras_ai_v2/language_model.py | 21 +- daras_ai_v2/slack_bot.py | 19 +- poetry.lock | 167 +++++++++------- pyproject.toml | 1 + recipes/VideoBots.py | 48 ++++- templates/form_output.html | 19 ++ 13 files changed, 538 insertions(+), 298 deletions(-) create mode 100644 daras_ai_v2/functions.py create mode 100644 templates/form_output.html diff --git a/Dockerfile b/Dockerfile index 38576f406..77394f97d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -42,6 +42,8 @@ RUN pip install --no-cache-dir -U poetry pip && poetry install --no-cache --only # install nltk stopwords RUN poetry run python -c 'import nltk; nltk.download("stopwords")' +# install playwright +RUN playwright install # copy the code into the container COPY . . 
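[Editor's note on the Dockerfile change above] The "RUN playwright install" step provisions the headless Chromium needed by the new json_to_pdf / html_to_pdf helpers that this same patch adds in daras_ai_v2/functions.py (shown further below). A rough usage sketch follows — the filename and JSON payload are invented, and it assumes the repo's templates and upload settings are available:

    # Sketch only: exercises the PDF-rendering path that "playwright install" enables.
    from daras_ai_v2.functions import json_to_pdf  # helper added later in this patch

    pdf_url = json_to_pdf(
        filename="visit-summary",  # hypothetical name; ".pdf" is appended if missing
        data='{"name": "Asha", "next_visit": "2023-12-12"}',  # must be a valid JSON string
    )
    print(pdf_url)  # URL of the uploaded PDF, via upload_file_from_bytes

At runtime this is presumably reached through the LLMTools.json_to_pdf tool spec that the copilot passes along to the chat model.
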
diff --git a/bots/admin.py b/bots/admin.py index fda4a9321..b3e02bff9 100644 --- a/bots/admin.py +++ b/bots/admin.py @@ -254,7 +254,7 @@ def view_bots(self, saved_run: SavedRun): @admin.display(description="Input") def preview_input(self, saved_run: SavedRun): - return truncate_text_words(BasePage.preview_input(saved_run.state), 100) + return truncate_text_words(BasePage.preview_input(saved_run.state) or "", 100) class LastActiveDeltaFilter(admin.SimpleListFilter): diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 1d1a36fc5..2b0e37f0a 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -1068,6 +1068,10 @@ def _render(sr: SavedRun): url=url, query_params=dict(run_id=sr.run_id, uid=uid), ) + if sr.run_status: + html_spinner(sr.run_status) + elif sr.error_msg: + st.error(sr.error_msg) grid_layout(3, run_history, _render) diff --git a/daras_ai_v2/bots.py b/daras_ai_v2/bots.py index 3d19d99b3..44d5c8531 100644 --- a/daras_ai_v2/bots.py +++ b/daras_ai_v2/bots.py @@ -10,7 +10,6 @@ from app_users.models import AppUser from bots.models import ( - BotIntegration, Platform, Message, Conversation, @@ -25,6 +24,42 @@ from daras_ai_v2.language_model import CHATML_ROLE_USER, CHATML_ROLE_ASSISTANT from daras_ai_v2.vector_search import doc_url_to_file_metadata from gooeysite.bg_db_conn import db_middleware +from recipes.VideoBots import VideoBotsPage, ReplyButton + + +PAGE_NOT_CONNECTED_ERROR = ( + "💔 Looks like you haven't connected this page to a gooey.ai workflow. " + "Please go to the Integrations Tab and connect this page." +) +RESET_KEYWORD = "reset" +RESET_MSG = "♻️ Sure! Let's start fresh. How can I help you?" + +DEFAULT_RESPONSE = ( + "🤔🤖 Well that was Unexpected! I seem to be lost. Could you please try again?." +) + +INVALID_INPUT_FORMAT = ( + "⚠️ Sorry! I don't understand {} messsages. Please try with text or audio." +) + +AUDIO_ASR_CONFIRMATION = """ +🎧 I heard: “{}” +Working on your answer… +""".strip() + +ERROR_MSG = """ +`{0!r}` + +⚠️ Sorry, I ran into an error while processing your request. Please try again, or type "Reset" to start over. +""".strip() + +FEEDBACK_THUMBS_UP_MSG = "🎉 What did you like about my response?" +FEEDBACK_THUMBS_DOWN_MSG = "🤔 What was the issue with the response? How could it be improved? Please send me an voice note or text me." +FEEDBACK_CONFIRMED_MSG = ( + "🙏 Thanks! Your feedback helps us make {bot_name} better. How else can I help you?" +) + +TAPPED_SKIP_MSG = "🌱 Alright. What else can I help you with?" 
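[Editor's note] The message constants above are plain str.format templates filled in at send time — for example, the error handler further down in this file calls bot.send_msg(text=ERROR_MSG.format(e)). A minimal illustration, with a made-up exception:

    # Illustration only: ERROR_MSG uses {0!r}, so the user sees the repr of the
    # failure followed by the apology / "Reset" hint.
    from daras_ai_v2.bots import ERROR_MSG

    try:
        raise TimeoutError("workflow call timed out")  # stand-in for a failed workflow call
    except Exception as e:
        print(ERROR_MSG.format(e))
        # `TimeoutError('workflow call timed out')`
        #
        # ⚠️ Sorry, I ran into an error while processing your request. Please try again, or type "Reset" to start over.
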
async def request_json(request: Request): @@ -51,13 +86,36 @@ class BotInterface: input_glossary: str | None = None output_glossary: str | None = None + def send_msg_or_default( + self, + *, + text: str | None = None, + audio: str = None, + video: str = None, + buttons: list[ReplyButton] = None, + documents: list[str] = None, + should_translate: bool = False, + default: str = DEFAULT_RESPONSE, + ): + if not (text or audio or video or documents): + text = default + return self.send_msg( + text=text, + audio=audio, + video=video, + buttons=buttons, + documents=documents, + should_translate=should_translate, + ) + def send_msg( self, *, text: str | None = None, audio: str = None, video: str = None, - buttons: list = None, + buttons: list[ReplyButton] = None, + documents: list[str] = None, should_translate: bool = False, ) -> str | None: raise NotImplementedError @@ -101,41 +159,6 @@ def get_interactive_msg_info(self) -> tuple[str, str]: raise NotImplementedError("This bot does not support interactive messages.") -PAGE_NOT_CONNECTED_ERROR = ( - "💔 Looks like you haven't connected this page to a gooey.ai workflow. " - "Please go to the Integrations Tab and connect this page." -) -RESET_KEYWORD = "reset" -RESET_MSG = "♻️ Sure! Let's start fresh. How can I help you?" - -DEFAULT_RESPONSE = ( - "🤔🤖 Well that was Unexpected! I seem to be lost. Could you please try again?." -) - -INVALID_INPUT_FORMAT = ( - "⚠️ Sorry! I don't understand {} messsages. Please try with text or audio." -) - -AUDIO_ASR_CONFIRMATION = """ -🎧 I heard: “{}” -Working on your answer… -""".strip() - -ERROR_MSG = """ -`{0!r}` - -⚠️ Sorry, I ran into an error while processing your request. Please try again, or type "Reset" to start over. -""".strip() - -FEEDBACK_THUMBS_UP_MSG = "🎉 What did you like about my response?" -FEEDBACK_THUMBS_DOWN_MSG = "🤔 What was the issue with the response? How could it be improved? Please send me an voice note or text me." -FEEDBACK_CONFIRMED_MSG = ( - "🙏 Thanks! Your feedback helps us make {bot_name} better. How else can I help you?" -) - -TAPPED_SKIP_MSG = "🌱 Alright. What else can I help you with?" 
- - def _echo(bot, input_text): response_text = f"You said ```{input_text}```\nhttps://www.youtube.com/" if bot.get_input_audio(): @@ -283,13 +306,7 @@ def _process_and_send_msg( # bot, input_text # ) # make API call to gooey bots to get the response - ( - response_text, - response_audio, - response_video, - user_msg, - assistant_msg, - ) = _process_msg( + response, url = _process_msg( page_cls=bot.page_cls, api_user=billing_account_user, query_params=bot.query_params, @@ -305,38 +322,110 @@ def _process_and_send_msg( # send error msg as repsonse bot.send_msg(text=ERROR_MSG.format(e)) return - # this really shouldn't happen, but just in case it does, we should have a nice message - response_text = response_text or DEFAULT_RESPONSE + # send the response to the user - msg_id = bot.send_msg( - text=response_text, - audio=response_audio, - video=response_video, + msg_id = bot.send_msg_or_default( + text=response.output_text and response.output_text[0], + audio=response.output_audio and response.output_audio[0], + video=response.output_video and response.output_video[0], + documents=response.output_documents or [], buttons=_feedback_start_buttons() if bot.show_feedback_buttons else None, ) - if not (user_msg and assistant_msg): - return - # save the message id for the received message - if bot.recieved_msg_id: - user_msg.platform_msg_id = bot.recieved_msg_id - # save the message id for the sent message - if msg_id: - assistant_msg.platform_msg_id = msg_id - - # get the attachments + + # save msgs to db + _save_msgs( + bot=bot, + input_images=input_images, + input_text=input_text, + speech_run=speech_run, + platform_msg_id=msg_id, + response=response, + url=url, + ) + + +def _save_msgs( + bot: BotInterface, + input_images: list[str] | None, + input_text: str, + speech_run: str | None, + platform_msg_id: str | None, + response: VideoBotsPage.ResponseModel, + url: str, +): + # create messages for future context + user_msg = Message( + platform_msg_id=bot.recieved_msg_id, + conversation=bot.convo, + role=CHATML_ROLE_USER, + content=response.raw_input_text, + display_content=input_text, + saved_run=SavedRun.objects.get_or_create( + workflow=Workflow.ASR, **furl(speech_run).query.params + )[0] + if speech_run + else None, + ) attachments = [] for img in input_images or []: metadata = doc_url_to_file_metadata(img) attachments.append( MessageAttachment(message=user_msg, url=img, metadata=metadata) ) + assistant_msg = Message( + platform_msg_id=platform_msg_id, + conversation=bot.convo, + role=CHATML_ROLE_ASSISTANT, + content=response.raw_output_text and response.raw_output_text[0], + display_content=response.output_text and response.output_text[0], + saved_run=SavedRun.objects.get_or_create( + workflow=Workflow.VIDEO_BOTS, **furl(url).query.params + )[0], + ) # save the messages & attachments with transaction.atomic(): user_msg.save() - assistant_msg.save() for attachment in attachments: attachment.metadata.save() attachment.save() + assistant_msg.save() + + +def _process_msg( + *, + page_cls, + api_user: AppUser, + query_params: dict, + convo: Conversation, + input_images: list[str] | None, + input_text: str, + user_language: str, + speech_run: str | None, +) -> tuple[VideoBotsPage.ResponseModel, str]: + from routers.api import call_api + + # get latest messages for context (upto 100) + saved_msgs = convo.messages.all().as_llm_context() + + # # mock testing + # result = _mock_api_output(input_text) + + # call the api with provided input + result = call_api( + page_cls=page_cls, + user=api_user, + 
request_body={ + "input_prompt": input_text, + "input_images": input_images, + "messages": saved_msgs, + "user_language": user_language, + }, + query_params=query_params, + ) + # parse result + response = page_cls.ResponseModel.parse_obj(result["output"]) + url = result.get("url", "") + return response, url def _handle_interactive_msg(bot: BotInterface): @@ -438,98 +527,20 @@ class ButtonIds: feedback_thumbs_down = "FEEDBACK_THUMBS_DOWN" -def _feedback_post_click_buttons(): +def _feedback_post_click_buttons() -> list[ReplyButton]: """ Buttons to show after the user has clicked on a feedback button """ return [ - { - "type": "reply", - "reply": {"id": ButtonIds.action_skip, "title": "🔀 Skip"}, - }, + {"id": ButtonIds.action_skip, "title": "🔀 Skip"}, ] -def _feedback_start_buttons(): +def _feedback_start_buttons() -> list[ReplyButton]: """ Buttons to show for collecting feedback after the bot has sent a response """ return [ - { - "type": "reply", - "reply": {"id": ButtonIds.feedback_thumbs_up, "title": "👍🏾"}, - }, - { - "type": "reply", - "reply": {"id": ButtonIds.feedback_thumbs_down, "title": "👎🏽"}, - }, + {"id": ButtonIds.feedback_thumbs_up, "title": "👍🏾"}, + {"id": ButtonIds.feedback_thumbs_down, "title": "👎🏽"}, ] - - -def _process_msg( - *, - page_cls, - api_user: AppUser, - query_params: dict, - convo: Conversation, - input_images: list[str] | None, - input_text: str, - user_language: str, - speech_run: str | None, -) -> tuple[str, str | None, str | None, Message, Message]: - from routers.api import call_api - - # get latest messages for context (upto 100) - saved_msgs = convo.messages.all().as_llm_context() - - # # mock testing - # result = _mock_api_output(input_text) - - # call the api with provided input - result = call_api( - page_cls=page_cls, - user=api_user, - request_body={ - "input_prompt": input_text, - "input_images": input_images, - "messages": saved_msgs, - "user_language": user_language, - }, - query_params=query_params, - ) - - # extract response video/audio/text - try: - response_video = result["output"]["output_video"][0] - except (KeyError, IndexError): - response_video = None - try: - response_audio = result["output"]["output_audio"][0] - except (KeyError, IndexError): - response_audio = None - raw_input_text = result["output"]["raw_input_text"] - output_text = result["output"]["output_text"][0] - raw_output_text = result["output"]["raw_output_text"][0] - response_text = result["output"]["output_text"][0] - # save new messages for future context - user_msg = Message( - conversation=convo, - role=CHATML_ROLE_USER, - content=raw_input_text, - display_content=input_text, - saved_run=SavedRun.objects.get_or_create( - workflow=Workflow.ASR, **furl(speech_run).query.params - )[0] - if speech_run - else None, - ) - assistant_msg = Message( - conversation=convo, - role=CHATML_ROLE_ASSISTANT, - content=raw_output_text, - display_content=output_text, - saved_run=SavedRun.objects.get_or_create( - workflow=Workflow.VIDEO_BOTS, **furl(result.get("url", "")).query.params - )[0], - ) - return response_text, response_audio, response_video, user_msg, assistant_msg diff --git a/daras_ai_v2/enum_selector_widget.py b/daras_ai_v2/enum_selector_widget.py index 098287f0d..512736ef4 100644 --- a/daras_ai_v2/enum_selector_widget.py +++ b/daras_ai_v2/enum_selector_widget.py @@ -1,4 +1,5 @@ import enum +import typing from typing import TypeVar, Type import gooey_ui as st @@ -35,7 +36,7 @@ def render(e): if inner_key not in st.session_state: st.session_state[inner_key] = e.name in selected - 
st.checkbox(e.value, key=inner_key) + st.checkbox(_format_func(enum_cls)(e.name), key=inner_key) if st.session_state.get(inner_key): selected.add(e.name) @@ -49,7 +50,7 @@ def render(e): else: return st.multiselect( options=[e.name for e in enums], - format_func=lambda k: enum_cls[k].value, + format_func=_format_func(enum_cls), label=label, key=key, allow_none=allow_none, @@ -82,8 +83,19 @@ def enum_selector( return widget( **kwargs, options=options, - format_func=lambda k: getattr(enum_cls[k], "label", enum_cls[k].value) - if k - else "———", + format_func=_format_func(enum_cls), label=label, ) + + +def _format_func(enum_cls: E) -> typing.Callable[[str], str]: + def _format(k): + if not k: + return "———" + e = enum_cls[k] + try: + return e.label + except AttributeError: + return e.value + + return _format diff --git a/daras_ai_v2/facebook_bots.py b/daras_ai_v2/facebook_bots.py index 33bdd7872..1064e7097 100644 --- a/daras_ai_v2/facebook_bots.py +++ b/daras_ai_v2/facebook_bots.py @@ -1,11 +1,12 @@ import requests +from furl import furl from bots.models import BotIntegration, Platform, Conversation from daras_ai.image_input import upload_file_from_bytes, get_mimetype_from_response from daras_ai_v2 import settings from daras_ai_v2.asr import run_google_translate, audio_bytes_to_wav +from daras_ai_v2.bots import BotInterface, ReplyButton from daras_ai_v2.text_splitter import text_splitter -from daras_ai_v2.bots import BotInterface WA_MSG_MAX_SIZE = 1024 @@ -88,19 +89,22 @@ def send_msg( text: str = None, audio: str = None, video: str = None, - buttons: list = None, + buttons: list[ReplyButton] = None, + documents: list[str] = None, should_translate: bool = False, ) -> str | None: - if should_translate and self.language and self.language != "en": + if text and should_translate and self.language and self.language != "en": text = run_google_translate( [text], self.language, glossary_url=self.output_glossary )[0] + text = text or "\u200b" # handle empty text with zero-width space return send_wa_msg( bot_number=self.bot_id, user_number=self.user_id, - response_text=text, - response_audio=audio, - response_video=video, + text=text, + audio=audio, + video=video, + documents=documents, buttons=buttons, ) @@ -112,18 +116,19 @@ def send_wa_msg( *, bot_number: str, user_number: str, - response_text: str, - response_audio: str = None, - response_video: str = None, - buttons: list = None, + text: str, + audio: str = None, + video: str = None, + documents: list[str] = None, + buttons: list[ReplyButton] = None, ) -> str | None: + # see https://developers.facebook.com/docs/whatsapp/api/messages/media/ + # split text into chunks if too long - if len(response_text) > WA_MSG_MAX_SIZE: - splits = text_splitter( - response_text, chunk_size=WA_MSG_MAX_SIZE, length_function=len - ) + if len(text) > WA_MSG_MAX_SIZE: + splits = text_splitter(text, chunk_size=WA_MSG_MAX_SIZE, length_function=len) # preserve last chunk for later - response_text = splits[-1].text + text = splits[-1].text # send all but last chunk send_wa_msgs_raw( bot_number=bot_number, @@ -141,19 +146,22 @@ def send_wa_msg( ], ) - if response_video: + if video: if buttons: messages = [ # interactive text msg + video in header - { - "body": { - "text": response_text, + _build_msg_buttons( + buttons, + { + "body": { + "text": text, + }, + "header": { + "type": "video", + "video": {"link": video}, + }, }, - "header": { - "type": "video", - "video": {"link": response_video}, - }, - }, + ), ] else: messages = [ @@ -161,19 +169,19 @@ def send_wa_msg( { 
"type": "video", "video": { - "link": response_video, - "caption": response_text, + "link": video, + "caption": text, }, }, ] - elif response_audio: + elif audio: if buttons: # audio can't be sent as an interaction, so send text and audio separately messages = [ # simple audio msg { "type": "audio", - "audio": {"link": response_audio}, + "audio": {"link": audio}, }, ] send_wa_msgs_raw( @@ -183,11 +191,14 @@ def send_wa_msg( ) messages = [ # interactive text msg - { - "body": { - "text": response_text, + _build_msg_buttons( + buttons, + { + "body": { + "text": text, + }, }, - }, + ) ] else: # audio doesn't support captions, so send text and audio separately @@ -196,14 +207,14 @@ def send_wa_msg( { "type": "text", "text": { - "body": response_text, + "body": text, "preview_url": True, }, }, # simple audio msg { "type": "audio", - "audio": {"link": response_audio}, + "audio": {"link": audio}, }, ] else: @@ -211,11 +222,14 @@ def send_wa_msg( if buttons: messages = [ # interactive text msg - { - "body": { - "text": response_text, - } - }, + _build_msg_buttons( + buttons, + { + "body": { + "text": text, + } + }, + ), ] else: messages = [ @@ -223,16 +237,29 @@ def send_wa_msg( { "type": "text", "text": { - "body": response_text, + "body": text, "preview_url": True, }, }, ] + + if documents: + messages += [ + # simple document msg + { + "type": "document", + "document": { + "link": link, + "filename": furl(link).path.segments[-1], + }, + } + for link in documents + ] + return send_wa_msgs_raw( bot_number=bot_number, user_number=user_number, messages=messages, - buttons=buttons, ) @@ -255,31 +282,38 @@ def retrieve_wa_media_by_id(media_id: str) -> (bytes, str): return content, media_info["mime_type"] -def send_wa_msgs_raw( - *, bot_number, user_number, messages: list, buttons: list = None -) -> str | None: +def _build_msg_buttons(buttons: list[ReplyButton], msg: dict) -> dict: + return { + "type": "interactive", + "interactive": { + "type": "button", + **msg, + "action": { + "buttons": [ + { + "type": "reply", + "reply": {"id": button["id"], "title": button["title"]}, + } + for button in buttons + ], + }, + }, + } + + +def send_wa_msgs_raw(*, bot_number, user_number, messages: list) -> str | None: msg_id = None for msg in messages: - body = { - "messaging_product": "whatsapp", - "to": user_number, - "preview_url": True, - } - if buttons: - body |= { - "type": "interactive", - "interactive": { - "type": "button", - **msg, - "action": {"buttons": buttons}, - }, - } - else: - body |= msg + print(f"send_wa_msgs_raw: {msg=}") r = requests.post( f"https://graph.facebook.com/v16.0/{bot_number}/messages", headers=WHATSAPP_AUTH_HEADER, - json=body, + json={ + "messaging_product": "whatsapp", + "to": user_number, + "preview_url": True, + **msg, + }, ) confirmation = r.json() print("send_wa_msgs_raw:", r.status_code, confirmation) @@ -347,13 +381,15 @@ def send_msg( text: str = None, audio: str = None, video: str = None, - buttons: list = None, + buttons: list[ReplyButton] = None, + documents: list[str] = None, should_translate: bool = False, ) -> str | None: - if should_translate and self.language and self.language != "en": + if text and should_translate and self.language and self.language != "en": text = run_google_translate( [text], self.language, glossary_url=self.output_glossary )[0] + text = text or "\u200b" # handle empty text with zero-width space return send_fb_msg( access_token=self._access_token, bot_id=self.bot_id, diff --git a/daras_ai_v2/functions.py b/daras_ai_v2/functions.py new file mode 
100644 index 000000000..2c6c03348 --- /dev/null +++ b/daras_ai_v2/functions.py @@ -0,0 +1,70 @@ +import json +import tempfile +import typing +from enum import Enum + +from daras_ai.image_input import upload_file_from_bytes +from daras_ai_v2.settings import templates + + +def json_to_pdf(filename: str, data: str) -> str: + html = templates.get_template("form_output.html").render(data=json.loads(data)) + pdf_bytes = html_to_pdf(html) + if not filename.endswith(".pdf"): + filename += ".pdf" + return upload_file_from_bytes(filename, pdf_bytes, "application/pdf") + + +def html_to_pdf(html: str) -> bytes: + from playwright.sync_api import sync_playwright + + with sync_playwright() as p: + browser = p.chromium.launch() + page = browser.new_page() + page.set_content(html) + with tempfile.NamedTemporaryFile(suffix=".pdf") as outfile: + page.pdf(path=outfile.name, format="A4") + ret = outfile.read() + browser.close() + + return ret + + +class LLMTools(Enum): + json_to_pdf = ( + json_to_pdf, + "Save JSON as PDF", + { + "type": "function", + "function": { + "name": json_to_pdf.__name__, + "description": "Save JSON data to PDF", + "parameters": { + "type": "object", + "properties": { + "filename": { + "type": "string", + "description": "A short but descriptive filename for the PDF", + }, + "data": { + "type": "string", + "description": "The JSON data to write to the PDF", + }, + }, + "required": ["filename", "data"], + }, + }, + }, + ) + # send_reply_buttons = (print, "Send back reply buttons to the user.", {}) + + def __new__(cls, fn: typing.Callable, label: str, spec: dict): + obj = object.__new__(cls) + obj._value_ = fn.__name__ + obj.fn = fn + obj.label = label + obj.spec = spec + return obj + + # def __init__(self, *args, **kwargs): + # self._value_ = self.name diff --git a/daras_ai_v2/language_model.py b/daras_ai_v2/language_model.py index 8a4614449..38319a757 100644 --- a/daras_ai_v2/language_model.py +++ b/daras_ai_v2/language_model.py @@ -21,6 +21,7 @@ from daras_ai_v2.asr import get_google_auth_session from daras_ai_v2.functional import map_parallel +from daras_ai_v2.functions import LLMTools from daras_ai_v2.redis_cache import ( get_redis_cache, ) @@ -321,7 +322,8 @@ def run_language_model( temperature: float = 0.7, # Default value version 1.0 stop: list[str] | None = None, avoid_repetition: bool = False, -) -> list[str]: + tools: list[LLMTools] | None = None, +) -> list[str] | tuple[list[str], list[list[dict]]]: assert bool(prompt) != bool( messages ), "Pleave provide exactly one of { prompt, messages }" @@ -351,15 +353,22 @@ def run_language_model( temperature=temperature, stop=stop, avoid_repetition=avoid_repetition, + tools=tools, ) - return [ + output_text = [ # return messages back as either chatml or json messages format_chatml_message(entry) if is_chatml else (entry.get("content") or "").strip() for entry in result ] + if tools: + return output_text, [(entry.get("tool_calls") or []) for entry in result] + else: + return output_text else: + if tools: + raise ValueError("Only OpenAI chat models support Tools") logger.info(f"{model_name=}, {len(prompt)=}, {max_tokens=}, {temperature=}") result = _run_text_model( api=api, @@ -421,6 +430,7 @@ def _run_chat_model( model: str | tuple, stop: list[str] | None, avoid_repetition: bool, + tools: list[LLMTools] | None = None, ) -> list[ConversationEntry]: match api: case LLMApis.openai: @@ -432,8 +442,11 @@ def _run_chat_model( num_outputs=num_outputs, stop=stop, temperature=temperature, + tools=tools, ) case LLMApis.vertex_ai: + if tools: + 
raise ValueError("Only OpenAI chat models support Tools") return _run_palm_chat( model_id=model, messages=messages, @@ -442,6 +455,8 @@ def _run_chat_model( temperature=temperature, ) case LLMApis.together: + if tools: + raise ValueError("Only OpenAI chat models support Tools") return _run_together_chat( model=model, messages=messages, @@ -464,6 +479,7 @@ def _run_openai_chat( temperature: float, stop: list[str] | None, avoid_repetition: bool, + tools: list[LLMTools] | None = None, ) -> list[ConversationEntry]: from openai._types import NOT_GIVEN @@ -487,6 +503,7 @@ def _run_openai_chat( temperature=temperature, frequency_penalty=frequency_penalty, presence_penalty=presence_penalty, + tools=[tool.spec for tool in tools] if tools else NOT_GIVEN, ) for model_str in model ], diff --git a/daras_ai_v2/slack_bot.py b/daras_ai_v2/slack_bot.py index bf49ca874..82883879a 100644 --- a/daras_ai_v2/slack_bot.py +++ b/daras_ai_v2/slack_bot.py @@ -15,6 +15,7 @@ from daras_ai_v2.bots import BotInterface from daras_ai_v2.functional import fetch_parallel from daras_ai_v2.text_splitter import text_splitter +from recipes.VideoBots import ReplyButton SLACK_CONFIRMATION_MSG = """ Hi there! 👋 @@ -131,15 +132,15 @@ def send_msg( text: str | None = None, audio: str | None = None, video: str | None = None, - buttons: list | None = None, + buttons: list[ReplyButton] = None, + documents: list[str] = None, should_translate: bool = False, ) -> str | None: - if not text: - return None - if should_translate and self.language and self.language != "en": + if text and should_translate and self.language and self.language != "en": text = run_google_translate( [text], self.language, glossary_url=self.output_glossary )[0] + text = text or "\u200b" # handle empty text with zero-width space if self._read_rcpt_ts and self._read_rcpt_ts != self._msg_ts: delete_msg( @@ -498,7 +499,7 @@ def chat_post_message( audio: str | None = None, video: str | None = None, username: str = "Video Bot", - buttons: list | None = None, + buttons: list[ReplyButton] = None, ) -> str | None: if buttons is None: buttons = [] @@ -559,7 +560,7 @@ def create_file_block( ] -def create_button_block(buttons: list[dict]) -> list[dict]: +def create_button_block(buttons: list[ReplyButton]) -> list[dict]: if not buttons: return [] return [ @@ -568,9 +569,9 @@ def create_button_block(buttons: list[dict]) -> list[dict]: "elements": [ { "type": "button", - "text": {"type": "plain_text", "text": button["reply"]["title"]}, - "value": button["reply"]["id"], - "action_id": "button_" + button["reply"]["id"], + "text": {"type": "plain_text", "text": button["title"]}, + "value": button["id"], + "action_id": "button_" + button["id"], } for button in buttons ], diff --git a/poetry.lock b/poetry.lock index 88f4e863f..edbc668d1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. 
[[package]] name = "absl-py" @@ -1528,60 +1528,6 @@ grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio-status (>=1.33.2,<2.0dev)"] grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0dev)"] grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0dev)"] -[[package]] -name = "google-api-core" -version = "2.11.0" -description = "Google API client core library" -optional = false -python-versions = ">=3.7" -files = [ - {file = "google-api-core-2.11.0.tar.gz", hash = "sha256:4b9bb5d5a380a0befa0573b302651b8a9a89262c1730e37bf423cec511804c22"}, - {file = "google_api_core-2.11.0-py3-none-any.whl", hash = "sha256:ce222e27b0de0d7bc63eb043b956996d6dccab14cc3b690aaea91c9cc99dc16e"}, -] - -[package.dependencies] -google-auth = ">=2.14.1,<3.0dev" -googleapis-common-protos = ">=1.56.2,<2.0dev" -grpcio = [ - {version = ">=1.33.2,<2.0dev", optional = true, markers = "extra == \"grpc\""}, - {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, -] -grpcio-status = {version = ">=1.33.2,<2.0dev", optional = true, markers = "extra == \"grpc\""} -protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" -requests = ">=2.18.0,<3.0.0dev" - -[package.extras] -grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0dev)", "grpcio-status (>=1.49.1,<2.0dev)"] -grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0dev)"] -grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0dev)"] - -[[package]] -name = "google-api-core" -version = "2.11.1" -description = "Google API client core library" -optional = false -python-versions = ">=3.7" -files = [ - {file = "google-api-core-2.11.1.tar.gz", hash = "sha256:25d29e05a0058ed5f19c61c0a78b1b53adea4d9364b464d014fbda941f6d1c9a"}, - {file = "google_api_core-2.11.1-py3-none-any.whl", hash = "sha256:d92a5a92dc36dd4f4b9ee4e55528a90e432b059f93aee6ad857f9de8cc7ae94a"}, -] - -[package.dependencies] -google-auth = ">=2.14.1,<3.0.dev0" -googleapis-common-protos = ">=1.56.2,<2.0.dev0" -grpcio = [ - {version = ">=1.33.2,<2.0dev", optional = true, markers = "extra == \"grpc\""}, - {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, -] -grpcio-status = {version = ">=1.33.2,<2.0.dev0", optional = true, markers = "extra == \"grpc\""} -protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" -requests = ">=2.18.0,<3.0.0.dev0" - -[package.extras] -grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"] -grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] -grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] - [[package]] name = "google-api-core" version = "2.12.0" @@ -1596,11 +1542,8 @@ files = [ [package.dependencies] google-auth = ">=2.14.1,<3.0.dev0" googleapis-common-protos = ">=1.56.2,<2.0.dev0" -grpcio = [ - {version = ">=1.33.2,<2.0dev", optional = true, markers = "extra == \"grpc\""}, - {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, -] -grpcio-status = {version = ">=1.33.2,<2.0.dev0", optional = true, markers = "extra == \"grpc\""} +grpcio = {version = ">=1.33.2,<2.0dev", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""} +grpcio-status = {version = ">=1.33.2,<2.0.dev0", optional = true, markers = 
"python_version < \"3.11\" and extra == \"grpc\""} protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" requests = ">=2.18.0,<3.0.0.dev0" @@ -1716,8 +1659,8 @@ files = [ google-api-core = {version = ">=1.34.0,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} google-cloud-core = ">=1.4.1,<3.0.0dev" proto-plus = [ - {version = ">=1.22.0,<2.0.0dev", markers = "python_version < \"3.11\""}, {version = ">=1.22.2,<2.0.0dev", markers = "python_version >= \"3.11\""}, + {version = ">=1.22.0,<2.0.0dev", markers = "python_version < \"3.11\""}, ] protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" @@ -1735,8 +1678,8 @@ files = [ [package.dependencies] google-api-core = {version = ">=1.34.0,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} proto-plus = [ - {version = ">=1.22.0,<2.0.0dev", markers = "python_version < \"3.11\""}, {version = ">=1.22.2,<2.0.0dev", markers = "python_version >= \"3.11\""}, + {version = ">=1.22.0,<2.0.0dev", markers = "python_version < \"3.11\""}, ] protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" @@ -1776,8 +1719,8 @@ files = [ [package.dependencies] google-api-core = {version = ">=1.34.0,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} proto-plus = [ - {version = ">=1.22.0,<2.0.0dev", markers = "python_version < \"3.11\""}, {version = ">=1.22.2,<2.0.0dev", markers = "python_version >= \"3.11\""}, + {version = ">=1.22.0,<2.0.0dev", markers = "python_version < \"3.11\""}, ] protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" @@ -1796,8 +1739,8 @@ files = [ google-api-core = {version = ">=1.34.0,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} google-cloud-core = ">=1.4.4,<3.0.0dev" proto-plus = [ - {version = ">=1.22.0,<2.0.0dev", markers = "python_version < \"3.11\""}, {version = ">=1.22.2,<2.0.0dev", markers = "python_version >= \"3.11\""}, + {version = ">=1.22.0,<2.0.0dev", markers = "python_version < \"3.11\""}, ] protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" @@ -3531,12 +3474,9 @@ files = [ [package.dependencies] numpy = [ - {version = ">=1.21.2", markers = "python_version >= \"3.10\""}, - {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\""}, - {version = ">=1.19.3", markers = "python_version >= \"3.6\" and platform_system == \"Linux\" and platform_machine == \"aarch64\" or python_version >= \"3.9\""}, - {version = ">=1.17.0", markers = "python_version >= \"3.7\""}, - {version = ">=1.17.3", markers = "python_version >= \"3.8\""}, {version = ">=1.23.5", markers = "python_version >= \"3.11\""}, + {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\" and python_version < \"3.11\""}, + {version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\" and python_version < \"3.11\""}, ] [[package]] @@ -3614,9 +3554,9 @@ files = [ [package.dependencies] numpy = [ - {version = 
">=1.22.4,<2", markers = "python_version < \"3.11\""}, {version = ">=1.23.2,<2", markers = "python_version == \"3.11\""}, {version = ">=1.26.0,<2", markers = "python_version >= \"3.12\""}, + {version = ">=1.22.4,<2", markers = "python_version < \"3.11\""}, ] python-dateutil = ">=2.8.2" pytz = ">=2020.1" @@ -3820,6 +3760,26 @@ files = [ docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"] test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"] +[[package]] +name = "playwright" +version = "1.40.0" +description = "A high-level API to automate web browsers" +optional = false +python-versions = ">=3.8" +files = [ + {file = "playwright-1.40.0-py3-none-macosx_10_13_x86_64.whl", hash = "sha256:35b7e0b389df2aa632f3614d35be7bace35f6f634d880db44b035c83e4481312"}, + {file = "playwright-1.40.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:382a7465cc0ea3bf7fa66716bd37fd53f66af4bcc5c72283a8eff3f6e87758a8"}, + {file = "playwright-1.40.0-py3-none-macosx_11_0_universal2.whl", hash = "sha256:f11e1ec32f3b3dbd7f24d1481c313cb527001955004ee88a73f9b4a610d0db28"}, + {file = "playwright-1.40.0-py3-none-manylinux1_x86_64.whl", hash = "sha256:6a842dca4dd53feda1d7bd0e14aa65140e4e816452ebddd307e90cad184d92bd"}, + {file = "playwright-1.40.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ec3746de69e7ba912b70e0fe3a3c6b8af97f21ece793c5db27c251da4d2f3e6"}, + {file = "playwright-1.40.0-py3-none-win32.whl", hash = "sha256:3ae90ea5ad776fe5e1300a9c730244c8e57a183c6eb261044418710d51ae03c0"}, + {file = "playwright-1.40.0-py3-none-win_amd64.whl", hash = "sha256:ba5a89953aedb158025e4581eafb6fdeebb3d58acd9ce24b59f691b1e2a861bc"}, +] + +[package.dependencies] +greenlet = "3.0.1" +pyee = "11.0.1" + [[package]] name = "plotly" version = "5.18.0" @@ -3963,6 +3923,7 @@ files = [ {file = "psycopg2_binary-2.9.9-cp311-cp311-win32.whl", hash = "sha256:dc4926288b2a3e9fd7b50dc6a1909a13bbdadfc67d93f3374d984e56f885579d"}, {file = "psycopg2_binary-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:b76bedd166805480ab069612119ea636f5ab8f8771e640ae103e05a4aae3e417"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8532fd6e6e2dc57bcb3bc90b079c60de896d2128c5d9d6f24a63875a95a088cf"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b0605eaed3eb239e87df0d5e3c6489daae3f7388d455d0c0b4df899519c6a38d"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f8544b092a29a6ddd72f3556a9fcf249ec412e10ad28be6a0c0d948924f2212"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d423c8d8a3c82d08fe8af900ad5b613ce3632a1249fd6a223941d0735fce493"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e5afae772c00980525f6d6ecf7cbca55676296b580c0e6abb407f15f3706996"}, @@ -3971,6 +3932,8 @@ files = [ {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:cb16c65dcb648d0a43a2521f2f0a2300f40639f6f8c1ecbc662141e4e3e1ee07"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:911dda9c487075abd54e644ccdf5e5c16773470a6a5d3826fda76699410066fb"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:57fede879f08d23c85140a360c6a77709113efd1c993923c59fde17aa27599fe"}, + {file = 
"psycopg2_binary-2.9.9-cp312-cp312-win32.whl", hash = "sha256:64cf30263844fa208851ebb13b0732ce674d8ec6a0c86a4e160495d299ba3c93"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:81ff62668af011f9a48787564ab7eded4e9fb17a4a6a74af5ffa6a457400d2ab"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2293b001e319ab0d869d660a704942c9e2cce19745262a8aba2115ef41a0a42a"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ef7df18daf2c4c07e2695e8cfd5ee7f748a1d54d802330985a78d2a5a6dca9"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a602ea5aff39bb9fac6308e9c9d82b9a35c2bf288e184a816002c9fae930b77"}, @@ -4377,6 +4340,21 @@ tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} [package.extras] testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] +[[package]] +name = "pytest-base-url" +version = "2.0.0" +description = "pytest plugin for URL based testing" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "pytest-base-url-2.0.0.tar.gz", hash = "sha256:e1e88a4fd221941572ccdcf3bf6c051392d2f8b6cef3e0bc7da95abec4b5346e"}, + {file = "pytest_base_url-2.0.0-py3-none-any.whl", hash = "sha256:ed36fd632c32af9f1c08f2c2835dcf42ca8fcd097d6ed44a09f253d365ad8297"}, +] + +[package.dependencies] +pytest = ">=3.0.0,<8.0.0" +requests = ">=2.9" + [[package]] name = "pytest-django" version = "4.6.0" @@ -4395,6 +4373,23 @@ pytest = ">=7.0.0" docs = ["sphinx", "sphinx-rtd-theme"] testing = ["Django", "django-configurations (>=2.0)"] +[[package]] +name = "pytest-playwright" +version = "0.4.3" +description = "A pytest wrapper with fixtures for Playwright to automate web browsers" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-playwright-0.4.3.tar.gz", hash = "sha256:699e2c47fbb1e6a57895454693eba278cf55d04d44c15017709b00e1de1d9ccd"}, + {file = "pytest_playwright-0.4.3-py3-none-any.whl", hash = "sha256:c9ff6e7ebfd967b562f5c3d67f1ae6b45a061d6ea51ad304fdd95aca9db20774"}, +] + +[package.dependencies] +playwright = ">=1.18" +pytest = ">=6.2.4,<8.0.0" +pytest-base-url = ">=1.0.0,<3.0.0" +python-slugify = ">=6.0.0,<9.0.0" + [[package]] name = "pytest-subtests" version = "0.11.0" @@ -4482,6 +4477,23 @@ files = [ [package.dependencies] six = ">=1.4.0" +[[package]] +name = "python-slugify" +version = "8.0.1" +description = "A Python slugify application that also handles Unicode" +optional = false +python-versions = ">=3.7" +files = [ + {file = "python-slugify-8.0.1.tar.gz", hash = "sha256:ce0d46ddb668b3be82f4ed5e503dbc33dd815d83e2eb6824211310d3fb172a27"}, + {file = "python_slugify-8.0.1-py2.py3-none-any.whl", hash = "sha256:70ca6ea68fe63ecc8fa4fcf00ae651fc8a5d02d93dcd12ae6d4fc7ca46c4d395"}, +] + +[package.dependencies] +text-unidecode = ">=1.3" + +[package.extras] +unidecode = ["Unidecode (>=1.1.1)"] + [[package]] name = "pytz" version = "2023.3.post1" @@ -5389,7 +5401,7 @@ files = [ ] [package.dependencies] -greenlet = {version = "!=0.4.17", markers = "python_version >= \"3\" and (platform_machine == \"win32\" or platform_machine == \"WIN32\" or platform_machine == \"AMD64\" or platform_machine == \"amd64\" or platform_machine == \"x86_64\" or platform_machine == \"ppc64le\" or platform_machine == \"aarch64\")"} +greenlet = {version = "!=0.4.17", markers = "python_version >= \"3\" and (platform_machine == 
\"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} [package.extras] aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] @@ -5545,6 +5557,17 @@ files = [ [package.extras] doc = ["reno", "sphinx", "tornado (>=4.5)"] +[[package]] +name = "text-unidecode" +version = "1.3" +description = "The most basic Text::Unidecode port" +optional = false +python-versions = "*" +files = [ + {file = "text-unidecode-1.3.tar.gz", hash = "sha256:bad6603bb14d279193107714b288be206cac565dfa49aa5b105294dd5c4aab93"}, + {file = "text_unidecode-1.3-py2.py3-none-any.whl", hash = "sha256:1311f10e8b895935241623731c2ba64f4c455287888b18189350b67134a822e8"}, +] + [[package]] name = "tiktoken" version = "0.3.3" @@ -6487,4 +6510,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "63cf40735aceae870d1bb4fb4101ccb5bc0d50d00541d97482dba9717f492663" +content-hash = "a5ef77e11ff5b9bb9a5ab5ec5a07c2d3ffa0ef3204d332653d3039e8aba04e14" diff --git a/pyproject.toml b/pyproject.toml index c5e61ec81..c6a699024 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -82,6 +82,7 @@ user-agents = "^2.2.0" openpyxl = "^3.1.2" loguru = "^0.7.2" aifail = "^0.1.0" +pytest-playwright = "^0.4.3" [tool.poetry.group.dev.dependencies] watchdog = "^2.1.9" diff --git a/recipes/VideoBots.py b/recipes/VideoBots.py index f57712dad..52c81b67d 100644 --- a/recipes/VideoBots.py +++ b/recipes/VideoBots.py @@ -1,3 +1,4 @@ +import json import os import os.path import re @@ -19,14 +20,15 @@ ) from daras_ai_v2.azure_doc_extract import ( azure_form_recognizer, - azure_form_recognizer_models, ) from daras_ai_v2.base import BasePage, MenuTabs, StateKeys from daras_ai_v2.doc_search_settings_widgets import ( doc_search_settings, document_uploader, ) +from daras_ai_v2.enum_selector_widget import enum_multiselect from daras_ai_v2.field_render import field_title_desc +from daras_ai_v2.functions import LLMTools from daras_ai_v2.glossary import glossary_input from daras_ai_v2.language_model import ( run_language_model, @@ -72,7 +74,7 @@ # start of line r"^" # name of bot / user - r"([\w\ \t]+)" + r"([\w\ \t]{3,30})" # colon r"\:\ ", flags=re.M, @@ -81,6 +83,19 @@ SAFETY_BUFFER = 100 +def exec_tool_call(call: dict): + tool_name = call["function"]["name"] + tool = LLMTools[tool_name] + yield f"🛠 {tool.label}..." + kwargs = json.loads(call["function"]["arguments"]) + return tool.fn(**kwargs) + + +class ReplyButton(typing.TypedDict): + id: str + title: str + + class VideoBotsPage(BasePage): title = "Copilot for your Enterprise" # "Create Interactive Video Bots" image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/8c014530-88d4-11ee-aac9-02420a00016b/Copilot.png.png" @@ -205,6 +220,11 @@ class RequestModel(BaseModel): variables: dict[str, typing.Any] | None + tools: list[LLMTools] | None = Field( + title="🛠️ Tools", + description="Give your copilot superpowers by giving it access to tools. 
Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).", + ) + class ResponseModel(BaseModel): final_prompt: str | list[ConversationEntry] @@ -226,6 +246,10 @@ class ResponseModel(BaseModel): final_search_query: str | None final_keyword_query: str | None + # function calls + output_documents: list[str] | None + reply_buttons: list[ReplyButton] | None + def preview_image(self, state: dict) -> str | None: return DEFAULT_COPILOT_META_IMG @@ -375,6 +399,13 @@ def render_settings(self): ) lipsync_settings() + st.write("---") + enum_multiselect( + enum_cls=LLMTools, + label="##### " + field_title_desc(self.RequestModel, "tools"), + key="tools", + ) + def fields_to_save(self) -> [str]: fields = super().fields_to_save() + ["landbot_url"] if "elevenlabs_api_key" in fields: @@ -714,6 +745,7 @@ def run(self, state: dict) -> typing.Iterator[str | None]: num_outputs=request.num_outputs, temperature=request.sampling_temperature, avoid_repetition=request.avoid_repetition, + tools=request.tools, ) else: prompt = "\n".join( @@ -729,6 +761,14 @@ def run(self, state: dict) -> typing.Iterator[str | None]: avoid_repetition=request.avoid_repetition, stop=[CHATML_END_TOKEN, CHATML_START_TOKEN], ) + if request.tools: + output_text, tool_call_choices = output_text + state["output_documents"] = output_documents = [] + for tool_calls in tool_call_choices: + for call in tool_calls: + result = yield from exec_tool_call(call) + output_documents.append(result) + # save model response state["raw_output_text"] = [ "".join(snippet for snippet, _ in parse_refs(text, references)) @@ -1071,6 +1111,10 @@ def chat_list_view(): st.audio(output_audio[idx]) except IndexError: pass + output_documents = st.session_state.get("output_documents", []) + if output_documents: + for doc in output_documents: + st.write(doc) messages = st.session_state.get("messages", []).copy() # add last input to history if present if show_raw_msgs: diff --git a/templates/form_output.html b/templates/form_output.html new file mode 100644 index 000000000..ecc5830bb --- /dev/null +++ b/templates/form_output.html @@ -0,0 +1,19 @@ + + + + + + + {% for k, v in data.items() %} + + + + + {% endfor %} + +
+ {{ k }} + + {{ v }} +
+ \ No newline at end of file From e4bb3f85a2280eeef1eb41b37ff00beb442389cc Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Thu, 7 Dec 2023 19:18:25 +0530 Subject: [PATCH 090/138] add fixture to get bot integrations --- README.md | 5 +++++ scripts/create_fixture.py | 30 +++++++++++++++++++++++------- 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 6398e12ac..311cbcef7 100644 --- a/README.md +++ b/README.md @@ -148,6 +148,11 @@ docker cp $cid:/app/fixture.json . echo $PWD/fixture.json ``` +```bash +# copy fixture.json from server to local +rsync -P -a @captain.us-1.gooey.ai:/home//fixture.json . +``` + ```bash # reset the database ./manage.py reset_db -c diff --git a/scripts/create_fixture.py b/scripts/create_fixture.py index 9482cef64..5f9bfd888 100644 --- a/scripts/create_fixture.py +++ b/scripts/create_fixture.py @@ -2,23 +2,39 @@ from django.core import serializers -from bots.models import SavedRun +from app_users.models import AppUser +from bots.models import SavedRun, BotIntegration def run(): - qs = SavedRun.objects.filter(run_id__isnull=True) with open("fixture.json", "w") as f: + objs = list(get_objects()) serializers.serialize( "json", - get_objects(qs), + objs, indent=2, stream=f, progress_output=sys.stdout, - object_count=qs.count(), + object_count=len(objs), ) -def get_objects(qs): - for obj in qs: - obj.parent = None +def get_objects(): + for obj in SavedRun.objects.filter(run_id__isnull=True): + set_fk_null(obj) yield obj + + for obj in BotIntegration.objects.all(): + if not obj.saved_run_id: + continue + set_fk_null(obj.saved_run) + yield obj.saved_run + + yield AppUser.objects.get(uid=obj.billing_account_uid) + yield obj + + +def set_fk_null(obj): + for field in obj._meta.get_fields(): + if field.is_relation and field.many_to_one: + setattr(obj, field.name, None) From 77da550b0090c24f019928355f4bf1c63f0df707 Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Thu, 7 Dec 2023 19:37:47 +0530 Subject: [PATCH 091/138] fix: KeyError '__pixels' --- daras_ai_v2/img_model_settings_widgets.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/daras_ai_v2/img_model_settings_widgets.py b/daras_ai_v2/img_model_settings_widgets.py index d4291986e..e86c18873 100644 --- a/daras_ai_v2/img_model_settings_widgets.py +++ b/daras_ai_v2/img_model_settings_widgets.py @@ -314,9 +314,10 @@ def output_resolution_setting(): st.session_state.get("selected_model", st.session_state.get("selected_models")) or "" ) - allowed_shapes = RESOLUTIONS[st.session_state["__pixels"]].values() if not isinstance(selected_models, list): selected_models = [selected_models] + + allowed_shapes = None if "jack_qiao" in selected_models or "sd_1_4" in selected_models: pixel_options = [512] elif selected_models == ["deepfloyd_if"]: @@ -339,9 +340,9 @@ def output_resolution_setting(): ) with col2: res_options = [ - key - for key, val in RESOLUTIONS[pixels or pixel_options[0]].items() - if val in allowed_shapes + res + for res, shape in RESOLUTIONS[pixels or pixel_options[0]].items() + if not allowed_shapes or shape in allowed_shapes ] res = st.selectbox( "##### Resolution", From da2327592fd47a24c2162124db760773ff04f3e3 Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Thu, 7 Dec 2023 20:59:16 +0530 Subject: [PATCH 092/138] remove bot script parsing, because people won't stop using colons in the bot script --- recipes/VideoBots.py | 109 +++++++++++++++++++++---------------------- 1 file changed, 53 insertions(+), 56 deletions(-) diff --git 
a/recipes/VideoBots.py b/recipes/VideoBots.py index 52c81b67d..fef503416 100644 --- a/recipes/VideoBots.py +++ b/recipes/VideoBots.py @@ -1,7 +1,6 @@ import json import os import os.path -import re import typing from django.db.models import QuerySet @@ -70,15 +69,15 @@ DEFAULT_COPILOT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/c8b24b0c-538a-11ee-a1a3-02420a00018d/meta%20tags1%201.png.png" -BOT_SCRIPT_RE = re.compile( - # start of line - r"^" - # name of bot / user - r"([\w\ \t]{3,30})" - # colon - r"\:\ ", - flags=re.M, -) +# BOT_SCRIPT_RE = re.compile( +# # start of line +# r"^" +# # name of bot / user +# r"([\w\ \t]{3,30})" +# # colon +# r"\:\ ", +# flags=re.M, +# ) SAFETY_BUFFER = 100 @@ -297,7 +296,7 @@ def render_form_v2(self): st.text_area( """ ##### 📝 Prompt - High-level system instructions to the copilot + optional example conversations between the bot and the user. + High-level system instructions. """, key="bot_script", height=300, @@ -604,7 +603,9 @@ def run(self, state: dict) -> typing.Iterator[str | None]: user_input = f"Image: {text!r}\n{user_input}" # parse the bot script - system_message, scripted_msgs = parse_script(bot_script) + # system_message, scripted_msgs = parse_script(bot_script) + system_message = bot_script.strip() + scripted_msgs = [] # consturct the system prompt if system_message: @@ -614,21 +615,20 @@ def run(self, state: dict) -> typing.Iterator[str | None]: else: system_prompt = None - # get user/bot display names - try: - bot_display_name = scripted_msgs[-1]["display_name"] - except IndexError: - bot_display_name = CHATML_ROLE_ASSISTANT - try: - user_display_name = scripted_msgs[-2]["display_name"] - except IndexError: - user_display_name = CHATML_ROLE_USER + # # get user/bot display names + # try: + # bot_display_name = scripted_msgs[-1]["display_name"] + # except IndexError: + # bot_display_name = CHATML_ROLE_ASSISTANT + # try: + # user_display_name = scripted_msgs[-2]["display_name"] + # except IndexError: + # user_display_name = CHATML_ROLE_USER # construct user prompt state["raw_input_text"] = user_input user_prompt = { "role": CHATML_ROLE_USER, - "display_name": user_display_name, "content": user_input, } @@ -718,7 +718,6 @@ def run(self, state: dict) -> typing.Iterator[str | None]: prompt_messages.append( { "role": CHATML_ROLE_ASSISTANT, - "display_name": bot_display_name, "content": "", } ) @@ -1046,36 +1045,36 @@ def show_landbot_widget(): ) -def parse_script(bot_script: str) -> (str, list[ConversationEntry]): - # run regex to find scripted messages in script text - script_matches = list(BOT_SCRIPT_RE.finditer(bot_script)) - # extract system message from script - system_message = bot_script - if script_matches: - system_message = system_message[: script_matches[0].start()] - system_message = system_message.strip() - # extract pre-scripted messages from script - scripted_msgs: list[ConversationEntry] = [] - for idx in range(len(script_matches)): - match = script_matches[idx] - try: - next_match = script_matches[idx + 1] - except IndexError: - next_match_start = None - else: - next_match_start = next_match.start() - if (len(script_matches) - idx) % 2 == 0: - role = CHATML_ROLE_USER - else: - role = CHATML_ROLE_ASSISTANT - scripted_msgs.append( - { - "role": role, - "display_name": match.group(1).strip(), - "content": bot_script[match.end() : next_match_start].strip(), - } - ) - return system_message, scripted_msgs +# def parse_script(bot_script: str) -> (str, list[ConversationEntry]): +# # run regex to find 
scripted messages in script text +# script_matches = list(BOT_SCRIPT_RE.finditer(bot_script)) +# # extract system message from script +# system_message = bot_script +# if script_matches: +# system_message = system_message[: script_matches[0].start()] +# system_message = system_message.strip() +# # extract pre-scripted messages from script +# scripted_msgs: list[ConversationEntry] = [] +# for idx in range(len(script_matches)): +# match = script_matches[idx] +# try: +# next_match = script_matches[idx + 1] +# except IndexError: +# next_match_start = None +# else: +# next_match_start = next_match.start() +# if (len(script_matches) - idx) % 2 == 0: +# role = CHATML_ROLE_USER +# else: +# role = CHATML_ROLE_ASSISTANT +# scripted_msgs.append( +# { +# "role": role, +# "display_name": match.group(1).strip(), +# "content": bot_script[match.end() : next_match_start].strip(), +# } +# ) +# return system_message, scripted_msgs def chat_list_view(): @@ -1131,12 +1130,10 @@ def chat_list_view(): # render history for entry in reversed(messages): with msg_container_widget(entry["role"]): - display_name = entry.get("display_name") or entry["role"] - display_name = display_name.capitalize() images = get_entry_images(entry) text = get_entry_text(entry) if text or images: - st.write(f"**{display_name}** \n{text}") + st.write(f"**{entry['role'].capitalize()}** \n{text}") if images: for im in images: st.image(im, style={"maxHeight": "200px"}) From 463022253dd32dd446fefddf8c3b9dc302dc6dd0 Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Thu, 7 Dec 2023 21:51:13 +0530 Subject: [PATCH 093/138] fix translation issue --- daras_ai_v2/asr.py | 5 +++-- recipes/VideoBots.py | 28 +++++++++++++++++++--------- 2 files changed, 22 insertions(+), 11 deletions(-) diff --git a/daras_ai_v2/asr.py b/daras_ai_v2/asr.py index 95979136d..65ed40ddc 100644 --- a/daras_ai_v2/asr.py +++ b/daras_ai_v2/asr.py @@ -236,19 +236,20 @@ def _translate_text( ) # prevent incorrect API calls - if source_language == target_language or not text: + if not text or source_language == target_language or source_language == "und": return text if source_language == "wo-SN" or target_language == "wo-SN": return _MinT_translate_one_text(text, source_language, target_language) config = { - "source_language_code": source_language, "target_language_code": target_language, "contents": text, "mime_type": "text/plain", "transliteration_config": {"enable_transliteration": enable_transliteration}, } + if source_language != "auto": + config["source_language_code"] = source_language # glossary does not work with transliteration if glossary_url and not enable_transliteration: diff --git a/recipes/VideoBots.py b/recipes/VideoBots.py index fef503416..be3d4d78e 100644 --- a/recipes/VideoBots.py +++ b/recipes/VideoBots.py @@ -592,15 +592,25 @@ def run(self, state: dict) -> typing.Iterator[str | None]: ocr_texts.append(ocr_text) # translate input text - yield f"Translating input to english..." - user_input, *ocr_texts = run_google_translate( - texts=[user_input, *ocr_texts], - target_language="en", - glossary_url=request.input_glossary_document, - ) - - for text in ocr_texts: - user_input = f"Image: {text!r}\n{user_input}" + if request.user_language and request.user_language != "en": + yield f"Translating Input to English..." + user_input = run_google_translate( + texts=[user_input], + source_language=request.user_language, + target_language="en", + glossary_url=request.input_glossary_document, + )[0] + + if ocr_texts: + yield f"Translating Images to English..." 
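# Rough sketch of the translation guards introduced here, not the project's
# own run_google_translate helper: skip the API call for empty text,
# same-language pairs, or an undetermined ("und") source, and only send a
# source language code when it is not "auto", so the API falls back to
# detection (the behaviour the OCR'd image text relies on). The Cloud
# Translation v3 client usage is standard; the project id is a placeholder.
from google.cloud import translate_v3 as translate


def translate_to_english(text: str, source_language: str | None) -> str:
    # mirror the early-return guard: nothing to translate, or nothing to gain
    if not text or source_language in ("en", "und"):
        return text
    client = translate.TranslationServiceClient()
    request = {
        "parent": "projects/my-gcp-project/locations/global",  # placeholder
        "contents": [text],
        "mime_type": "text/plain",
        "target_language_code": "en",
    }
    # omitting source_language_code lets the API auto-detect the language
    if source_language and source_language != "auto":
        request["source_language_code"] = source_language
    response = client.translate_text(request=request)
    return response.translations[0].translated_text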
+ ocr_texts = run_google_translate( + texts=ocr_texts, + source_language="auto", + target_language="en", + glossary_url=request.input_glossary_document, + ) + for text in ocr_texts: + user_input = f"Image: {text!r}\n{user_input}" # parse the bot script # system_message, scripted_msgs = parse_script(bot_script) From 805f3c24121ee5bab4be8319c94bde514b3a20c6 Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Thu, 7 Dec 2023 22:21:26 +0530 Subject: [PATCH 094/138] playwright docker --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 77394f97d..cafd0b559 100644 --- a/Dockerfile +++ b/Dockerfile @@ -43,7 +43,7 @@ RUN pip install --no-cache-dir -U poetry pip && poetry install --no-cache --only # install nltk stopwords RUN poetry run python -c 'import nltk; nltk.download("stopwords")' # install playwright -RUN playwright install +RUN poetry run playwright install # copy the code into the container COPY . . From aba52980093b60649e16687587eb5ef5404daccd Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Thu, 7 Dec 2023 22:32:38 +0530 Subject: [PATCH 095/138] playwright install-deps --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index cafd0b559..b3b015f4c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -43,7 +43,7 @@ RUN pip install --no-cache-dir -U poetry pip && poetry install --no-cache --only # install nltk stopwords RUN poetry run python -c 'import nltk; nltk.download("stopwords")' # install playwright -RUN poetry run playwright install +RUN poetry run playwright install-deps && poetry run playwright install # copy the code into the container COPY . . From f0cd1fa0a2a762581c2760466468919d9723b2bc Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Fri, 8 Dec 2023 15:54:44 +0530 Subject: [PATCH 096/138] Fix bots usage with published runs --- daras_ai_v2/bots.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/daras_ai_v2/bots.py b/daras_ai_v2/bots.py index d5c5c8eff..c00232171 100644 --- a/daras_ai_v2/bots.py +++ b/daras_ai_v2/bots.py @@ -83,7 +83,9 @@ def _unpack_bot_integration(self): if bi.published_run: self.page_cls = Workflow(bi.published_run.workflow).page_cls self.query_params = self.page_cls.clean_query_params( - example_id=bi.published_run.example_id, + example_id=bi.published_run.published_run_id, + run_id="", + uid="", ) saved_run = bi.published_run.saved_run self.input_glossary = saved_run.state.get("input_glossary_document") From 2224e3b26b21426ac3e7129dded9d91a9446104a Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Fri, 8 Dec 2023 18:19:31 +0530 Subject: [PATCH 097/138] Add admin page for published run version --- bots/admin.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/bots/admin.py b/bots/admin.py index be085a044..e3a872644 100644 --- a/bots/admin.py +++ b/bots/admin.py @@ -17,6 +17,7 @@ CHATML_ROLE_ASSISSTANT, SavedRun, PublishedRun, + PublishedRunVersion, Message, Platform, Feedback, @@ -256,6 +257,7 @@ class SavedRunAdmin(admin.ModelAdmin): ] list_filter = ["workflow"] search_fields = ["workflow", "example_id", "run_id", "uid"] + autocomplete_fields = ["parent_version"] readonly_fields = [ "open_in_gooey", @@ -289,7 +291,12 @@ def view_bots(self, saved_run: SavedRun): @admin.display(description="Input") def preview_input(self, saved_run: SavedRun): - return 
truncate_text_words(BasePage.preview_input(saved_run.state), 100) + return truncate_text_words(BasePage.preview_input(saved_run.state) or "", 100) + + +@admin.register(PublishedRunVersion) +class PublishedRunVersionAdmin(admin.ModelAdmin): + search_fields = ["id", "version_id", "published_run__published_run_id"] class LastActiveDeltaFilter(admin.SimpleListFilter): From 39237f2105841c08f38df466720137b195e063af Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Fri, 8 Dec 2023 18:20:35 +0530 Subject: [PATCH 098/138] Add parent_version to SavedRun --- .../0051_savedrun_parent_version.py | 24 +++++++++++++++++++ bots/models.py | 12 ++++++++++ 2 files changed, 36 insertions(+) create mode 100644 bots/migrations/0051_savedrun_parent_version.py diff --git a/bots/migrations/0051_savedrun_parent_version.py b/bots/migrations/0051_savedrun_parent_version.py new file mode 100644 index 000000000..3c2c16b18 --- /dev/null +++ b/bots/migrations/0051_savedrun_parent_version.py @@ -0,0 +1,24 @@ +# Generated by Django 4.2.7 on 2023-12-08 10:57 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + dependencies = [ + ("bots", "0050_botintegration_published_run"), + ] + + operations = [ + migrations.AddField( + model_name="savedrun", + name="parent_version", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="children_runs", + to="bots.publishedrunversion", + ), + ), + ] diff --git a/bots/models.py b/bots/models.py index 2453fbff1..2164277f6 100644 --- a/bots/models.py +++ b/bots/models.py @@ -140,6 +140,13 @@ class SavedRun(models.Model): blank=True, related_name="children", ) + parent_version = models.ForeignKey( + "bots.PublishedRunVersion", + on_delete=models.SET_NULL, + null=True, + blank=True, + related_name="children_runs", + ) workflow = models.IntegerField( choices=Workflow.choices, default=Workflow.VIDEO_BOTS @@ -1013,6 +1020,8 @@ class PublishedRun(models.Model): updated_at = models.DateTimeField(auto_now=True) class Meta: + get_latest_by = "updated_at" + ordering = ["-updated_at"] unique_together = [ ["workflow", "published_run_id"], @@ -1152,3 +1161,6 @@ class Meta: models.Index(fields=["published_run", "-created_at"]), models.Index(fields=["version_id"]), ] + + def __str__(self): + return f"{self.published_run} - {self.version_id}" From c0e0ada6f43d13177e72fb20491a2a0e4fc3b459 Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Fri, 8 Dec 2023 18:38:42 +0530 Subject: [PATCH 099/138] fix bulk runner returning None --- recipes/BulkRunner.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/recipes/BulkRunner.py b/recipes/BulkRunner.py index 70b3c94a8..b710ab25c 100644 --- a/recipes/BulkRunner.py +++ b/recipes/BulkRunner.py @@ -387,7 +387,9 @@ def build_requests_for_df(df, request, df_ix, arr_len): else: request_body[field] = df.at[df_ix, col] # for validation - request_body = page_cls.RequestModel.parse_obj(request_body).dict() + request_body = page_cls.RequestModel.parse_obj(request_body).dict( + exclude_unset=True + ) yield url_ix, f, request_body, page_cls From 055c87ccfee319db619fb7923281e7906600a508 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Fri, 8 Dec 2023 18:45:06 +0530 Subject: [PATCH 100/138] Fix breadcrumbs and not use example_id when run_id is there in params --- daras_ai_v2/base.py | 48 
+++++++++++++++++++++++++++++++-------------- 1 file changed, 33 insertions(+), 15 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index c641032f6..9ba18c0d0 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -184,9 +184,7 @@ def render(self): example_id, run_id, uid = extract_query_params(gooey_get_query_params()) current_run = self.get_sr_from_query_params(example_id, run_id, uid) - published_run = self.get_published_run_from_query_params( - example_id, run_id, uid - ) + published_run = self.get_current_published_run() is_root_example = published_run and published_run.is_root_example() title, breadcrumbs = self._get_title_and_breadcrumbs( current_run=current_run, @@ -893,7 +891,16 @@ def get_sr_from_query_params_dict(self, query_params) -> SavedRun: def get_current_published_run(self) -> PublishedRun | None: example_id, run_id, uid = extract_query_params(gooey_get_query_params()) - return self.get_published_run_from_query_params(example_id, run_id, uid) + if run_id: + current_run = self.get_sr_from_query_params(example_id, run_id, uid) + if current_run.parent_version: + return current_run.parent_version.published_run + else: + return None + elif example_id: + return self.get_published_run_from_query_params(example_id, "", "") + else: + return self.get_root_published_run() @classmethod def get_sr_from_query_params( @@ -942,12 +949,18 @@ def recipe_doc_sr(cls) -> SavedRun: @classmethod def run_doc_sr( - cls, run_id: str, uid: str, create: bool = False, parent: SavedRun = None + cls, + run_id: str, + uid: str, + create: bool = False, + parent: SavedRun | None = None, + parent_version: PublishedRunVersion | None = None, ) -> SavedRun: config = dict(workflow=cls.workflow, uid=uid, run_id=run_id) if create: return SavedRun.objects.get_or_create( - **config, defaults=dict(parent=parent) + **config, + defaults=dict(parent=parent, parent_version=parent_version), )[0] else: return SavedRun.objects.get(**config) @@ -1279,7 +1292,7 @@ def on_submit(self): else: self.call_runner_task(example_id, run_id, uid) raise QueryParamsRedirectException( - self.clean_query_params(example_id=example_id, run_id=run_id, uid=uid) + self.clean_query_params(example_id=None, run_id=run_id, uid=uid) ) def should_submit_after_login(self) -> bool: @@ -1315,12 +1328,18 @@ def create_new_run(self): parent = self.get_sr_from_query_params( parent_example_id, parent_run_id, parent_uid ) + published_run = self.get_current_published_run() + parent_version = published_run and published_run.versions.latest() - self.run_doc_sr(run_id, uid, create=True, parent=parent).set( - self.state_to_doc(st.session_state) - ) + self.run_doc_sr( + run_id, + uid, + create=True, + parent=parent, + parent_version=parent_version, + ).set(self.state_to_doc(st.session_state)) - return parent_example_id, run_id, uid + return None, run_id, uid def call_runner_task(self, example_id, run_id, uid, is_api_call=False): from celeryapp.tasks import gui_runner @@ -1405,9 +1424,7 @@ def _render_save_options(self): example_id, run_id, uid = extract_query_params(gooey_get_query_params()) current_sr = self.get_sr_from_query_params(example_id, run_id, uid) - published_run = self.get_published_run_from_query_params( - example_id, run_id, uid - ) + published_run = self.get_current_published_run() with st.expander("🛠️ Admin Options"): if st.button("⭐️ Save Workflow"): @@ -1614,7 +1631,8 @@ def render_example(self, state: dict): def render_steps(self): raise NotImplementedError - def preview_input(self, state: dict) -> str | None: + 
@classmethod + def preview_input(cls, state: dict) -> str | None: return ( state.get("text_prompt") or state.get("input_prompt") From 1ce797851ab8febe8ebaf62da843142732114e3f Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Mon, 11 Dec 2023 10:47:16 +0530 Subject: [PATCH 101/138] Add help note for published_run_id and indexes for PublishedRun and related tables --- ...052_alter_publishedrun_options_and_more.py | 42 +++++++++++++++++++ bots/models.py | 8 ++++ 2 files changed, 50 insertions(+) create mode 100644 bots/migrations/0052_alter_publishedrun_options_and_more.py diff --git a/bots/migrations/0052_alter_publishedrun_options_and_more.py b/bots/migrations/0052_alter_publishedrun_options_and_more.py new file mode 100644 index 000000000..4d6dfe2f8 --- /dev/null +++ b/bots/migrations/0052_alter_publishedrun_options_and_more.py @@ -0,0 +1,42 @@ +# Generated by Django 4.2.7 on 2023-12-11 05:16 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("bots", "0051_savedrun_parent_version"), + ] + + operations = [ + migrations.AlterModelOptions( + name="publishedrun", + options={"get_latest_by": "updated_at", "ordering": ["-updated_at"]}, + ), + migrations.AddIndex( + model_name="publishedrun", + index=models.Index( + fields=["workflow"], name="bots_publis_workflo_a0953a_idx" + ), + ), + migrations.AddIndex( + model_name="publishedrun", + index=models.Index( + fields=["workflow", "created_by"], name="bots_publis_workflo_c75a55_idx" + ), + ), + migrations.AddIndex( + model_name="publishedrun", + index=models.Index( + fields=["workflow", "published_run_id"], + name="bots_publis_workflo_87bece_idx", + ), + ), + migrations.AddIndex( + model_name="publishedrun", + index=models.Index( + fields=["workflow", "visibility", "is_approved_example"], + name="bots_publis_workflo_36a83a_idx", + ), + ), + ] diff --git a/bots/models.py b/bots/models.py index 2164277f6..190907be7 100644 --- a/bots/models.py +++ b/bots/models.py @@ -982,6 +982,7 @@ class FeedbackComment(models.Model): class PublishedRun(models.Model): + # published_run_id was earlier SavedRun.example_id published_run_id = models.CharField( max_length=128, blank=True, @@ -1027,6 +1028,13 @@ class Meta: ["workflow", "published_run_id"], ] + indexes = [ + models.Index(fields=["workflow"]), + models.Index(fields=["workflow", "created_by"]), + models.Index(fields=["workflow", "published_run_id"]), + models.Index(fields=["workflow", "visibility", "is_approved_example"]), + ] + def __str__(self): return self.get_app_url() From 611240056d59280782eeda93690349ad5d713cef Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Mon, 11 Dec 2023 21:04:14 +0530 Subject: [PATCH 102/138] don't use glossary when translating image text --- recipes/VideoBots.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/recipes/VideoBots.py b/recipes/VideoBots.py index be3d4d78e..29f077060 100644 --- a/recipes/VideoBots.py +++ b/recipes/VideoBots.py @@ -602,12 +602,11 @@ def run(self, state: dict) -> typing.Iterator[str | None]: )[0] if ocr_texts: - yield f"Translating Images to English..." + yield f"Translating Image Text to English..." 
ocr_texts = run_google_translate( texts=ocr_texts, source_language="auto", target_language="en", - glossary_url=request.input_glossary_document, ) for text in ocr_texts: user_input = f"Image: {text!r}\n{user_input}" From 2f3922cd25eea8bbb1bb9a89ce3244ad0c3ec51d Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Tue, 12 Dec 2023 03:23:08 +0530 Subject: [PATCH 103/138] bulk eval --- bots/models.py | 1 + daras_ai_v2/all_pages.py | 2 + daras_ai_v2/base.py | 5 +- daras_ai_v2/language_model.py | 52 +++-- gooey_ui/components.py | 25 ++- recipes/BulkEval.py | 373 ++++++++++++++++++++++++++++++++++ recipes/BulkRunner.py | 14 +- 7 files changed, 439 insertions(+), 33 deletions(-) create mode 100644 recipes/BulkEval.py diff --git a/bots/models.py b/bots/models.py index 07d52c4d3..2804d2fce 100644 --- a/bots/models.py +++ b/bots/models.py @@ -72,6 +72,7 @@ class Workflow(models.IntegerChoices): RELATED_QNA_MAKER_DOC = (28, "Related QnA Maker Doc") EMBEDDINGS = (29, "Embeddings") BULK_RUNNER = (30, "Bulk Runner") + BULK_EVAL = (31, "Bulk Evaluator") @property def short_slug(self): diff --git a/daras_ai_v2/all_pages.py b/daras_ai_v2/all_pages.py index 68231daee..34a37d011 100644 --- a/daras_ai_v2/all_pages.py +++ b/daras_ai_v2/all_pages.py @@ -3,6 +3,7 @@ from bots.models import Workflow from daras_ai_v2.base import BasePage +from recipes.BulkEval import BulkEvalPage from recipes.BulkRunner import BulkRunnerPage from recipes.ChyronPlant import ChyronPlantPage from recipes.CompareLLM import CompareLLMPage @@ -49,6 +50,7 @@ ], "LLMs, RAG, & Synthetic Data": [ BulkRunnerPage, + BulkEvalPage, DocExtractPage, CompareLLMPage, DocSearchPage, diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 2b0e37f0a..64df385f3 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -81,7 +81,6 @@ class RecipeRunState(Enum): class StateKeys: page_title = "__title" - page_image = "__image" page_notes = "__notes" created_at = "created_at" @@ -97,7 +96,7 @@ class StateKeys: class BasePage: title: str - image: str + image: str = None workflow: Workflow slug_versions: list[str] @@ -266,7 +265,7 @@ def get_recipe_title(self, state: dict) -> str: return state.get(StateKeys.page_title) or self.title or "" def get_recipe_image(self, state: dict) -> str: - return state.get(StateKeys.page_image) or self.image or "" + return self.image or "" def _user_disabled_check(self): if self.run_user and self.run_user.is_disabled: diff --git a/daras_ai_v2/language_model.py b/daras_ai_v2/language_model.py index 38319a757..76b46222a 100644 --- a/daras_ai_v2/language_model.py +++ b/daras_ai_v2/language_model.py @@ -1,5 +1,6 @@ import hashlib import io +import json import re import typing from enum import Enum @@ -314,16 +315,17 @@ def get_entry_text(entry: ConversationEntry) -> str: def run_language_model( *, model: str, - prompt: str | None = None, - messages: list[ConversationEntry] | None = None, - max_tokens: int = 512, # Default value version 1.0 - quality: float = 1.0, # Default value version 1.0 - num_outputs: int = 1, # Default value version 1.0 - temperature: float = 0.7, # Default value version 1.0 - stop: list[str] | None = None, + prompt: str = None, + messages: list[ConversationEntry] = None, + max_tokens: int = 512, + quality: float = 1.0, + num_outputs: int = 1, + temperature: float = 0.7, + stop: list[str] = None, avoid_repetition: bool = False, - tools: list[LLMTools] | None = None, -) -> list[str] | tuple[list[str], list[list[dict]]]: + tools: list[LLMTools] = None, + response_format_type: typing.Literal["text", 
"json_object"] = None, +) -> list[str] | tuple[list[str], list[list[dict]]] | list[dict]: assert bool(prompt) != bool( messages ), "Pleave provide exactly one of { prompt, messages }" @@ -354,18 +356,22 @@ def run_language_model( stop=stop, avoid_repetition=avoid_repetition, tools=tools, + response_format_type=response_format_type, ) - output_text = [ - # return messages back as either chatml or json messages - format_chatml_message(entry) - if is_chatml - else (entry.get("content") or "").strip() - for entry in result - ] + if response_format_type == "json_object": + out_content = [json.loads(entry["content"]) for entry in result] + else: + out_content = [ + # return messages back as either chatml or json messages + format_chatml_message(entry) + if is_chatml + else (entry.get("content") or "").strip() + for entry in result + ] if tools: - return output_text, [(entry.get("tool_calls") or []) for entry in result] + return out_content, [(entry.get("tool_calls") or []) for entry in result] else: - return output_text + return out_content else: if tools: raise ValueError("Only OpenAI chat models support Tools") @@ -430,7 +436,8 @@ def _run_chat_model( model: str | tuple, stop: list[str] | None, avoid_repetition: bool, - tools: list[LLMTools] | None = None, + tools: list[LLMTools] | None, + response_format_type: typing.Literal["text", "json_object"], ) -> list[ConversationEntry]: match api: case LLMApis.openai: @@ -443,6 +450,7 @@ def _run_chat_model( stop=stop, temperature=temperature, tools=tools, + response_format_type=response_format_type, ) case LLMApis.vertex_ai: if tools: @@ -479,7 +487,8 @@ def _run_openai_chat( temperature: float, stop: list[str] | None, avoid_repetition: bool, - tools: list[LLMTools] | None = None, + tools: list[LLMTools] | None, + response_format_type: typing.Literal["text", "json_object"], ) -> list[ConversationEntry]: from openai._types import NOT_GIVEN @@ -504,6 +513,9 @@ def _run_openai_chat( frequency_penalty=frequency_penalty, presence_penalty=presence_penalty, tools=[tool.spec for tool in tools] if tools else NOT_GIVEN, + response_format={"type": response_format_type} + if response_format_type + else NOT_GIVEN, ) for model_str in model ], diff --git a/gooey_ui/components.py b/gooey_ui/components.py index 1172d59fb..a63b0826b 100644 --- a/gooey_ui/components.py +++ b/gooey_ui/components.py @@ -29,7 +29,6 @@ def dummy(*args, **kwargs): spinner = dummy set_page_config = dummy form = dummy -plotly_chart = dummy dataframe = dummy @@ -287,13 +286,14 @@ def text_area( **props, ) -> str: style = props.setdefault("style", {}) - if key: - assert not value, "only one of value or key can be provided" - else: + # if key: + # assert not value, "only one of value or key can be provided" + # else: + if not key: key = md5_values( "textarea", label, height, help, value, placeholder, label_visibility ) - value = str(state.session_state.setdefault(key, value)) + value = str(state.session_state.setdefault(key, value) or "") if label_visibility != "visible": label = None if disabled: @@ -838,6 +838,21 @@ def breadcrumb_item(inner_html: str, link_to: str | None = None, **props): html(inner_html) +def plotly_chart(figure_or_data, **kwargs): + data = ( + figure_or_data.to_plotly_json() + if hasattr(figure_or_data, "to_plotly_json") + else figure_or_data + ) + state.RenderTreeNode( + name="plotly-chart", + props=dict( + chart=data, + args=kwargs, + ), + ).mount() + + def dedent(text: str | None) -> str | None: if not text: return text diff --git a/recipes/BulkEval.py 
b/recipes/BulkEval.py new file mode 100644 index 000000000..63311176e --- /dev/null +++ b/recipes/BulkEval.py @@ -0,0 +1,373 @@ +import itertools +import typing +import uuid +from itertools import zip_longest + +from pydantic import BaseModel, Field + +import gooey_ui as st +from bots.models import Workflow +from daras_ai.image_input import upload_file_from_bytes +from daras_ai_v2.base import BasePage +from daras_ai_v2.doc_search_settings_widgets import document_uploader +from daras_ai_v2.field_render import field_title_desc +from daras_ai_v2.functional import map_parallel +from daras_ai_v2.language_model import ( + run_language_model, + LargeLanguageModels, + llm_price, +) +from daras_ai_v2.language_model_settings_widgets import language_model_settings +from daras_ai_v2.prompt_vars import render_prompt_vars +from recipes.BulkRunner import read_df_any +from recipes.DocSearch import render_documents + +NROWS_CACHE_KEY = "__nrows" + +AggFunctionsList = [ + "mean", + "median", + "min", + "max", + "sum", + "cumsum", + "prod", + "cumprod", + "std", + "var", + "first", + "last", + "count", + "cumcount", + "nunique", + "rank", +] + + +class LLMSettingsMixin(BaseModel): + selected_model: typing.Literal[tuple(e.name for e in LargeLanguageModels)] | None + avoid_repetition: bool | None + num_outputs: int | None + quality: float | None + max_tokens: int | None + sampling_temperature: float | None + + +class EvalPrompt(typing.TypedDict): + name: str + prompt: str + + +class AggFunction(typing.TypedDict): + column: str + function: typing.Literal[tuple(AggFunctionsList)] + + +class AggFunctionResult(typing.TypedDict): + column: str + function: typing.Literal[tuple(AggFunctionsList)] + count: int + value: float + + +def _render_results(results: list[AggFunctionResult]): + import plotly.graph_objects as go + from plotly.colors import sample_colorscale + from plotly.subplots import make_subplots + + for k, g in itertools.groupby(results, key=lambda d: d["function"]): + st.write("---\n##### " + k.capitalize()) + + g = list(g) + columns = [d["column"] for d in g] + values = [round(d["value"], 2) for d in g] + norm_values = [(v - min(values)) / (max(values) - min(values)) for v in values] + colors = sample_colorscale("RdYlGn", norm_values, colortype="tuple") + colors = [f"rgba{(r * 255, g * 255, b * 255, 0.5)}" for r, g, b in colors] + + fig = make_subplots( + rows=2, + shared_xaxes=True, + specs=[[{"type": "table"}], [{"type": "bar"}]], + vertical_spacing=0.03, + row_heights=[0.3, 0.7], + ) + counts = [d.get("count", 1) for d in g] + fig.add_trace( + go.Table( + header=dict(values=["Metric", "Value", "Count"]), + cells=dict( + values=[columns, values, counts], + fill_color=["aliceblue", colors, "aliceblue"], + ), + ), + row=1, + col=1, + ) + fig.add_trace( + go.Bar( + name=k, + x=columns, + y=values, + marker=dict(color=colors), + text=values, + texttemplate="%{text}", + insidetextanchor="middle", + insidetextfont=dict(size=24), + ), + row=2, + col=1, + ) + fig.update_layout( + margin=dict(l=0, r=0, t=24, b=0), + # autosize=True, + ) + st.plotly_chart(fig) + + +class BulkEvalPage(BasePage): + title = "Bulk Evaluator" + workflow = Workflow.BULK_EVAL + slug_versions = ["bulk-eval", "eval"] + + class RequestModel(LLMSettingsMixin, BaseModel): + documents: list[str] = Field( + title="Input Data Spreadsheet", + description=""" +Upload or link to a CSV or google sheet that contains your sample input data. 
+For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. +Remember to includes header names in your CSV too. + """, + ) + + eval_prompts: list[EvalPrompt] = Field( + title="Evaluation Prompts", + description=""" +Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. +_The `columns` dictionary can be used to reference the spreadsheet columns._ + """, + ) + + agg_functions: list[AggFunction] | None = Field( + title="Aggregations", + description=""" +Aggregate using one or more operations over the specified columns. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). + """, + ) + + class ResponseModel(BaseModel): + output_documents: list[str] + aggregations: list[list[AggFunctionResult]] | None + + def render_form_v2(self): + files = document_uploader( + f"##### {field_title_desc(self.RequestModel, 'documents')}", + accept=(".csv", ".xlsx", ".xls", ".json", ".tsv", ".xml"), + ) + st.session_state[NROWS_CACHE_KEY] = get_nrows(files) + if not files: + return + + st.write( + """ +##### Input Data Preview +Here's what you uploaded: + """ + ) + for file in files: + st.data_table(file) + st.write("---") + + def render_inputs(key: str, del_key: str, d: EvalPrompt): + col1, col2 = st.columns([1, 8], responsive=False) + with col1: + st.button("❌️", key=del_key, type="tertiary") + with col2: + d["name"] = st.text_input( + label="", + label_visibility="collapsed", + placeholder="Metric Name", + key=key + ":name", + value=d.get("name"), + ).strip() + d["prompt"] = st.text_area( + label="", + label_visibility="collapsed", + placeholder="Prompt", + key=key + ":prompt", + value=d.get("prompt"), + height=500, + ).strip() + + st.write("##### " + field_title_desc(self.RequestModel, "eval_prompts")) + list_view_editor( + add_btn_label="➕ Add a Prompt", + key="eval_prompts", + render_inputs=render_inputs, + ) + + def render_inputs(key: str, del_key: str, d: AggFunction): + col1, col2, col3 = st.columns([1, 5, 3], responsive=False) + with col1: + st.button("❌️", key=del_key, type="tertiary") + with col2: + d["column"] = st.text_input( + "", + label_visibility="collapsed", + placeholder="Column Name", + key=key + ":column", + value=d.get("column"), + ).strip() + with col3: + d["function"] = st.selectbox( + "", + label_visibility="collapsed", + key=key + ":func", + options=AggFunctionsList, + default_value=d.get("function"), + ) + + st.html("
") + st.write("##### " + field_title_desc(self.RequestModel, "agg_functions")) + list_view_editor( + add_btn_label="➕ Add an Aggregation", + key="agg_functions", + render_inputs=render_inputs, + ) + + def render_settings(self): + language_model_settings() + + def render_example(self, state: dict): + render_documents(state) + + def render_output(self): + files = st.session_state.get("output_documents", []) + aggregations = st.session_state.get("aggregations", []) + + for file, results in zip_longest(files, aggregations): + st.write(file) + st.data_table(file) + + if not results: + continue + + _render_results(results) + + def run_v2( + self, + request: "BulkEvalPage.RequestModel", + response: "BulkEvalPage.ResponseModel", + ) -> typing.Iterator[str | None]: + import pandas as pd + + response.output_documents = [] + response.aggregations = [] + + for doc_ix, doc in enumerate(request.documents): + df = read_df_any(doc) + in_recs = df.to_dict(orient="records") + out_recs = [] + + out_df = None + f = upload_file_from_bytes( + filename=f"bulk-eval-{doc_ix}-0.csv", + data=df.to_csv(index=False).encode(), + content_type="text/csv", + ) + response.output_documents.append(f) + response.aggregations.append([]) + + for df_ix in range(len(in_recs)): + rec_ix = len(out_recs) + out_recs.append(in_recs[df_ix]) + + for ep_ix, ep in enumerate(request.eval_prompts): + progress = round( + (doc_ix + df_ix + ep_ix) + / (len(request.documents) + len(df) + len(request.eval_prompts)) + * 100 + ) + yield f"{progress}%" + prompt = render_prompt_vars( + ep["prompt"], + st.session_state | {"columns": out_recs[rec_ix]}, + ) + ret = run_language_model( + model=LargeLanguageModels.gpt_4_turbo.name, + prompt=prompt, + response_format_type="json_object", + )[0] + assert isinstance(ret, dict) + for metric_name, metric_value in ret.items(): + col = f"{ep['name']} - {metric_name}" + out_recs[rec_ix][col] = metric_value + + out_df = pd.DataFrame.from_records(out_recs) + f = upload_file_from_bytes( + filename=f"bulk-runner-{doc_ix}-{df_ix}.csv", + data=out_df.to_csv(index=False).encode(), + content_type="text/csv", + ) + response.output_documents[doc_ix] = f + + if out_df is None: + continue + for agg_ix, agg in enumerate(request.agg_functions): + col_values = out_df[agg["column"]].dropna() + agg_value = col_values.agg(agg["function"]) + response.aggregations[doc_ix].append( + { + "column": agg["column"], + "function": agg["function"], + "count": len(col_values), + "value": agg_value, + } + ) + + def fields_to_save(self) -> [str]: + return super().fields_to_save() + [NROWS_CACHE_KEY] + + def get_raw_price(self, state: dict) -> float: + try: + price = llm_price[LargeLanguageModels[state["selected_model"]]] + except KeyError: + price = 1 + nprompts = len(state.get("eval_prompts") or {}) or 1 + nrows = ( + state.get(NROWS_CACHE_KEY) or get_nrows(state.get("documents") or []) or 1 + ) + return price * nprompts * nrows + + +@st.cache_in_session_state +def get_nrows(files: list[str]) -> int: + dfs = map_parallel(read_df_any, files) + return sum((len(df) for df in dfs), 0) + + +def list_view_editor( + *, + add_btn_label: str, + key: str, + render_labels: typing.Callable = None, + render_inputs: typing.Callable[[str, str, dict], None], +): + old_lst = st.session_state.setdefault(key, []) + add_key = f"--{key}:add" + if st.session_state.get(add_key): + old_lst.append({}) + label_placeholder = st.div() + new_lst = [] + for d in old_lst: + entry_key = d.setdefault("__key__", f"--{key}:{uuid.uuid1()}") + del_key = entry_key + ":del" + if 
st.session_state.pop(del_key, None): + continue + render_inputs(entry_key, del_key, d) + new_lst.append(d) + if new_lst and render_labels: + with label_placeholder: + render_labels() + st.session_state[key] = new_lst + st.button(add_btn_label, key=add_key) diff --git a/recipes/BulkRunner.py b/recipes/BulkRunner.py index b710ab25c..449efd058 100644 --- a/recipes/BulkRunner.py +++ b/recipes/BulkRunner.py @@ -22,10 +22,11 @@ class BulkRunnerPage(BasePage): - title = "Bulk Runner & Evaluator" + title = "Bulk Runner" image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/87f35df4-88d7-11ee-aac9-02420a00016b/Bulk%20Runner.png.png" workflow = Workflow.BULK_RUNNER slug_versions = ["bulk-runner", "bulk"] + price = 1 class RequestModel(BaseModel): documents: list[str] = Field( @@ -244,7 +245,7 @@ def run_v2( response.output_documents = [] for doc_ix, doc in enumerate(request.documents): - df = _read_df(doc) + df = read_df_any(doc) in_recs = df.to_dict(orient="records") out_recs = [] @@ -290,7 +291,10 @@ def run_v2( for field, col in request.output_columns.items(): if len(request.run_urls) > 1: - col = f"({url_ix + 1}) {col}" + if sr.page_title: + col = f"({sr.page_title}) {col}" + else: + col = f"({url_ix + 1}) {col}" out_val = state.get(field) if isinstance(out_val, list): for arr_ix, item in enumerate(out_val): @@ -441,7 +445,7 @@ def is_arr(field_props: dict) -> bool: @st.cache_in_session_state def get_columns(files: list[str]) -> list[str]: - dfs = map_parallel(_read_df, files) + dfs = map_parallel(read_df_any, files) return list( { col: None @@ -452,7 +456,7 @@ def get_columns(files: list[str]) -> list[str]: ) -def _read_df(f_url: str) -> "pd.DataFrame": +def read_df_any(f_url: str) -> "pd.DataFrame": import pandas as pd doc_meta = doc_url_to_metadata(f_url) From 73113964f5b27f30cdf204638e4336c55925f3ed Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Tue, 12 Dec 2023 03:56:45 +0530 Subject: [PATCH 104/138] fix telugu text in bulk eval table header metric name in bulk eval --- daras_ai_v2/vector_search.py | 5 +++-- recipes/BulkEval.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/daras_ai_v2/vector_search.py b/daras_ai_v2/vector_search.py index d5cf5549d..35dd02de9 100644 --- a/daras_ai_v2/vector_search.py +++ b/daras_ai_v2/vector_search.py @@ -495,9 +495,10 @@ def download_content_bytes(*, f_url: str, mime_type: str) -> tuple[bytes, str]: return b"", "" f_bytes = r.content # if it's a known encoding, standardize to utf-8 - if r.encoding: + encoding = r.apparent_encoding or r.encoding + if encoding: try: - codec = codecs.lookup(r.encoding) + codec = codecs.lookup(encoding) except LookupError: pass else: diff --git a/recipes/BulkEval.py b/recipes/BulkEval.py index 63311176e..83c3172ad 100644 --- a/recipes/BulkEval.py +++ b/recipes/BulkEval.py @@ -95,7 +95,7 @@ def _render_results(results: list[AggFunctionResult]): counts = [d.get("count", 1) for d in g] fig.add_trace( go.Table( - header=dict(values=["Metric", "Value", "Count"]), + header=dict(values=["Metric", k.capitalize(), "Count"]), cells=dict( values=[columns, values, counts], fill_color=["aliceblue", colors, "aliceblue"], From b7da177f5dcc13c492f124b9e8d323391dfc0bbe Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 12 Dec 2023 16:09:10 +0530 Subject: [PATCH 105/138] Add Save as New button for runs with unpublished changes --- daras_ai_v2/base.py | 36 ++++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) 
diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index e5a5aa878..d5866de46 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -366,6 +366,7 @@ def _render_published_run_buttons( style={"min-width": "min(300px, 100vw)"} ): self._render_run_actions_modal( + current_run=current_run, published_run=published_run, modal=run_actions_modal, ) @@ -480,14 +481,28 @@ def _has_published_run_changed( def _render_run_actions_modal( self, *, + current_run: SavedRun, published_run: PublishedRun, modal: Modal, ): + assert published_run is not None + + is_latest_version = published_run.saved_run == current_run + with st.div(className="mt-4"): - duplicate_icon = '' - duplicate_button = st.button( - f"{duplicate_icon} Duplicate", type="secondary", className="w-100" - ) + duplicate_button = None + save_as_new_button = None + duplicate_icon = save_as_new_icon = '' + if is_latest_version: + duplicate_button = st.button( + f"{duplicate_icon} Duplicate", type="secondary", className="w-100" + ) + else: + save_as_new_button = st.button( + f"{save_as_new_icon} Save as New", + type="secondary", + className="w-100", + ) delete_icon = '' delete_button = st.button( f"{delete_icon} Delete", type="secondary", className="w-100 text-danger" @@ -504,6 +519,19 @@ def _render_run_actions_modal( query_params=dict(example_id=duplicate_pr.published_run_id), ) + if save_as_new_button: + new_pr = self.create_published_run( + published_run_id=get_random_doc_id(), + saved_run=current_run, + user=self.request.user, + title=f"{published_run.title} (Copy)", + notes=published_run.notes, + visibility=PublishedRunVisibility(PublishedRunVisibility.UNLISTED), + ) + raise QueryParamsRedirectException( + query_params=dict(example_id=new_pr.published_run_id) + ) + confirm_delete_modal = Modal("Confirm Delete", key="confirm-delete-modal") if delete_button: if not published_run.published_run_id: From 7e75270afdf1889161df150a6b85c22c69d31454 Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Tue, 12 Dec 2023 22:35:04 +0530 Subject: [PATCH 106/138] handle UnicodeDecodeError --- daras_ai_v2/vector_search.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/daras_ai_v2/vector_search.py b/daras_ai_v2/vector_search.py index 35dd02de9..fbbc3b206 100644 --- a/daras_ai_v2/vector_search.py +++ b/daras_ai_v2/vector_search.py @@ -502,7 +502,10 @@ def download_content_bytes(*, f_url: str, mime_type: str) -> tuple[bytes, str]: except LookupError: pass else: - f_bytes = codec.decode(f_bytes)[0].encode() + try: + f_bytes = codec.decode(f_bytes)[0].encode() + except UnicodeDecodeError: + pass ext = guess_ext_from_response(r) return f_bytes, ext From f473bc17184fd253faa041937848aafe61af0e54 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Wed, 13 Dec 2023 16:42:24 +0530 Subject: [PATCH 107/138] Rename published -> save in view --- daras_ai_v2/base.py | 15 ++++++++------- daras_ai_v2/tabs_widget.py | 4 ++-- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index d5866de46..4952bc624 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -358,7 +358,6 @@ def _render_published_run_buttons( current_run=current_run, published_run=published_run, is_update_mode=is_update_mode, - modal=publish_modal, ) if run_actions_modal.is_open(): @@ -375,7 +374,6 @@ def _render_publish_modal( self, *, current_run: SavedRun, - modal: Modal, published_run: PublishedRun | None, is_update_mode: bool = False, ): @@ -426,7 +424,10 @@ def 
_render_publish_modal( ) with st.div(className="mt-4 d-flex justify-content-center"): - publish_button = st.button("🌻 Publish", type="primary") + save_icon = '' + publish_button = st.button( + f"{save_icon} Save", className="px-4", type="primary" + ) if publish_button: recipe_title = self.get_root_published_run().title or self.title @@ -682,7 +683,7 @@ def get_tabs(self): tabs = [MenuTabs.run, MenuTabs.examples, MenuTabs.run_as_api] if self.request.user: tabs.extend([MenuTabs.history]) - tabs.extend([MenuTabs.published]) + tabs.extend([MenuTabs.saved]) return tabs def render_selected_tab(self, selected_tab: str): @@ -716,8 +717,8 @@ def render_selected_tab(self, selected_tab: str): case MenuTabs.run_as_api: self.run_as_api_tab() - case MenuTabs.published: - self._published_tab() + case MenuTabs.saved: + self._saved_tab() render_js_dynamic_dates() def _render_version_history(self): @@ -1524,7 +1525,7 @@ def _render(pr: PublishedRun): grid_layout(3, example_runs, _render) - def _published_tab(self): + def _saved_tab(self): if not self.request.user or self.request.user.is_anonymous: redirect_url = furl( "/login", query_params={"next": furl(self.request.url).set(origin=None)} diff --git a/daras_ai_v2/tabs_widget.py b/daras_ai_v2/tabs_widget.py index 0f945eb70..f6511e58e 100644 --- a/daras_ai_v2/tabs_widget.py +++ b/daras_ai_v2/tabs_widget.py @@ -10,7 +10,7 @@ class MenuTabs: run_as_api = "🚀 API" history = "📖 History" integrations = "🔌 Integrations" - published = "📁 Published" + saved = "📁 Saved" paths = { run: "", @@ -18,7 +18,7 @@ class MenuTabs: run_as_api: "api", history: "history", integrations: "integrations", - published: "published", + saved: "saved", } paths_reverse = {v: k for k, v in paths.items()} From 0d6d5f0804848d83543d0cc30a2fb76d67160e55 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Wed, 13 Dec 2023 17:07:18 +0530 Subject: [PATCH 108/138] Silently create root published run for new recipes --- daras_ai_v2/base.py | 30 +++++++++++++++++++++++++++--- explore.py | 4 ++-- 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 4f8636ec0..365688f06 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -664,7 +664,7 @@ def _render_breadcrumbs(self, items: list[tuple[str, str | None]]): ) def get_recipe_title(self) -> str: - return self.get_root_published_run().title or self.title or "" + return self.get_or_create_root_published_run().title or self.title or "" def get_recipe_image(self, state: dict) -> str: return self.image or "" @@ -972,8 +972,32 @@ def get_root_published_run(cls) -> PublishedRun: return cls.get_published_run(published_run_id="") @classmethod - def recipe_doc_sr(cls) -> SavedRun: - return cls.get_root_published_run().saved_run + def get_or_create_root_published_run(cls) -> PublishedRun: + try: + return cls.get_root_published_run() + except PublishedRun.DoesNotExist: + saved_run = cls.run_doc_sr( + run_id="", + uid="", + create=True, + parent=None, + parent_version=None, + ) + return cls.create_published_run( + published_run_id="", + saved_run=saved_run, + user=None, + title=cls.title, + notes=cls().preview_description(state=saved_run.to_dict()), + visibility=PublishedRunVisibility(PublishedRunVisibility.PUBLIC), + ) + + @classmethod + def recipe_doc_sr(cls, create: bool = False) -> SavedRun: + if create: + return cls.get_or_create_root_published_run().saved_run + else: + return cls.get_root_published_run().saved_run @classmethod def run_doc_sr( 
diff --git a/explore.py b/explore.py index 65b505524..f2f0b64d4 100644 --- a/explore.py +++ b/explore.py @@ -14,7 +14,7 @@ def render(): def _render_non_featured(page_cls): page = page_cls() - state = page.recipe_doc_sr().to_dict() + state = page.recipe_doc_sr(create=True).to_dict() # total_runs = page.get_total_runs() col1, col2 = gui.columns([1, 2]) @@ -27,7 +27,7 @@ def _render_non_featured(page_cls): def _render_as_featured(page_cls): page = page_cls() - state = page.recipe_doc_sr().to_dict() + state = page.recipe_doc_sr(create=True).to_dict() # total_runs = page.get_total_runs() render_image(page, state) # render_description(page, state, total_runs) From a6674a1a33aed00b582300ce9e16d872a82ebc52 Mon Sep 17 00:00:00 2001 From: clr-li <111320104+clr-li@users.noreply.github.com> Date: Wed, 6 Dec 2023 08:54:29 -0800 Subject: [PATCH 109/138] Changed workflow meta images --- daras_ai_v2/base.py | 2 +- recipes/BulkRunner.py | 5 +++++ recipes/CompareLLM.py | 2 +- recipes/CompareText2Img.py | 5 +++++ recipes/CompareUpscaler.py | 2 +- recipes/DeforumSD.py | 5 +++++ recipes/DocExtract.py | 2 +- recipes/DocSearch.py | 2 +- recipes/DocSummary.py | 2 +- recipes/FaceInpainting.py | 5 +++++ recipes/GoogleGPT.py | 4 ++-- recipes/GoogleImageGen.py | 5 +++++ recipes/ImageSegmentation.py | 5 +++++ recipes/Img2Img.py | 5 +++++ recipes/Lipsync.py | 4 ++-- recipes/LipsyncTTS.py | 2 +- recipes/ObjectInpainting.py | 5 +++++ recipes/QRCodeGenerator.py | 2 +- recipes/RelatedQnA.py | 2 +- recipes/SEOSummary.py | 2 +- recipes/SmartGPT.py | 2 +- recipes/SocialLookupEmail.py | 2 +- recipes/Text2Audio.py | 2 +- recipes/TextToSpeech.py | 2 +- recipes/VideoBots.py | 2 +- recipes/asr.py | 2 +- 26 files changed, 60 insertions(+), 20 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 64df385f3..ea15d9c03 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -60,7 +60,7 @@ DEFAULT_META_IMG = ( # Small - "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/optimized%20hp%20gif.gif" + "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/b0f328d0-93f7-11ee-bd89-02420a0001cc/Main.jpg.png" # "https://storage.googleapis.com/dara-c1b52.appspot.com/meta_tag_default_img.jpg" # Big # "https://storage.googleapis.com/dara-c1b52.appspot.com/meta_tag_gif.gif" diff --git a/recipes/BulkRunner.py b/recipes/BulkRunner.py index 449efd058..ea3c2d9fe 100644 --- a/recipes/BulkRunner.py +++ b/recipes/BulkRunner.py @@ -20,6 +20,8 @@ ) from recipes.DocSearch import render_documents +DEFAULT_BULK_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/d80fd4d8-93fa-11ee-bc13-02420a0001cc/Bulk%20Runner.jpg.png" + class BulkRunnerPage(BasePage): title = "Bulk Runner" @@ -61,6 +63,9 @@ class RequestModel(BaseModel): class ResponseModel(BaseModel): output_documents: list[str] + def preview_image(self, state: dict) -> str | None: + return DEFAULT_BULK_META_IMG + def render_form_v2(self): from daras_ai_v2.all_pages import page_slug_map, normalize_slug diff --git a/recipes/CompareLLM.py b/recipes/CompareLLM.py index 867fc31ef..8c60ba154 100644 --- a/recipes/CompareLLM.py +++ b/recipes/CompareLLM.py @@ -17,7 +17,7 @@ from daras_ai_v2.loom_video_widget import youtube_video from daras_ai_v2.prompt_vars import prompt_vars_widget, render_prompt_vars -DEFAULT_COMPARE_LM_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/assets/compare%20llm%20under%201%20mg%20gif.gif" +DEFAULT_COMPARE_LM_META_IMG = 
"https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/5e4f4c58-93fc-11ee-a39e-02420a0001ce/LLMs.jpg.png" class CompareLLMPage(BasePage): diff --git a/recipes/CompareText2Img.py b/recipes/CompareText2Img.py index 2b6757020..96013554f 100644 --- a/recipes/CompareText2Img.py +++ b/recipes/CompareText2Img.py @@ -25,6 +25,8 @@ Schedulers, ) +DEFAULT_COMPARE_TEXT2IMG_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/ae7b2940-93fc-11ee-8edc-02420a0001cc/Compare%20image%20generators.jpg.png" + class CompareText2ImgPage(BasePage): title = "Compare AI Image Generators" @@ -74,6 +76,9 @@ class ResponseModel(BaseModel): typing.Literal[tuple(e.name for e in Text2ImgModels)], list[str] ] + def preview_image(self, state: dict) -> str | None: + return DEFAULT_COMPARE_TEXT2IMG_META_IMG + def related_workflows(self) -> list: from recipes.FaceInpainting import FaceInpaintingPage from recipes.ObjectInpainting import ObjectInpaintingPage diff --git a/recipes/CompareUpscaler.py b/recipes/CompareUpscaler.py index 428d80322..9d5a2d802 100644 --- a/recipes/CompareUpscaler.py +++ b/recipes/CompareUpscaler.py @@ -9,7 +9,7 @@ from daras_ai_v2.face_restoration import UpscalerModels, run_upscaler_model from daras_ai_v2.stable_diffusion import SD_IMG_MAX_SIZE -DEFAULT_COMPARE_UPSCALER_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/assets/COMPARE%20IMAGE%20UPSCALERS.jpg" +DEFAULT_COMPARE_UPSCALER_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/2e8ee512-93fe-11ee-a083-02420a0001c8/Image%20upscaler.jpg.png" class CompareUpscalerPage(BasePage): diff --git a/recipes/DeforumSD.py b/recipes/DeforumSD.py index a8b87ac6e..3fcf8da14 100644 --- a/recipes/DeforumSD.py +++ b/recipes/DeforumSD.py @@ -15,6 +15,8 @@ from daras_ai_v2.safety_checker import safety_checker from daras_ai_v2.tabs_widget import MenuTabs +DEFAULT_DEFORUMSD_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/7dc25196-93fe-11ee-9e3a-02420a0001ce/AI%20Animation%20generator.jpg.png" + class AnimationModels(TextChoices): protogen_2_2 = ("Protogen_V2.2.ckpt", "Protogen V2.2 (darkstorm2150)") @@ -199,6 +201,9 @@ class RequestModel(BaseModel): class ResponseModel(BaseModel): output_video: str + def preview_image(self, state: dict) -> str | None: + return DEFAULT_DEFORUMSD_META_IMG + def related_workflows(self) -> list: from recipes.VideoBots import VideoBotsPage from recipes.LipsyncTTS import LipsyncTTSPage diff --git a/recipes/DocExtract.py b/recipes/DocExtract.py index bbb0b2277..9cb744529 100644 --- a/recipes/DocExtract.py +++ b/recipes/DocExtract.py @@ -40,7 +40,7 @@ from daras_ai_v2.vector_search import doc_url_to_metadata from recipes.DocSearch import render_documents -DEFAULT_YOUTUBE_BOT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/6c8f6876-538c-11ee-bea7-02420a000195/youtube%20bot%201.png.png" +DEFAULT_YOUTUBE_BOT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/ddc8ffac-93fb-11ee-89fb-02420a0001cb/Youtube%20transcripts.jpg.png" class Columns(IntegerChoices): diff --git a/recipes/DocSearch.py b/recipes/DocSearch.py index 70a3c43b5..0f804bbc7 100644 --- a/recipes/DocSearch.py +++ b/recipes/DocSearch.py @@ -33,7 +33,7 @@ render_sources_widget, ) -DEFAULT_DOC_SEARCH_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/assets/DOC%20SEARCH.gif" +DEFAULT_DOC_SEARCH_META_IMG = 
"https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/bcc7aa58-93fe-11ee-a083-02420a0001c8/Search%20your%20docs.jpg.png" class DocSearchPage(BasePage): diff --git a/recipes/DocSummary.py b/recipes/DocSummary.py index 8e099e947..b45fea097 100644 --- a/recipes/DocSummary.py +++ b/recipes/DocSummary.py @@ -27,7 +27,7 @@ ) from recipes.GoogleGPT import render_output_with_refs, GoogleGPTPage -DEFAULT_DOC_SUMMARY_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/db70c56e-585a-11ee-990b-02420a00018f/doc%20summary.png.png" +DEFAULT_DOC_SUMMARY_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/f35796d2-93fe-11ee-b86c-02420a0001c7/Summarize%20with%20GPT.jpg.png" class CombineDocumentsChains(Enum): diff --git a/recipes/FaceInpainting.py b/recipes/FaceInpainting.py index 3702652ae..1fee77221 100644 --- a/recipes/FaceInpainting.py +++ b/recipes/FaceInpainting.py @@ -23,6 +23,8 @@ from daras_ai_v2.repositioning import repositioning_preview_img from daras_ai_v2.stable_diffusion import InpaintingModels +DEFAULT_FACE_INPAINTING_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/a146bfc0-93ff-11ee-b86c-02420a0001c7/Face%20in%20painting.jpg.png" + class FaceInpaintingPage(BasePage): title = "AI Image with a Face" @@ -77,6 +79,9 @@ class ResponseModel(BaseModel): diffusion_images: list[str] output_images: list[str] + def preview_image(self, state: dict) -> str | None: + return DEFAULT_FACE_INPAINTING_META_IMG + def preview_description(self, state: dict) -> str: return "Upload & extract a face into an AI-generated photo using your text + the latest Stable Diffusion or DallE image generator." diff --git a/recipes/GoogleGPT.py b/recipes/GoogleGPT.py index dde12482e..c4cad5535 100644 --- a/recipes/GoogleGPT.py +++ b/recipes/GoogleGPT.py @@ -37,12 +37,12 @@ EmptySearchResults, ) -DEFAULT_GOOGLE_GPT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/assets/WEBSEARCH%20%2B%20CHATGPT.jpg" +DEFAULT_GOOGLE_GPT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/85ed60a2-9405-11ee-9747-02420a0001ce/Web%20search%20GPT.jpg.png" class GoogleGPTPage(BasePage): title = "Web Search + GPT3" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/1de97d80-88d7-11ee-ad97-02420a00016c/Websearch%20GPT.png.png" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/28649544-9406-11ee-bba3-02420a0001cc/Websearch%20GPT%20option%202.png.png" workflow = Workflow.GOOGLE_GPT slug_versions = ["google-gpt"] diff --git a/recipes/GoogleImageGen.py b/recipes/GoogleImageGen.py index e02833063..6033ede36 100644 --- a/recipes/GoogleImageGen.py +++ b/recipes/GoogleImageGen.py @@ -29,6 +29,8 @@ instruct_pix2pix, ) +DEFAULT_GOOGLE_IMG_GEN_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/dcd82b68-9400-11ee-9e3a-02420a0001ce/Search%20result%20photo.jpg.png" + class GoogleImageGenPage(BasePage): title = "Render Image Search Results with AI" @@ -74,6 +76,9 @@ class ResponseModel(BaseModel): image_urls: list[str] selected_image: str | None + def preview_image(self, state: dict) -> str | None: + return DEFAULT_GOOGLE_IMG_GEN_META_IMG + def related_workflows(self): from recipes.ObjectInpainting import ObjectInpaintingPage from recipes.QRCodeGenerator import QRCodeGeneratorPage diff --git a/recipes/ImageSegmentation.py b/recipes/ImageSegmentation.py index 72b6ec193..e1c313f06 100644 --- 
a/recipes/ImageSegmentation.py +++ b/recipes/ImageSegmentation.py @@ -28,6 +28,8 @@ repositioning_preview_widget, ) +DEFAULT_IMG_SEGMENTATION_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/8363ed50-9401-11ee-878f-02420a0001cb/AI%20bg%20changer.jpg.png" + class ImageSegmentationPage(BasePage): title = "AI Background Changer" @@ -65,6 +67,9 @@ class ResponseModel(BaseModel): resized_image: str resized_mask: str + def preview_image(self, state: dict) -> str | None: + return DEFAULT_IMG_SEGMENTATION_META_IMG + def related_workflows(self) -> list: from recipes.ObjectInpainting import ObjectInpaintingPage from recipes.Img2Img import Img2ImgPage diff --git a/recipes/Img2Img.py b/recipes/Img2Img.py index 228c3ed73..631d2e0ed 100644 --- a/recipes/Img2Img.py +++ b/recipes/Img2Img.py @@ -19,6 +19,8 @@ ) from daras_ai_v2.safety_checker import safety_checker +DEFAULT_IMG2IMG_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/cc2804ea-9401-11ee-940a-02420a0001c7/Edit%20an%20image.jpg.png" + class Img2ImgPage(BasePage): title = "Edit An Image with AI prompt" @@ -68,6 +70,9 @@ class RequestModel(BaseModel): class ResponseModel(BaseModel): output_images: list[str] + def preview_image(self, state: dict) -> str | None: + return DEFAULT_IMG2IMG_META_IMG + def related_workflows(self) -> list: from recipes.QRCodeGenerator import QRCodeGeneratorPage from recipes.ObjectInpainting import ObjectInpaintingPage diff --git a/recipes/Lipsync.py b/recipes/Lipsync.py index 74c4671b8..cb3022361 100644 --- a/recipes/Lipsync.py +++ b/recipes/Lipsync.py @@ -15,7 +15,7 @@ CREDITS_PER_MB = 2 -DEFAULT_LIPSYNC_GIF = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/91acbbde-5857-11ee-920a-02420a000194/lipsync%20audio.png.png" +DEFAULT_LIPSYNC_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/7fc4d302-9402-11ee-98dc-02420a0001ca/Lip%20Sync.jpg.png" class LipsyncPage(BasePage): @@ -37,7 +37,7 @@ class ResponseModel(BaseModel): output_video: str def preview_image(self, state: dict) -> str | None: - return DEFAULT_LIPSYNC_GIF + return DEFAULT_LIPSYNC_META_IMG def render_form_v2(self) -> bool: st.file_uploader( diff --git a/recipes/LipsyncTTS.py b/recipes/LipsyncTTS.py index 5ec6d7518..8a1802296 100644 --- a/recipes/LipsyncTTS.py +++ b/recipes/LipsyncTTS.py @@ -9,7 +9,7 @@ from daras_ai_v2.safety_checker import safety_checker from daras_ai_v2.loom_video_widget import youtube_video -DEFAULT_LIPSYNC_TTS_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/assets/lipsync_meta_img.gif" +DEFAULT_LIPSYNC_TTS_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/13b4d352-9456-11ee-8edd-02420a0001c7/Lipsync%20TTS.jpg.png" class LipsyncTTSPage(LipsyncPage, TextToSpeechPage): diff --git a/recipes/ObjectInpainting.py b/recipes/ObjectInpainting.py index be04355dd..1893c667d 100644 --- a/recipes/ObjectInpainting.py +++ b/recipes/ObjectInpainting.py @@ -24,6 +24,8 @@ ) from daras_ai_v2.stable_diffusion import InpaintingModels +DEFAULT_OBJECT_INPAINTING_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/4bca6982-9456-11ee-bc12-02420a0001cc/Product%20photo%20backgrounds.jpg.png" + class ObjectInpaintingPage(BasePage): title = "Generate Product Photo Backgrounds" @@ -74,6 +76,9 @@ class ResponseModel(BaseModel): # diffusion_images: list[str] output_images: list[str] + def preview_image(self, state: dict) -> str | None: + return 
DEFAULT_OBJECT_INPAINTING_META_IMG + def related_workflows(self) -> list: from recipes.ImageSegmentation import ImageSegmentationPage from recipes.GoogleImageGen import GoogleImageGenPage diff --git a/recipes/QRCodeGenerator.py b/recipes/QRCodeGenerator.py index 693d60537..bdc57b70f 100644 --- a/recipes/QRCodeGenerator.py +++ b/recipes/QRCodeGenerator.py @@ -39,7 +39,7 @@ from url_shortener.models import ShortenedURL ATTEMPTS = 1 -DEFAULT_QR_CODE_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/f09c8cfa-5393-11ee-a837-02420a000190/ai%20art%20qr%20codes1%201.png.png" +DEFAULT_QR_CODE_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/a679a410-9456-11ee-bd77-02420a0001ce/QR%20Code.jpg.png" class QrSources(Enum): diff --git a/recipes/RelatedQnA.py b/recipes/RelatedQnA.py index 67467b5eb..e1c9e71f7 100644 --- a/recipes/RelatedQnA.py +++ b/recipes/RelatedQnA.py @@ -16,7 +16,7 @@ from recipes.GoogleGPT import GoogleGPTPage from recipes.RelatedQnADoc import render_qna_outputs -DEFAULT_SEO_CONTENT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/9b415768-5393-11ee-a837-02420a000190/RQnA%20SEO%20content%201.png.png" +DEFAULT_SEO_CONTENT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/cbd2c94e-9456-11ee-a95e-02420a0001cc/People%20also%20ask.jpg.png" class RelatedGoogleGPTResponse(GoogleGPTPage.ResponseModel): diff --git a/recipes/SEOSummary.py b/recipes/SEOSummary.py index 1227e52e0..882c8827e 100644 --- a/recipes/SEOSummary.py +++ b/recipes/SEOSummary.py @@ -36,7 +36,7 @@ KEYWORDS_SEP = re.compile(r"[\n,]") STOP_SEQ = "$" * 10 -SEO_SUMMARY_DEFAULT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/assets/seo.png" +SEO_SUMMARY_DEFAULT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/13d3ab1e-9457-11ee-98a6-02420a0001c9/SEO.jpg.png" BANNED_HOSTS = [ # youtube generally returns garbage diff --git a/recipes/SmartGPT.py b/recipes/SmartGPT.py index 56dd886c5..b79bdf192 100644 --- a/recipes/SmartGPT.py +++ b/recipes/SmartGPT.py @@ -17,7 +17,7 @@ from daras_ai_v2.language_model_settings_widgets import language_model_settings from daras_ai_v2.pt import PromptTree -DEFAULT_SMARTGPT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/e02d1582-538a-11ee-9d7b-02420a000194/smartgpt%201.png.png" +DEFAULT_SMARTGPT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/3d71b434-9457-11ee-8edd-02420a0001c7/Smart%20GPT.jpg.png" class SmartGPTPage(BasePage): diff --git a/recipes/SocialLookupEmail.py b/recipes/SocialLookupEmail.py index 23a455c03..3c45a4ea2 100644 --- a/recipes/SocialLookupEmail.py +++ b/recipes/SocialLookupEmail.py @@ -14,7 +14,7 @@ from daras_ai_v2.redis_cache import redis_cache_decorator email_regex = r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b" -DEFAULT_SOCIAL_LOOKUP_EMAIL_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/assets/email%20ver%202.png" +DEFAULT_SOCIAL_LOOKUP_EMAIL_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/6729ea44-9457-11ee-bd77-02420a0001ce/Profile%20look%20up%20gpt%20email.jpg.png" class SocialLookupEmailPage(BasePage): diff --git a/recipes/Text2Audio.py b/recipes/Text2Audio.py index 589800d92..a959b62f3 100644 --- a/recipes/Text2Audio.py +++ b/recipes/Text2Audio.py @@ -14,7 +14,7 @@ num_outputs_setting, ) -DEFAULT_TEXT2AUDIO_META_IMG = 
"https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/ddc6e894-538b-11ee-a837-02420a000190/text2audio1%201.png.png" +DEFAULT_TEXT2AUDIO_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/85cf8ea4-9457-11ee-bd77-02420a0001ce/Text%20guided%20audio.jpg.png" class Text2AudioModels(Enum): diff --git a/recipes/TextToSpeech.py b/recipes/TextToSpeech.py index b4c3702b9..7a9a561ee 100644 --- a/recipes/TextToSpeech.py +++ b/recipes/TextToSpeech.py @@ -22,7 +22,7 @@ TextToSpeechProviders, ) -DEFAULT_TTS_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/assets/cropped_tts_compare_meta_img.gif" +DEFAULT_TTS_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/a73181ce-9457-11ee-8edd-02420a0001c7/Voice%20generators.jpg.png" class TextToSpeechPage(BasePage): diff --git a/recipes/VideoBots.py b/recipes/VideoBots.py index 29f077060..53513c73f 100644 --- a/recipes/VideoBots.py +++ b/recipes/VideoBots.py @@ -67,7 +67,7 @@ from recipes.TextToSpeech import TextToSpeechPage from url_shortener.models import ShortenedURL -DEFAULT_COPILOT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/c8b24b0c-538a-11ee-a1a3-02420a00018d/meta%20tags1%201.png.png" +DEFAULT_COPILOT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/f454d64a-9457-11ee-b6d5-02420a0001cb/Copilot.jpg.png" # BOT_SCRIPT_RE = re.compile( # # start of line diff --git a/recipes/asr.py b/recipes/asr.py index 603b31c59..96b48850f 100644 --- a/recipes/asr.py +++ b/recipes/asr.py @@ -25,7 +25,7 @@ from daras_ai_v2.text_output_widget import text_outputs from recipes.DocSearch import render_documents -DEFAULT_ASR_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/3b98d906-538b-11ee-9c77-02420a000193/Speech1%201.png.png" +DEFAULT_ASR_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/1916825c-93fa-11ee-97be-02420a0001c8/Speech.jpg.png" class AsrPage(BasePage): From b2b080b3a8e8e9d10978160798afa741ce311416 Mon Sep 17 00:00:00 2001 From: Alexander Metzger Date: Sun, 26 Nov 2023 23:20:04 -0800 Subject: [PATCH 110/138] implemented --- daras_ai_v2/asr.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/daras_ai_v2/asr.py b/daras_ai_v2/asr.py index 65ed40ddc..78d75a8c9 100644 --- a/daras_ai_v2/asr.py +++ b/daras_ai_v2/asr.py @@ -49,6 +49,7 @@ class AsrModels(Enum): whisper_large_v2 = "Whisper Large v2 (openai)" + whisper_large_v3 = "Whisper Large v3 (replicate)" whisper_hindi_large_v2 = "Whisper Hindi Large v2 (Bhashini)" whisper_telugu_large_v2 = "Whisper Telugu Large v2 (Bhashini)" nemo_english = "Conformer English (ai4bharat.org)" @@ -66,6 +67,7 @@ def supports_auto_detect(self) -> bool: asr_model_ids = { + AsrModels.whisper_large_v3: "vaibhavs10/incredibly-fast-whisper:37dfc0d6a7eb43ff84e230f74a24dab84e6bb7756c9b457dbdcceca3de7a4a04", AsrModels.whisper_large_v2: "openai/whisper-large-v2", AsrModels.whisper_hindi_large_v2: "vasista22/whisper-hindi-large-v2", AsrModels.whisper_telugu_large_v2: "vasista22/whisper-telugu-large-v2", @@ -84,6 +86,7 @@ def supports_auto_detect(self) -> bool: } asr_supported_languages = { + AsrModels.whisper_large_v3: WHISPER_SUPPORTED, AsrModels.whisper_large_v2: WHISPER_SUPPORTED, AsrModels.usm: CHIRP_SUPPORTED, AsrModels.deepgram: DEEPGRAM_SUPPORTED, @@ -358,6 +361,39 @@ def run_asr( if selected_model == AsrModels.azure: return azure_asr(audio_url, language) + elif 
selected_model == AsrModels.whisper_large_v3: + import replicate + from tempfile import NamedTemporaryFile + + with NamedTemporaryFile(suffix=".wav") as f: + subprocess.run( + [ + "ffmpeg", + "-y", + "-i", + audio_url, + "-vn", + "-ar", + "16000", + "-ac", + "1", + "-f", + "wav", + f.name, + ], + check=True, + ) + config = { + "audio": open(f.name, "rb"), + "return_timestamps": output_format != AsrOutputFormat.text, + } + if language: + config["language"] = language + output = replicate.run( + asr_model_ids[AsrModels.whisper_large_v3], + input=config, + ) + return output["text"] if output_format == AsrOutputFormat.text else output elif selected_model == AsrModels.deepgram: r = requests.post( "https://api.deepgram.com/v1/listen", From a3bee03b502b94a335f19a5644bfadf393664760 Mon Sep 17 00:00:00 2001 From: Alexander Metzger Date: Mon, 27 Nov 2023 08:56:50 -0800 Subject: [PATCH 111/138] rename replicate to openai --- daras_ai_v2/asr.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daras_ai_v2/asr.py b/daras_ai_v2/asr.py index 78d75a8c9..70298b16c 100644 --- a/daras_ai_v2/asr.py +++ b/daras_ai_v2/asr.py @@ -49,7 +49,7 @@ class AsrModels(Enum): whisper_large_v2 = "Whisper Large v2 (openai)" - whisper_large_v3 = "Whisper Large v3 (replicate)" + whisper_large_v3 = "Whisper Large v3 (openai)" whisper_hindi_large_v2 = "Whisper Hindi Large v2 (Bhashini)" whisper_telugu_large_v2 = "Whisper Telugu Large v2 (Bhashini)" nemo_english = "Conformer English (ai4bharat.org)" From d360f814e1c1d1bf361c80ead314a2d4827a268f Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Wed, 13 Dec 2023 21:54:15 +0530 Subject: [PATCH 112/138] remove redundant ffmpeg calls --- daras_ai_v2/asr.py | 48 ++++++++++++++-------------------------------- 1 file changed, 14 insertions(+), 34 deletions(-) diff --git a/daras_ai_v2/asr.py b/daras_ai_v2/asr.py index 70298b16c..ae5f22122 100644 --- a/daras_ai_v2/asr.py +++ b/daras_ai_v2/asr.py @@ -3,6 +3,7 @@ import subprocess import tempfile from enum import Enum +from time import sleep import langcodes import requests @@ -12,17 +13,16 @@ import gooey_ui as st from daras_ai.image_input import upload_file_from_bytes, gs_url_to_uri +from daras_ai_v2 import settings +from daras_ai_v2.functional import map_parallel from daras_ai_v2.gdrive_downloader import ( is_gdrive_url, gdrive_download, gdrive_metadata, url_to_gdrive_file_id, ) -from daras_ai_v2 import settings -from daras_ai_v2.functional import map_parallel from daras_ai_v2.gpu_server import call_celery_task from daras_ai_v2.redis_cache import redis_cache_decorator -from time import sleep SHORT_FILE_CUTOFF = 5 * 1024 * 1024 # 1 MB @@ -363,37 +363,17 @@ def run_asr( return azure_asr(audio_url, language) elif selected_model == AsrModels.whisper_large_v3: import replicate - from tempfile import NamedTemporaryFile - - with NamedTemporaryFile(suffix=".wav") as f: - subprocess.run( - [ - "ffmpeg", - "-y", - "-i", - audio_url, - "-vn", - "-ar", - "16000", - "-ac", - "1", - "-f", - "wav", - f.name, - ], - check=True, - ) - config = { - "audio": open(f.name, "rb"), - "return_timestamps": output_format != AsrOutputFormat.text, - } - if language: - config["language"] = language - output = replicate.run( - asr_model_ids[AsrModels.whisper_large_v3], - input=config, - ) - return output["text"] if output_format == AsrOutputFormat.text else output + + config = { + "audio": audio_url, + "return_timestamps": output_format != AsrOutputFormat.text, + } + if language: + config["language"] = language + data = replicate.run( + 
asr_model_ids[AsrModels.whisper_large_v3], + input=config, + ) elif selected_model == AsrModels.deepgram: r = requests.post( "https://api.deepgram.com/v1/listen", From 0b56bcb68f36e3eba9b0e6651fcdbd52bb8a10ae Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Wed, 13 Dec 2023 03:39:20 +0530 Subject: [PATCH 113/138] Escape HTML in gooey-ui markdown and write components --- daras_ai_v2/api_examples_widget.py | 35 ++++++++++++---------- daras_ai_v2/base.py | 5 ++-- daras_ai_v2/doc_search_settings_widgets.py | 13 ++++---- gooey_ui/components.py | 5 +++- recipes/RelatedQnADoc.py | 2 +- 5 files changed, 34 insertions(+), 26 deletions(-) diff --git a/daras_ai_v2/api_examples_widget.py b/daras_ai_v2/api_examples_widget.py index da9a25a5e..8f5e1cf7e 100644 --- a/daras_ai_v2/api_examples_widget.py +++ b/daras_ai_v2/api_examples_widget.py @@ -93,19 +93,20 @@ def api_example_generator( """ 1. Generate an api key [below👇](#api-keys) -2. Install [curl](https://everything.curl.dev/get) & add the `GOOEY_API_KEY` to your environment variables. -Never store the api key [in your code](https://12factor.net/config). +2. Install [curl](https://everything.curl.dev/get) & add the `GOOEY_API_KEY` to your environment variables. +Never store the api key [in your code](https://12factor.net/config). ```bash export GOOEY_API_KEY=sk-xxxx ``` -3. Run the following `curl` command in your terminal. +3. Run the following `curl` command in your terminal. If you encounter any issues, write to us at support@gooey.ai and make sure to include the full curl command and the error message. ```bash %s ``` """ - % curl_code.strip() + % curl_code.strip(), + unsafe_allow_html=True, ) with python: @@ -157,8 +158,8 @@ def api_example_generator( ) if as_async: py_code += r""" -from time import sleep - +from time import sleep + status_url = response.headers["Location"] while True: response = requests.get(status_url, headers={"Authorization": "%(auth_keyword)s " + os.environ["GOOEY_API_KEY"]}) @@ -188,20 +189,21 @@ def api_example_generator( rf""" 1. Generate an api key [below👇](#api-keys) -2. Install [requests](https://requests.readthedocs.io/en/latest/) & add the `GOOEY_API_KEY` to your environment variables. -Never store the api key [in your code](https://12factor.net/config). +2. Install [requests](https://requests.readthedocs.io/en/latest/) & add the `GOOEY_API_KEY` to your environment variables. +Never store the api key [in your code](https://12factor.net/config). ```bash $ python3 -m pip install requests $ export GOOEY_API_KEY=sk-xxxx ``` - -3. Use this sample code to call the API. + +3. Use this sample code to call the API. If you encounter any issues, write to us at support@gooey.ai and make sure to include the full code snippet and the error message. ```python %s ``` """ - % py_code + % py_code, + unsafe_allow_html=True, ) with js: @@ -276,7 +278,7 @@ def api_example_generator( if (!response.ok) { throw new Error(response.status); } - + const result = await response.json(); if (result.status === "completed") { console.log(response.status, result); @@ -302,18 +304,19 @@ def api_example_generator( r""" 1. Generate an api key [below👇](#api-keys) -2. Install [node-fetch](https://www.npmjs.com/package/node-fetch) & add the `GOOEY_API_KEY` to your environment variables. -Never store the api key [in your code](https://12factor.net/config) and don't use direcly in the browser. +2. 
Install [node-fetch](https://www.npmjs.com/package/node-fetch) & add the `GOOEY_API_KEY` to your environment variables. 
+Never store the api key [in your code](https://12factor.net/config) and don't use it directly in the browser. 
 ```bash
 $ npm install node-fetch
 $ export GOOEY_API_KEY=sk-xxxx
 ```
 
-3. Use this sample code to call the API.
+3. Use this sample code to call the API. 
 If you encounter any issues, write to us at support@gooey.ai and make sure to include the full code snippet and the error message.
 ```js
 %s
 ```
 """
-            % js_code
+            % js_code,
+            unsafe_allow_html=True,
         )
diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py
index ea15d9c03..483ff6728 100644
--- a/daras_ai_v2/base.py
+++ b/daras_ai_v2/base.py
@@ -328,7 +328,7 @@ def _render(page_cls):
         )
 
         with st.link(to=page.app_url()):
-            st.markdown(
+            st.html(
                 # language=html
                 f"""
@@ -1177,7 +1177,8 @@ def run_as_api_tab(self): ) st.markdown( - f'📖 To learn more, take a look at our complete API' + f'📖 To learn more, take a look at our complete API', + unsafe_allow_html=True, ) st.write("#### 📤 Example Request") diff --git a/daras_ai_v2/doc_search_settings_widgets.py b/daras_ai_v2/doc_search_settings_widgets.py index 06be1c12d..84a4ae1a0 100644 --- a/daras_ai_v2/doc_search_settings_widgets.py +++ b/daras_ai_v2/doc_search_settings_widgets.py @@ -98,8 +98,8 @@ def doc_search_settings( st.text_area( """ ###### 👁‍🗨 Summarization Instructions -Prompt to transform the conversation history into a vector search query. -These instructions run before the workflow performs a search of the knowledge base documents and should summarize the conversation into a VectorDB query most relevant to the user's last message. In general, you shouldn't need to adjust these instructions. +Prompt to transform the conversation history into a vector search query. +These instructions run before the workflow performs a search of the knowledge base documents and should summarize the conversation into a VectorDB query most relevant to the user's last message. In general, you shouldn't need to adjust these instructions. """, key="query_instructions", height=300, @@ -107,7 +107,7 @@ def doc_search_settings( if keyword_instructions_allowed: st.text_area( """ -###### 🔑 Keyword Extraction +###### 🔑 Keyword Extraction """, key="keyword_instructions", height=300, @@ -135,7 +135,7 @@ def doc_search_settings( label=""" ###### Max Snippet Words -After a document search, relevant snippets of your documents are returned as results. This setting adjusts the maximum number of words in each snippet. A high snippet size allows the LLM to access more information from your document results, at the cost of being verbose and potentially exhausting input tokens (which can cause a failure of the copilot to respond). Default: 300 +After a document search, relevant snippets of your documents are returned as results. This setting adjusts the maximum number of words in each snippet. A high snippet size allows the LLM to access more information from your document results, at the cost of being verbose and potentially exhausting input tokens (which can cause a failure of the copilot to respond). Default: 300 """, key="max_context_words", min_value=10, @@ -160,8 +160,9 @@ def doc_search_settings( st.write( """ ##### 🎤 Knowledge Base Speech Recognition - If your knowledge base documents contain audio or video files, we'll transcribe and optionally translate them to English, given we've found most vectorDBs and LLMs perform best in English (even if their final answers are translated into another language). - """ + If your knowledge base documents contain audio or video files, we'll transcribe and optionally translate them to English, given we've found most vectorDBs and LLMs perform best in English (even if their final answers are translated into another language). 
+ """, + unsafe_allow_html=True, ) enum_selector( diff --git a/gooey_ui/components.py b/gooey_ui/components.py index a63b0826b..8692cd8c3 100644 --- a/gooey_ui/components.py +++ b/gooey_ui/components.py @@ -1,4 +1,5 @@ import base64 +import html as html_lib import math import textwrap import typing @@ -82,9 +83,11 @@ def write(*objs: typing.Any, unsafe_allow_html=False, **props): ) -def markdown(body: str, *, unsafe_allow_html=False, **props): +def markdown(body: str | None, *, unsafe_allow_html=False, **props): if body is None: return _node("markdown", body="", **props) + if not unsafe_allow_html: + body = html_lib.escape(body) props["className"] = ( props.get("className", "") + " gui-html-container gui-md-container" ) diff --git a/recipes/RelatedQnADoc.py b/recipes/RelatedQnADoc.py index 93a68b963..fc0542caf 100644 --- a/recipes/RelatedQnADoc.py +++ b/recipes/RelatedQnADoc.py @@ -155,4 +155,4 @@ def render_qna_outputs(state, height, show_count=None): {"output_text": output_text, "references": references}, height ) render_sources_widget(references) - st.write("
") + st.html("
") From 6bc93b3e9b11cacf3297e735232a3b0bf69bdaa8 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Thu, 14 Dec 2023 15:09:49 +0530 Subject: [PATCH 114/138] Fix remaining unsafe_allow_html conditions --- daras_ai_v2/base.py | 4 ++-- daras_ai_v2/manage_api_keys_widget.py | 12 ++++++------ daras_ai_v2/text_to_speech_settings_widgets.py | 3 +-- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 483ff6728..de9a656ba 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -761,7 +761,7 @@ def _render_completed_output(self): def _render_failed_output(self): err_msg = st.session_state.get(StateKeys.error_msg) - st.error(err_msg) + st.error(err_msg, unsafe_allow_html=True) def _render_running_output(self): run_status = st.session_state.get(StateKeys.run_status) @@ -1070,7 +1070,7 @@ def _render(sr: SavedRun): if sr.run_status: html_spinner(sr.run_status) elif sr.error_msg: - st.error(sr.error_msg) + st.error(sr.error_msg, unsafe_allow_html=True) grid_layout(3, run_history, _render) diff --git a/daras_ai_v2/manage_api_keys_widget.py b/daras_ai_v2/manage_api_keys_widget.py index 1c7dc71e8..9c6664def 100644 --- a/daras_ai_v2/manage_api_keys_widget.py +++ b/daras_ai_v2/manage_api_keys_widget.py @@ -19,12 +19,12 @@ def manage_api_keys(user: AppUser): st.write( """ -Your secret API keys are listed below. +Your secret API keys are listed below. Please note that we do not display your secret API keys again after you generate them. -Do not share your API key with others, or expose it in the browser or other client-side code. +Do not share your API key with others, or expose it in the browser or other client-side code. -In order to protect the security of your account, +In order to protect the security of your account, Gooey.AI may also automatically rotate any API key that we've found has leaked publicly. """ ) @@ -74,10 +74,10 @@ def _generate_new_key_doc() -> dict: st.success( f""" -
API key generated
+##### API key generated -Please save this secret key somewhere safe and accessible. -For security reasons, **you won't be able to view it again** through your account. +Please save this secret key somewhere safe and accessible. +For security reasons, **you won't be able to view it again** through your account. If you lose this secret key, you'll need to generate a new one. """ ) diff --git a/daras_ai_v2/text_to_speech_settings_widgets.py b/daras_ai_v2/text_to_speech_settings_widgets.py index 0526c441f..ce5da2141 100644 --- a/daras_ai_v2/text_to_speech_settings_widgets.py +++ b/daras_ai_v2/text_to_speech_settings_widgets.py @@ -289,8 +289,7 @@ def text_to_speech_settings(page): ): st.caption( """ - Note: Please purchase Gooey.AI credits to use ElevenLabs voices - here.
+ Note: Please purchase Gooey.AI credits to use ElevenLabs voices [here](/account). Alternatively, you can use your own ElevenLabs API key by selecting the checkbox above. """ ) From 245033d30c124bcd86b37521259ae8b7b445255e Mon Sep 17 00:00:00 2001 From: clr-li <111320104+clr-li@users.noreply.github.com> Date: Mon, 4 Dec 2023 09:50:14 -0800 Subject: [PATCH 115/138] Changed image links --- explore.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/explore.py b/explore.py index 22d8fcc63..091eeda4d 100644 --- a/explore.py +++ b/explore.py @@ -19,7 +19,8 @@ def _render_non_featured(page_cls): col1, col2 = gui.columns([1, 2]) with col1: - render_image(page, state) + with gui.link(to=page.app_url()): + render_image(page, state) with col2: # render_description(page, state, total_runs) @@ -29,15 +30,15 @@ def _render_as_featured(page_cls): page = page_cls() state = page.recipe_doc_sr().to_dict() # total_runs = page.get_total_runs() - render_image(page, state) + with gui.link(to=page.app_url()): + render_image(page, state) # render_description(page, state, total_runs) render_description(page, state) def render_image(page, state): gui.image( page.get_recipe_image(state), - href=page.app_url(), - style={"border-radius": 5}, + style={"border-radius": 5, "pointer-events": "none"}, ) def render_description(page, state): From ee988791395241ea93c20a1771ca2be1eefbd6d6 Mon Sep 17 00:00:00 2001 From: clr-li <111320104+clr-li@users.noreply.github.com> Date: Thu, 7 Dec 2023 00:08:01 -0800 Subject: [PATCH 116/138] Adding margin on the bottom on workflows --- explore.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/explore.py b/explore.py index 091eeda4d..30103ef96 100644 --- a/explore.py +++ b/explore.py @@ -46,7 +46,7 @@ def render_description(page, state): gui.markdown(f"#### {page.get_recipe_title(state)}") preview = page.preview_description(state) if preview: - with gui.tag("p", style={"margin-bottom": "2px"}): + with gui.tag("p", style={"margin-bottom": "25px"}): gui.html( truncate_text_words(preview, 150), ) From 84ed3d6710e4003131981d8b7a0ee07306f20170 Mon Sep 17 00:00:00 2001 From: clr-li <111320104+clr-li@users.noreply.github.com> Date: Thu, 7 Dec 2023 09:01:36 -0800 Subject: [PATCH 117/138] Changed email profile workflow image --- recipes/EmailFaceInpainting.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/recipes/EmailFaceInpainting.py b/recipes/EmailFaceInpainting.py index 8550cb46b..4ccb0bbf8 100644 --- a/recipes/EmailFaceInpainting.py +++ b/recipes/EmailFaceInpainting.py @@ -20,7 +20,7 @@ class EmailFaceInpaintingPage(FaceInpaintingPage): title = "AI Generated Photo from Email Profile Lookup" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/2affefa6-88da-11ee-aa86-02420a000165/AI%20generated%20photo%20with%20email%20profile%20lookup.png.png" + image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/ec0df5aa-9521-11ee-93d3-02420a0001e5/Email%20Profile%20Lookup.png.png" workflow = Workflow.EMAIL_FACE_INPAINTING slug_versions = ["EmailFaceInpainting", "ai-image-from-email-lookup"] From c9435e24590193b6d9e336171689ba27f949b02d Mon Sep 17 00:00:00 2001 From: clr-li <111320104+clr-li@users.noreply.github.com> Date: Thu, 7 Dec 2023 09:04:31 -0800 Subject: [PATCH 118/138] Email face inpainting meta image --- recipes/EmailFaceInpainting.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/recipes/EmailFaceInpainting.py b/recipes/EmailFaceInpainting.py index 4ccb0bbf8..5e492974c 
100644 --- a/recipes/EmailFaceInpainting.py +++ b/recipes/EmailFaceInpainting.py @@ -17,6 +17,8 @@ email_regex = r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b" twitter_handle_regex = r"(@)?[A-Za-z0-9_]{1,15}" +DEFAULT_EMAIL_FACE_INPAINTING_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/6937427a-9522-11ee-b6d3-02420a0001ea/Email%20photo.jpg.png" + class EmailFaceInpaintingPage(FaceInpaintingPage): title = "AI Generated Photo from Email Profile Lookup" @@ -85,6 +87,9 @@ class ResponseModel(BaseModel): output_images: list[str] email_sent: bool = False + def preview_image(self, state: dict) -> str | None: + return DEFAULT_EMAIL_FACE_INPAINTING_META_IMG + def preview_description(self, state: dict) -> str: return "Find an email's public photo and then draw the face into an AI generated scene using your own prompt + the latest Stable Diffusion or DallE image generator." From 20d7b013470ce4aedb3b4984c751690d3e187cec Mon Sep 17 00:00:00 2001 From: clr-li <111320104+clr-li@users.noreply.github.com> Date: Sat, 9 Dec 2023 21:08:10 -0800 Subject: [PATCH 119/138] Added unique users column --- pages/UsageDashboard.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/pages/UsageDashboard.py b/pages/UsageDashboard.py index 5d023e887..e3c51c012 100644 --- a/pages/UsageDashboard.py +++ b/pages/UsageDashboard.py @@ -172,14 +172,24 @@ def main(): ) total_runs = ( - counts_df.sum(numeric_only=True) + counts_df.drop(columns=["display_name", "email"]) + .sum() .rename("Total Runs") .to_frame() .reset_index(names=["label"]) - .sort_values("Total Runs", ascending=False) .reset_index(drop=True) ) + total_runs["Unique Users"] = ( + counts_df.drop(columns=["display_name", "email"]) + .astype(bool) + .sum(numeric_only=True) + .rename("Unique Users") + .to_frame() + .reset_index(drop=True) + ) + total_runs.sort_values("Total Runs", ascending=False, inplace=True) + col1, col2 = st.columns(2) with col1: From 0656162abf3eb7917bc6663271490ee46b097592 Mon Sep 17 00:00:00 2001 From: clr-li <111320104+clr-li@users.noreply.github.com> Date: Sat, 9 Dec 2023 23:53:17 -0800 Subject: [PATCH 120/138] Added pie chart --- pages/UsageDashboard.py | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/pages/UsageDashboard.py b/pages/UsageDashboard.py index e3c51c012..bda123f23 100644 --- a/pages/UsageDashboard.py +++ b/pages/UsageDashboard.py @@ -172,23 +172,39 @@ def main(): ) total_runs = ( - counts_df.drop(columns=["display_name", "email"]) - .sum() + counts_df.sum(numeric_only=True) .rename("Total Runs") .to_frame() .reset_index(names=["label"]) + .sort_values("Total Runs", ascending=False) .reset_index(drop=True) ) - total_runs["Unique Users"] = ( + col1, col2 = st.columns(2) + + with col1: + st.write(total_runs) + + with col2: + st.plotly_chart( + px.pie( + total_runs.iloc[2:], + values="Total Runs", + names="label", + ), + use_container_width=True, + ) + + total_runs = ( counts_df.drop(columns=["display_name", "email"]) .astype(bool) .sum(numeric_only=True) .rename("Unique Users") .to_frame() + .reset_index(names=["label"]) + .sort_values("Unique Users", ascending=False) .reset_index(drop=True) ) - total_runs.sort_values("Total Runs", ascending=False, inplace=True) col1, col2 = st.columns(2) @@ -199,7 +215,7 @@ def main(): st.plotly_chart( px.pie( total_runs.iloc[2:], - values="Total Runs", + values="Unique Users", names="label", ), use_container_width=True, From 4d47983cbb4b90f103da94a5bab8d029cee238b9 Mon Sep 17 
00:00:00 2001 From: clr-li <111320104+clr-li@users.noreply.github.com> Date: Mon, 11 Dec 2023 08:49:28 -0800 Subject: [PATCH 121/138] Checkbox toggle --- pages/UsageDashboard.py | 56 ++++++++++++++++------------------------- 1 file changed, 22 insertions(+), 34 deletions(-) diff --git a/pages/UsageDashboard.py b/pages/UsageDashboard.py index bda123f23..b4faf6a5d 100644 --- a/pages/UsageDashboard.py +++ b/pages/UsageDashboard.py @@ -171,40 +171,28 @@ def main(): """ ) - total_runs = ( - counts_df.sum(numeric_only=True) - .rename("Total Runs") - .to_frame() - .reset_index(names=["label"]) - .sort_values("Total Runs", ascending=False) - .reset_index(drop=True) - ) - - col1, col2 = st.columns(2) - - with col1: - st.write(total_runs) - - with col2: - st.plotly_chart( - px.pie( - total_runs.iloc[2:], - values="Total Runs", - names="label", - ), - use_container_width=True, + if st.checkbox("Show Uniques"): + calc = "Unique Users" + total_runs = ( + counts_df.drop(columns=["display_name", "email"]) + .astype(bool) + .sum(numeric_only=True) + .rename(calc) + .to_frame() + .reset_index(names=["label"]) + .sort_values(calc, ascending=False) + .reset_index(drop=True) + ) + else: + calc = "Total Runs" + total_runs = ( + counts_df.sum(numeric_only=True) + .rename(calc) + .to_frame() + .reset_index(names=["label"]) + .sort_values(calc, ascending=False) + .reset_index(drop=True) ) - - total_runs = ( - counts_df.drop(columns=["display_name", "email"]) - .astype(bool) - .sum(numeric_only=True) - .rename("Unique Users") - .to_frame() - .reset_index(names=["label"]) - .sort_values("Unique Users", ascending=False) - .reset_index(drop=True) - ) col1, col2 = st.columns(2) @@ -215,7 +203,7 @@ def main(): st.plotly_chart( px.pie( total_runs.iloc[2:], - values="Unique Users", + values=calc, names="label", ), use_container_width=True, From 94c99a84716f45afbabdd029dbab3d198ca7bcc8 Mon Sep 17 00:00:00 2001 From: clr-li <111320104+clr-li@users.noreply.github.com> Date: Wed, 13 Dec 2023 08:44:11 -0800 Subject: [PATCH 122/138] Explore page fixes --- explore.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/explore.py b/explore.py index 30103ef96..471e6181b 100644 --- a/explore.py +++ b/explore.py @@ -19,8 +19,7 @@ def _render_non_featured(page_cls): col1, col2 = gui.columns([1, 2]) with col1: - with gui.link(to=page.app_url()): - render_image(page, state) + render_image(page, state) with col2: # render_description(page, state, total_runs) @@ -30,15 +29,15 @@ def _render_as_featured(page_cls): page = page_cls() state = page.recipe_doc_sr().to_dict() # total_runs = page.get_total_runs() - with gui.link(to=page.app_url()): - render_image(page, state) + render_image(page, state) # render_description(page, state, total_runs) render_description(page, state) def render_image(page, state): gui.image( page.get_recipe_image(state), - style={"border-radius": 5, "pointer-events": "none"}, + href=page.app_url(), + style={"border-radius": 5}, ) def render_description(page, state): From 8e7f555e671003ac1f224742b9d01acc15ff0775 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Thu, 14 Dec 2023 19:04:54 +0530 Subject: [PATCH 123/138] Redirect anonymous users to /login on trying to Save a run --- daras_ai_v2/base.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 365688f06..9012f06dc 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -209,7 +209,6 @@ def 
render(self): is_current_user_creator = ( self.request and self.request.user - and not self.request.user.is_anonymous and current_run.get_creator() == self.request.user ) has_unpublished_changes = ( @@ -347,7 +346,18 @@ def _render_published_run_buttons( ) publish_modal = Modal("", key="publish-modal") if save_button: - publish_modal.open() + if self.request.user.is_anonymous: + redirect_url = furl( + "/login", + query_params={ + "next": furl(self.request.url).set(origin=None) + }, + ) + # TODO: investigate why RedirectException does not work here + force_redirect(redirect_url) + return + else: + publish_modal.open() if publish_modal.is_open(): with publish_modal.container( From 9fcbccb3afaa699fdc913c1da0fdb68c5fd87354 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Thu, 14 Dec 2023 20:31:51 +0530 Subject: [PATCH 124/138] Only show saved tab to non anonymous users --- daras_ai_v2/base.py | 1 + 1 file changed, 1 insertion(+) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 9012f06dc..aee17d39d 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -692,6 +692,7 @@ def get_tabs(self): tabs = [MenuTabs.run, MenuTabs.examples, MenuTabs.run_as_api] if self.request.user: tabs.extend([MenuTabs.history]) + if self.request.user and not self.request.user.is_anonymous: tabs.extend([MenuTabs.saved]) return tabs From e5ba772165d6100b22fb7877594f6268d5a17e84 Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Thu, 14 Dec 2023 21:17:27 +0530 Subject: [PATCH 125/138] add bulk eval metadata rename image -> explore_image render resized images on explore page --- daras_ai_v2/base.py | 9 +++++---- daras_ai_v2/meta_preview_url.py | 12 ++++++++---- explore.py | 11 +++++++---- recipes/BulkEval.py | 22 +++++++++++++++++++++- recipes/BulkRunner.py | 2 +- recipes/ChyronPlant.py | 2 +- recipes/CompareLLM.py | 2 +- recipes/CompareText2Img.py | 2 +- recipes/CompareUpscaler.py | 2 +- recipes/DeforumSD.py | 2 +- recipes/DocExtract.py | 2 +- recipes/DocSearch.py | 2 +- recipes/DocSummary.py | 2 +- recipes/EmailFaceInpainting.py | 2 +- recipes/FaceInpainting.py | 2 +- recipes/GoogleGPT.py | 2 +- recipes/GoogleImageGen.py | 2 +- recipes/ImageSegmentation.py | 2 +- recipes/Img2Img.py | 2 +- recipes/LetterWriter.py | 2 +- recipes/Lipsync.py | 2 +- recipes/LipsyncTTS.py | 2 +- recipes/ObjectInpainting.py | 2 +- recipes/QRCodeGenerator.py | 2 +- recipes/RelatedQnA.py | 2 +- recipes/RelatedQnADoc.py | 2 +- recipes/SEOSummary.py | 2 +- recipes/SmartGPT.py | 2 +- recipes/SocialLookupEmail.py | 2 +- recipes/Text2Audio.py | 2 +- recipes/TextToSpeech.py | 2 +- recipes/VideoBots.py | 2 +- recipes/asr.py | 2 +- recipes/embeddings_page.py | 2 +- 34 files changed, 71 insertions(+), 43 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index de9a656ba..a506509d6 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -96,12 +96,13 @@ class StateKeys: class BasePage: title: str - image: str = None workflow: Workflow slug_versions: list[str] sane_defaults: dict = {} + explore_image: str = None + RequestModel: typing.Type[BaseModel] ResponseModel: typing.Type[BaseModel] @@ -264,8 +265,8 @@ def _render_page_title_with_breadcrumbs( def get_recipe_title(self, state: dict) -> str: return state.get(StateKeys.page_title) or self.title or "" - def get_recipe_image(self, state: dict) -> str: - return self.image or "" + def get_explore_image(self, state: dict) -> str: + return self.explore_image or "" def _user_disabled_check(self): if self.run_user and 
self.run_user.is_disabled: @@ -324,7 +325,7 @@ def _render(page_cls): page = page_cls() state = page_cls().recipe_doc_sr().to_dict() preview_image = meta_preview_url( - page.get_recipe_image(state), page.fallback_preivew_image() + page.get_explore_image(state), page.fallback_preivew_image() ) with st.link(to=page.app_url()): diff --git a/daras_ai_v2/meta_preview_url.py b/daras_ai_v2/meta_preview_url.py index 3d02e0c62..369900c90 100644 --- a/daras_ai_v2/meta_preview_url.py +++ b/daras_ai_v2/meta_preview_url.py @@ -1,12 +1,17 @@ import mimetypes import os -from time import time +import typing -import requests from furl import furl -def meta_preview_url(file_url: str | None, fallback_img: str | None) -> str | None: +def meta_preview_url( + file_url: str | None, + fallback_img: str | None, + size: typing.Literal[ + "400x400", "1170x1560", "40x40", "72x72", "80x80", "96x96" + ] = "400x400", +) -> str | None: if not file_url: return fallback_img @@ -22,7 +27,6 @@ def meta_preview_url(file_url: str | None, fallback_img: str | None) -> str | No file_url = fallback_img elif content_type in ["image/png", "image/jpeg", "image/tiff", "image/webp"]: # sizes: 400x400,1170x1560,40x40,72x72,80x80,96x96 - size = "400x400" f.path.segments = dir_segments + ["thumbs", f"{base}_{size}{ext}"] new_url = str(f) diff --git a/explore.py b/explore.py index 471e6181b..f3950bdbb 100644 --- a/explore.py +++ b/explore.py @@ -1,8 +1,11 @@ +import typing + import gooey_ui as gui from daras_ai.image_input import truncate_text_words from daras_ai_v2.all_pages import all_home_pages_by_category +from daras_ai_v2.base import BasePage from daras_ai_v2.grid_layout_widget import grid_layout - +from daras_ai_v2.meta_preview_url import meta_preview_url META_TITLE = "Explore AI workflows" META_DESCRIPTION = "Find, fork and run your field’s favorite AI recipes on Gooey.AI" @@ -25,7 +28,7 @@ def _render_non_featured(page_cls): # render_description(page, state, total_runs) render_description(page, state) - def _render_as_featured(page_cls): + def _render_as_featured(page_cls: typing.Type[BasePage]): page = page_cls() state = page.recipe_doc_sr().to_dict() # total_runs = page.get_total_runs() @@ -33,9 +36,9 @@ def _render_as_featured(page_cls): # render_description(page, state, total_runs) render_description(page, state) - def render_image(page, state): + def render_image(page: BasePage, state: dict): gui.image( - page.get_recipe_image(state), + meta_preview_url(page.get_explore_image(state), page.preview_image(state)), href=page.app_url(), style={"border-radius": 5}, ) diff --git a/recipes/BulkEval.py b/recipes/BulkEval.py index 83c3172ad..d178fa57c 100644 --- a/recipes/BulkEval.py +++ b/recipes/BulkEval.py @@ -126,10 +126,30 @@ def _render_results(results: list[AggFunctionResult]): class BulkEvalPage(BasePage): - title = "Bulk Evaluator" + title = "Evaluator" workflow = Workflow.BULK_EVAL slug_versions = ["bulk-eval", "eval"] + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aad314f0-9a97-11ee-8318-02420a0001c7/W.I.9.png.png" + + def preview_image(self, state: dict) -> str | None: + return "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/9631fb74-9a97-11ee-971f-02420a0001c4/evaluator.png.png" + + def render_description(self): + st.write( + """ +Summarize and score every row of any CSV, google sheet or excel with GPT4 (or any LLM you choose). Then average every score in any column to generate automated evaluations. 
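A minimal sketch of the aggregation idea described there, assuming pandas and purely illustrative column names and scores (none of this data comes from the real workflow):

    import pandas as pd

    # Illustrative per-row scores, as an LLM evaluation prompt might emit them.
    scores = pd.DataFrame(
        {
            "answer_relevance": [0.9, 0.7, 0.8],
            "faithfulness": [1.0, 0.6, 0.9],
        }
    )

    # Average every numeric column - the "average every score in any column" step described above.
    numeric_cols = scores.select_dtypes(include=["float", "int"]).columns
    aggregated = {col: float(scores[col].dropna().agg("mean")) for col in numeric_cols}
    print(aggregated)  # approximately {'answer_relevance': 0.8, 'faithfulness': 0.83}

The same pattern works for any single pandas reduction ("min", "max", "median", and so on), which is why the patch expresses each aggregation as a function name rather than hard-coded logic.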
+ """ + ) + + def related_workflows(self) -> list: + from recipes.BulkRunner import BulkRunnerPage + from recipes.VideoBots import VideoBotsPage + from recipes.asr import AsrPage + from recipes.DocSearch import DocSearchPage + + return [BulkRunnerPage, VideoBotsPage, AsrPage, DocSearchPage] + class RequestModel(LLMSettingsMixin, BaseModel): documents: list[str] = Field( title="Input Data Spreadsheet", diff --git a/recipes/BulkRunner.py b/recipes/BulkRunner.py index ea3c2d9fe..8af80eca0 100644 --- a/recipes/BulkRunner.py +++ b/recipes/BulkRunner.py @@ -25,7 +25,7 @@ class BulkRunnerPage(BasePage): title = "Bulk Runner" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/87f35df4-88d7-11ee-aac9-02420a00016b/Bulk%20Runner.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/87f35df4-88d7-11ee-aac9-02420a00016b/Bulk%20Runner.png.png" workflow = Workflow.BULK_RUNNER slug_versions = ["bulk-runner", "bulk"] price = 1 diff --git a/recipes/ChyronPlant.py b/recipes/ChyronPlant.py index 4c371abf3..116a8ad6a 100644 --- a/recipes/ChyronPlant.py +++ b/recipes/ChyronPlant.py @@ -10,7 +10,7 @@ class ChyronPlantPage(BasePage): title = "Chyron Plant Bot" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png" workflow = Workflow.CHYRON_PLANT slug_versions = ["ChyronPlant"] diff --git a/recipes/CompareLLM.py b/recipes/CompareLLM.py index 8c60ba154..583317ddc 100644 --- a/recipes/CompareLLM.py +++ b/recipes/CompareLLM.py @@ -22,7 +22,7 @@ class CompareLLMPage(BasePage): title = "Large Language Models: GPT-3" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/ae42015e-88d7-11ee-aac9-02420a00016b/Compare%20LLMs.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/ae42015e-88d7-11ee-aac9-02420a00016b/Compare%20LLMs.png.png" workflow = Workflow.COMPARE_LLM slug_versions = ["CompareLLM", "llm", "compare-large-language-models"] diff --git a/recipes/CompareText2Img.py b/recipes/CompareText2Img.py index 96013554f..80fe16392 100644 --- a/recipes/CompareText2Img.py +++ b/recipes/CompareText2Img.py @@ -30,7 +30,7 @@ class CompareText2ImgPage(BasePage): title = "Compare AI Image Generators" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/d127484e-88d9-11ee-b549-02420a000167/Compare%20AI%20Image%20generators.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/d127484e-88d9-11ee-b549-02420a000167/Compare%20AI%20Image%20generators.png.png" workflow = Workflow.COMPARE_TEXT2IMG slug_versions = [ "CompareText2Img", diff --git a/recipes/CompareUpscaler.py b/recipes/CompareUpscaler.py index 9d5a2d802..84973380f 100644 --- a/recipes/CompareUpscaler.py +++ b/recipes/CompareUpscaler.py @@ -14,7 +14,7 @@ class CompareUpscalerPage(BasePage): title = "Compare AI Image Upscalers" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/64393e0c-88db-11ee-b428-02420a000168/AI%20Image%20Upscaler.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/64393e0c-88db-11ee-b428-02420a000168/AI%20Image%20Upscaler.png.png" workflow = Workflow.COMPARE_UPSCALER slug_versions = 
["compare-ai-upscalers"] diff --git a/recipes/DeforumSD.py b/recipes/DeforumSD.py index 3fcf8da14..7ec57bc19 100644 --- a/recipes/DeforumSD.py +++ b/recipes/DeforumSD.py @@ -163,7 +163,7 @@ def get_last_frame(prompt_list: list) -> int: class DeforumSDPage(BasePage): title = "AI Animation Generator" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/media/users/kxmNIYAOJbfOURxHBKNCWeUSKiP2/dd88c110-88d6-11ee-9b4f-2b58bd50e819/animation.gif" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/media/users/kxmNIYAOJbfOURxHBKNCWeUSKiP2/dd88c110-88d6-11ee-9b4f-2b58bd50e819/animation.gif" workflow = Workflow.DEFORUM_SD slug_versions = ["DeforumSD", "animation-generator"] diff --git a/recipes/DocExtract.py b/recipes/DocExtract.py index 9cb744529..fc2b8b3c8 100644 --- a/recipes/DocExtract.py +++ b/recipes/DocExtract.py @@ -56,7 +56,7 @@ class Columns(IntegerChoices): class DocExtractPage(BasePage): title = "Youtube Transcripts + GPT extraction to Google Sheets" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png" workflow = Workflow.DOC_EXTRACT slug_versions = [ "doc-extract", diff --git a/recipes/DocSearch.py b/recipes/DocSearch.py index 0f804bbc7..2a509d8fd 100644 --- a/recipes/DocSearch.py +++ b/recipes/DocSearch.py @@ -38,7 +38,7 @@ class DocSearchPage(BasePage): title = "Search your Docs with GPT" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/cbbb4dc6-88d7-11ee-bf6c-02420a000166/Search%20your%20docs%20with%20gpt.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/cbbb4dc6-88d7-11ee-bf6c-02420a000166/Search%20your%20docs%20with%20gpt.png.png" workflow = Workflow.DOC_SEARCH slug_versions = ["doc-search"] diff --git a/recipes/DocSummary.py b/recipes/DocSummary.py index b45fea097..4b9283cde 100644 --- a/recipes/DocSummary.py +++ b/recipes/DocSummary.py @@ -38,7 +38,7 @@ class CombineDocumentsChains(Enum): class DocSummaryPage(BasePage): title = "Summarize your Docs with GPT" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/1f858a7a-88d8-11ee-a658-02420a000163/Summarize%20your%20docs%20with%20gpt.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/1f858a7a-88d8-11ee-a658-02420a000163/Summarize%20your%20docs%20with%20gpt.png.png" workflow = Workflow.DOC_SUMMARY slug_versions = ["doc-summary"] diff --git a/recipes/EmailFaceInpainting.py b/recipes/EmailFaceInpainting.py index 5e492974c..86428c4ae 100644 --- a/recipes/EmailFaceInpainting.py +++ b/recipes/EmailFaceInpainting.py @@ -22,7 +22,7 @@ class EmailFaceInpaintingPage(FaceInpaintingPage): title = "AI Generated Photo from Email Profile Lookup" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/ec0df5aa-9521-11ee-93d3-02420a0001e5/Email%20Profile%20Lookup.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/ec0df5aa-9521-11ee-93d3-02420a0001e5/Email%20Profile%20Lookup.png.png" workflow = Workflow.EMAIL_FACE_INPAINTING slug_versions = ["EmailFaceInpainting", "ai-image-from-email-lookup"] diff --git a/recipes/FaceInpainting.py b/recipes/FaceInpainting.py index 1fee77221..a9353ea07 100644 --- 
a/recipes/FaceInpainting.py +++ b/recipes/FaceInpainting.py @@ -28,7 +28,7 @@ class FaceInpaintingPage(BasePage): title = "AI Image with a Face" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/10c2ce06-88da-11ee-b428-02420a000168/ai%20image%20with%20a%20face.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/10c2ce06-88da-11ee-b428-02420a000168/ai%20image%20with%20a%20face.png.png" workflow = Workflow.FACE_INPAINTING slug_versions = ["FaceInpainting", "face-in-ai-generated-photo"] diff --git a/recipes/GoogleGPT.py b/recipes/GoogleGPT.py index c4cad5535..57b1078ba 100644 --- a/recipes/GoogleGPT.py +++ b/recipes/GoogleGPT.py @@ -42,7 +42,7 @@ class GoogleGPTPage(BasePage): title = "Web Search + GPT3" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/28649544-9406-11ee-bba3-02420a0001cc/Websearch%20GPT%20option%202.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/28649544-9406-11ee-bba3-02420a0001cc/Websearch%20GPT%20option%202.png.png" workflow = Workflow.GOOGLE_GPT slug_versions = ["google-gpt"] diff --git a/recipes/GoogleImageGen.py b/recipes/GoogleImageGen.py index 6033ede36..c3dab52f0 100644 --- a/recipes/GoogleImageGen.py +++ b/recipes/GoogleImageGen.py @@ -34,7 +34,7 @@ class GoogleImageGenPage(BasePage): title = "Render Image Search Results with AI" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/eb23c078-88da-11ee-aa86-02420a000165/web%20search%20render.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/eb23c078-88da-11ee-aa86-02420a000165/web%20search%20render.png.png" workflow = Workflow.GOOGLE_IMAGE_GEN slug_versions = ["GoogleImageGen", "render-images-with-ai"] diff --git a/recipes/ImageSegmentation.py b/recipes/ImageSegmentation.py index e1c313f06..543664065 100644 --- a/recipes/ImageSegmentation.py +++ b/recipes/ImageSegmentation.py @@ -33,7 +33,7 @@ class ImageSegmentationPage(BasePage): title = "AI Background Changer" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/06fc595e-88db-11ee-b428-02420a000168/AI%20Background%20Remover.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/06fc595e-88db-11ee-b428-02420a000168/AI%20Background%20Remover.png.png" workflow = Workflow.IMAGE_SEGMENTATION slug_versions = ["ImageSegmentation", "remove-image-background-with-ai"] diff --git a/recipes/Img2Img.py b/recipes/Img2Img.py index 631d2e0ed..da330639b 100644 --- a/recipes/Img2Img.py +++ b/recipes/Img2Img.py @@ -24,7 +24,7 @@ class Img2ImgPage(BasePage): title = "Edit An Image with AI prompt" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/bcc9351a-88d9-11ee-bf6c-02420a000166/Edit%20an%20image%20with%20AI%201.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/bcc9351a-88d9-11ee-bf6c-02420a000166/Edit%20an%20image%20with%20AI%201.png.png" workflow = Workflow.IMG_2_IMG slug_versions = ["Img2Img", "ai-photo-editor"] diff --git a/recipes/LetterWriter.py b/recipes/LetterWriter.py index d4cd34fce..87e0aa550 100644 --- a/recipes/LetterWriter.py +++ b/recipes/LetterWriter.py @@ -14,7 +14,7 @@ class LetterWriterPage(BasePage): title = "Letter Writer" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png" + 
explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png" workflow = Workflow.LETTER_WRITER slug_versions = ["LetterWriter"] diff --git a/recipes/Lipsync.py b/recipes/Lipsync.py index cb3022361..5fe08a09b 100644 --- a/recipes/Lipsync.py +++ b/recipes/Lipsync.py @@ -20,7 +20,7 @@ class LipsyncPage(BasePage): title = "Lip Syncing" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/f33e6332-88d8-11ee-89f9-02420a000169/Lipsync%20TTS.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/f33e6332-88d8-11ee-89f9-02420a000169/Lipsync%20TTS.png.png" workflow = Workflow.LIPSYNC slug_versions = ["Lipsync"] diff --git a/recipes/LipsyncTTS.py b/recipes/LipsyncTTS.py index 8a1802296..dcdb7d422 100644 --- a/recipes/LipsyncTTS.py +++ b/recipes/LipsyncTTS.py @@ -14,7 +14,7 @@ class LipsyncTTSPage(LipsyncPage, TextToSpeechPage): title = "Lipsync Video with Any Text" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/1acfa370-88d9-11ee-bf6c-02420a000166/Lipsync%20with%20audio%201.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/1acfa370-88d9-11ee-bf6c-02420a000166/Lipsync%20with%20audio%201.png.png" workflow = Workflow.LIPSYNC_TTS slug_versions = ["LipsyncTTS", "lipsync-maker"] diff --git a/recipes/ObjectInpainting.py b/recipes/ObjectInpainting.py index 1893c667d..d84c59069 100644 --- a/recipes/ObjectInpainting.py +++ b/recipes/ObjectInpainting.py @@ -29,7 +29,7 @@ class ObjectInpaintingPage(BasePage): title = "Generate Product Photo Backgrounds" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/f07b731e-88d9-11ee-a658-02420a000163/W.I.3.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/f07b731e-88d9-11ee-a658-02420a000163/W.I.3.png.png" workflow = Workflow.OBJECT_INPAINTING slug_versions = ["ObjectInpainting", "product-photo-background-generator"] diff --git a/recipes/QRCodeGenerator.py b/recipes/QRCodeGenerator.py index bdc57b70f..30e109994 100644 --- a/recipes/QRCodeGenerator.py +++ b/recipes/QRCodeGenerator.py @@ -51,7 +51,7 @@ class QrSources(Enum): class QRCodeGeneratorPage(BasePage): title = "AI Art QR Code" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/03d6538e-88d5-11ee-ad97-02420a00016c/W.I.2.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/03d6538e-88d5-11ee-ad97-02420a00016c/W.I.2.png.png" workflow = Workflow.QR_CODE slug_versions = ["art-qr-code", "qr", "qr-code"] diff --git a/recipes/RelatedQnA.py b/recipes/RelatedQnA.py index e1c9e71f7..efddba5ba 100644 --- a/recipes/RelatedQnA.py +++ b/recipes/RelatedQnA.py @@ -25,7 +25,7 @@ class RelatedGoogleGPTResponse(GoogleGPTPage.ResponseModel): class RelatedQnAPage(BasePage): title = "Generate “People Also Ask” SEO Content " - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/37b0ba22-88d6-11ee-b549-02420a000167/People%20also%20ask.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/37b0ba22-88d6-11ee-b549-02420a000167/People%20also%20ask.png.png" workflow = Workflow.RELATED_QNA_MAKER slug_versions = ["related-qna-maker"] diff --git a/recipes/RelatedQnADoc.py b/recipes/RelatedQnADoc.py index fc0542caf..7ebc87080 100644 --- a/recipes/RelatedQnADoc.py +++ 
b/recipes/RelatedQnADoc.py @@ -24,7 +24,7 @@ class RelatedDocSearchResponse(DocSearchPage.ResponseModel): class RelatedQnADocPage(BasePage): title = '"People Also Ask" Answers from a Doc' - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png" workflow = Workflow.RELATED_QNA_MAKER_DOC slug_versions = ["related-qna-maker-doc"] diff --git a/recipes/SEOSummary.py b/recipes/SEOSummary.py index 882c8827e..51b4e21db 100644 --- a/recipes/SEOSummary.py +++ b/recipes/SEOSummary.py @@ -56,7 +56,7 @@ class SEOSummaryPage(BasePage): title = "Create a perfect SEO-optimized Title & Paragraph" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/85f38b42-88d6-11ee-ad97-02420a00016c/Create%20SEO%20optimized%20content%20option%202.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/85f38b42-88d6-11ee-ad97-02420a00016c/Create%20SEO%20optimized%20content%20option%202.png.png" workflow = Workflow.SEO_SUMMARY slug_versions = ["SEOSummary", "seo-paragraph-generator"] diff --git a/recipes/SmartGPT.py b/recipes/SmartGPT.py index b79bdf192..554a74940 100644 --- a/recipes/SmartGPT.py +++ b/recipes/SmartGPT.py @@ -22,7 +22,7 @@ class SmartGPTPage(BasePage): title = "SmartGPT" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/ffd24ad8-88d7-11ee-a658-02420a000163/SmartGPT.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/ffd24ad8-88d7-11ee-a658-02420a000163/SmartGPT.png.png" workflow = Workflow.SMART_GPT slug_versions = ["SmartGPT"] price = 20 diff --git a/recipes/SocialLookupEmail.py b/recipes/SocialLookupEmail.py index 3c45a4ea2..5ad154f9f 100644 --- a/recipes/SocialLookupEmail.py +++ b/recipes/SocialLookupEmail.py @@ -19,7 +19,7 @@ class SocialLookupEmailPage(BasePage): title = "Profile Lookup + GPT3 for AI-Personalized Emails" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/5fbd475a-88d7-11ee-aac9-02420a00016b/personalized%20email.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/5fbd475a-88d7-11ee-aac9-02420a00016b/personalized%20email.png.png" workflow = Workflow.SOCIAL_LOOKUP_EMAIL slug_versions = ["SocialLookupEmail", "email-writer-with-profile-lookup"] diff --git a/recipes/Text2Audio.py b/recipes/Text2Audio.py index a959b62f3..77776ddf5 100644 --- a/recipes/Text2Audio.py +++ b/recipes/Text2Audio.py @@ -28,7 +28,7 @@ class Text2AudioModels(Enum): class Text2AudioPage(BasePage): title = "Text guided audio generator" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/a4481d58-88d9-11ee-aa86-02420a000165/Text%20guided%20audio%20generator.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/a4481d58-88d9-11ee-aa86-02420a000165/Text%20guided%20audio%20generator.png.png" workflow = Workflow.TEXT_2_AUDIO slug_versions = ["text2audio"] diff --git a/recipes/TextToSpeech.py b/recipes/TextToSpeech.py index 7a9a561ee..1ee2296ef 100644 --- a/recipes/TextToSpeech.py +++ b/recipes/TextToSpeech.py @@ -27,7 +27,7 @@ class TextToSpeechPage(BasePage): title = "Compare AI Voice Generators" - image = 
"https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/3621e11a-88d9-11ee-b549-02420a000167/Compare%20AI%20voice%20generators.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/3621e11a-88d9-11ee-b549-02420a000167/Compare%20AI%20voice%20generators.png.png" workflow = Workflow.TEXT_TO_SPEECH slug_versions = [ "TextToSpeech", diff --git a/recipes/VideoBots.py b/recipes/VideoBots.py index 53513c73f..348aeef07 100644 --- a/recipes/VideoBots.py +++ b/recipes/VideoBots.py @@ -97,7 +97,7 @@ class ReplyButton(typing.TypedDict): class VideoBotsPage(BasePage): title = "Copilot for your Enterprise" # "Create Interactive Video Bots" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/8c014530-88d4-11ee-aac9-02420a00016b/Copilot.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/8c014530-88d4-11ee-aac9-02420a00016b/Copilot.png.png" workflow = Workflow.VIDEO_BOTS slug_versions = ["video-bots", "bots", "copilot"] diff --git a/recipes/asr.py b/recipes/asr.py index 96b48850f..a297bf9c3 100644 --- a/recipes/asr.py +++ b/recipes/asr.py @@ -30,7 +30,7 @@ class AsrPage(BasePage): title = "Speech Recognition & Translation" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/5fb7e5f6-88d9-11ee-aa86-02420a000165/Speech.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/5fb7e5f6-88d9-11ee-aa86-02420a000165/Speech.png.png" workflow = Workflow.ASR slug_versions = ["asr", "speech"] diff --git a/recipes/embeddings_page.py b/recipes/embeddings_page.py index 8c2214a2d..76efe16d9 100644 --- a/recipes/embeddings_page.py +++ b/recipes/embeddings_page.py @@ -39,7 +39,7 @@ class EmbeddingModels(models.TextChoices): class EmbeddingsPage(BasePage): title = "Embeddings" - image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png" + explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png" workflow = Workflow.EMBEDDINGS slug_versions = ["embeddings", "embed", "text-embedings"] price = 1 From c4d7288b3fdb101c9a5ec47c598f96f8c243634f Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Sat, 16 Dec 2023 21:55:00 +0530 Subject: [PATCH 126/138] 11labs: Add style exaggeration, speaker boost settings, and multilingual v1 model --- .../text_to_speech_settings_widgets.py | 21 +++++++++++++++++++ recipes/LipsyncTTS.py | 2 ++ recipes/TextToSpeech.py | 15 +++++++++---- 3 files changed, 34 insertions(+), 4 deletions(-) diff --git a/daras_ai_v2/text_to_speech_settings_widgets.py b/daras_ai_v2/text_to_speech_settings_widgets.py index ce5da2141..701dc6141 100644 --- a/daras_ai_v2/text_to_speech_settings_widgets.py +++ b/daras_ai_v2/text_to_speech_settings_widgets.py @@ -79,6 +79,7 @@ class TextToSpeechProviders(Enum): "eleven_multilingual_v2": "Multilingual V2 - High quality speech in 29 languages", "eleven_turbo_v2": "English V2 - Very low latency text-to-speech", "eleven_monolingual_v1": "English V1 - Low latency text-to-speech", + "eleven_multilingual_v1": "Multilingual V1", } ELEVEN_LABS_SUPPORTED_LANGS = [ @@ -346,6 +347,26 @@ def text_to_speech_settings(page): key="elevenlabs_similarity_boost", ) + if st.session_state.get("elevenlabs_model") == 
"eleven_multilingual_v2": + col1, col2 = st.columns(2) + with col1: + st.slider( + """ + ###### Style Exaggeration + """, + min_value=0, + max_value=1.0, + step=0.0, + key="elevenlabs_style", + value=0.0, + ) + with col2: + st.checkbox( + "Speaker Boost", + key="elevenlabs_speaker_boost", + value=True, + ) + with st.expander( "Eleven Labs Supported Languages", style={"fontSize": "0.9rem", "textDecoration": "underline"}, diff --git a/recipes/LipsyncTTS.py b/recipes/LipsyncTTS.py index dcdb7d422..f85d4550a 100644 --- a/recipes/LipsyncTTS.py +++ b/recipes/LipsyncTTS.py @@ -53,6 +53,8 @@ class RequestModel(BaseModel): elevenlabs_model: str | None elevenlabs_stability: float | None elevenlabs_similarity_boost: float | None + elevenlabs_style: float | None + elevenlabs_speaker_boost: bool | None class ResponseModel(BaseModel): output_video: str diff --git a/recipes/TextToSpeech.py b/recipes/TextToSpeech.py index 1ee2296ef..1f67fbe20 100644 --- a/recipes/TextToSpeech.py +++ b/recipes/TextToSpeech.py @@ -71,6 +71,8 @@ class RequestModel(BaseModel): elevenlabs_model: str | None elevenlabs_stability: float | None elevenlabs_similarity_boost: float | None + elevenlabs_style: float | None + elevenlabs_speaker_boost: bool | None class ResponseModel(BaseModel): audio_url: str @@ -268,6 +270,14 @@ def run(self, state: dict): stability = state.get("elevenlabs_stability", 0.5) similarity_boost = state.get("elevenlabs_similarity_boost", 0.75) + voice_settings = dict( + stability=stability, similarity_boost=similarity_boost + ) + if voice_model == "eleven_multilingual_v2": + voice_settings["style"] = state.get("elevenlabs_style", 0.0) + voice_settings["speaker_boost"] = state.get( + "elevenlabs_speaker_boost", True + ) response = requests.post( f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}", @@ -278,10 +288,7 @@ def run(self, state: dict): json={ "text": text, "model_id": voice_model, - "voice_settings": { - "stability": stability, - "similarity_boost": similarity_boost, - }, + "voice_settings": voice_settings, }, ) response.raise_for_status() From 8d4243f7dc75dd116729b79978365db31e5ff802 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Sun, 17 Dec 2023 00:11:48 +0530 Subject: [PATCH 127/138] Set step=0.05 for style exaggeration slider --- daras_ai_v2/text_to_speech_settings_widgets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daras_ai_v2/text_to_speech_settings_widgets.py b/daras_ai_v2/text_to_speech_settings_widgets.py index 701dc6141..df9ec3ad1 100644 --- a/daras_ai_v2/text_to_speech_settings_widgets.py +++ b/daras_ai_v2/text_to_speech_settings_widgets.py @@ -356,7 +356,7 @@ def text_to_speech_settings(page): """, min_value=0, max_value=1.0, - step=0.0, + step=0.05, key="elevenlabs_style", value=0.0, ) From aad7c63c790d344259b454093080fd8873bc4278 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 19 Dec 2023 06:31:50 +0530 Subject: [PATCH 128/138] Fix render_running_output: repeated output --- daras_ai_v2/base.py | 21 ++------------------- 1 file changed, 2 insertions(+), 19 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 7a7e0c414..63dad7755 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -1377,25 +1377,8 @@ def _render_failed_output(self): def _render_running_output(self): run_status = st.session_state.get(StateKeys.run_status) - if run_status: - st.caption("Your changes are saved in the above URL. 
Save it for later!") - html_spinner(run_status) - else: - err_msg = st.session_state.get(StateKeys.error_msg) - run_time = st.session_state.get(StateKeys.run_time, 0) - - # render errors - if err_msg is not None: - st.error(err_msg, unsafe_allow_html=True) - # render run time - elif run_time: - st.success(f"Success! Run Time: `{run_time:.2f}` seconds.") - - # render outputs - self.render_output() - - if not run_status: - self._render_after_output() + st.caption("Your changes are saved in the above URL. Save it for later!") + html_spinner(run_status) def on_submit(self): example_id, run_id, uid = self.create_new_run() From 41bacbea8150b25f1c4008aafd0c60bfc7fc512a Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 19 Dec 2023 06:40:08 +0530 Subject: [PATCH 129/138] Exclude root published run from examples --- bots/models.py | 8 ++++++++ daras_ai_v2/base.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/bots/models.py b/bots/models.py index 408daae59..1968569c1 100644 --- a/bots/models.py +++ b/bots/models.py @@ -1034,6 +1034,14 @@ class Meta: models.Index(fields=["workflow", "created_by"]), models.Index(fields=["workflow", "published_run_id"]), models.Index(fields=["workflow", "visibility", "is_approved_example"]), + models.Index( + fields=[ + "workflow", + "visibility", + "is_approved_example", + "published_run_id", + ] + ), ] def __str__(self): diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 63dad7755..bce5a879d 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -1591,7 +1591,7 @@ def _render(pr: PublishedRun): workflow=self.workflow, visibility=PublishedRunVisibility.PUBLIC, is_approved_example=True, - )[:50] + ).exclude(published_run_id="")[:50] grid_layout(3, example_runs, _render) From b301179c297c2f8b0ac5daecfea202ae26bad414 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 19 Dec 2023 06:43:35 +0530 Subject: [PATCH 130/138] Allow setting pub-run title to root title when pub-run is root --- daras_ai_v2/base.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index bce5a879d..01284e773 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -455,7 +455,11 @@ def _render_publish_modal( if publish_button: recipe_title = self.get_root_published_run().title or self.title - if published_run_title.strip() == recipe_title.strip(): + is_root_published_run = is_update_mode and published_run.is_root_example() + if ( + not is_root_published_run + and published_run_title.strip() == recipe_title.strip() + ): st.error("Title can't be the same as the recipe title") return if not is_update_mode: From f8bc27cbd67d1bd0b04ce7e3ed9e5c93cc5b3ce4 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 19 Dec 2023 06:49:21 +0530 Subject: [PATCH 131/138] Undo lost changes for countdown timer --- daras_ai_v2/base.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index 01284e773..0b053e5a1 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -1383,6 +1383,32 @@ def _render_running_output(self): run_status = st.session_state.get(StateKeys.run_status) st.caption("Your changes are saved in the above URL. 
Save it for later!") html_spinner(run_status) + self.render_extra_waiting_output() + + def render_extra_waiting_output(self): + estimated_run_time = self.estimate_run_duration() + if not estimated_run_time: + return + if created_at := st.session_state.get("created_at"): + if isinstance(created_at, datetime.datetime): + start_time = created_at + else: + start_time = datetime.datetime.fromisoformat(created_at) + with st.countdown_timer( + end_time=start_time + datetime.timedelta(seconds=estimated_run_time), + delay_text="Sorry for the wait. Your run is taking longer than we expected.", + ): + if self.is_current_user_owner() and self.request.user.email: + st.write( + f"""We'll email **{self.request.user.email}** when your workflow is done.""" + ) + st.write( + f"""In the meantime, check out [🚀 Examples]({self.get_tab_url(MenuTabs.examples)}) + for inspiration.""" + ) + + def estimate_run_duration(self) -> int | None: + pass def on_submit(self): example_id, run_id, uid = self.create_new_run() From c839d91dd1c347ac9e9cdc3b6059e07db0bb810f Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Tue, 19 Dec 2023 22:47:01 +0530 Subject: [PATCH 132/138] Add autocomplete fields for FKs --- bots/admin.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bots/admin.py b/bots/admin.py index e3a872644..6749d802f 100644 --- a/bots/admin.py +++ b/bots/admin.py @@ -222,7 +222,7 @@ class PublishedRunAdmin(admin.ModelAdmin): ] list_filter = ["workflow"] search_fields = ["workflow", "published_run_id"] - + autocomplete_fields = ["saved_run", "created_by", "last_edited_by"] readonly_fields = [ "open_in_gooey", "created_at", @@ -297,6 +297,7 @@ def preview_input(self, saved_run: SavedRun): @admin.register(PublishedRunVersion) class PublishedRunVersionAdmin(admin.ModelAdmin): search_fields = ["id", "version_id", "published_run__published_run_id"] + autocomplete_fields = ["published_run", "saved_run", "changed_by"] class LastActiveDeltaFilter(admin.SimpleListFilter): From 37574e0f7e1d22c3b8f471bf71071d6e3eca1811 Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Mon, 18 Dec 2023 21:27:20 +0530 Subject: [PATCH 133/138] better table rendering of bulk eval results add workflow picker for bulk runner add ability to pick an eval workflow from bulk runner remove column name picker from bulk eval aggregation add font awesome icons for del/edit --- daras_ai_v2/base.py | 2 +- gooey_ui/components.py | 9 +- recipes/BulkEval.py | 188 ++++++++++------------ recipes/BulkRunner.py | 355 +++++++++++++++++++++++++++++++++++------ 4 files changed, 403 insertions(+), 151 deletions(-) diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index a506509d6..644e0f04c 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -263,7 +263,7 @@ def _render_page_title_with_breadcrumbs( st.write(f"# {self.get_recipe_title(st.session_state)}") def get_recipe_title(self, state: dict) -> str: - return state.get(StateKeys.page_title) or self.title or "" + return state.get(StateKeys.page_title) or self.title or self.workflow.label def get_explore_image(self, state: dict) -> str: return self.explore_image or "" diff --git a/gooey_ui/components.py b/gooey_ui/components.py index 8692cd8c3..67265c420 100644 --- a/gooey_ui/components.py +++ b/gooey_ui/components.py @@ -526,8 +526,13 @@ def json(value: typing.Any, expanded: bool = False, depth: int = 1): ).mount() -def data_table(file_url: str): - return _node("data-table", fileUrl=file_url) +def data_table(file_url_or_cells: str | 
list): + if isinstance(file_url_or_cells, str): + file_url = file_url_or_cells + return _node("data-table", fileUrl=file_url) + else: + cells = file_url_or_cells + return _node("data-table-raw", cells=cells) def table(df: "pd.DataFrame"): diff --git a/recipes/BulkEval.py b/recipes/BulkEval.py index d178fa57c..0672f662b 100644 --- a/recipes/BulkEval.py +++ b/recipes/BulkEval.py @@ -1,8 +1,8 @@ import itertools import typing -import uuid from itertools import zip_longest +import typing_extensions from pydantic import BaseModel, Field import gooey_ui as st @@ -19,7 +19,7 @@ ) from daras_ai_v2.language_model_settings_widgets import language_model_settings from daras_ai_v2.prompt_vars import render_prompt_vars -from recipes.BulkRunner import read_df_any +from recipes.BulkRunner import read_df_any, list_view_editor, del_button from recipes.DocSearch import render_documents NROWS_CACHE_KEY = "__nrows" @@ -58,8 +58,8 @@ class EvalPrompt(typing.TypedDict): prompt: str -class AggFunction(typing.TypedDict): - column: str +class AggFunction(typing_extensions.TypedDict): + column: typing_extensions.NotRequired[str] function: typing.Literal[tuple(AggFunctionsList)] @@ -73,54 +73,57 @@ class AggFunctionResult(typing.TypedDict): def _render_results(results: list[AggFunctionResult]): import plotly.graph_objects as go from plotly.colors import sample_colorscale - from plotly.subplots import make_subplots for k, g in itertools.groupby(results, key=lambda d: d["function"]): - st.write("---\n##### " + k.capitalize()) + st.write("---\n###### **Aggregate**: " + k.capitalize()) g = list(g) + columns = [d["column"] for d in g] values = [round(d["value"], 2) for d in g] - norm_values = [(v - min(values)) / (max(values) - min(values)) for v in values] + + norm_values = [ + (v - min(values)) / ((max(values) - min(values)) or 1) for v in values + ] colors = sample_colorscale("RdYlGn", norm_values, colortype="tuple") colors = [f"rgba{(r * 255, g * 255, b * 255, 0.5)}" for r, g, b in colors] - fig = make_subplots( - rows=2, - shared_xaxes=True, - specs=[[{"type": "table"}], [{"type": "bar"}]], - vertical_spacing=0.03, - row_heights=[0.3, 0.7], + st.data_table( + [ + ["Metric", k.capitalize(), "Count"], + ] + + [ + [ + columns[i], + dict( + kind="number", + readonly=True, + displayData=str(values[i]), + data=values[i], + themeOverride=dict(bgCell=colors[i]), + ), + g[i].get("count", 1), + ] + for i in range(len(g)) + ] ) - counts = [d.get("count", 1) for d in g] - fig.add_trace( - go.Table( - header=dict(values=["Metric", k.capitalize(), "Count"]), - cells=dict( - values=[columns, values, counts], - fill_color=["aliceblue", colors, "aliceblue"], + + fig = go.Figure( + data=[ + go.Bar( + name=k, + x=columns, + y=values, + marker=dict(color=colors), + text=values, + texttemplate="%{text}", + insidetextanchor="middle", + insidetextfont=dict(size=24), ), + ], + layout=dict( + margin=dict(l=0, r=0, t=24, b=0), ), - row=1, - col=1, - ) - fig.add_trace( - go.Bar( - name=k, - x=columns, - y=values, - marker=dict(color=colors), - text=values, - texttemplate="%{text}", - insidetextanchor="middle", - insidetextfont=dict(size=24), - ), - row=2, - col=1, - ) - fig.update_layout( - margin=dict(l=0, r=0, t=24, b=0), - # autosize=True, ) st.plotly_chart(fig) @@ -160,7 +163,7 @@ class RequestModel(LLMSettingsMixin, BaseModel): """, ) - eval_prompts: list[EvalPrompt] = Field( + eval_prompts: list[EvalPrompt] | None = Field( title="Evaluation Prompts", description=""" Specify custom LLM prompts to calculate metrics that evaluate each row 
of the input data. The output should be a JSON object mapping the metric names to values. @@ -171,7 +174,7 @@ class RequestModel(LLMSettingsMixin, BaseModel): agg_functions: list[AggFunction] | None = Field( title="Aggregations", description=""" -Aggregate using one or more operations over the specified columns. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). +Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). """, ) @@ -199,10 +202,8 @@ def render_form_v2(self): st.write("---") def render_inputs(key: str, del_key: str, d: EvalPrompt): - col1, col2 = st.columns([1, 8], responsive=False) + col1, col2 = st.columns([8, 1], responsive=False) with col1: - st.button("❌️", key=del_key, type="tertiary") - with col2: d["name"] = st.text_input( label="", label_visibility="collapsed", @@ -218,6 +219,8 @@ def render_inputs(key: str, del_key: str, d: EvalPrompt): value=d.get("prompt"), height=500, ).strip() + with col2: + del_button(del_key) st.write("##### " + field_title_desc(self.RequestModel, "eval_prompts")) list_view_editor( @@ -226,33 +229,34 @@ def render_inputs(key: str, del_key: str, d: EvalPrompt): render_inputs=render_inputs, ) - def render_inputs(key: str, del_key: str, d: AggFunction): - col1, col2, col3 = st.columns([1, 5, 3], responsive=False) + def render_agg_inputs(key: str, del_key: str, d: AggFunction): + col1, col3 = st.columns([8, 1], responsive=False) with col1: - st.button("❌️", key=del_key, type="tertiary") - with col2: - d["column"] = st.text_input( - "", - label_visibility="collapsed", - placeholder="Column Name", - key=key + ":column", - value=d.get("column"), - ).strip() + # d["column"] = st.text_input( + # "", + # label_visibility="collapsed", + # placeholder="Column Name", + # key=key + ":column", + # value=d.get("column"), + # ).strip() + # with col2: + with st.div(className="pt-1"): + d["function"] = st.selectbox( + "", + label_visibility="collapsed", + key=key + ":func", + options=AggFunctionsList, + default_value=d.get("function"), + ) with col3: - d["function"] = st.selectbox( - "", - label_visibility="collapsed", - key=key + ":func", - options=AggFunctionsList, - default_value=d.get("function"), - ) + del_button(del_key) st.html("
") st.write("##### " + field_title_desc(self.RequestModel, "agg_functions")) list_view_editor( add_btn_label="➕ Add an Aggregation", key="agg_functions", - render_inputs=render_inputs, + render_inputs=render_agg_inputs, ) def render_settings(self): @@ -325,7 +329,7 @@ def run_v2( out_df = pd.DataFrame.from_records(out_recs) f = upload_file_from_bytes( - filename=f"bulk-runner-{doc_ix}-{df_ix}.csv", + filename=f"evaluator-{doc_ix}-{df_ix}.csv", data=out_df.to_csv(index=False).encode(), content_type="text/csv", ) @@ -333,17 +337,22 @@ def run_v2( if out_df is None: continue - for agg_ix, agg in enumerate(request.agg_functions): - col_values = out_df[agg["column"]].dropna() - agg_value = col_values.agg(agg["function"]) - response.aggregations[doc_ix].append( - { - "column": agg["column"], - "function": agg["function"], - "count": len(col_values), - "value": agg_value, - } - ) + for agg in request.agg_functions: + if agg.get("column"): + cols = [agg["column"]] + else: + cols = out_df.select_dtypes(include=["float", "int"]).columns + for col in cols: + col_values = out_df[col].dropna() + agg_value = col_values.agg(agg["function"]) + response.aggregations[doc_ix].append( + { + "column": col, + "function": agg["function"], + "count": len(col_values), + "value": agg_value, + } + ) def fields_to_save(self) -> [str]: return super().fields_to_save() + [NROWS_CACHE_KEY] @@ -364,30 +373,3 @@ def get_raw_price(self, state: dict) -> float: def get_nrows(files: list[str]) -> int: dfs = map_parallel(read_df_any, files) return sum((len(df) for df in dfs), 0) - - -def list_view_editor( - *, - add_btn_label: str, - key: str, - render_labels: typing.Callable = None, - render_inputs: typing.Callable[[str, str, dict], None], -): - old_lst = st.session_state.setdefault(key, []) - add_key = f"--{key}:add" - if st.session_state.get(add_key): - old_lst.append({}) - label_placeholder = st.div() - new_lst = [] - for d in old_lst: - entry_key = d.setdefault("__key__", f"--{key}:{uuid.uuid1()}") - del_key = entry_key + ":del" - if st.session_state.pop(del_key, None): - continue - render_inputs(entry_key, del_key, d) - new_lst.append(d) - if new_lst and render_labels: - with label_placeholder: - render_labels() - st.session_state[key] = new_lst - st.button(add_btn_label, key=add_key) diff --git a/recipes/BulkRunner.py b/recipes/BulkRunner.py index 8af80eca0..7c133925a 100644 --- a/recipes/BulkRunner.py +++ b/recipes/BulkRunner.py @@ -1,13 +1,13 @@ import datetime import io import typing +import uuid -from fastapi import HTTPException from furl import furl from pydantic import BaseModel, Field import gooey_ui as st -from bots.models import Workflow +from bots.models import Workflow, SavedRun from daras_ai.image_input import upload_file_from_bytes from daras_ai_v2.base import BasePage from daras_ai_v2.doc_search_settings_widgets import document_uploader @@ -34,16 +34,16 @@ class RequestModel(BaseModel): documents: list[str] = Field( title="Input Data Spreadsheet", description=""" -Upload or link to a CSV or google sheet that contains your sample input data. -For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. +Upload or link to a CSV or google sheet that contains your sample input data. +For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. Remember to includes header names in your CSV too. 
""", ) run_urls: list[str] = Field( - title="Gooey Workflow URL(s)", + title="Gooey Workflows", description=""" -Paste in one or more Gooey.AI workflow links (on separate lines). -You can add multiple URLs runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. +Provide one or more Gooey.AI workflow runs. +You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. """, ) @@ -60,29 +60,37 @@ class RequestModel(BaseModel): """, ) + eval_urls: list[str] | None = Field( + title="Evaluation Workflows", + description=""" +_(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. + """, + ) + class ResponseModel(BaseModel): output_documents: list[str] + eval_runs: list[str] | None = Field( + title="Evaluation Run URLs", + description=""" +List of URLs to the evaluation runs that you requested. + """, + ) + def preview_image(self, state: dict) -> str | None: return DEFAULT_BULK_META_IMG def render_form_v2(self): - from daras_ai_v2.all_pages import page_slug_map, normalize_slug - - run_urls = st.session_state.get("run_urls", "") - st.session_state.setdefault("__run_urls", "\n".join(run_urls)) - run_urls = ( - st.text_area( - f"##### {field_title_desc(self.RequestModel, 'run_urls')}", - key="__run_urls", - ) - .strip() - .splitlines() + st.write(f"##### {field_title_desc(self.RequestModel, 'run_urls')}") + run_urls = list_view_editor( + add_btn_label="➕ Add a Workflow", + key="run_urls", + render_inputs=render_run_url_inputs, + flatten_dict_key="url", ) - st.session_state["run_urls"] = run_urls files = document_uploader( - f"##### {field_title_desc(self.RequestModel, 'documents')}", + f"---\n##### {field_title_desc(self.RequestModel, 'documents')}", accept=(".csv", ".xlsx", ".xls", ".json", ".tsv", ".xml"), ) @@ -91,19 +99,9 @@ def render_form_v2(self): output_fields = {} for url in run_urls: - f = furl(url) - slug = f.path.segments[0] - try: - page_cls = page_slug_map[normalize_slug(slug)] - except KeyError as e: - st.error(repr(e)) - continue - - example_id, run_id, uid = extract_query_params(f.query.params) try: - sr = page_cls.get_sr_from_query_params(example_id, run_id, uid) - except HTTPException as e: - st.error(repr(e)) + page_cls, sr = url_to_sr(url) + except: continue schema = page_cls.RequestModel.schema(ref_template="{model}") @@ -144,8 +142,7 @@ def render_form_v2(self): st.write( """ -##### Input Data Preview -Here's what you uploaded: +###### **Preview**: Here's what you uploaded """ ) for file in files: @@ -154,14 +151,15 @@ def render_form_v2(self): if not (required_input_fields or optional_input_fields): return - st.write( - """ ---- -Please select which CSV column corresponds to your workflow's input fields. -For the outputs, please fill in what the column name should be that corresponds to each output too. + with st.div(className="pt-3"): + st.write( + """ +###### **Columns** +Please select which CSV column corresponds to your workflow's input fields. +For the outputs, select the fields that should be included in the output CSV. To understand what each field represents, check out our [API docs](https://api.gooey.ai/docs). 
- """ - ) + """, + ) visible_col1, visible_col2 = st.columns(2) with st.expander("🤲 Show All Columns"): @@ -231,14 +229,34 @@ def render_form_v2(self): if col: output_columns_new[field] = title + st.write("---") + st.write(f"##### {field_title_desc(self.RequestModel, 'eval_urls')}") + list_view_editor( + add_btn_label="➕ Add an Eval", + key="eval_urls", + render_inputs=render_eval_url_inputs, + flatten_dict_key="url", + ) + def render_example(self, state: dict): render_documents(state) def render_output(self): - files = st.session_state.get("output_documents", []) - for file in files: - st.write(file) - st.data_table(file) + eval_runs = st.session_state.get("eval_runs") + + if eval_runs: + _backup = st.session_state + for url in eval_runs: + page_cls, sr = url_to_sr(url) + st.set_session_state(sr.state) + page_cls().render_output() + st.write("---") + st.set_session_state(_backup) + else: + files = st.session_state.get("output_documents", []) + for file in files: + st.write(file) + st.data_table(file) def run_v2( self, @@ -334,6 +352,23 @@ def run_v2( ) response.output_documents[doc_ix] = f + if not request.eval_urls: + return + + response.eval_runs = [] + for url in request.eval_urls: + page_cls, sr = url_to_sr(url) + yield f"Running {page_cls().get_recipe_title(sr.state)}..." + request_body = page_cls.RequestModel( + documents=response.output_documents + ).dict(exclude_unset=True) + result, sr = sr.submit_api_call( + current_user=self.request.user, request_body=request_body + ) + result.get(disable_sync_subtasks=False) + sr.refresh_from_db() + response.eval_runs.append(sr.get_app_url()) + def preview_description(self, state: dict) -> str: return """ Which AI model actually works best for your needs? @@ -364,6 +399,191 @@ def render_description(self): ) +def render_run_url_inputs(key: str, del_key: str, d: dict): + from daras_ai_v2.all_pages import all_home_pages + + _prefill_workflow(d, key) + + col1, col2, col3 = st.columns([10, 1, 1], responsive=False) + if not d.get("workflow") and d.get("url"): + with col1: + url = st.text_input( + "", + key=key + ":url", + value=d.get("url"), + placeholder="https://gooey.ai/.../?run_id=...", + ) + else: + with col1: + scol1, scol2, scol3 = st.columns([5, 6, 1], responsive=False) + with scol1: + with st.div(className="pt-1"): + options = { + page_cls.workflow: page_cls().get_recipe_title( + page_cls.recipe_doc_sr().state + ) + for page_cls in all_home_pages + } + last_workflow_key = "__last_run_url_workflow" + workflow = st.selectbox( + "", + key=key + ":workflow", + default_value=( + d.get("workflow") or st.session_state.get(last_workflow_key) + ), + options=options, + format_func=lambda x: options[x], + ) + d["workflow"] = workflow + # use this to set default for next time + st.session_state[last_workflow_key] = workflow + with scol2: + options = { + SavedRun.objects.get( + workflow=d["workflow"], + example_id__isnull=True, + run_id__isnull=True, + uid__isnull=True, + ).get_app_url(): "Default" + } | { + sr.get_app_url(): sr.page_title + for sr in SavedRun.objects.filter( + workflow=d["workflow"], + example_id__isnull=False, + run_id__isnull=True, + uid__isnull=True, + hidden=False, + ).exclude(page_title="") + } + with st.div(className="pt-1"): + url = st.selectbox( + "", + key=key + ":url", + options=options, + default_value=d.get("url"), + format_func=lambda x: options[x], + ) + with scol3: + edit_button(key + ":editmode") + with col2: + url_button(url) + with col3: + del_button(del_key) + + try: + url_to_sr(url) + except Exception as e: + 
st.error(repr(e)) + d["url"] = url + + +def render_eval_url_inputs(key: str, del_key: str, d: dict): + _prefill_workflow(d, key) + + col1, col2, col3 = st.columns([10, 1, 1], responsive=False) + if not d.get("workflow") and d.get("url"): + with col1: + url = st.text_input( + "", + key=key + ":url", + value=d.get("url"), + placeholder="https://gooey.ai/.../?run_id=...", + ) + else: + d["workflow"] = Workflow.BULK_EVAL + with col1: + scol1, scol2 = st.columns([11, 1], responsive=False) + with scol1: + from recipes.BulkEval import BulkEvalPage + + options = { + BulkEvalPage().recipe_doc_sr().get_app_url(): "Default", + } | { + sr.get_app_url(): sr.page_title + for sr in SavedRun.objects.filter( + workflow=Workflow.BULK_EVAL, + example_id__isnull=False, + run_id__isnull=True, + uid__isnull=True, + hidden=False, + ).exclude(page_title="") + } + with st.div(className="pt-1"): + url = st.selectbox( + "", + key=key + ":url", + options=options, + default_value=d.get("url"), + format_func=lambda x: options[x], + ) + with scol2: + edit_button(key + ":editmode") + with col2: + url_button(url) + with col3: + del_button(del_key) + + try: + url_to_sr(url) + except Exception as e: + st.error(repr(e)) + d["url"] = url + + +def url_button(url): + st.html( + f""" + + + + """ + ) + + +def edit_button(key: str): + st.button( + '', + key=key, + type="tertiary", + ) + + +def del_button(key: str): + st.button( + '', + key=key, + type="tertiary", + ) + + +def _prefill_workflow(d: dict, key: str): + if st.session_state.get(key + ":editmode"): + d.pop("workflow", None) + elif not d.get("workflow") and d.get("url"): + try: + page_cls, sr = url_to_sr(d.get("url")) + except: + return + if (sr.example_id and sr.page_title and not sr.hidden) or not ( + sr.example_id or sr.run_id or sr.uid + ): + d["workflow"] = sr.workflow + d["url"] = sr.get_app_url() + + +def url_to_sr(url: str) -> tuple[typing.Type[BasePage], SavedRun]: + from daras_ai_v2.all_pages import page_slug_map, normalize_slug + + f = furl(url) + slug = f.path.segments[0] + page_cls = page_slug_map[normalize_slug(slug)] + example_id, run_id, uid = extract_query_params(f.query.params) + sr = page_cls.get_sr_from_query_params(example_id, run_id, uid) + return page_cls, sr + + def build_requests_for_df(df, request, df_ix, arr_len): from daras_ai_v2.all_pages import page_slug_map, normalize_slug @@ -483,3 +703,48 @@ def read_df_any(f_url: str) -> "pd.DataFrame": raise ValueError(f"Unsupported file type: {f_url}") return df.dropna(how="all", axis=1).dropna(how="all", axis=0).fillna("") + + +def list_view_editor( + *, + add_btn_label: str, + key: str, + render_labels: typing.Callable = None, + render_inputs: typing.Callable[[str, str, dict], None], + flatten_dict_key: str = None, +): + if flatten_dict_key: + list_key = f"--list-view:{key}" + st.session_state.setdefault( + list_key, + [{flatten_dict_key: val} for val in st.session_state.get(key, [])], + ) + new_lst = list_view_editor( + add_btn_label=add_btn_label, + key=list_key, + render_labels=render_labels, + render_inputs=render_inputs, + ) + ret = [d[flatten_dict_key] for d in new_lst] + st.session_state[key] = ret + return ret + + old_lst = st.session_state.setdefault(key, []) + add_key = f"--{key}:add" + if st.session_state.get(add_key): + old_lst.append({}) + label_placeholder = st.div() + new_lst = [] + for d in old_lst: + entry_key = d.setdefault("__key__", f"--{key}:{uuid.uuid1()}") + del_key = entry_key + ":del" + if st.session_state.pop(del_key, None): + continue + render_inputs(entry_key, del_key, d) + 
new_lst.append(d) + if new_lst and render_labels: + with label_placeholder: + render_labels() + st.session_state[key] = new_lst + st.button(add_btn_label, key=add_key) + return new_lst From 8f1c786a4a5415c80134d8faecce41ebf8909d57 Mon Sep 17 00:00:00 2001 From: Alexander Metzger Date: Wed, 20 Dec 2023 14:23:16 -0800 Subject: [PATCH 134/138] simplified google translate fix, won't disallow unsuported langs from ui --- daras_ai_v2/asr.py | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/daras_ai_v2/asr.py b/daras_ai_v2/asr.py index ae5f22122..f5f65ac88 100644 --- a/daras_ai_v2/asr.py +++ b/daras_ai_v2/asr.py @@ -157,6 +157,34 @@ def google_translate_languages() -> dict[str, str]: } +@redis_cache_decorator +def google_translate_input_languages() -> dict[str, str]: + """ + Get list of supported languages for Google Translate. + :return: Dictionary of language codes and display names. + """ + from google.cloud import translate + + _, project = get_google_auth_session() + parent = f"projects/{project}/locations/global" + client = translate.TranslationServiceClient() + supported_languages = client.get_supported_languages( + parent=parent, display_language_code="en" + ) + return { + lang.language_code: lang.display_name + for lang in supported_languages.languages + if lang.support_source + } + + +def get_language_in_collection(langcode: str, languages): + for lang in languages: + if langcodes.get(lang).language == langcodes.get(langcode).language: + return langcode + return None + + def asr_language_selector( selected_model: AsrModels, label="##### Spoken Language", @@ -209,6 +237,19 @@ def run_google_translate( """ from google.cloud import translate_v2 as translate + # convert to BCP-47 format (google handles consistent language codes but sometimes gets confused by a mix of iso2 and iso3 which we have) + if source_language: + source_language = langcodes.Language.get(source_language).to_tag() + source_language = get_language_in_collection( + source_language, google_translate_input_languages().keys() + ) # this will default to autodetect if language is not found as supported + target_language = langcodes.Language.get(target_language).to_tag() + target_language: str | None = get_language_in_collection( + target_language, google_translate_languages().keys() + ) + if not target_language: + raise ValueError(f"Unsupported target language: {target_language!r}") + # if the language supports transliteration, we should check if the script is Latin if source_language and source_language not in TRANSLITERATION_SUPPORTED: language_codes = [source_language] * len(texts) @@ -339,6 +380,8 @@ def run_asr( from google.api_core.client_options import ClientOptions from google.cloud.texttospeech_v1 import AudioEncoding + return "మిరుపు పంటలో రసం పిల్చే పురుగులను అరికట్టడానికి ఎటువంటి కషాయాలు వాడాలి" + selected_model = AsrModels[selected_model] output_format = AsrOutputFormat[output_format] is_youtube_url = "youtube" in audio_url or "youtu.be" in audio_url From a10b5f0924acb52a7329e3b9111e48903ae626b9 Mon Sep 17 00:00:00 2001 From: Alexander Metzger Date: Wed, 20 Dec 2023 14:25:12 -0800 Subject: [PATCH 135/138] removed test code --- daras_ai_v2/asr.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/daras_ai_v2/asr.py b/daras_ai_v2/asr.py index f5f65ac88..898ef61be 100644 --- a/daras_ai_v2/asr.py +++ b/daras_ai_v2/asr.py @@ -380,8 +380,6 @@ def run_asr( from google.api_core.client_options import ClientOptions from google.cloud.texttospeech_v1 import AudioEncoding - return 
"మిరుపు పంటలో రసం పిల్చే పురుగులను అరికట్టడానికి ఎటువంటి కషాయాలు వాడాలి" - selected_model = AsrModels[selected_model] output_format = AsrOutputFormat[output_format] is_youtube_url = "youtube" in audio_url or "youtu.be" in audio_url From 0613ad6b15e3bc69d91f3d06e9b4341d11f37c6f Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Thu, 21 Dec 2023 20:55:44 +0530 Subject: [PATCH 136/138] migrate --- ...rkflow_alter_savedrun_workflow_and_more.py | 103 ++++++++++++++++++ daras_ai_v2/base.py | 6 +- 2 files changed, 108 insertions(+), 1 deletion(-) create mode 100644 bots/migrations/0053_alter_publishedrun_workflow_alter_savedrun_workflow_and_more.py diff --git a/bots/migrations/0053_alter_publishedrun_workflow_alter_savedrun_workflow_and_more.py b/bots/migrations/0053_alter_publishedrun_workflow_alter_savedrun_workflow_and_more.py new file mode 100644 index 000000000..4398af91d --- /dev/null +++ b/bots/migrations/0053_alter_publishedrun_workflow_alter_savedrun_workflow_and_more.py @@ -0,0 +1,103 @@ +# Generated by Django 4.2.7 on 2023-12-21 15:25 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("bots", "0052_alter_publishedrun_options_and_more"), + ] + + operations = [ + migrations.AlterField( + model_name="publishedrun", + name="workflow", + field=models.IntegerField( + choices=[ + (1, "Doc Search"), + (2, "Doc Summary"), + (3, "Google GPT"), + (4, "Copilot"), + (5, "Lipysnc + TTS"), + (6, "Text to Speech"), + (7, "Speech Recognition"), + (8, "Lipsync"), + (9, "Deforum Animation"), + (10, "Compare Text2Img"), + (11, "Text2Audio"), + (12, "Img2Img"), + (13, "Face Inpainting"), + (14, "Google Image Gen"), + (15, "Compare AI Upscalers"), + (16, "SEO Summary"), + (17, "Email Face Inpainting"), + (18, "Social Lookup Email"), + (19, "Object Inpainting"), + (20, "Image Segmentation"), + (21, "Compare LLM"), + (22, "Chyron Plant"), + (23, "Letter Writer"), + (24, "Smart GPT"), + (25, "AI QR Code"), + (26, "Doc Extract"), + (27, "Related QnA Maker"), + (28, "Related QnA Maker Doc"), + (29, "Embeddings"), + (30, "Bulk Runner"), + (31, "Bulk Evaluator"), + ] + ), + ), + migrations.AlterField( + model_name="savedrun", + name="workflow", + field=models.IntegerField( + choices=[ + (1, "Doc Search"), + (2, "Doc Summary"), + (3, "Google GPT"), + (4, "Copilot"), + (5, "Lipysnc + TTS"), + (6, "Text to Speech"), + (7, "Speech Recognition"), + (8, "Lipsync"), + (9, "Deforum Animation"), + (10, "Compare Text2Img"), + (11, "Text2Audio"), + (12, "Img2Img"), + (13, "Face Inpainting"), + (14, "Google Image Gen"), + (15, "Compare AI Upscalers"), + (16, "SEO Summary"), + (17, "Email Face Inpainting"), + (18, "Social Lookup Email"), + (19, "Object Inpainting"), + (20, "Image Segmentation"), + (21, "Compare LLM"), + (22, "Chyron Plant"), + (23, "Letter Writer"), + (24, "Smart GPT"), + (25, "AI QR Code"), + (26, "Doc Extract"), + (27, "Related QnA Maker"), + (28, "Related QnA Maker Doc"), + (29, "Embeddings"), + (30, "Bulk Runner"), + (31, "Bulk Evaluator"), + ], + default=4, + ), + ), + migrations.AddIndex( + model_name="publishedrun", + index=models.Index( + fields=[ + "workflow", + "visibility", + "is_approved_example", + "published_run_id", + ], + name="bots_publis_workflo_d3ad4e_idx", + ), + ), + ] diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py index daa985040..3368a36dd 100644 --- a/daras_ai_v2/base.py +++ b/daras_ai_v2/base.py @@ -693,7 +693,11 @@ def _render_breadcrumbs(self, items: list[tuple[str, str | None]]): ) def get_recipe_title(self) -> str: - 
return self.get_or_create_root_published_run().title or self.title or self.workflow.label + return ( + self.get_or_create_root_published_run().title + or self.title + or self.workflow.label + ) def get_explore_image(self, state: dict) -> str: return self.explore_image or "" From 0a15d9fd53df464f7c4eb9c18bb8ae2af9c60239 Mon Sep 17 00:00:00 2001 From: Kaustubh Maske Patil <37668193+nikochiko@users.noreply.github.com> Date: Thu, 21 Dec 2023 22:36:07 +0530 Subject: [PATCH 137/138] Fix get_recipe_title usage --- recipes/BulkRunner.py | 42 ++++++++++++++++++++---------------------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/recipes/BulkRunner.py b/recipes/BulkRunner.py index 7c133925a..9cdbd82d4 100644 --- a/recipes/BulkRunner.py +++ b/recipes/BulkRunner.py @@ -34,15 +34,15 @@ class RequestModel(BaseModel): documents: list[str] = Field( title="Input Data Spreadsheet", description=""" -Upload or link to a CSV or google sheet that contains your sample input data. -For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. +Upload or link to a CSV or google sheet that contains your sample input data. +For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. Remember to includes header names in your CSV too. """, ) run_urls: list[str] = Field( title="Gooey Workflows", description=""" -Provide one or more Gooey.AI workflow runs. +Provide one or more Gooey.AI workflow runs. You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. """, ) @@ -142,7 +142,7 @@ def render_form_v2(self): st.write( """ -###### **Preview**: Here's what you uploaded +###### **Preview**: Here's what you uploaded """ ) for file in files: @@ -155,8 +155,8 @@ def render_form_v2(self): st.write( """ ###### **Columns** -Please select which CSV column corresponds to your workflow's input fields. -For the outputs, select the fields that should be included in the output CSV. +Please select which CSV column corresponds to your workflow's input fields. +For the outputs, select the fields that should be included in the output CSV. To understand what each field represents, check out our [API docs](https://api.gooey.ai/docs). """, ) @@ -358,7 +358,7 @@ def run_v2( response.eval_runs = [] for url in request.eval_urls: page_cls, sr = url_to_sr(url) - yield f"Running {page_cls().get_recipe_title(sr.state)}..." + yield f"Running {page_cls().get_recipe_title()}..." request_body = page_cls.RequestModel( documents=response.output_documents ).dict(exclude_unset=True) @@ -371,30 +371,30 @@ def run_v2( def preview_description(self, state: dict) -> str: return """ -Which AI model actually works best for your needs? -Upload your own data and evaluate any Gooey.AI workflow, LLM or AI model against any other. -Great for large data sets, AI model evaluation, task automation, parallel processing and automated testing. -To get started, paste in a Gooey.AI workflow, upload a CSV of your test data (with header names!), check the mapping of headers to workflow inputs and tap Submit. -More tips in the Details below. +Which AI model actually works best for your needs? +Upload your own data and evaluate any Gooey.AI workflow, LLM or AI model against any other. +Great for large data sets, AI model evaluation, task automation, parallel processing and automated testing. 
+To get started, paste in a Gooey.AI workflow, upload a CSV of your test data (with header names!), check the mapping of headers to workflow inputs and tap Submit. +More tips in the Details below. """ def render_description(self): st.write( """ -Building complex AI workflows like copilot) and then evaluating each iteration is complex. -Workflows are affected by the particular LLM used (GPT4 vs PalM2), their vector DB knowledge sets (e.g. your google docs), how synthetic data creation happened (e.g. how you transformed your video transcript or PDF into structured data), which translation or speech engine you used and your LLM prompts. Every change can affect the quality of your outputs. +Building complex AI workflows like copilot) and then evaluating each iteration is complex. +Workflows are affected by the particular LLM used (GPT4 vs PalM2), their vector DB knowledge sets (e.g. your google docs), how synthetic data creation happened (e.g. how you transformed your video transcript or PDF into structured data), which translation or speech engine you used and your LLM prompts. Every change can affect the quality of your outputs. 1. This bulk tool enables you to do two incredible things: -2. Upload your own set of inputs (e.g. typical questions to your bot) to any gooey workflow (e.g. /copilot) and run them in bulk to generate outputs or answers. -3. Compare the results of competing workflows to determine which one generates better outputs. +2. Upload your own set of inputs (e.g. typical questions to your bot) to any gooey workflow (e.g. /copilot) and run them in bulk to generate outputs or answers. +3. Compare the results of competing workflows to determine which one generates better outputs. To get started: 1. Enter the Gooey.AI Workflow URLs that you'd like to run in bulk 2. Enter a csv of sample inputs to run in bulk -3. Ensure that the mapping between your inputs and API parameters of the Gooey.AI workflow are correctly mapped. -4. Tap Submit. +3. Ensure that the mapping between your inputs and API parameters of the Gooey.AI workflow are correctly mapped. +4. Tap Submit. 5. Wait for results -6. Make a change to your Gooey Workflow, copy its URL and repeat Step 1 (or just add the link to see the results of both workflows together) +6. 
Make a change to your Gooey Workflow, copy its URL and repeat Step 1 (or just add the link to see the results of both workflows together) """ ) @@ -419,9 +419,7 @@ def render_run_url_inputs(key: str, del_key: str, d: dict): with scol1: with st.div(className="pt-1"): options = { - page_cls.workflow: page_cls().get_recipe_title( - page_cls.recipe_doc_sr().state - ) + page_cls.workflow: page_cls().get_recipe_title() for page_cls in all_home_pages } last_workflow_key = "__last_run_url_workflow" From 93715d3e708857955e410a15cbddd5ce2ee9f666 Mon Sep 17 00:00:00 2001 From: Dev Aggarwal Date: Tue, 26 Dec 2023 20:48:47 +0530 Subject: [PATCH 138/138] mark example_id, page_title and page_notes as deprecated in admin --- bots/models.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/bots/models.py b/bots/models.py index 1968569c1..f44b64bb9 100644 --- a/bots/models.py +++ b/bots/models.py @@ -152,7 +152,6 @@ class SavedRun(models.Model): workflow = models.IntegerField( choices=Workflow.choices, default=Workflow.VIDEO_BOTS ) - example_id = models.CharField(max_length=128, default=None, null=True, blank=True) run_id = models.CharField(max_length=128, default=None, null=True, blank=True) uid = models.CharField(max_length=128, default=None, null=True, blank=True) @@ -161,8 +160,6 @@ class SavedRun(models.Model): error_msg = models.TextField(default="", blank=True) run_time = models.DurationField(default=datetime.timedelta, blank=True) run_status = models.TextField(default="", blank=True) - page_title = models.TextField(default="", blank=True) - page_notes = models.TextField(default="", blank=True) hidden = models.BooleanField(default=False) is_flagged = models.BooleanField(default=False) @@ -180,6 +177,12 @@ class SavedRun(models.Model): updated_at = models.DateTimeField(auto_now=True) created_at = models.DateTimeField(auto_now_add=True) + example_id = models.CharField( + max_length=128, default=None, null=True, blank=True, help_text="(Deprecated)" + ) + page_title = models.TextField(default="", blank=True, help_text="(Deprecated)") + page_notes = models.TextField(default="", blank=True, help_text="(Deprecated)") + objects = SavedRunQuerySet.as_manager() class Meta:
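
A note on the list_view_editor helper added in the BulkRunner patches above: it drives the add/remove-rows UI and expects a render_inputs(key, del_key, d) callback, where key is a stable per-entry session key, pressing any button rendered under del_key marks the entry for removal on the next rerun, and whatever the callback writes into d is persisted across reruns. The sketch below only illustrates that contract; render_email_inputs, the "recipient_emails" state key, and the import paths are hypothetical assumptions, not code from the repository.

    import gooey_ui as st  # assumed alias for the repo's Streamlit-like UI layer, as in the recipe modules above
    from recipes.BulkRunner import list_view_editor  # defined in the BulkRunner patch above (assumed import path)

    def render_email_inputs(key: str, del_key: str, d: dict):
        # `key` stays stable for this entry across reruns; `d` is the entry's persistent dict
        col1, col2 = st.columns([11, 1], responsive=False)
        with col1:
            d["email"] = st.text_input("", key=key + ":email", value=d.get("email", ""))
        with col2:
            # the delete button just has to use `del_key`; list_view_editor pops it and skips the entry
            st.button("Remove", key=del_key, type="tertiary")

    emails = list_view_editor(
        add_btn_label="Add email",
        key="recipient_emails",
        render_inputs=render_email_inputs,
        flatten_dict_key="email",  # expose a flat list of strings instead of per-entry dicts
    )

With flatten_dict_key set, callers read and write a plain list of strings under the public key, while the editor keeps its per-entry dicts (including the generated __key__ field) under a private "--list-view:*" session key.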