diff --git a/bots/admin.py b/bots/admin.py
index e3a872644..6749d802f 100644
--- a/bots/admin.py
+++ b/bots/admin.py
@@ -222,7 +222,7 @@ class PublishedRunAdmin(admin.ModelAdmin):
]
list_filter = ["workflow"]
search_fields = ["workflow", "published_run_id"]
-
+ autocomplete_fields = ["saved_run", "created_by", "last_edited_by"]
readonly_fields = [
"open_in_gooey",
"created_at",
@@ -297,6 +297,7 @@ def preview_input(self, saved_run: SavedRun):
@admin.register(PublishedRunVersion)
class PublishedRunVersionAdmin(admin.ModelAdmin):
search_fields = ["id", "version_id", "published_run__published_run_id"]
+ autocomplete_fields = ["published_run", "saved_run", "changed_by"]
class LastActiveDeltaFilter(admin.SimpleListFilter):
diff --git a/bots/migrations/0053_alter_publishedrun_workflow_alter_savedrun_workflow_and_more.py b/bots/migrations/0053_alter_publishedrun_workflow_alter_savedrun_workflow_and_more.py
new file mode 100644
index 000000000..4398af91d
--- /dev/null
+++ b/bots/migrations/0053_alter_publishedrun_workflow_alter_savedrun_workflow_and_more.py
@@ -0,0 +1,103 @@
+# Generated by Django 4.2.7 on 2023-12-21 15:25
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ("bots", "0052_alter_publishedrun_options_and_more"),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name="publishedrun",
+ name="workflow",
+ field=models.IntegerField(
+ choices=[
+ (1, "Doc Search"),
+ (2, "Doc Summary"),
+ (3, "Google GPT"),
+ (4, "Copilot"),
+                    (5, "Lipsync + TTS"),
+ (6, "Text to Speech"),
+ (7, "Speech Recognition"),
+ (8, "Lipsync"),
+ (9, "Deforum Animation"),
+ (10, "Compare Text2Img"),
+ (11, "Text2Audio"),
+ (12, "Img2Img"),
+ (13, "Face Inpainting"),
+ (14, "Google Image Gen"),
+ (15, "Compare AI Upscalers"),
+ (16, "SEO Summary"),
+ (17, "Email Face Inpainting"),
+ (18, "Social Lookup Email"),
+ (19, "Object Inpainting"),
+ (20, "Image Segmentation"),
+ (21, "Compare LLM"),
+ (22, "Chyron Plant"),
+ (23, "Letter Writer"),
+ (24, "Smart GPT"),
+ (25, "AI QR Code"),
+ (26, "Doc Extract"),
+ (27, "Related QnA Maker"),
+ (28, "Related QnA Maker Doc"),
+ (29, "Embeddings"),
+ (30, "Bulk Runner"),
+ (31, "Bulk Evaluator"),
+ ]
+ ),
+ ),
+ migrations.AlterField(
+ model_name="savedrun",
+ name="workflow",
+ field=models.IntegerField(
+ choices=[
+ (1, "Doc Search"),
+ (2, "Doc Summary"),
+ (3, "Google GPT"),
+ (4, "Copilot"),
+                    (5, "Lipsync + TTS"),
+ (6, "Text to Speech"),
+ (7, "Speech Recognition"),
+ (8, "Lipsync"),
+ (9, "Deforum Animation"),
+ (10, "Compare Text2Img"),
+ (11, "Text2Audio"),
+ (12, "Img2Img"),
+ (13, "Face Inpainting"),
+ (14, "Google Image Gen"),
+ (15, "Compare AI Upscalers"),
+ (16, "SEO Summary"),
+ (17, "Email Face Inpainting"),
+ (18, "Social Lookup Email"),
+ (19, "Object Inpainting"),
+ (20, "Image Segmentation"),
+ (21, "Compare LLM"),
+ (22, "Chyron Plant"),
+ (23, "Letter Writer"),
+ (24, "Smart GPT"),
+ (25, "AI QR Code"),
+ (26, "Doc Extract"),
+ (27, "Related QnA Maker"),
+ (28, "Related QnA Maker Doc"),
+ (29, "Embeddings"),
+ (30, "Bulk Runner"),
+ (31, "Bulk Evaluator"),
+ ],
+ default=4,
+ ),
+ ),
+ migrations.AddIndex(
+ model_name="publishedrun",
+ index=models.Index(
+ fields=[
+ "workflow",
+ "visibility",
+ "is_approved_example",
+ "published_run_id",
+ ],
+ name="bots_publis_workflo_d3ad4e_idx",
+ ),
+ ),
+ ]
diff --git a/bots/models.py b/bots/models.py
index 896446c97..f8c71c54b 100644
--- a/bots/models.py
+++ b/bots/models.py
@@ -180,7 +180,6 @@ class SavedRun(models.Model):
workflow = models.IntegerField(
choices=Workflow.choices, default=Workflow.VIDEO_BOTS
)
- example_id = models.CharField(max_length=128, default=None, null=True, blank=True)
run_id = models.CharField(max_length=128, default=None, null=True, blank=True)
uid = models.CharField(max_length=128, default=None, null=True, blank=True)
@@ -189,8 +188,6 @@ class SavedRun(models.Model):
error_msg = models.TextField(default="", blank=True)
run_time = models.DurationField(default=datetime.timedelta, blank=True)
run_status = models.TextField(default="", blank=True)
- page_title = models.TextField(default="", blank=True)
- page_notes = models.TextField(default="", blank=True)
hidden = models.BooleanField(default=False)
is_flagged = models.BooleanField(default=False)
@@ -208,6 +205,12 @@ class SavedRun(models.Model):
updated_at = models.DateTimeField(auto_now=True)
created_at = models.DateTimeField(auto_now_add=True)
+ example_id = models.CharField(
+ max_length=128, default=None, null=True, blank=True, help_text="(Deprecated)"
+ )
+ page_title = models.TextField(default="", blank=True, help_text="(Deprecated)")
+ page_notes = models.TextField(default="", blank=True, help_text="(Deprecated)")
+
objects = SavedRunQuerySet.as_manager()
class Meta:
@@ -1065,6 +1068,14 @@ class Meta:
models.Index(fields=["workflow", "created_by"]),
models.Index(fields=["workflow", "published_run_id"]),
models.Index(fields=["workflow", "visibility", "is_approved_example"]),
+ models.Index(
+ fields=[
+ "workflow",
+ "visibility",
+ "is_approved_example",
+ "published_run_id",
+ ]
+ ),
]
def __str__(self):
diff --git a/daras_ai_v2/api_examples_widget.py b/daras_ai_v2/api_examples_widget.py
index da9a25a5e..8f5e1cf7e 100644
--- a/daras_ai_v2/api_examples_widget.py
+++ b/daras_ai_v2/api_examples_widget.py
@@ -93,19 +93,20 @@ def api_example_generator(
"""
1. Generate an api key [below👇](#api-keys)
-2. Install [curl](https://everything.curl.dev/get) & add the `GOOEY_API_KEY` to your environment variables.
-Never store the api key [in your code](https://12factor.net/config).
+2. Install [curl](https://everything.curl.dev/get) & add the `GOOEY_API_KEY` to your environment variables.
+Never store the api key [in your code](https://12factor.net/config).
```bash
export GOOEY_API_KEY=sk-xxxx
```
-3. Run the following `curl` command in your terminal.
+3. Run the following `curl` command in your terminal.
If you encounter any issues, write to us at support@gooey.ai and make sure to include the full curl command and the error message.
```bash
%s
```
"""
- % curl_code.strip()
+ % curl_code.strip(),
+ unsafe_allow_html=True,
)
with python:
@@ -157,8 +158,8 @@ def api_example_generator(
)
if as_async:
py_code += r"""
-from time import sleep
-
+from time import sleep
+
status_url = response.headers["Location"]
while True:
response = requests.get(status_url, headers={"Authorization": "%(auth_keyword)s " + os.environ["GOOEY_API_KEY"]})
@@ -188,20 +189,21 @@ def api_example_generator(
rf"""
1. Generate an api key [below👇](#api-keys)
-2. Install [requests](https://requests.readthedocs.io/en/latest/) & add the `GOOEY_API_KEY` to your environment variables.
-Never store the api key [in your code](https://12factor.net/config).
+2. Install [requests](https://requests.readthedocs.io/en/latest/) & add the `GOOEY_API_KEY` to your environment variables.
+Never store the api key [in your code](https://12factor.net/config).
```bash
$ python3 -m pip install requests
$ export GOOEY_API_KEY=sk-xxxx
```
-
-3. Use this sample code to call the API.
+
+3. Use this sample code to call the API.
If you encounter any issues, write to us at support@gooey.ai and make sure to include the full code snippet and the error message.
```python
%s
```
"""
- % py_code
+ % py_code,
+ unsafe_allow_html=True,
)
with js:
@@ -276,7 +278,7 @@ def api_example_generator(
if (!response.ok) {
throw new Error(response.status);
}
-
+
const result = await response.json();
if (result.status === "completed") {
console.log(response.status, result);
@@ -302,18 +304,19 @@ def api_example_generator(
r"""
1. Generate an api key [below👇](#api-keys)
-2. Install [node-fetch](https://www.npmjs.com/package/node-fetch) & add the `GOOEY_API_KEY` to your environment variables.
-Never store the api key [in your code](https://12factor.net/config) and don't use direcly in the browser.
+2. Install [node-fetch](https://www.npmjs.com/package/node-fetch) & add the `GOOEY_API_KEY` to your environment variables.
+Never store the api key [in your code](https://12factor.net/config) and don't use it directly in the browser.
```bash
$ npm install node-fetch
$ export GOOEY_API_KEY=sk-xxxx
```
-3. Use this sample code to call the API.
+3. Use this sample code to call the API.
If you encounter any issues, write to us at support@gooey.ai and make sure to include the full code snippet and the error message.
```js
%s
```
"""
- % js_code
+ % js_code,
+ unsafe_allow_html=True,
)
diff --git a/daras_ai_v2/asr.py b/daras_ai_v2/asr.py
index 65ed40ddc..898ef61be 100644
--- a/daras_ai_v2/asr.py
+++ b/daras_ai_v2/asr.py
@@ -3,6 +3,7 @@
import subprocess
import tempfile
from enum import Enum
+from time import sleep
import langcodes
import requests
@@ -12,17 +13,16 @@
import gooey_ui as st
from daras_ai.image_input import upload_file_from_bytes, gs_url_to_uri
+from daras_ai_v2 import settings
+from daras_ai_v2.functional import map_parallel
from daras_ai_v2.gdrive_downloader import (
is_gdrive_url,
gdrive_download,
gdrive_metadata,
url_to_gdrive_file_id,
)
-from daras_ai_v2 import settings
-from daras_ai_v2.functional import map_parallel
from daras_ai_v2.gpu_server import call_celery_task
from daras_ai_v2.redis_cache import redis_cache_decorator
-from time import sleep
SHORT_FILE_CUTOFF = 5 * 1024 * 1024 # 1 MB
@@ -49,6 +49,7 @@
class AsrModels(Enum):
whisper_large_v2 = "Whisper Large v2 (openai)"
+ whisper_large_v3 = "Whisper Large v3 (openai)"
whisper_hindi_large_v2 = "Whisper Hindi Large v2 (Bhashini)"
whisper_telugu_large_v2 = "Whisper Telugu Large v2 (Bhashini)"
nemo_english = "Conformer English (ai4bharat.org)"
@@ -66,6 +67,7 @@ def supports_auto_detect(self) -> bool:
asr_model_ids = {
+ AsrModels.whisper_large_v3: "vaibhavs10/incredibly-fast-whisper:37dfc0d6a7eb43ff84e230f74a24dab84e6bb7756c9b457dbdcceca3de7a4a04",
AsrModels.whisper_large_v2: "openai/whisper-large-v2",
AsrModels.whisper_hindi_large_v2: "vasista22/whisper-hindi-large-v2",
AsrModels.whisper_telugu_large_v2: "vasista22/whisper-telugu-large-v2",
@@ -84,6 +86,7 @@ def supports_auto_detect(self) -> bool:
}
asr_supported_languages = {
+ AsrModels.whisper_large_v3: WHISPER_SUPPORTED,
AsrModels.whisper_large_v2: WHISPER_SUPPORTED,
AsrModels.usm: CHIRP_SUPPORTED,
AsrModels.deepgram: DEEPGRAM_SUPPORTED,
@@ -154,6 +157,34 @@ def google_translate_languages() -> dict[str, str]:
}
+@redis_cache_decorator
+def google_translate_input_languages() -> dict[str, str]:
+ """
+ Get list of supported languages for Google Translate.
+ :return: Dictionary of language codes and display names.
+ """
+ from google.cloud import translate
+
+ _, project = get_google_auth_session()
+ parent = f"projects/{project}/locations/global"
+ client = translate.TranslationServiceClient()
+ supported_languages = client.get_supported_languages(
+ parent=parent, display_language_code="en"
+ )
+ return {
+ lang.language_code: lang.display_name
+ for lang in supported_languages.languages
+ if lang.support_source
+ }
+
+
+def get_language_in_collection(langcode: str, languages):
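+    """Return `langcode` if any entry in `languages` shares its base language, else None."""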
+ for lang in languages:
+ if langcodes.get(lang).language == langcodes.get(langcode).language:
+ return langcode
+ return None
+
+
def asr_language_selector(
selected_model: AsrModels,
label="##### Spoken Language",
@@ -206,6 +237,19 @@ def run_google_translate(
"""
from google.cloud import translate_v2 as translate
+    # convert to BCP-47 format (Google handles consistent language codes, but sometimes
+    # gets confused by the mix of ISO-639-1 and ISO-639-3 codes that we use)
+    if source_language:
+        source_language = langcodes.Language.get(source_language).to_tag()
+        source_language = get_language_in_collection(
+            source_language, google_translate_input_languages().keys()
+        )  # falls back to None (autodetect) if the language isn't supported
+    requested_target = langcodes.Language.get(target_language).to_tag()
+    target_language: str | None = get_language_in_collection(
+        requested_target, google_translate_languages().keys()
+    )
+    if not target_language:
+        raise ValueError(f"Unsupported target language: {requested_target!r}")
+
# if the language supports transliteration, we should check if the script is Latin
if source_language and source_language not in TRANSLITERATION_SUPPORTED:
language_codes = [source_language] * len(texts)
@@ -358,6 +402,19 @@ def run_asr(
if selected_model == AsrModels.azure:
return azure_asr(audio_url, language)
+ elif selected_model == AsrModels.whisper_large_v3:
+ import replicate
+
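+        # Hosted on Replicate (see asr_model_ids); timestamps are only requested when
+        # the caller wants more than plain-text output.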
+ config = {
+ "audio": audio_url,
+ "return_timestamps": output_format != AsrOutputFormat.text,
+ }
+ if language:
+ config["language"] = language
+ data = replicate.run(
+ asr_model_ids[AsrModels.whisper_large_v3],
+ input=config,
+ )
elif selected_model == AsrModels.deepgram:
r = requests.post(
"https://api.deepgram.com/v1/listen",
diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py
index cf9e6ac1f..ae1d2dc5d 100644
--- a/daras_ai_v2/base.py
+++ b/daras_ai_v2/base.py
@@ -6,6 +6,7 @@
import urllib.parse
import uuid
from copy import deepcopy
+from enum import Enum
from itertools import pairwise
from random import Random
from time import sleep
@@ -71,7 +72,7 @@
DEFAULT_META_IMG = (
# Small
- "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/optimized%20hp%20gif.gif"
+ "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/b0f328d0-93f7-11ee-bd89-02420a0001cc/Main.jpg.png"
# "https://storage.googleapis.com/dara-c1b52.appspot.com/meta_tag_default_img.jpg"
# Big
# "https://storage.googleapis.com/dara-c1b52.appspot.com/meta_tag_gif.gif"
@@ -83,6 +84,13 @@
SUBMIT_AFTER_LOGIN_Q = "submitafterlogin"
+class RecipeRunState(Enum):
+ idle = 1
+ running = 2
+ completed = 3
+ failed = 4
+
+
class StateKeys:
created_at = "created_at"
updated_at = "updated_at"
@@ -97,12 +105,13 @@ class StateKeys:
class BasePage:
title: str
- image: str | None = None
workflow: Workflow
slug_versions: list[str]
sane_defaults: dict = {}
+    explore_image: str | None = None
+
RequestModel: typing.Type[BaseModel]
ResponseModel: typing.Type[BaseModel]
@@ -158,6 +167,15 @@ def api_url(self, example_id=None, run_id=None, uid=None) -> furl:
def endpoint(self) -> str:
return f"/v2/{self.slug_versions[0]}/"
+ def get_tab_url(self, tab: str) -> str:
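+        # App URL for the current run (example_id/run_id/uid preserved) with the given
+        # menu tab's path appended.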
+ example_id, run_id, uid = extract_query_params(gooey_get_query_params())
+ return self.app_url(
+ example_id=example_id,
+ run_id=run_id,
+ uid=uid,
+ tab_name=MenuTabs.paths[tab],
+ )
+
def render(self):
with sentry_sdk.configure_scope() as scope:
scope.set_extra("base_url", self.app_url())
@@ -261,10 +279,7 @@ def render(self):
with st.nav_tabs():
tab_names = self.get_tabs()
for name in tab_names:
- url = self.app_url(
- *extract_query_params(gooey_get_query_params()),
- tab_name=MenuTabs.paths[name],
- )
+ url = self.get_tab_url(name)
with st.nav_item(url, active=name == selected_tab):
st.html(name)
with st.nav_tab_content():
@@ -440,7 +455,11 @@ def _render_publish_modal(
if publish_button:
recipe_title = self.get_root_published_run().title or self.title
- if published_run_title.strip() == recipe_title.strip():
+ is_root_published_run = is_update_mode and published_run.is_root_example()
+ if (
+ not is_root_published_run
+ and published_run_title.strip() == recipe_title.strip()
+ ):
st.error("Title can't be the same as the recipe title")
return
if not is_update_mode:
@@ -569,7 +588,7 @@ def _render_confirm_delete_modal(
):
st.write(
"Are you sure you want to delete this published run? "
- f"({published_run.title})"
+ f"_({published_run.title})_"
)
st.caption("This will also delete all the associated versions.")
with st.div(className="d-flex"):
@@ -674,10 +693,14 @@ def _render_breadcrumbs(self, items: list[tuple[str, str | None]]):
)
def get_recipe_title(self) -> str:
- return self.get_or_create_root_published_run().title or self.title or ""
+ return (
+ self.get_or_create_root_published_run().title
+ or self.title
+ or self.workflow.label
+ )
- def get_recipe_image(self, state: dict) -> str:
- return self.image or ""
+ def get_explore_image(self, state: dict) -> str:
+ return self.explore_image or ""
def _user_disabled_check(self):
if self.run_user and self.run_user.is_disabled:
@@ -692,6 +715,7 @@ def get_tabs(self):
tabs = [MenuTabs.run, MenuTabs.examples, MenuTabs.run_as_api]
if self.request.user:
tabs.extend([MenuTabs.history])
+ if self.request.user and not self.request.user.is_anonymous:
tabs.extend([MenuTabs.saved])
return tabs
@@ -809,11 +833,11 @@ def _render(page_cls: typing.Type[BasePage]):
root_run = page.get_root_published_run()
state = root_run.saved_run.to_dict()
preview_image = meta_preview_url(
- page_cls().preview_image(state), page_cls().fallback_preivew_image()
+ page.get_explore_image(state), page.fallback_preivew_image()
)
with st.link(to=page.app_url()):
- st.markdown(
+ st.html(
# language=html
f"""
@@ -961,6 +985,7 @@ def get_sr_from_query_params(
@classmethod
def get_total_runs(cls) -> int:
+ # TODO: fix to also handle published run case
return SavedRun.objects.filter(workflow=cls.workflow).count()
@classmethod
@@ -1310,6 +1335,17 @@ def _render_input_col(self):
)
return submitted
+ def get_run_state(self) -> RecipeRunState:
+ if st.session_state.get(StateKeys.run_status):
+ return RecipeRunState.running
+ elif st.session_state.get(StateKeys.error_msg):
+ return RecipeRunState.failed
+ elif st.session_state.get(StateKeys.run_time):
+ return RecipeRunState.completed
+ else:
+ # when user is at a recipe root, and not running anything
+ return RecipeRunState.idle
+
def _render_output_col(self, submitted: bool):
assert inspect.isgeneratorfunction(self.run)
@@ -1323,27 +1359,62 @@ def _render_output_col(self, submitted: bool):
self._render_before_output()
- run_status = st.session_state.get(StateKeys.run_status)
- if run_status:
- st.caption("Your changes are saved in the above URL. Save it for later!")
- html_spinner(run_status)
- else:
- err_msg = st.session_state.get(StateKeys.error_msg)
- run_time = st.session_state.get(StateKeys.run_time, 0)
-
- # render errors
- if err_msg is not None:
- st.error(err_msg)
- # render run time
- elif run_time:
- st.success(f"Success! Run Time: `{run_time:.2f}` seconds.")
+ run_state = self.get_run_state()
+ match run_state:
+ case RecipeRunState.completed:
+ self._render_completed_output()
+ case RecipeRunState.failed:
+ self._render_failed_output()
+ case RecipeRunState.running:
+ self._render_running_output()
+ case RecipeRunState.idle:
+ pass
# render outputs
self.render_output()
- if not run_status:
+        if run_state != RecipeRunState.running:
self._render_after_output()
+ def _render_completed_output(self):
+ run_time = st.session_state.get(StateKeys.run_time, 0)
+ st.success(f"Success! Run Time: `{run_time:.2f}` seconds.")
+
+ def _render_failed_output(self):
+ err_msg = st.session_state.get(StateKeys.error_msg)
+ st.error(err_msg, unsafe_allow_html=True)
+
+ def _render_running_output(self):
+ run_status = st.session_state.get(StateKeys.run_status)
+ st.caption("Your changes are saved in the above URL. Save it for later!")
+ html_spinner(run_status)
+ self.render_extra_waiting_output()
+
+ def render_extra_waiting_output(self):
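+        # Only shown while a run is in progress: renders a countdown based on
+        # estimate_run_duration(); skipped entirely when no estimate is available.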
+ estimated_run_time = self.estimate_run_duration()
+ if not estimated_run_time:
+ return
+ if created_at := st.session_state.get("created_at"):
+ if isinstance(created_at, datetime.datetime):
+ start_time = created_at
+ else:
+ start_time = datetime.datetime.fromisoformat(created_at)
+ with st.countdown_timer(
+ end_time=start_time + datetime.timedelta(seconds=estimated_run_time),
+ delay_text="Sorry for the wait. Your run is taking longer than we expected.",
+ ):
+ if self.is_current_user_owner() and self.request.user.email:
+ st.write(
+ f"""We'll email **{self.request.user.email}** when your workflow is done."""
+ )
+ st.write(
+ f"""In the meantime, check out [๐ Examples]({self.get_tab_url(MenuTabs.examples)})
+ for inspiration."""
+ )
+
+ def estimate_run_duration(self) -> int | None:
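+        # Subclasses may override this to return an estimated run time in seconds;
+        # None (the default) hides the countdown in render_extra_waiting_output().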
+ pass
+
def on_submit(self):
example_id, run_id, uid = self.create_new_run()
if settings.CREDITS_TO_DEDUCT_PER_RUN and not self.check_credits():
@@ -1555,7 +1626,7 @@ def _render(pr: PublishedRun):
workflow=self.workflow,
visibility=PublishedRunVisibility.PUBLIC,
is_approved_example=True,
- )[:50]
+ ).exclude(published_run_id="")[:50]
grid_layout(3, example_runs, _render, column_props=dict(className="mb-0 pb-0"))
@@ -1650,7 +1721,7 @@ def _render_run_preview(self, saved_run: SavedRun):
if saved_run.run_status:
html_spinner(saved_run.run_status)
elif saved_run.error_msg:
- st.error(saved_run.error_msg)
+ st.error(saved_run.error_msg, unsafe_allow_html=True)
return self.render_example(saved_run.to_dict())
@@ -1777,7 +1848,8 @@ def run_as_api_tab(self):
)
st.markdown(
- f'๐ To learn more, take a look at our complete API'
+ f'๐ To learn more, take a look at our complete API',
+ unsafe_allow_html=True,
)
st.write("#### ๐ค Example Request")
diff --git a/daras_ai_v2/doc_search_settings_widgets.py b/daras_ai_v2/doc_search_settings_widgets.py
index 06be1c12d..84a4ae1a0 100644
--- a/daras_ai_v2/doc_search_settings_widgets.py
+++ b/daras_ai_v2/doc_search_settings_widgets.py
@@ -98,8 +98,8 @@ def doc_search_settings(
st.text_area(
"""
###### ๐โ๐จ Summarization Instructions
-Prompt to transform the conversation history into a vector search query.
-These instructions run before the workflow performs a search of the knowledge base documents and should summarize the conversation into a VectorDB query most relevant to the user's last message. In general, you shouldn't need to adjust these instructions.
+Prompt to transform the conversation history into a vector search query.
+These instructions run before the workflow performs a search of the knowledge base documents and should summarize the conversation into a VectorDB query most relevant to the user's last message. In general, you shouldn't need to adjust these instructions.
""",
key="query_instructions",
height=300,
@@ -107,7 +107,7 @@ def doc_search_settings(
if keyword_instructions_allowed:
st.text_area(
"""
-###### ๐ Keyword Extraction
+###### ๐ Keyword Extraction
""",
key="keyword_instructions",
height=300,
@@ -135,7 +135,7 @@ def doc_search_settings(
label="""
###### Max Snippet Words
-After a document search, relevant snippets of your documents are returned as results. This setting adjusts the maximum number of words in each snippet. A high snippet size allows the LLM to access more information from your document results, at the cost of being verbose and potentially exhausting input tokens (which can cause a failure of the copilot to respond). Default: 300
+After a document search, relevant snippets of your documents are returned as results. This setting adjusts the maximum number of words in each snippet. A high snippet size allows the LLM to access more information from your document results, at the cost of being verbose and potentially exhausting input tokens (which can cause a failure of the copilot to respond). Default: 300
""",
key="max_context_words",
min_value=10,
@@ -160,8 +160,9 @@ def doc_search_settings(
st.write(
"""
##### ๐ค Knowledge Base Speech Recognition
- If your knowledge base documents contain audio or video files, we'll transcribe and optionally translate them to English, given we've found most vectorDBs and LLMs perform best in English (even if their final answers are translated into another language).
- """
+ If your knowledge base documents contain audio or video files, we'll transcribe and optionally translate them to English, given we've found most vectorDBs and LLMs perform best in English (even if their final answers are translated into another language).
+ """,
+ unsafe_allow_html=True,
)
enum_selector(
diff --git a/daras_ai_v2/manage_api_keys_widget.py b/daras_ai_v2/manage_api_keys_widget.py
index 1c7dc71e8..9c6664def 100644
--- a/daras_ai_v2/manage_api_keys_widget.py
+++ b/daras_ai_v2/manage_api_keys_widget.py
@@ -19,12 +19,12 @@
def manage_api_keys(user: AppUser):
st.write(
"""
-Your secret API keys are listed below.
+Your secret API keys are listed below.
Please note that we do not display your secret API keys again after you generate them.
-Do not share your API key with others, or expose it in the browser or other client-side code.
+Do not share your API key with others, or expose it in the browser or other client-side code.
-In order to protect the security of your account,
+In order to protect the security of your account,
Gooey.AI may also automatically rotate any API key that we've found has leaked publicly.
"""
)
@@ -74,10 +74,10 @@ def _generate_new_key_doc() -> dict:
st.success(
f"""
-
API key generated
+##### API key generated
-Please save this secret key somewhere safe and accessible.
-For security reasons, **you won't be able to view it again** through your account.
+Please save this secret key somewhere safe and accessible.
+For security reasons, **you won't be able to view it again** through your account.
If you lose this secret key, you'll need to generate a new one.
"""
)
diff --git a/daras_ai_v2/meta_preview_url.py b/daras_ai_v2/meta_preview_url.py
index 3d02e0c62..369900c90 100644
--- a/daras_ai_v2/meta_preview_url.py
+++ b/daras_ai_v2/meta_preview_url.py
@@ -1,12 +1,17 @@
import mimetypes
import os
-from time import time
+import typing
-import requests
from furl import furl
-def meta_preview_url(file_url: str | None, fallback_img: str | None) -> str | None:
+def meta_preview_url(
+ file_url: str | None,
+ fallback_img: str | None,
+ size: typing.Literal[
+ "400x400", "1170x1560", "40x40", "72x72", "80x80", "96x96"
+ ] = "400x400",
+) -> str | None:
if not file_url:
return fallback_img
@@ -22,7 +27,6 @@ def meta_preview_url(file_url: str | None, fallback_img: str | None) -> str | No
file_url = fallback_img
elif content_type in ["image/png", "image/jpeg", "image/tiff", "image/webp"]:
# sizes: 400x400,1170x1560,40x40,72x72,80x80,96x96
- size = "400x400"
f.path.segments = dir_segments + ["thumbs", f"{base}_{size}{ext}"]
new_url = str(f)
diff --git a/daras_ai_v2/text_to_speech_settings_widgets.py b/daras_ai_v2/text_to_speech_settings_widgets.py
index 0526c441f..df9ec3ad1 100644
--- a/daras_ai_v2/text_to_speech_settings_widgets.py
+++ b/daras_ai_v2/text_to_speech_settings_widgets.py
@@ -79,6 +79,7 @@ class TextToSpeechProviders(Enum):
"eleven_multilingual_v2": "Multilingual V2 - High quality speech in 29 languages",
"eleven_turbo_v2": "English V2 - Very low latency text-to-speech",
"eleven_monolingual_v1": "English V1 - Low latency text-to-speech",
+ "eleven_multilingual_v1": "Multilingual V1",
}
ELEVEN_LABS_SUPPORTED_LANGS = [
@@ -289,8 +290,7 @@ def text_to_speech_settings(page):
):
st.caption(
"""
- Note: Please purchase Gooey.AI credits to use ElevenLabs voices
- here.
+ Note: Please purchase Gooey.AI credits to use ElevenLabs voices [here](/account).
Alternatively, you can use your own ElevenLabs API key by selecting the checkbox above.
"""
)
@@ -347,6 +347,26 @@ def text_to_speech_settings(page):
key="elevenlabs_similarity_boost",
)
+ if st.session_state.get("elevenlabs_model") == "eleven_multilingual_v2":
+ col1, col2 = st.columns(2)
+ with col1:
+ st.slider(
+ """
+ ###### Style Exaggeration
+ """,
+                    min_value=0.0,
+ max_value=1.0,
+ step=0.05,
+ key="elevenlabs_style",
+ value=0.0,
+ )
+ with col2:
+ st.checkbox(
+ "Speaker Boost",
+ key="elevenlabs_speaker_boost",
+ value=True,
+ )
+
with st.expander(
"Eleven Labs Supported Languages",
style={"fontSize": "0.9rem", "textDecoration": "underline"},
diff --git a/explore.py b/explore.py
index f2f0b64d4..f47f6fc23 100644
--- a/explore.py
+++ b/explore.py
@@ -1,8 +1,11 @@
+import typing
+
import gooey_ui as gui
from daras_ai.image_input import truncate_text_words
from daras_ai_v2.all_pages import all_home_pages_by_category
+from daras_ai_v2.base import BasePage
from daras_ai_v2.grid_layout_widget import grid_layout
-
+from daras_ai_v2.meta_preview_url import meta_preview_url
META_TITLE = "Explore AI workflows"
META_DESCRIPTION = "Find, fork and run your field's favorite AI recipes on Gooey.AI"
@@ -25,7 +28,7 @@ def _render_non_featured(page_cls):
# render_description(page, state, total_runs)
render_description(page, state)
- def _render_as_featured(page_cls):
+ def _render_as_featured(page_cls: typing.Type[BasePage]):
page = page_cls()
state = page.recipe_doc_sr(create=True).to_dict()
# total_runs = page.get_total_runs()
@@ -33,9 +36,9 @@ def _render_as_featured(page_cls):
# render_description(page, state, total_runs)
render_description(page, state)
- def render_image(page, state):
+ def render_image(page: BasePage, state: dict):
gui.image(
- page.get_recipe_image(state),
+ meta_preview_url(page.get_explore_image(state), page.preview_image(state)),
href=page.app_url(),
style={"border-radius": 5},
)
@@ -45,7 +48,7 @@ def render_description(page, state):
gui.markdown(f"#### {page.get_recipe_title()}")
preview = page.preview_description(state)
if preview:
- with gui.tag("p", style={"margin-bottom": "2px"}):
+ with gui.tag("p", style={"margin-bottom": "25px"}):
gui.html(
truncate_text_words(preview, 150),
)
diff --git a/gooey_ui/components/__init__.py b/gooey_ui/components/__init__.py
index b15eefdb7..207620c1f 100644
--- a/gooey_ui/components/__init__.py
+++ b/gooey_ui/components/__init__.py
@@ -1,4 +1,5 @@
import base64
+import html as html_lib
import math
import textwrap
import typing
@@ -82,9 +83,11 @@ def write(*objs: typing.Any, unsafe_allow_html=False, **props):
)
-def markdown(body: str, *, unsafe_allow_html=False, **props):
+def markdown(body: str | None, *, unsafe_allow_html=False, **props):
if body is None:
return _node("markdown", body="", **props)
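+    # HTML in the body is escaped by default; callers must opt in with
+    # unsafe_allow_html=True to render raw markup.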
+ if not unsafe_allow_html:
+ body = html_lib.escape(body)
props["className"] = (
props.get("className", "") + " gui-html-container gui-md-container"
)
@@ -524,8 +527,13 @@ def json(value: typing.Any, expanded: bool = False, depth: int = 1):
).mount()
-def data_table(file_url: str):
- return _node("data-table", fileUrl=file_url)
+def data_table(file_url_or_cells: str | list):
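+    # Accepts either a file URL (rendered by the frontend "data-table" node) or a
+    # pre-built list of rows/cells (rendered by "data-table-raw").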
+ if isinstance(file_url_or_cells, str):
+ file_url = file_url_or_cells
+ return _node("data-table", fileUrl=file_url)
+ else:
+ cells = file_url_or_cells
+ return _node("data-table-raw", cells=cells)
def table(df: "pd.DataFrame"):
diff --git a/pages/UsageDashboard.py b/pages/UsageDashboard.py
index 5d023e887..b4faf6a5d 100644
--- a/pages/UsageDashboard.py
+++ b/pages/UsageDashboard.py
@@ -171,14 +171,28 @@ def main():
"""
)
- total_runs = (
- counts_df.sum(numeric_only=True)
- .rename("Total Runs")
- .to_frame()
- .reset_index(names=["label"])
- .sort_values("Total Runs", ascending=False)
- .reset_index(drop=True)
- )
+ if st.checkbox("Show Uniques"):
+ calc = "Unique Users"
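+        # astype(bool).sum() counts users with at least one run per column,
+        # instead of summing run counts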
+ total_runs = (
+ counts_df.drop(columns=["display_name", "email"])
+ .astype(bool)
+ .sum(numeric_only=True)
+ .rename(calc)
+ .to_frame()
+ .reset_index(names=["label"])
+ .sort_values(calc, ascending=False)
+ .reset_index(drop=True)
+ )
+ else:
+ calc = "Total Runs"
+ total_runs = (
+ counts_df.sum(numeric_only=True)
+ .rename(calc)
+ .to_frame()
+ .reset_index(names=["label"])
+ .sort_values(calc, ascending=False)
+ .reset_index(drop=True)
+ )
col1, col2 = st.columns(2)
@@ -189,7 +203,7 @@ def main():
st.plotly_chart(
px.pie(
total_runs.iloc[2:],
- values="Total Runs",
+ values=calc,
names="label",
),
use_container_width=True,
diff --git a/recipes/BulkEval.py b/recipes/BulkEval.py
index 83c3172ad..0672f662b 100644
--- a/recipes/BulkEval.py
+++ b/recipes/BulkEval.py
@@ -1,8 +1,8 @@
import itertools
import typing
-import uuid
from itertools import zip_longest
+import typing_extensions
from pydantic import BaseModel, Field
import gooey_ui as st
@@ -19,7 +19,7 @@
)
from daras_ai_v2.language_model_settings_widgets import language_model_settings
from daras_ai_v2.prompt_vars import render_prompt_vars
-from recipes.BulkRunner import read_df_any
+from recipes.BulkRunner import read_df_any, list_view_editor, del_button
from recipes.DocSearch import render_documents
NROWS_CACHE_KEY = "__nrows"
@@ -58,8 +58,8 @@ class EvalPrompt(typing.TypedDict):
prompt: str
-class AggFunction(typing.TypedDict):
- column: str
+class AggFunction(typing_extensions.TypedDict):
+ column: typing_extensions.NotRequired[str]
function: typing.Literal[tuple(AggFunctionsList)]
@@ -73,63 +73,86 @@ class AggFunctionResult(typing.TypedDict):
def _render_results(results: list[AggFunctionResult]):
import plotly.graph_objects as go
from plotly.colors import sample_colorscale
- from plotly.subplots import make_subplots
for k, g in itertools.groupby(results, key=lambda d: d["function"]):
- st.write("---\n##### " + k.capitalize())
+ st.write("---\n###### **Aggregate**: " + k.capitalize())
g = list(g)
+
columns = [d["column"] for d in g]
values = [round(d["value"], 2) for d in g]
- norm_values = [(v - min(values)) / (max(values) - min(values)) for v in values]
+
+ norm_values = [
+ (v - min(values)) / ((max(values) - min(values)) or 1) for v in values
+ ]
colors = sample_colorscale("RdYlGn", norm_values, colortype="tuple")
colors = [f"rgba{(r * 255, g * 255, b * 255, 0.5)}" for r, g, b in colors]
- fig = make_subplots(
- rows=2,
- shared_xaxes=True,
- specs=[[{"type": "table"}], [{"type": "bar"}]],
- vertical_spacing=0.03,
- row_heights=[0.3, 0.7],
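+        # Header row followed by one row per metric; each value cell is colour-coded
+        # by its normalized score.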
+ st.data_table(
+ [
+ ["Metric", k.capitalize(), "Count"],
+ ]
+ + [
+ [
+ columns[i],
+ dict(
+ kind="number",
+ readonly=True,
+ displayData=str(values[i]),
+ data=values[i],
+ themeOverride=dict(bgCell=colors[i]),
+ ),
+ g[i].get("count", 1),
+ ]
+ for i in range(len(g))
+ ]
)
- counts = [d.get("count", 1) for d in g]
- fig.add_trace(
- go.Table(
- header=dict(values=["Metric", k.capitalize(), "Count"]),
- cells=dict(
- values=[columns, values, counts],
- fill_color=["aliceblue", colors, "aliceblue"],
+
+ fig = go.Figure(
+ data=[
+ go.Bar(
+ name=k,
+ x=columns,
+ y=values,
+ marker=dict(color=colors),
+ text=values,
+ texttemplate="%{text}",
+ insidetextanchor="middle",
+ insidetextfont=dict(size=24),
),
+ ],
+ layout=dict(
+ margin=dict(l=0, r=0, t=24, b=0),
),
- row=1,
- col=1,
- )
- fig.add_trace(
- go.Bar(
- name=k,
- x=columns,
- y=values,
- marker=dict(color=colors),
- text=values,
- texttemplate="%{text}",
- insidetextanchor="middle",
- insidetextfont=dict(size=24),
- ),
- row=2,
- col=1,
- )
- fig.update_layout(
- margin=dict(l=0, r=0, t=24, b=0),
- # autosize=True,
)
st.plotly_chart(fig)
class BulkEvalPage(BasePage):
- title = "Bulk Evaluator"
+ title = "Evaluator"
workflow = Workflow.BULK_EVAL
slug_versions = ["bulk-eval", "eval"]
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aad314f0-9a97-11ee-8318-02420a0001c7/W.I.9.png.png"
+
+ def preview_image(self, state: dict) -> str | None:
+ return "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/9631fb74-9a97-11ee-971f-02420a0001c4/evaluator.png.png"
+
+ def render_description(self):
+ st.write(
+ """
+Summarize and score every row of any CSV, Google Sheet or Excel file with GPT-4 (or any LLM you choose). Then average every score in any column to generate automated evaluations.
+ """
+ )
+
+ def related_workflows(self) -> list:
+ from recipes.BulkRunner import BulkRunnerPage
+ from recipes.VideoBots import VideoBotsPage
+ from recipes.asr import AsrPage
+ from recipes.DocSearch import DocSearchPage
+
+ return [BulkRunnerPage, VideoBotsPage, AsrPage, DocSearchPage]
+
class RequestModel(LLMSettingsMixin, BaseModel):
documents: list[str] = Field(
title="Input Data Spreadsheet",
@@ -140,7 +163,7 @@ class RequestModel(LLMSettingsMixin, BaseModel):
""",
)
- eval_prompts: list[EvalPrompt] = Field(
+ eval_prompts: list[EvalPrompt] | None = Field(
title="Evaluation Prompts",
description="""
Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values.
@@ -151,7 +174,7 @@ class RequestModel(LLMSettingsMixin, BaseModel):
agg_functions: list[AggFunction] | None = Field(
title="Aggregations",
description="""
-Aggregate using one or more operations over the specified columns. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats).
+Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats).
""",
)
@@ -179,10 +202,8 @@ def render_form_v2(self):
st.write("---")
def render_inputs(key: str, del_key: str, d: EvalPrompt):
- col1, col2 = st.columns([1, 8], responsive=False)
+ col1, col2 = st.columns([8, 1], responsive=False)
with col1:
- st.button("โ๏ธ", key=del_key, type="tertiary")
- with col2:
d["name"] = st.text_input(
label="",
label_visibility="collapsed",
@@ -198,6 +219,8 @@ def render_inputs(key: str, del_key: str, d: EvalPrompt):
value=d.get("prompt"),
height=500,
).strip()
+ with col2:
+ del_button(del_key)
st.write("##### " + field_title_desc(self.RequestModel, "eval_prompts"))
list_view_editor(
@@ -206,33 +229,34 @@ def render_inputs(key: str, del_key: str, d: EvalPrompt):
render_inputs=render_inputs,
)
- def render_inputs(key: str, del_key: str, d: AggFunction):
- col1, col2, col3 = st.columns([1, 5, 3], responsive=False)
+ def render_agg_inputs(key: str, del_key: str, d: AggFunction):
+ col1, col3 = st.columns([8, 1], responsive=False)
with col1:
- st.button("โ๏ธ", key=del_key, type="tertiary")
- with col2:
- d["column"] = st.text_input(
- "",
- label_visibility="collapsed",
- placeholder="Column Name",
- key=key + ":column",
- value=d.get("column"),
- ).strip()
+ # d["column"] = st.text_input(
+ # "",
+ # label_visibility="collapsed",
+ # placeholder="Column Name",
+ # key=key + ":column",
+ # value=d.get("column"),
+ # ).strip()
+ # with col2:
+ with st.div(className="pt-1"):
+ d["function"] = st.selectbox(
+ "",
+ label_visibility="collapsed",
+ key=key + ":func",
+ options=AggFunctionsList,
+ default_value=d.get("function"),
+ )
with col3:
- d["function"] = st.selectbox(
- "",
- label_visibility="collapsed",
- key=key + ":func",
- options=AggFunctionsList,
- default_value=d.get("function"),
- )
+ del_button(del_key)
st.html(" ")
st.write("##### " + field_title_desc(self.RequestModel, "agg_functions"))
list_view_editor(
add_btn_label="➕ Add an Aggregation",
key="agg_functions",
- render_inputs=render_inputs,
+ render_inputs=render_agg_inputs,
)
def render_settings(self):
@@ -305,7 +329,7 @@ def run_v2(
out_df = pd.DataFrame.from_records(out_recs)
f = upload_file_from_bytes(
- filename=f"bulk-runner-{doc_ix}-{df_ix}.csv",
+ filename=f"evaluator-{doc_ix}-{df_ix}.csv",
data=out_df.to_csv(index=False).encode(),
content_type="text/csv",
)
@@ -313,17 +337,22 @@ def run_v2(
if out_df is None:
continue
- for agg_ix, agg in enumerate(request.agg_functions):
- col_values = out_df[agg["column"]].dropna()
- agg_value = col_values.agg(agg["function"])
- response.aggregations[doc_ix].append(
- {
- "column": agg["column"],
- "function": agg["function"],
- "count": len(col_values),
- "value": agg_value,
- }
- )
+ for agg in request.agg_functions:
+ if agg.get("column"):
+ cols = [agg["column"]]
+ else:
+ cols = out_df.select_dtypes(include=["float", "int"]).columns
+ for col in cols:
+ col_values = out_df[col].dropna()
+ agg_value = col_values.agg(agg["function"])
+ response.aggregations[doc_ix].append(
+ {
+ "column": col,
+ "function": agg["function"],
+ "count": len(col_values),
+ "value": agg_value,
+ }
+ )
def fields_to_save(self) -> [str]:
return super().fields_to_save() + [NROWS_CACHE_KEY]
@@ -344,30 +373,3 @@ def get_raw_price(self, state: dict) -> float:
def get_nrows(files: list[str]) -> int:
dfs = map_parallel(read_df_any, files)
return sum((len(df) for df in dfs), 0)
-
-
-def list_view_editor(
- *,
- add_btn_label: str,
- key: str,
- render_labels: typing.Callable = None,
- render_inputs: typing.Callable[[str, str, dict], None],
-):
- old_lst = st.session_state.setdefault(key, [])
- add_key = f"--{key}:add"
- if st.session_state.get(add_key):
- old_lst.append({})
- label_placeholder = st.div()
- new_lst = []
- for d in old_lst:
- entry_key = d.setdefault("__key__", f"--{key}:{uuid.uuid1()}")
- del_key = entry_key + ":del"
- if st.session_state.pop(del_key, None):
- continue
- render_inputs(entry_key, del_key, d)
- new_lst.append(d)
- if new_lst and render_labels:
- with label_placeholder:
- render_labels()
- st.session_state[key] = new_lst
- st.button(add_btn_label, key=add_key)
diff --git a/recipes/BulkRunner.py b/recipes/BulkRunner.py
index 449efd058..9cdbd82d4 100644
--- a/recipes/BulkRunner.py
+++ b/recipes/BulkRunner.py
@@ -1,13 +1,13 @@
import datetime
import io
import typing
+import uuid
-from fastapi import HTTPException
from furl import furl
from pydantic import BaseModel, Field
import gooey_ui as st
-from bots.models import Workflow
+from bots.models import Workflow, SavedRun
from daras_ai.image_input import upload_file_from_bytes
from daras_ai_v2.base import BasePage
from daras_ai_v2.doc_search_settings_widgets import document_uploader
@@ -20,10 +20,12 @@
)
from recipes.DocSearch import render_documents
+DEFAULT_BULK_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/d80fd4d8-93fa-11ee-bc13-02420a0001cc/Bulk%20Runner.jpg.png"
+
class BulkRunnerPage(BasePage):
title = "Bulk Runner"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/87f35df4-88d7-11ee-aac9-02420a00016b/Bulk%20Runner.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/87f35df4-88d7-11ee-aac9-02420a00016b/Bulk%20Runner.png.png"
workflow = Workflow.BULK_RUNNER
slug_versions = ["bulk-runner", "bulk"]
price = 1
@@ -32,16 +34,16 @@ class RequestModel(BaseModel):
documents: list[str] = Field(
title="Input Data Spreadsheet",
description="""
-Upload or link to a CSV or google sheet that contains your sample input data.
-For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs.
+Upload or link to a CSV or google sheet that contains your sample input data.
+For example, for Copilot, this would be sample questions; for Art QR Code, it would be pairs of image descriptions and URLs.
Remember to includes header names in your CSV too.
""",
)
run_urls: list[str] = Field(
- title="Gooey Workflow URL(s)",
+ title="Gooey Workflows",
description="""
-Paste in one or more Gooey.AI workflow links (on separate lines).
-You can add multiple URLs runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
+Provide one or more Gooey.AI workflow runs.
+You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
""",
)
@@ -58,26 +60,37 @@ class RequestModel(BaseModel):
""",
)
+ eval_urls: list[str] | None = Field(
+ title="Evaluation Workflows",
+ description="""
+_(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
+ """,
+ )
+
class ResponseModel(BaseModel):
output_documents: list[str]
+ eval_runs: list[str] | None = Field(
+ title="Evaluation Run URLs",
+ description="""
+List of URLs to the evaluation runs that you requested.
+ """,
+ )
+
+ def preview_image(self, state: dict) -> str | None:
+ return DEFAULT_BULK_META_IMG
+
def render_form_v2(self):
- from daras_ai_v2.all_pages import page_slug_map, normalize_slug
-
- run_urls = st.session_state.get("run_urls", "")
- st.session_state.setdefault("__run_urls", "\n".join(run_urls))
- run_urls = (
- st.text_area(
- f"##### {field_title_desc(self.RequestModel, 'run_urls')}",
- key="__run_urls",
- )
- .strip()
- .splitlines()
+ st.write(f"##### {field_title_desc(self.RequestModel, 'run_urls')}")
+ run_urls = list_view_editor(
+        add_btn_label="➕ Add a Workflow",
+ key="run_urls",
+ render_inputs=render_run_url_inputs,
+ flatten_dict_key="url",
)
- st.session_state["run_urls"] = run_urls
files = document_uploader(
- f"##### {field_title_desc(self.RequestModel, 'documents')}",
+ f"---\n##### {field_title_desc(self.RequestModel, 'documents')}",
accept=(".csv", ".xlsx", ".xls", ".json", ".tsv", ".xml"),
)
@@ -86,19 +99,9 @@ def render_form_v2(self):
output_fields = {}
for url in run_urls:
- f = furl(url)
- slug = f.path.segments[0]
try:
- page_cls = page_slug_map[normalize_slug(slug)]
- except KeyError as e:
- st.error(repr(e))
- continue
-
- example_id, run_id, uid = extract_query_params(f.query.params)
- try:
- sr = page_cls.get_sr_from_query_params(example_id, run_id, uid)
- except HTTPException as e:
- st.error(repr(e))
+ page_cls, sr = url_to_sr(url)
+        except Exception:
continue
schema = page_cls.RequestModel.schema(ref_template="{model}")
@@ -139,8 +142,7 @@ def render_form_v2(self):
st.write(
"""
-##### Input Data Preview
-Here's what you uploaded:
+###### **Preview**: Here's what you uploaded
"""
)
for file in files:
@@ -149,14 +151,15 @@ def render_form_v2(self):
if not (required_input_fields or optional_input_fields):
return
- st.write(
- """
----
+ with st.div(className="pt-3"):
+ st.write(
+ """
+###### **Columns**
Please select which CSV column corresponds to your workflow's input fields.
-For the outputs, please fill in what the column name should be that corresponds to each output too.
+For the outputs, select the fields that should be included in the output CSV.
To understand what each field represents, check out our [API docs](https://api.gooey.ai/docs).
- """
- )
+ """,
+ )
visible_col1, visible_col2 = st.columns(2)
with st.expander("๐คฒ Show All Columns"):
@@ -226,14 +229,34 @@ def render_form_v2(self):
if col:
output_columns_new[field] = title
+ st.write("---")
+ st.write(f"##### {field_title_desc(self.RequestModel, 'eval_urls')}")
+ list_view_editor(
+            add_btn_label="➕ Add an Eval",
+ key="eval_urls",
+ render_inputs=render_eval_url_inputs,
+ flatten_dict_key="url",
+ )
+
def render_example(self, state: dict):
render_documents(state)
def render_output(self):
- files = st.session_state.get("output_documents", [])
- for file in files:
- st.write(file)
- st.data_table(file)
+ eval_runs = st.session_state.get("eval_runs")
+
+ if eval_runs:
+ _backup = st.session_state
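+            # Temporarily swap in each eval run's saved state so its page class can
+            # render the results, then restore the original session state.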
+ for url in eval_runs:
+ page_cls, sr = url_to_sr(url)
+ st.set_session_state(sr.state)
+ page_cls().render_output()
+ st.write("---")
+ st.set_session_state(_backup)
+ else:
+ files = st.session_state.get("output_documents", [])
+ for file in files:
+ st.write(file)
+ st.data_table(file)
def run_v2(
self,
@@ -329,36 +352,236 @@ def run_v2(
)
response.output_documents[doc_ix] = f
+ if not request.eval_urls:
+ return
+
+ response.eval_runs = []
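+        # Run each selected Evaluator workflow over the generated output documents,
+        # wait for it to finish, and record the resulting run URL.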
+ for url in request.eval_urls:
+ page_cls, sr = url_to_sr(url)
+ yield f"Running {page_cls().get_recipe_title()}..."
+ request_body = page_cls.RequestModel(
+ documents=response.output_documents
+ ).dict(exclude_unset=True)
+ result, sr = sr.submit_api_call(
+ current_user=self.request.user, request_body=request_body
+ )
+ result.get(disable_sync_subtasks=False)
+ sr.refresh_from_db()
+ response.eval_runs.append(sr.get_app_url())
+
def preview_description(self, state: dict) -> str:
return """
-Which AI model actually works best for your needs?
-Upload your own data and evaluate any Gooey.AI workflow, LLM or AI model against any other.
-Great for large data sets, AI model evaluation, task automation, parallel processing and automated testing.
-To get started, paste in a Gooey.AI workflow, upload a CSV of your test data (with header names!), check the mapping of headers to workflow inputs and tap Submit.
-More tips in the Details below.
+Which AI model actually works best for your needs?
+Upload your own data and evaluate any Gooey.AI workflow, LLM or AI model against any other.
+Great for large data sets, AI model evaluation, task automation, parallel processing and automated testing.
+To get started, paste in a Gooey.AI workflow, upload a CSV of your test data (with header names!), check the mapping of headers to workflow inputs and tap Submit.
+More tips in the Details below.
"""
def render_description(self):
st.write(
"""
-Building complex AI workflows like copilot) and then evaluating each iteration is complex.
-Workflows are affected by the particular LLM used (GPT4 vs PalM2), their vector DB knowledge sets (e.g. your google docs), how synthetic data creation happened (e.g. how you transformed your video transcript or PDF into structured data), which translation or speech engine you used and your LLM prompts. Every change can affect the quality of your outputs.
+Building complex AI workflows (like a copilot) and then evaluating each iteration is complex.
+Workflows are affected by the particular LLM used (GPT-4 vs PaLM 2), their vector DB knowledge sets (e.g. your Google Docs), how synthetic data creation happened (e.g. how you transformed your video transcript or PDF into structured data), which translation or speech engine you used and your LLM prompts. Every change can affect the quality of your outputs.
1. This bulk tool enables you to do two incredible things:
-2. Upload your own set of inputs (e.g. typical questions to your bot) to any gooey workflow (e.g. /copilot) and run them in bulk to generate outputs or answers.
-3. Compare the results of competing workflows to determine which one generates better outputs.
+2. Upload your own set of inputs (e.g. typical questions to your bot) to any gooey workflow (e.g. /copilot) and run them in bulk to generate outputs or answers.
+3. Compare the results of competing workflows to determine which one generates better outputs.
To get started:
1. Enter the Gooey.AI Workflow URLs that you'd like to run in bulk
2. Enter a csv of sample inputs to run in bulk
-3. Ensure that the mapping between your inputs and API parameters of the Gooey.AI workflow are correctly mapped.
-4. Tap Submit.
+3. Ensure that the mapping between your inputs and API parameters of the Gooey.AI workflow are correctly mapped.
+4. Tap Submit.
5. Wait for results
-6. Make a change to your Gooey Workflow, copy its URL and repeat Step 1 (or just add the link to see the results of both workflows together)
+6. Make a change to your Gooey Workflow, copy its URL and repeat Step 1 (or just add the link to see the results of both workflows together)
"""
)
+def render_run_url_inputs(key: str, del_key: str, d: dict):
+ from daras_ai_v2.all_pages import all_home_pages
+
+ _prefill_workflow(d, key)
+
+ col1, col2, col3 = st.columns([10, 1, 1], responsive=False)
+ if not d.get("workflow") and d.get("url"):
+ with col1:
+ url = st.text_input(
+ "",
+ key=key + ":url",
+ value=d.get("url"),
+ placeholder="https://gooey.ai/.../?run_id=...",
+ )
+ else:
+ with col1:
+ scol1, scol2, scol3 = st.columns([5, 6, 1], responsive=False)
+ with scol1:
+ with st.div(className="pt-1"):
+ options = {
+ page_cls.workflow: page_cls().get_recipe_title()
+ for page_cls in all_home_pages
+ }
+ last_workflow_key = "__last_run_url_workflow"
+ workflow = st.selectbox(
+ "",
+ key=key + ":workflow",
+ default_value=(
+ d.get("workflow") or st.session_state.get(last_workflow_key)
+ ),
+ options=options,
+ format_func=lambda x: options[x],
+ )
+ d["workflow"] = workflow
+ # use this to set default for next time
+ st.session_state[last_workflow_key] = workflow
+ with scol2:
+ options = {
+ SavedRun.objects.get(
+ workflow=d["workflow"],
+ example_id__isnull=True,
+ run_id__isnull=True,
+ uid__isnull=True,
+ ).get_app_url(): "Default"
+ } | {
+ sr.get_app_url(): sr.page_title
+ for sr in SavedRun.objects.filter(
+ workflow=d["workflow"],
+ example_id__isnull=False,
+ run_id__isnull=True,
+ uid__isnull=True,
+ hidden=False,
+ ).exclude(page_title="")
+ }
+ with st.div(className="pt-1"):
+ url = st.selectbox(
+ "",
+ key=key + ":url",
+ options=options,
+ default_value=d.get("url"),
+ format_func=lambda x: options[x],
+ )
+ with scol3:
+ edit_button(key + ":editmode")
+ with col2:
+ url_button(url)
+ with col3:
+ del_button(del_key)
+
+ try:
+ url_to_sr(url)
+ except Exception as e:
+ st.error(repr(e))
+ d["url"] = url
+
+
+def render_eval_url_inputs(key: str, del_key: str, d: dict):
+ _prefill_workflow(d, key)
+
+ col1, col2, col3 = st.columns([10, 1, 1], responsive=False)
+ if not d.get("workflow") and d.get("url"):
+ with col1:
+ url = st.text_input(
+ "",
+ key=key + ":url",
+ value=d.get("url"),
+ placeholder="https://gooey.ai/.../?run_id=...",
+ )
+ else:
+ d["workflow"] = Workflow.BULK_EVAL
+ with col1:
+ scol1, scol2 = st.columns([11, 1], responsive=False)
+ with scol1:
+ from recipes.BulkEval import BulkEvalPage
+
+ options = {
+ BulkEvalPage().recipe_doc_sr().get_app_url(): "Default",
+ } | {
+ sr.get_app_url(): sr.page_title
+ for sr in SavedRun.objects.filter(
+ workflow=Workflow.BULK_EVAL,
+ example_id__isnull=False,
+ run_id__isnull=True,
+ uid__isnull=True,
+ hidden=False,
+ ).exclude(page_title="")
+ }
+ with st.div(className="pt-1"):
+ url = st.selectbox(
+ "",
+ key=key + ":url",
+ options=options,
+ default_value=d.get("url"),
+ format_func=lambda x: options[x],
+ )
+ with scol2:
+ edit_button(key + ":editmode")
+ with col2:
+ url_button(url)
+ with col3:
+ del_button(del_key)
+
+ try:
+ url_to_sr(url)
+ except Exception as e:
+ st.error(repr(e))
+ d["url"] = url
+
+
+def url_button(url):
+ st.html(
+ f"""
+
+
+
+ """
+ )
+
+
+def edit_button(key: str):
+ st.button(
+ '',
+ key=key,
+ type="tertiary",
+ )
+
+
+def del_button(key: str):
+ st.button(
+ '',
+ key=key,
+ type="tertiary",
+ )
+
+
+def _prefill_workflow(d: dict, key: str):
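+    # If the entry has only a raw URL (and isn't in edit mode), try resolving it to a
+    # SavedRun so the workflow/run dropdowns can be pre-selected; otherwise leave it alone.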
+ if st.session_state.get(key + ":editmode"):
+ d.pop("workflow", None)
+ elif not d.get("workflow") and d.get("url"):
+ try:
+ page_cls, sr = url_to_sr(d.get("url"))
+        except Exception:
+ return
+ if (sr.example_id and sr.page_title and not sr.hidden) or not (
+ sr.example_id or sr.run_id or sr.uid
+ ):
+ d["workflow"] = sr.workflow
+ d["url"] = sr.get_app_url()
+
+
+def url_to_sr(url: str) -> tuple[typing.Type[BasePage], SavedRun]:
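+    """Resolve a Gooey.AI run/example URL into its page class and SavedRun."""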
+ from daras_ai_v2.all_pages import page_slug_map, normalize_slug
+
+ f = furl(url)
+ slug = f.path.segments[0]
+ page_cls = page_slug_map[normalize_slug(slug)]
+ example_id, run_id, uid = extract_query_params(f.query.params)
+ sr = page_cls.get_sr_from_query_params(example_id, run_id, uid)
+ return page_cls, sr
+
+
def build_requests_for_df(df, request, df_ix, arr_len):
from daras_ai_v2.all_pages import page_slug_map, normalize_slug
@@ -478,3 +701,48 @@ def read_df_any(f_url: str) -> "pd.DataFrame":
raise ValueError(f"Unsupported file type: {f_url}")
return df.dropna(how="all", axis=1).dropna(how="all", axis=0).fillna("")
+
+
+def list_view_editor(
+ *,
+ add_btn_label: str,
+ key: str,
+ render_labels: typing.Callable = None,
+ render_inputs: typing.Callable[[str, str, dict], None],
+ flatten_dict_key: str = None,
+):
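+    # When `flatten_dict_key` is given, the value stored under `key` is a flat list
+    # (e.g. URLs); each item is wrapped into {flatten_dict_key: value} for rendering
+    # under a shadow key and flattened back into `key` afterwards.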
+ if flatten_dict_key:
+ list_key = f"--list-view:{key}"
+ st.session_state.setdefault(
+ list_key,
+ [{flatten_dict_key: val} for val in st.session_state.get(key, [])],
+ )
+ new_lst = list_view_editor(
+ add_btn_label=add_btn_label,
+ key=list_key,
+ render_labels=render_labels,
+ render_inputs=render_inputs,
+ )
+ ret = [d[flatten_dict_key] for d in new_lst]
+ st.session_state[key] = ret
+ return ret
+
+ old_lst = st.session_state.setdefault(key, [])
+ add_key = f"--{key}:add"
+ if st.session_state.get(add_key):
+ old_lst.append({})
+ label_placeholder = st.div()
+ new_lst = []
+ for d in old_lst:
+ entry_key = d.setdefault("__key__", f"--{key}:{uuid.uuid1()}")
+ del_key = entry_key + ":del"
+ if st.session_state.pop(del_key, None):
+ continue
+ render_inputs(entry_key, del_key, d)
+ new_lst.append(d)
+ if new_lst and render_labels:
+ with label_placeholder:
+ render_labels()
+ st.session_state[key] = new_lst
+ st.button(add_btn_label, key=add_key)
+ return new_lst
diff --git a/recipes/ChyronPlant.py b/recipes/ChyronPlant.py
index 4c371abf3..116a8ad6a 100644
--- a/recipes/ChyronPlant.py
+++ b/recipes/ChyronPlant.py
@@ -10,7 +10,7 @@
class ChyronPlantPage(BasePage):
title = "Chyron Plant Bot"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png"
workflow = Workflow.CHYRON_PLANT
slug_versions = ["ChyronPlant"]
diff --git a/recipes/CompareLLM.py b/recipes/CompareLLM.py
index 867fc31ef..583317ddc 100644
--- a/recipes/CompareLLM.py
+++ b/recipes/CompareLLM.py
@@ -17,12 +17,12 @@
from daras_ai_v2.loom_video_widget import youtube_video
from daras_ai_v2.prompt_vars import prompt_vars_widget, render_prompt_vars
-DEFAULT_COMPARE_LM_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/assets/compare%20llm%20under%201%20mg%20gif.gif"
+DEFAULT_COMPARE_LM_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/5e4f4c58-93fc-11ee-a39e-02420a0001ce/LLMs.jpg.png"
class CompareLLMPage(BasePage):
title = "Large Language Models: GPT-3"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/ae42015e-88d7-11ee-aac9-02420a00016b/Compare%20LLMs.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/ae42015e-88d7-11ee-aac9-02420a00016b/Compare%20LLMs.png.png"
workflow = Workflow.COMPARE_LLM
slug_versions = ["CompareLLM", "llm", "compare-large-language-models"]
diff --git a/recipes/CompareText2Img.py b/recipes/CompareText2Img.py
index 2b6757020..80fe16392 100644
--- a/recipes/CompareText2Img.py
+++ b/recipes/CompareText2Img.py
@@ -25,10 +25,12 @@
Schedulers,
)
+DEFAULT_COMPARE_TEXT2IMG_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/ae7b2940-93fc-11ee-8edc-02420a0001cc/Compare%20image%20generators.jpg.png"
+
class CompareText2ImgPage(BasePage):
title = "Compare AI Image Generators"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/d127484e-88d9-11ee-b549-02420a000167/Compare%20AI%20Image%20generators.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/d127484e-88d9-11ee-b549-02420a000167/Compare%20AI%20Image%20generators.png.png"
workflow = Workflow.COMPARE_TEXT2IMG
slug_versions = [
"CompareText2Img",
@@ -74,6 +76,9 @@ class ResponseModel(BaseModel):
typing.Literal[tuple(e.name for e in Text2ImgModels)], list[str]
]
+ def preview_image(self, state: dict) -> str | None:
+ return DEFAULT_COMPARE_TEXT2IMG_META_IMG
+
def related_workflows(self) -> list:
from recipes.FaceInpainting import FaceInpaintingPage
from recipes.ObjectInpainting import ObjectInpaintingPage
diff --git a/recipes/CompareUpscaler.py b/recipes/CompareUpscaler.py
index 428d80322..84973380f 100644
--- a/recipes/CompareUpscaler.py
+++ b/recipes/CompareUpscaler.py
@@ -9,12 +9,12 @@
from daras_ai_v2.face_restoration import UpscalerModels, run_upscaler_model
from daras_ai_v2.stable_diffusion import SD_IMG_MAX_SIZE
-DEFAULT_COMPARE_UPSCALER_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/assets/COMPARE%20IMAGE%20UPSCALERS.jpg"
+DEFAULT_COMPARE_UPSCALER_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/2e8ee512-93fe-11ee-a083-02420a0001c8/Image%20upscaler.jpg.png"
class CompareUpscalerPage(BasePage):
title = "Compare AI Image Upscalers"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/64393e0c-88db-11ee-b428-02420a000168/AI%20Image%20Upscaler.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/64393e0c-88db-11ee-b428-02420a000168/AI%20Image%20Upscaler.png.png"
workflow = Workflow.COMPARE_UPSCALER
slug_versions = ["compare-ai-upscalers"]
diff --git a/recipes/DeforumSD.py b/recipes/DeforumSD.py
index a8b87ac6e..7ec57bc19 100644
--- a/recipes/DeforumSD.py
+++ b/recipes/DeforumSD.py
@@ -15,6 +15,8 @@
from daras_ai_v2.safety_checker import safety_checker
from daras_ai_v2.tabs_widget import MenuTabs
+DEFAULT_DEFORUMSD_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/7dc25196-93fe-11ee-9e3a-02420a0001ce/AI%20Animation%20generator.jpg.png"
+
class AnimationModels(TextChoices):
protogen_2_2 = ("Protogen_V2.2.ckpt", "Protogen V2.2 (darkstorm2150)")
@@ -161,7 +163,7 @@ def get_last_frame(prompt_list: list) -> int:
class DeforumSDPage(BasePage):
title = "AI Animation Generator"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/media/users/kxmNIYAOJbfOURxHBKNCWeUSKiP2/dd88c110-88d6-11ee-9b4f-2b58bd50e819/animation.gif"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/media/users/kxmNIYAOJbfOURxHBKNCWeUSKiP2/dd88c110-88d6-11ee-9b4f-2b58bd50e819/animation.gif"
workflow = Workflow.DEFORUM_SD
slug_versions = ["DeforumSD", "animation-generator"]
@@ -199,6 +201,9 @@ class RequestModel(BaseModel):
class ResponseModel(BaseModel):
output_video: str
+ def preview_image(self, state: dict) -> str | None:
+ return DEFAULT_DEFORUMSD_META_IMG
+
def related_workflows(self) -> list:
from recipes.VideoBots import VideoBotsPage
from recipes.LipsyncTTS import LipsyncTTSPage
diff --git a/recipes/DocExtract.py b/recipes/DocExtract.py
index bbb0b2277..fc2b8b3c8 100644
--- a/recipes/DocExtract.py
+++ b/recipes/DocExtract.py
@@ -40,7 +40,7 @@
from daras_ai_v2.vector_search import doc_url_to_metadata
from recipes.DocSearch import render_documents
-DEFAULT_YOUTUBE_BOT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/6c8f6876-538c-11ee-bea7-02420a000195/youtube%20bot%201.png.png"
+DEFAULT_YOUTUBE_BOT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/ddc8ffac-93fb-11ee-89fb-02420a0001cb/Youtube%20transcripts.jpg.png"
class Columns(IntegerChoices):
@@ -56,7 +56,7 @@ class Columns(IntegerChoices):
class DocExtractPage(BasePage):
title = "Youtube Transcripts + GPT extraction to Google Sheets"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png"
workflow = Workflow.DOC_EXTRACT
slug_versions = [
"doc-extract",
diff --git a/recipes/DocSearch.py b/recipes/DocSearch.py
index 70a3c43b5..2a509d8fd 100644
--- a/recipes/DocSearch.py
+++ b/recipes/DocSearch.py
@@ -33,12 +33,12 @@
render_sources_widget,
)
-DEFAULT_DOC_SEARCH_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/assets/DOC%20SEARCH.gif"
+DEFAULT_DOC_SEARCH_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/bcc7aa58-93fe-11ee-a083-02420a0001c8/Search%20your%20docs.jpg.png"
class DocSearchPage(BasePage):
title = "Search your Docs with GPT"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/cbbb4dc6-88d7-11ee-bf6c-02420a000166/Search%20your%20docs%20with%20gpt.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/cbbb4dc6-88d7-11ee-bf6c-02420a000166/Search%20your%20docs%20with%20gpt.png.png"
workflow = Workflow.DOC_SEARCH
slug_versions = ["doc-search"]
diff --git a/recipes/DocSummary.py b/recipes/DocSummary.py
index 8e099e947..4b9283cde 100644
--- a/recipes/DocSummary.py
+++ b/recipes/DocSummary.py
@@ -27,7 +27,7 @@
)
from recipes.GoogleGPT import render_output_with_refs, GoogleGPTPage
-DEFAULT_DOC_SUMMARY_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/db70c56e-585a-11ee-990b-02420a00018f/doc%20summary.png.png"
+DEFAULT_DOC_SUMMARY_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/f35796d2-93fe-11ee-b86c-02420a0001c7/Summarize%20with%20GPT.jpg.png"
class CombineDocumentsChains(Enum):
@@ -38,7 +38,7 @@ class CombineDocumentsChains(Enum):
class DocSummaryPage(BasePage):
title = "Summarize your Docs with GPT"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/1f858a7a-88d8-11ee-a658-02420a000163/Summarize%20your%20docs%20with%20gpt.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/1f858a7a-88d8-11ee-a658-02420a000163/Summarize%20your%20docs%20with%20gpt.png.png"
workflow = Workflow.DOC_SUMMARY
slug_versions = ["doc-summary"]
diff --git a/recipes/EmailFaceInpainting.py b/recipes/EmailFaceInpainting.py
index 8550cb46b..86428c4ae 100644
--- a/recipes/EmailFaceInpainting.py
+++ b/recipes/EmailFaceInpainting.py
@@ -17,10 +17,12 @@
email_regex = r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b"
twitter_handle_regex = r"(@)?[A-Za-z0-9_]{1,15}"
+DEFAULT_EMAIL_FACE_INPAINTING_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/6937427a-9522-11ee-b6d3-02420a0001ea/Email%20photo.jpg.png"
+
class EmailFaceInpaintingPage(FaceInpaintingPage):
title = "AI Generated Photo from Email Profile Lookup"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/2affefa6-88da-11ee-aa86-02420a000165/AI%20generated%20photo%20with%20email%20profile%20lookup.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/ec0df5aa-9521-11ee-93d3-02420a0001e5/Email%20Profile%20Lookup.png.png"
workflow = Workflow.EMAIL_FACE_INPAINTING
slug_versions = ["EmailFaceInpainting", "ai-image-from-email-lookup"]
@@ -85,6 +87,9 @@ class ResponseModel(BaseModel):
output_images: list[str]
email_sent: bool = False
+ def preview_image(self, state: dict) -> str | None:
+ return DEFAULT_EMAIL_FACE_INPAINTING_META_IMG
+
def preview_description(self, state: dict) -> str:
return "Find an email's public photo and then draw the face into an AI generated scene using your own prompt + the latest Stable Diffusion or DallE image generator."
diff --git a/recipes/FaceInpainting.py b/recipes/FaceInpainting.py
index 3702652ae..a9353ea07 100644
--- a/recipes/FaceInpainting.py
+++ b/recipes/FaceInpainting.py
@@ -23,10 +23,12 @@
from daras_ai_v2.repositioning import repositioning_preview_img
from daras_ai_v2.stable_diffusion import InpaintingModels
+DEFAULT_FACE_INPAINTING_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/a146bfc0-93ff-11ee-b86c-02420a0001c7/Face%20in%20painting.jpg.png"
+
class FaceInpaintingPage(BasePage):
title = "AI Image with a Face"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/10c2ce06-88da-11ee-b428-02420a000168/ai%20image%20with%20a%20face.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/10c2ce06-88da-11ee-b428-02420a000168/ai%20image%20with%20a%20face.png.png"
workflow = Workflow.FACE_INPAINTING
slug_versions = ["FaceInpainting", "face-in-ai-generated-photo"]
@@ -77,6 +79,9 @@ class ResponseModel(BaseModel):
diffusion_images: list[str]
output_images: list[str]
+ def preview_image(self, state: dict) -> str | None:
+ return DEFAULT_FACE_INPAINTING_META_IMG
+
def preview_description(self, state: dict) -> str:
return "Upload & extract a face into an AI-generated photo using your text + the latest Stable Diffusion or DallE image generator."
diff --git a/recipes/GoogleGPT.py b/recipes/GoogleGPT.py
index dde12482e..57b1078ba 100644
--- a/recipes/GoogleGPT.py
+++ b/recipes/GoogleGPT.py
@@ -37,12 +37,12 @@
EmptySearchResults,
)
-DEFAULT_GOOGLE_GPT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/assets/WEBSEARCH%20%2B%20CHATGPT.jpg"
+DEFAULT_GOOGLE_GPT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/85ed60a2-9405-11ee-9747-02420a0001ce/Web%20search%20GPT.jpg.png"
class GoogleGPTPage(BasePage):
title = "Web Search + GPT3"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/1de97d80-88d7-11ee-ad97-02420a00016c/Websearch%20GPT.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/28649544-9406-11ee-bba3-02420a0001cc/Websearch%20GPT%20option%202.png.png"
workflow = Workflow.GOOGLE_GPT
slug_versions = ["google-gpt"]
diff --git a/recipes/GoogleImageGen.py b/recipes/GoogleImageGen.py
index e02833063..c3dab52f0 100644
--- a/recipes/GoogleImageGen.py
+++ b/recipes/GoogleImageGen.py
@@ -29,10 +29,12 @@
instruct_pix2pix,
)
+DEFAULT_GOOGLE_IMG_GEN_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/dcd82b68-9400-11ee-9e3a-02420a0001ce/Search%20result%20photo.jpg.png"
+
class GoogleImageGenPage(BasePage):
title = "Render Image Search Results with AI"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/eb23c078-88da-11ee-aa86-02420a000165/web%20search%20render.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/eb23c078-88da-11ee-aa86-02420a000165/web%20search%20render.png.png"
workflow = Workflow.GOOGLE_IMAGE_GEN
slug_versions = ["GoogleImageGen", "render-images-with-ai"]
@@ -74,6 +76,9 @@ class ResponseModel(BaseModel):
image_urls: list[str]
selected_image: str | None
+ def preview_image(self, state: dict) -> str | None:
+ return DEFAULT_GOOGLE_IMG_GEN_META_IMG
+
def related_workflows(self):
from recipes.ObjectInpainting import ObjectInpaintingPage
from recipes.QRCodeGenerator import QRCodeGeneratorPage
diff --git a/recipes/ImageSegmentation.py b/recipes/ImageSegmentation.py
index 72b6ec193..543664065 100644
--- a/recipes/ImageSegmentation.py
+++ b/recipes/ImageSegmentation.py
@@ -28,10 +28,12 @@
repositioning_preview_widget,
)
+DEFAULT_IMG_SEGMENTATION_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/8363ed50-9401-11ee-878f-02420a0001cb/AI%20bg%20changer.jpg.png"
+
class ImageSegmentationPage(BasePage):
title = "AI Background Changer"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/06fc595e-88db-11ee-b428-02420a000168/AI%20Background%20Remover.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/06fc595e-88db-11ee-b428-02420a000168/AI%20Background%20Remover.png.png"
workflow = Workflow.IMAGE_SEGMENTATION
slug_versions = ["ImageSegmentation", "remove-image-background-with-ai"]
@@ -65,6 +67,9 @@ class ResponseModel(BaseModel):
resized_image: str
resized_mask: str
+ def preview_image(self, state: dict) -> str | None:
+ return DEFAULT_IMG_SEGMENTATION_META_IMG
+
def related_workflows(self) -> list:
from recipes.ObjectInpainting import ObjectInpaintingPage
from recipes.Img2Img import Img2ImgPage
diff --git a/recipes/Img2Img.py b/recipes/Img2Img.py
index 228c3ed73..da330639b 100644
--- a/recipes/Img2Img.py
+++ b/recipes/Img2Img.py
@@ -19,10 +19,12 @@
)
from daras_ai_v2.safety_checker import safety_checker
+DEFAULT_IMG2IMG_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/cc2804ea-9401-11ee-940a-02420a0001c7/Edit%20an%20image.jpg.png"
+
class Img2ImgPage(BasePage):
title = "Edit An Image with AI prompt"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/bcc9351a-88d9-11ee-bf6c-02420a000166/Edit%20an%20image%20with%20AI%201.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/bcc9351a-88d9-11ee-bf6c-02420a000166/Edit%20an%20image%20with%20AI%201.png.png"
workflow = Workflow.IMG_2_IMG
slug_versions = ["Img2Img", "ai-photo-editor"]
@@ -68,6 +70,9 @@ class RequestModel(BaseModel):
class ResponseModel(BaseModel):
output_images: list[str]
+ def preview_image(self, state: dict) -> str | None:
+ return DEFAULT_IMG2IMG_META_IMG
+
def related_workflows(self) -> list:
from recipes.QRCodeGenerator import QRCodeGeneratorPage
from recipes.ObjectInpainting import ObjectInpaintingPage
diff --git a/recipes/LetterWriter.py b/recipes/LetterWriter.py
index d4cd34fce..87e0aa550 100644
--- a/recipes/LetterWriter.py
+++ b/recipes/LetterWriter.py
@@ -14,7 +14,7 @@
class LetterWriterPage(BasePage):
title = "Letter Writer"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png"
workflow = Workflow.LETTER_WRITER
slug_versions = ["LetterWriter"]
diff --git a/recipes/Lipsync.py b/recipes/Lipsync.py
index 74c4671b8..5fe08a09b 100644
--- a/recipes/Lipsync.py
+++ b/recipes/Lipsync.py
@@ -15,12 +15,12 @@
CREDITS_PER_MB = 2
-DEFAULT_LIPSYNC_GIF = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/91acbbde-5857-11ee-920a-02420a000194/lipsync%20audio.png.png"
+DEFAULT_LIPSYNC_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/7fc4d302-9402-11ee-98dc-02420a0001ca/Lip%20Sync.jpg.png"
class LipsyncPage(BasePage):
title = "Lip Syncing"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/f33e6332-88d8-11ee-89f9-02420a000169/Lipsync%20TTS.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/f33e6332-88d8-11ee-89f9-02420a000169/Lipsync%20TTS.png.png"
workflow = Workflow.LIPSYNC
slug_versions = ["Lipsync"]
@@ -37,7 +37,7 @@ class ResponseModel(BaseModel):
output_video: str
def preview_image(self, state: dict) -> str | None:
- return DEFAULT_LIPSYNC_GIF
+ return DEFAULT_LIPSYNC_META_IMG
def render_form_v2(self) -> bool:
st.file_uploader(
diff --git a/recipes/LipsyncTTS.py b/recipes/LipsyncTTS.py
index 5ec6d7518..f85d4550a 100644
--- a/recipes/LipsyncTTS.py
+++ b/recipes/LipsyncTTS.py
@@ -9,12 +9,12 @@
from daras_ai_v2.safety_checker import safety_checker
from daras_ai_v2.loom_video_widget import youtube_video
-DEFAULT_LIPSYNC_TTS_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/assets/lipsync_meta_img.gif"
+DEFAULT_LIPSYNC_TTS_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/13b4d352-9456-11ee-8edd-02420a0001c7/Lipsync%20TTS.jpg.png"
class LipsyncTTSPage(LipsyncPage, TextToSpeechPage):
title = "Lipsync Video with Any Text"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/1acfa370-88d9-11ee-bf6c-02420a000166/Lipsync%20with%20audio%201.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/1acfa370-88d9-11ee-bf6c-02420a000166/Lipsync%20with%20audio%201.png.png"
workflow = Workflow.LIPSYNC_TTS
slug_versions = ["LipsyncTTS", "lipsync-maker"]
@@ -53,6 +53,8 @@ class RequestModel(BaseModel):
elevenlabs_model: str | None
elevenlabs_stability: float | None
elevenlabs_similarity_boost: float | None
+ elevenlabs_style: float | None
+ elevenlabs_speaker_boost: bool | None
class ResponseModel(BaseModel):
output_video: str
diff --git a/recipes/ObjectInpainting.py b/recipes/ObjectInpainting.py
index be04355dd..d84c59069 100644
--- a/recipes/ObjectInpainting.py
+++ b/recipes/ObjectInpainting.py
@@ -24,10 +24,12 @@
)
from daras_ai_v2.stable_diffusion import InpaintingModels
+DEFAULT_OBJECT_INPAINTING_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/4bca6982-9456-11ee-bc12-02420a0001cc/Product%20photo%20backgrounds.jpg.png"
+
class ObjectInpaintingPage(BasePage):
title = "Generate Product Photo Backgrounds"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/f07b731e-88d9-11ee-a658-02420a000163/W.I.3.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/f07b731e-88d9-11ee-a658-02420a000163/W.I.3.png.png"
workflow = Workflow.OBJECT_INPAINTING
slug_versions = ["ObjectInpainting", "product-photo-background-generator"]
@@ -74,6 +76,9 @@ class ResponseModel(BaseModel):
# diffusion_images: list[str]
output_images: list[str]
+ def preview_image(self, state: dict) -> str | None:
+ return DEFAULT_OBJECT_INPAINTING_META_IMG
+
def related_workflows(self) -> list:
from recipes.ImageSegmentation import ImageSegmentationPage
from recipes.GoogleImageGen import GoogleImageGenPage
diff --git a/recipes/QRCodeGenerator.py b/recipes/QRCodeGenerator.py
index 17e31465d..7d27d8075 100644
--- a/recipes/QRCodeGenerator.py
+++ b/recipes/QRCodeGenerator.py
@@ -39,7 +39,7 @@
from url_shortener.models import ShortenedURL
ATTEMPTS = 1
-DEFAULT_QR_CODE_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/f09c8cfa-5393-11ee-a837-02420a000190/ai%20art%20qr%20codes1%201.png.png"
+DEFAULT_QR_CODE_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/a679a410-9456-11ee-bd77-02420a0001ce/QR%20Code.jpg.png"
class QrSources(Enum):
@@ -51,7 +51,7 @@ class QrSources(Enum):
class QRCodeGeneratorPage(BasePage):
title = "AI Art QR Code"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/03d6538e-88d5-11ee-ad97-02420a00016c/W.I.2.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/03d6538e-88d5-11ee-ad97-02420a00016c/W.I.2.png.png"
workflow = Workflow.QR_CODE
slug_versions = ["art-qr-code", "qr", "qr-code"]
diff --git a/recipes/RelatedQnA.py b/recipes/RelatedQnA.py
index 67467b5eb..efddba5ba 100644
--- a/recipes/RelatedQnA.py
+++ b/recipes/RelatedQnA.py
@@ -16,7 +16,7 @@
from recipes.GoogleGPT import GoogleGPTPage
from recipes.RelatedQnADoc import render_qna_outputs
-DEFAULT_SEO_CONTENT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/9b415768-5393-11ee-a837-02420a000190/RQnA%20SEO%20content%201.png.png"
+DEFAULT_SEO_CONTENT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/cbd2c94e-9456-11ee-a95e-02420a0001cc/People%20also%20ask.jpg.png"
class RelatedGoogleGPTResponse(GoogleGPTPage.ResponseModel):
@@ -25,7 +25,7 @@ class RelatedGoogleGPTResponse(GoogleGPTPage.ResponseModel):
class RelatedQnAPage(BasePage):
title = "Generate โPeople Also Askโ SEO Content "
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/37b0ba22-88d6-11ee-b549-02420a000167/People%20also%20ask.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/37b0ba22-88d6-11ee-b549-02420a000167/People%20also%20ask.png.png"
workflow = Workflow.RELATED_QNA_MAKER
slug_versions = ["related-qna-maker"]
diff --git a/recipes/RelatedQnADoc.py b/recipes/RelatedQnADoc.py
index 93a68b963..7ebc87080 100644
--- a/recipes/RelatedQnADoc.py
+++ b/recipes/RelatedQnADoc.py
@@ -24,7 +24,7 @@ class RelatedDocSearchResponse(DocSearchPage.ResponseModel):
class RelatedQnADocPage(BasePage):
title = '"People Also Ask" Answers from a Doc'
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png"
workflow = Workflow.RELATED_QNA_MAKER_DOC
slug_versions = ["related-qna-maker-doc"]
@@ -155,4 +155,4 @@ def render_qna_outputs(state, height, show_count=None):
{"output_text": output_text, "references": references}, height
)
render_sources_widget(references)
- st.write(" ")
+ st.html(" ")
diff --git a/recipes/SEOSummary.py b/recipes/SEOSummary.py
index 1227e52e0..51b4e21db 100644
--- a/recipes/SEOSummary.py
+++ b/recipes/SEOSummary.py
@@ -36,7 +36,7 @@
KEYWORDS_SEP = re.compile(r"[\n,]")
STOP_SEQ = "$" * 10
-SEO_SUMMARY_DEFAULT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/assets/seo.png"
+SEO_SUMMARY_DEFAULT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/13d3ab1e-9457-11ee-98a6-02420a0001c9/SEO.jpg.png"
BANNED_HOSTS = [
# youtube generally returns garbage
@@ -56,7 +56,7 @@
class SEOSummaryPage(BasePage):
title = "Create a perfect SEO-optimized Title & Paragraph"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/85f38b42-88d6-11ee-ad97-02420a00016c/Create%20SEO%20optimized%20content%20option%202.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/85f38b42-88d6-11ee-ad97-02420a00016c/Create%20SEO%20optimized%20content%20option%202.png.png"
workflow = Workflow.SEO_SUMMARY
slug_versions = ["SEOSummary", "seo-paragraph-generator"]
diff --git a/recipes/SmartGPT.py b/recipes/SmartGPT.py
index 56dd886c5..554a74940 100644
--- a/recipes/SmartGPT.py
+++ b/recipes/SmartGPT.py
@@ -17,12 +17,12 @@
from daras_ai_v2.language_model_settings_widgets import language_model_settings
from daras_ai_v2.pt import PromptTree
-DEFAULT_SMARTGPT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/e02d1582-538a-11ee-9d7b-02420a000194/smartgpt%201.png.png"
+DEFAULT_SMARTGPT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/3d71b434-9457-11ee-8edd-02420a0001c7/Smart%20GPT.jpg.png"
class SmartGPTPage(BasePage):
title = "SmartGPT"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/ffd24ad8-88d7-11ee-a658-02420a000163/SmartGPT.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/ffd24ad8-88d7-11ee-a658-02420a000163/SmartGPT.png.png"
workflow = Workflow.SMART_GPT
slug_versions = ["SmartGPT"]
price = 20
diff --git a/recipes/SocialLookupEmail.py b/recipes/SocialLookupEmail.py
index 23a455c03..5ad154f9f 100644
--- a/recipes/SocialLookupEmail.py
+++ b/recipes/SocialLookupEmail.py
@@ -14,12 +14,12 @@
from daras_ai_v2.redis_cache import redis_cache_decorator
email_regex = r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b"
-DEFAULT_SOCIAL_LOOKUP_EMAIL_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/assets/email%20ver%202.png"
+DEFAULT_SOCIAL_LOOKUP_EMAIL_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/6729ea44-9457-11ee-bd77-02420a0001ce/Profile%20look%20up%20gpt%20email.jpg.png"
class SocialLookupEmailPage(BasePage):
title = "Profile Lookup + GPT3 for AI-Personalized Emails"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/5fbd475a-88d7-11ee-aac9-02420a00016b/personalized%20email.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/5fbd475a-88d7-11ee-aac9-02420a00016b/personalized%20email.png.png"
workflow = Workflow.SOCIAL_LOOKUP_EMAIL
slug_versions = ["SocialLookupEmail", "email-writer-with-profile-lookup"]
diff --git a/recipes/Text2Audio.py b/recipes/Text2Audio.py
index 589800d92..77776ddf5 100644
--- a/recipes/Text2Audio.py
+++ b/recipes/Text2Audio.py
@@ -14,7 +14,7 @@
num_outputs_setting,
)
-DEFAULT_TEXT2AUDIO_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/ddc6e894-538b-11ee-a837-02420a000190/text2audio1%201.png.png"
+DEFAULT_TEXT2AUDIO_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/85cf8ea4-9457-11ee-bd77-02420a0001ce/Text%20guided%20audio.jpg.png"
class Text2AudioModels(Enum):
@@ -28,7 +28,7 @@ class Text2AudioModels(Enum):
class Text2AudioPage(BasePage):
title = "Text guided audio generator"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/a4481d58-88d9-11ee-aa86-02420a000165/Text%20guided%20audio%20generator.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/a4481d58-88d9-11ee-aa86-02420a000165/Text%20guided%20audio%20generator.png.png"
workflow = Workflow.TEXT_2_AUDIO
slug_versions = ["text2audio"]
diff --git a/recipes/TextToSpeech.py b/recipes/TextToSpeech.py
index b4c3702b9..1f67fbe20 100644
--- a/recipes/TextToSpeech.py
+++ b/recipes/TextToSpeech.py
@@ -22,12 +22,12 @@
TextToSpeechProviders,
)
-DEFAULT_TTS_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/assets/cropped_tts_compare_meta_img.gif"
+DEFAULT_TTS_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/a73181ce-9457-11ee-8edd-02420a0001c7/Voice%20generators.jpg.png"
class TextToSpeechPage(BasePage):
title = "Compare AI Voice Generators"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/3621e11a-88d9-11ee-b549-02420a000167/Compare%20AI%20voice%20generators.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/3621e11a-88d9-11ee-b549-02420a000167/Compare%20AI%20voice%20generators.png.png"
workflow = Workflow.TEXT_TO_SPEECH
slug_versions = [
"TextToSpeech",
@@ -71,6 +71,8 @@ class RequestModel(BaseModel):
elevenlabs_model: str | None
elevenlabs_stability: float | None
elevenlabs_similarity_boost: float | None
+ elevenlabs_style: float | None
+ elevenlabs_speaker_boost: bool | None
class ResponseModel(BaseModel):
audio_url: str
@@ -268,6 +270,14 @@ def run(self, state: dict):
stability = state.get("elevenlabs_stability", 0.5)
similarity_boost = state.get("elevenlabs_similarity_boost", 0.75)
+ voice_settings = dict(
+ stability=stability, similarity_boost=similarity_boost
+ )
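+ # style and speaker_boost are only sent for the multilingual v2 model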
+ if voice_model == "eleven_multilingual_v2":
+ voice_settings["style"] = state.get("elevenlabs_style", 0.0)
+ voice_settings["speaker_boost"] = state.get(
+ "elevenlabs_speaker_boost", True
+ )
response = requests.post(
f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}",
@@ -278,10 +288,7 @@ def run(self, state: dict):
json={
"text": text,
"model_id": voice_model,
- "voice_settings": {
- "stability": stability,
- "similarity_boost": similarity_boost,
- },
+ "voice_settings": voice_settings,
},
)
response.raise_for_status()
diff --git a/recipes/VideoBots.py b/recipes/VideoBots.py
index 84ed1e45b..952df781c 100644
--- a/recipes/VideoBots.py
+++ b/recipes/VideoBots.py
@@ -67,7 +67,7 @@
from recipes.TextToSpeech import TextToSpeechPage
from url_shortener.models import ShortenedURL
-DEFAULT_COPILOT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/c8b24b0c-538a-11ee-a1a3-02420a00018d/meta%20tags1%201.png.png"
+DEFAULT_COPILOT_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/f454d64a-9457-11ee-b6d5-02420a0001cb/Copilot.jpg.png"
# BOT_SCRIPT_RE = re.compile(
# # start of line
@@ -97,7 +97,7 @@ class ReplyButton(typing.TypedDict):
class VideoBotsPage(BasePage):
title = "Copilot for your Enterprise" # "Create Interactive Video Bots"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/8c014530-88d4-11ee-aac9-02420a00016b/Copilot.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/8c014530-88d4-11ee-aac9-02420a00016b/Copilot.png.png"
workflow = Workflow.VIDEO_BOTS
slug_versions = ["video-bots", "bots", "copilot"]
diff --git a/recipes/asr.py b/recipes/asr.py
index 603b31c59..a297bf9c3 100644
--- a/recipes/asr.py
+++ b/recipes/asr.py
@@ -25,12 +25,12 @@
from daras_ai_v2.text_output_widget import text_outputs
from recipes.DocSearch import render_documents
-DEFAULT_ASR_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/3b98d906-538b-11ee-9c77-02420a000193/Speech1%201.png.png"
+DEFAULT_ASR_META_IMG = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/1916825c-93fa-11ee-97be-02420a0001c8/Speech.jpg.png"
class AsrPage(BasePage):
title = "Speech Recognition & Translation"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/5fb7e5f6-88d9-11ee-aa86-02420a000165/Speech.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/5fb7e5f6-88d9-11ee-aa86-02420a000165/Speech.png.png"
workflow = Workflow.ASR
slug_versions = ["asr", "speech"]
diff --git a/recipes/embeddings_page.py b/recipes/embeddings_page.py
index 8c2214a2d..76efe16d9 100644
--- a/recipes/embeddings_page.py
+++ b/recipes/embeddings_page.py
@@ -39,7 +39,7 @@ class EmbeddingModels(models.TextChoices):
class EmbeddingsPage(BasePage):
title = "Embeddings"
- image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png"
+ explore_image = "https://storage.googleapis.com/dara-c1b52.appspot.com/daras_ai/media/aeb83ee8-889e-11ee-93dc-02420a000143/Youtube%20transcripts%20GPT%20extractions.png.png"
workflow = Workflow.EMBEDDINGS
slug_versions = ["embeddings", "embed", "text-embedings"]
price = 1