From 80e6e57ecec6219305ebe568766cc5083441bc7c Mon Sep 17 00:00:00 2001
From: clr-li <111320104+clr-li@users.noreply.github.com>
Date: Wed, 14 Feb 2024 23:28:07 -0800
Subject: [PATCH] Resolve leftover merge conflicts in favor of master
---
daras_ai_v2/base.py | 35 --------------------------
daras_ai_v2/bot_integration_widgets.py | 3 ---
daras_ai_v2/bots.py | 3 ---
daras_ai_v2/language_model.py | 19 --------------
recipes/CompareText2Img.py | 5 ----
recipes/CompareUpscaler.py | 5 ----
6 files changed, 70 deletions(-)
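Note (not part of the patch proper): a minimal Python sketch of a hypothetical check, not a file in this repo, that scans for any committed conflict markers of the kind removed below; it assumes it is run from the repository root.

import pathlib
import re

# Sketch only, not part of this patch: report any .py files that still contain
# the conflict markers git writes on a failed merge ("<<<<<<< ", a bare
# "=======", or ">>>>>>> " at the start of a line).
CONFLICT_MARKER = re.compile(r"^(?:<{7} |={7}$|>{7} )", re.MULTILINE)

for path in pathlib.Path(".").rglob("*.py"):
    text = path.read_text(encoding="utf-8", errors="ignore")
    if CONFLICT_MARKER.search(text):
        print(f"conflict markers still present in {path}")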
diff --git a/daras_ai_v2/base.py b/daras_ai_v2/base.py
index 6c0435ac5..d6243aeb7 100644
--- a/daras_ai_v2/base.py
+++ b/daras_ai_v2/base.py
@@ -1351,11 +1351,7 @@ def _render_output_col(self, submitted: bool):
self._render_after_output()
def _render_completed_output(self):
-<<<<<<< HEAD
- run_time = st.session_state.get(StateKeys.run_time, 0)
-=======
pass
->>>>>>> master
def _render_failed_output(self):
err_msg = st.session_state.get(StateKeys.error_msg)
@@ -1518,36 +1514,6 @@ def clear_outputs(self):
st.session_state.pop(field_name, None)
def _render_after_output(self):
-<<<<<<< HEAD
- if "seed" in self.RequestModel.schema_json():
- randomize = st.button(
- ' Regenerate', type="tertiary"
- )
- if randomize:
- st.session_state[StateKeys.pressed_randomize] = True
- st.experimental_rerun()
- caption = ""
- caption += f'\\\nGenerated in {st.session_state.get(StateKeys.run_time, 0):.2f}s'
- if "seed" in self.RequestModel.schema_json():
- seed = st.session_state.get("seed")
- caption += f' with seed {seed} '
- created_at = st.session_state.get(
- StateKeys.created_at, datetime.datetime.today()
- )
- if not isinstance(created_at, datetime.datetime):
- created_at = datetime.datetime.fromisoformat(created_at)
- format_created_at = created_at.strftime("%d %b %Y %-I:%M%p")
- caption += f' at {format_created_at}'
- st.caption(caption, unsafe_allow_html=True)
-
- def render_buttons(self, url: str):
- st.download_button(
- label=' Download',
- url=url,
- type="secondary",
- )
- self._render_report_button()
-=======
self._render_report_button()
if "seed" in self.RequestModel.schema_json():
@@ -1559,7 +1525,6 @@ def render_buttons(self, url: str):
st.experimental_rerun()
render_output_caption()
->>>>>>> master
def state_to_doc(self, state: dict):
ret = {
diff --git a/daras_ai_v2/bot_integration_widgets.py b/daras_ai_v2/bot_integration_widgets.py
index 5dc9dba29..73c6ccad0 100644
--- a/daras_ai_v2/bot_integration_widgets.py
+++ b/daras_ai_v2/bot_integration_widgets.py
@@ -19,12 +19,9 @@ def general_integration_settings(bi: BotIntegration):
st.session_state[f"_bi_user_language_{bi.id}"] = BotIntegration._meta.get_field(
"user_language"
).default
-<<<<<<< HEAD
-=======
st.session_state[f"_bi_streaming_enabled_{bi.id}"] = (
BotIntegration._meta.get_field("streaming_enabled").default
)
->>>>>>> master
st.session_state[f"_bi_show_feedback_buttons_{bi.id}"] = (
BotIntegration._meta.get_field("show_feedback_buttons").default
)
diff --git a/daras_ai_v2/bots.py b/daras_ai_v2/bots.py
index d07436790..30acf07a3 100644
--- a/daras_ai_v2/bots.py
+++ b/daras_ai_v2/bots.py
@@ -462,10 +462,7 @@ def _save_msgs(
if speech_run
else None
),
-<<<<<<< HEAD
-=======
response_time=timezone.now() - received_time,
->>>>>>> master
)
attachments = []
for f_url in (input_images or []) + (input_documents or []):
diff --git a/daras_ai_v2/language_model.py b/daras_ai_v2/language_model.py
index 7ac308d83..fd6142758 100644
--- a/daras_ai_v2/language_model.py
+++ b/daras_ai_v2/language_model.py
@@ -381,23 +381,7 @@ def run_language_model(
if stream:
return _stream_llm_outputs(entries, response_format_type)
else:
-<<<<<<< HEAD
- out_content = [
- # return messages back as either chatml or json messages
- (
- format_chatml_message(entry)
- if is_chatml
- else (entry.get("content") or "").strip()
- )
- for entry in result
- ]
- if tools:
- return out_content, [(entry.get("tool_calls") or []) for entry in result]
- else:
- return out_content
-=======
return _parse_entries(entries, is_chatml, response_format_type, tools)
->>>>>>> master
else:
if tools:
raise ValueError("Only OpenAI chat models support Tools")
@@ -593,10 +577,7 @@ def _run_openai_chat(
if response_format_type
else NOT_GIVEN
),
-<<<<<<< HEAD
-=======
stream=stream,
->>>>>>> master
)
for model_str in model
],
diff --git a/recipes/CompareText2Img.py b/recipes/CompareText2Img.py
index aab8bc966..dc5ea1ae2 100644
--- a/recipes/CompareText2Img.py
+++ b/recipes/CompareText2Img.py
@@ -246,14 +246,9 @@ def _render_outputs(self, state):
for key in selected_models:
output_images: dict = state.get("output_images", {}).get(key, [])
for img in output_images:
-<<<<<<< HEAD
- st.image(img, caption=Text2ImgModels[key].value)
- self.render_buttons(img)
-=======
st.image(
img, caption=Text2ImgModels[key].value, show_download_button=True
)
->>>>>>> master
def preview_description(self, state: dict) -> str:
return "Create multiple AI photos from one prompt using Stable Diffusion (1.5 -> 2.1, Open/Midjourney), DallE, and other models. Find out which AI Image generator works best for your text prompt on comparing OpenAI, Stability.AI etc."
diff --git a/recipes/CompareUpscaler.py b/recipes/CompareUpscaler.py
index ed1df9338..13f62de91 100644
--- a/recipes/CompareUpscaler.py
+++ b/recipes/CompareUpscaler.py
@@ -107,12 +107,7 @@ def _render_outputs(self, state):
img: dict = state.get("output_images", {}).get(key)
if not img:
continue
-<<<<<<< HEAD
- st.image(img, caption=UpscalerModels[key].value)
- self.render_buttons(img)
-=======
st.image(img, caption=UpscalerModels[key].value, show_download_button=True)
->>>>>>> master
def get_raw_price(self, state: dict) -> int:
selected_models = state.get("selected_models", [])