From a615112b97f81ce47704701157e9b8bbd4215815 Mon Sep 17 00:00:00 2001
From: steven krawczyk
Date: Fri, 4 Aug 2023 12:08:04 -0700
Subject: [PATCH] Add qs parameters to streamlit app

---
 prompttools/playground/constants.py  | 26 +++++++++++++++
 prompttools/playground/playground.py | 49 +++++++++++++---------------
 2 files changed, 48 insertions(+), 27 deletions(-)

diff --git a/prompttools/playground/constants.py b/prompttools/playground/constants.py
index e87ef718..9493e261 100644
--- a/prompttools/playground/constants.py
+++ b/prompttools/playground/constants.py
@@ -28,3 +28,29 @@
     "Google PaLM": GooglePaLMCompletionExperiment,
     "HuggingFace Hub": HuggingFaceHubExperiment,
 }
+
+MODEL_TYPES = (
+    "OpenAI Chat",
+    "OpenAI Completion",
+    "Anthropic",
+    "Google PaLM",
+    "LlamaCpp Chat",
+    "LlamaCpp Completion",
+    "HuggingFace Hub",
+)
+
+OPENAI_CHAT_MODELS = (
+    "gpt-3.5-turbo",
+    "gpt-3.5-turbo-16k",
+    "gpt-3.5-turbo-0613",
+    "gpt-3.5-turbo-16k-0613",
+    "gpt-3.5-turbo-0301",
+    "gpt-4",
+    "gpt-4-0613",
+    "gpt-4-32k",
+    "gpt-4-32k-0613",
+    "gpt-4-0314",
+    "gpt-4-32k-0314",
+)
+
+OPENAI_COMPLETION_MODELS = ("text-davinci-003", "text-davinci-002", "code-davinci-002")
diff --git a/prompttools/playground/playground.py b/prompttools/playground/playground.py
index c3d37e7a..2cf58be9 100644
--- a/prompttools/playground/playground.py
+++ b/prompttools/playground/playground.py
@@ -19,8 +19,12 @@
 except Exception:
     pass
 
+from prompttools.playground.constants import MODEL_TYPES, OPENAI_CHAT_MODELS, OPENAI_COMPLETION_MODELS
 from prompttools.playground.data_loader import render_prompts, load_data, run_multiple
 
+
+params = st.experimental_get_query_params()
+
 st.header("PromptTools Playground")
 st.write("Give us a \U00002B50 on [GitHub](https://github.com/hegelai/prompttools)")
 
@@ -29,16 +33,7 @@
 
 if mode != "Model Comparison":
     model_type = st.selectbox(
-        "Model Type",
-        (
-            "OpenAI Chat",
-            "OpenAI Completion",
-            "Anthropic",
-            "Google PaLM",
-            "LlamaCpp Chat",
-            "LlamaCpp Completion",
-            "HuggingFace Hub",
-        ),
+        "Model Type", MODEL_TYPES, index=MODEL_TYPES.index(params["model_type"][0]) if "model_type" in params else 0
     )
     model, api_key = None, None
     if model_type in {"LlamaCpp Chat", "LlamaCpp Completion"}:
@@ -55,23 +50,16 @@
     elif model_type == "OpenAI Chat":
         model = st.selectbox(
             "Model",
-            (
-                "gpt-3.5-turbo",
-                "gpt-3.5-turbo-16k",
-                "gpt-3.5-turbo-0613",
-                "gpt-3.5-turbo-16k-0613",
-                "gpt-3.5-turbo-0301",
-                "gpt-4",
-                "gpt-4-0613",
-                "gpt-4-32k",
-                "gpt-4-32k-0613",
-                "gpt-4-0314",
-                "gpt-4-32k-0314",
-            ),
+            OPENAI_CHAT_MODELS,
+            index=OPENAI_CHAT_MODELS.index(params["model"][0]) if "model" in params else 0,
         )
         api_key = st.text_input("OpenAI API Key")
-    elif model_type == "OpenAI Completion":
-        model = st.selectbox("Model", ("text-davinci-003", "text-davinci-002", "code-davinci-002"))
+    elif model_type == "OpenAI Completion":
+        model = st.selectbox(
+            "Model",
+            OPENAI_COMPLETION_MODELS,
+            index=OPENAI_COMPLETION_MODELS.index(params["model"][0]) if "model" in params else 0,
+        )
         api_key = st.text_input("OpenAI API Key")
 
     variable_count = 0
@@ -111,6 +99,7 @@
     google_api_key = st.text_input("Google PaLM Key")
     hf_api_key = st.text_input("HuggingFace Hub Key")
 
+
 if mode == "Instruction":
     placeholders = [[st.empty() for _ in range(instruction_count + 1)] for _ in range(prompt_count)]
 
@@ -124,7 +113,7 @@
             instructions.append(
                 st.text_area(
                     "System Message" if model_type == "OpenAI Chat" else "Instruction",
-                    value="You are a helpful AI assistant.",
+                    value=params["instruction"][0] if "instruction" in params else "You are a helpful AI assistant.",
                     key=f"col_{j}",
                 )
             )
@@ -133,7 +122,13 @@
     for i in range(prompt_count):
         cols = st.columns(instruction_count + 1)
         with cols[0]:
-            prompts.append(st.text_area("User Message" if model_type == "OpenAI Chat" else "Prompt", key=f"row_{i}"))
+            prompts.append(
+                st.text_area(
+                    "User Message" if model_type == "OpenAI Chat" else "Prompt",
+                    key=f"row_{i}",
+                    value=params["prompt"][0] if "prompt" in params else "",
+                )
+            )
         for j in range(1, instruction_count + 1):
             with cols[j]:
                 placeholders[i][j] = st.empty()  # placeholders for the future output
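
Note on the mechanism: st.experimental_get_query_params() returns a dict mapping each query-string key to a list of string values, which is why every lookup in this patch indexes with [0]. The sketch below is a minimal, self-contained illustration of the same pre-population pattern, not code from this patch; the example URL, the two-entry MODEL_TYPES tuple, and the default_index name are illustrative stand-ins:

    import streamlit as st

    # Query params arrive as {key: [value, ...]}, so a single value is read with [0],
    # e.g. http://localhost:8501/?model_type=OpenAI+Completion&model=text-davinci-003
    params = st.experimental_get_query_params()

    MODEL_TYPES = ("OpenAI Chat", "OpenAI Completion")  # illustrative subset

    # Fall back to the first option when the parameter is absent from the URL.
    default_index = MODEL_TYPES.index(params["model_type"][0]) if "model_type" in params else 0
    model_type = st.selectbox("Model Type", MODEL_TYPES, index=default_index)

With the patch applied, a link such as ?model_type=OpenAI+Chat&model=gpt-4&prompt=Hello should reproduce a playground configuration, with prompt and instruction falling back to their hard-coded defaults when absent. One caveat of the tuple.index() lookup: an unrecognized model_type or model value in the URL raises ValueError rather than falling back to the first option.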