From 51090710798173223380c77cc7e8b5eb82bc142f Mon Sep 17 00:00:00 2001 From: evpearce <30893538+evpearce@users.noreply.github.com> Date: Tue, 5 Nov 2024 16:33:33 +0000 Subject: [PATCH] FS-60: Add promptfoo test config for suggestions prompt (#14) * FS-60: Add promptfoo test config for suggestions prompt * Update backend/src/prompts/README.md Co-authored-by: Charlie Leopard * Move promptfoo to its own folder in /backend. Load OpenAI key from env file. Simplify test structure so less utility .py classes are needed * update readme * rename folder --------- Co-authored-by: Charlie Leopard Co-authored-by: Ivan Mladjenovic (He/Him) --- backend/promptfoo/README.md | 21 +++++++++++++ .../generate_message_suggestions_config.yaml | 31 +++++++++++++++++++ backend/promptfoo/prompt_foo_runner.py | 18 +++++++++++ backend/promptfoo/promptfooconfig.yaml | 4 +++ backend/requirements.txt | 7 +++-- 5 files changed, 78 insertions(+), 3 deletions(-) create mode 100644 backend/promptfoo/README.md create mode 100644 backend/promptfoo/generate_message_suggestions_config.yaml create mode 100644 backend/promptfoo/prompt_foo_runner.py create mode 100644 backend/promptfoo/promptfooconfig.yaml diff --git a/backend/promptfoo/README.md b/backend/promptfoo/README.md new file mode 100644 index 00000000..ca1809ed --- /dev/null +++ b/backend/promptfoo/README.md @@ -0,0 +1,21 @@ +# Promptfoo + +Promptfoo is a CLI and library for evaluating and red-teaming LLM apps. + +See https://www.promptfoo.dev/docs/intro/ + +## Setup + +### Install Promptfoo +Install promptfoo by running `npm install -g promptfoo` + +### Activate Python venv +Promptfoo must be run in a python virtual environment as python is used to load the jinja prompt templates. +To set up a virtual environment, see [Running Locally](../README.md) + +## Run Promptfoo +Promptfoo configuration (e.g. LLM model) can be set in `promptfooconfig.yaml` + +* Use `promptfoo eval` to run all promptfoo tests. 
+* Use `promptfoo eval -c generate_message_suggestions_config.yaml` to run a specific test suite. +* Use `promptfoo view` to view the results in browser. diff --git a/backend/promptfoo/generate_message_suggestions_config.yaml b/backend/promptfoo/generate_message_suggestions_config.yaml new file mode 100644 index 00000000..cfb16d52 --- /dev/null +++ b/backend/promptfoo/generate_message_suggestions_config.yaml @@ -0,0 +1,31 @@ +description: "Generate Message Suggestions" + +providers: + - id: openai:chat # openai:chat - defaults to gpt-4o-mini + config: + temperature: 0 + +prompts: file://prompt_foo_runner.py:generate_message_suggestions + +tests: + - description: "test the output has the correct format and content when there is no chat history " + vars: + chatHistory: [] + assert: + - type: javascript + value: JSON.parse(output).suggestions.length === 5 + - type: contains + value: ESG + + - description: "test the output has content containing coca-cola when the chat history contains a previous question about coca-cola" + vars: + chatHistory: + [ + "User: Can you find recent news articles discussing the ESG initiatives of Coca-Cola?", + "System: In 2023, Coca-Cola HBC has strengthened its commitment to Environmental, Social, and Governance (ESG) initiatives by embedding sustainability into its operations. The company aims for a net zero carbon footprint and net positive biodiversity by 2040, and it has been recognized as the world's most sustainable beverage company by the Dow Jones Sustainability Indices for the seventh consecutive year. Key efforts include collaborating with suppliers to improve sustainability practices, reducing carbon emissions, and promoting responsible sourcing. 
Additionally, Coca-Cola HBC has expanded its sustainability strategy to Egypt, reflecting its global approach to these initiatives.", ] assert: - type: contains value: Coca-Cola - type: llm-rubric value: the suggestions are all related to the topic of sustainability and ESG (Environment, Social, Governance) diff --git a/backend/promptfoo/prompt_foo_runner.py b/backend/promptfoo/prompt_foo_runner.py new file mode 100644 index 00000000..1763c366 --- /dev/null +++ b/backend/promptfoo/prompt_foo_runner.py @@ -0,0 +1,18 @@ +import sys +import os +sys.path.append("../") +from dotenv import load_dotenv, find_dotenv # noqa: E402 +from src.prompts.prompting import PromptEngine # noqa: E402 + +load_dotenv(find_dotenv()) + +OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") +engine = PromptEngine() + + +def generate_message_suggestions(context): + chat_history = context["vars"]["chatHistory"] + + system_prompt = engine.load_prompt("generate_message_suggestions", chat_history=chat_history) + + return [{"role": "system", "content": system_prompt}, {"role": "user", "content": "Give me 5 suggestions."}] diff --git a/backend/promptfoo/promptfooconfig.yaml b/backend/promptfoo/promptfooconfig.yaml new file mode 100644 index 00000000..b2dfaee0 --- /dev/null +++ b/backend/promptfoo/promptfooconfig.yaml @@ -0,0 +1,4 @@ +providers: + - id: openai:chat # openai:chat - defaults to gpt-4o-mini + config: + temperature: 0 diff --git a/backend/requirements.txt b/backend/requirements.txt index 8bc08017..eafe3ca3 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -5,9 +5,6 @@ pycodestyle==2.11.1 python-dotenv==1.0.1 neo4j==5.18.0 ruff==0.3.5 -pytest==8.1.1 -pytest-mock==3.14.0 -pytest-asyncio==0.23.7 jinja2==3.1.3 websockets==12.0 azure-core==1.30.1 @@ -29,3 +26,7 @@ pypdf==4.3.1 hiredis==3.0.0 redis==5.0.8 +# tests +pytest==8.1.1 +pytest-mock==3.14.0 +pytest-asyncio==0.23.7