From 2065e14d606db6de3c748db3eba8f05df95a98d6 Mon Sep 17 00:00:00 2001 From: steven krawczyk Date: Mon, 27 Nov 2023 19:23:05 -0800 Subject: [PATCH 01/52] Fix streamlit playground --- prompttools/experiment/experiments/experiment.py | 4 +++- prompttools/playground/data_loader.py | 4 ++-- prompttools/version.py | 4 ++-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/prompttools/experiment/experiments/experiment.py b/prompttools/experiment/experiments/experiment.py index deaf5996..872f12e1 100644 --- a/prompttools/experiment/experiments/experiment.py +++ b/prompttools/experiment/experiments/experiment.py @@ -527,7 +527,7 @@ def to_csv( table = self.get_table(get_all_cols=get_all_cols) table.to_csv(path, **kwargs) - def to_pandas_df(self, get_all_cols: bool = True): + def to_pandas_df(self, get_all_cols: bool = True, from_streamlit: bool = False): r""" Return the results as a ``pandas.DataFrame``. If the experiment has not been executed, it will run. @@ -535,6 +535,8 @@ def to_pandas_df(self, get_all_cols: bool = True): get_all_cols (bool): defaults to ``False``. If ``True``, it will return the full data with all input arguments (including frozen ones), full model response (not just the text response), and scores. """ + if from_streamlit: + self.run() return self.get_table(get_all_cols=get_all_cols) def to_json( diff --git a/prompttools/playground/data_loader.py b/prompttools/playground/data_loader.py index d5aee79e..22ad7c9a 100644 --- a/prompttools/playground/data_loader.py +++ b/prompttools/playground/data_loader.py @@ -66,7 +66,7 @@ def load_data( model_specific_kwargs = {model: {}} experiment = EXPERIMENTS[model_type]([model], input_kwargs, model_specific_kwargs) - return experiment.to_pandas_df() + return experiment.to_pandas_df(True, True) @st.cache_data @@ -112,5 +112,5 @@ def run_multiple( experiment = EXPERIMENTS[model_types[i]]([models[i]], input_kwargs, model_specific_kwargs) else: experiment = EXPERIMENTS[model_types[i]]([models[i]], prompts) - dfs.append(experiment.to_pandas_df()) + dfs.append(experiment.to_pandas_df(True, True)) return dfs diff --git a/prompttools/version.py b/prompttools/version.py index ca2ae2b6..7ca825d1 100644 --- a/prompttools/version.py +++ b/prompttools/version.py @@ -1,2 +1,2 @@ -__version__ = '0.0.43a0+5234a22' -git_version = '5234a228395a7f0c61b7d732927f51b4857a6846' +__version__ = '0.0.43a0+ead22e8' +git_version = 'ead22e88a83ddcda4c4aba62e1219473d7d1a41c' From 0ee330eb6da012d100fa340126df04c5dc0a39a0 Mon Sep 17 00:00:00 2001 From: "Steven Krawczyk (Hegel AI)" Date: Mon, 27 Nov 2023 19:35:23 -0800 Subject: [PATCH 02/52] Update README.md --- README.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/README.md b/README.md index 8bb0dda8..c0666d06 100644 --- a/README.md +++ b/README.md @@ -14,9 +14,7 @@ Total Downloads - - - +
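
A quick sketch (not part of the patch series): PATCH 01 above adds a from_streamlit flag to Experiment.to_pandas_df and has the Streamlit playground call it positionally as to_pandas_df(True, True). The snippet below spells out what that call maps to under the new signature. The model name and prompt are illustrative, and it assumes prompttools at this commit is installed; setting DEBUG=1 makes prompttools substitute mocked responses, so no OpenAI key is needed.

    import os

    os.environ["DEBUG"] = "1"  # prompttools swaps in mock completion functions when DEBUG is set

    from prompttools.experiment import OpenAIChatExperiment

    messages = [[{"role": "user", "content": "Who was the first president?"}]]
    experiment = OpenAIChatExperiment(["gpt-3.5-turbo"], messages)

    # The playground's positional call to_pandas_df(True, True) is equivalent to:
    df = experiment.to_pandas_df(get_all_cols=True, from_streamlit=True)
    # from_streamlit=True makes the experiment run() before the table is returned
    print(df)
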
From 60b68ce3a06646bdc2a0ba4b55f2a1d31efa1a05 Mon Sep 17 00:00:00 2001 From: Kevin Date: Fri, 1 Dec 2023 13:06:40 -0500 Subject: [PATCH 03/52] Update traces sample rate --- prompttools/sentry.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prompttools/sentry.py b/prompttools/sentry.py index f3dff61c..3be08d38 100644 --- a/prompttools/sentry.py +++ b/prompttools/sentry.py @@ -57,7 +57,7 @@ def init_sentry(): sentry_sdk.init( dsn=SENTRY_DSN, release=__version__, - traces_sample_rate=1.0, + traces_sample_rate=0.01, include_local_variables=False, send_default_pii=False, attach_stacktrace=False, From 1e62528415a48c68ef8a1ef5ca1c1113e85be94a Mon Sep 17 00:00:00 2001 From: Kevin Date: Thu, 7 Dec 2023 14:35:30 -0500 Subject: [PATCH 04/52] Adding daemon-like shutdown behavior with cleanup --- test/sender.py | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 test/sender.py diff --git a/test/sender.py b/test/sender.py new file mode 100644 index 00000000..f045adf1 --- /dev/null +++ b/test/sender.py @@ -0,0 +1,52 @@ +import requests +import threading +import queue +from functools import partial + + +class Sender: + def __init__(self): + self.flask_api_url = "http://localhost:5000/" + self.data_queue = queue.Queue() + self.worker_thread = threading.Thread(target=self.worker) + + # When the main thread is joining, put `None` into queue to signal worker thread to end + threading.Thread(target=lambda: threading.main_thread().join() or self.data_queue.put(None)).start() + + self.worker_thread.start() + + def execute_and_add(self, callable_func): + result = callable_func() + self.data_queue.put(result) + + def worker(self): + while True: + if not self.data_queue.empty(): + result = self.data_queue.get() + if result is None: + return + self.send_data_to_flask(result) + self.data_queue.task_done() + + def send_data_to_flask(self, data): + try: + response = requests.post(self.flask_api_url, json=data) + if response.status_code == 200: + print(f"Data sent to Flask API: {data}") + else: + print(f"Failed to send data to Flask API. Status code: {response.status_code}") + except requests.exceptions.RequestException as e: + print(f"Error sending data to Flask API: {e}") + + +if __name__ == "__main__": + sender = Sender() + + # Example usage: + def example_callable(i: int): + print(f"Executing callable {i}") + return {"message": f"Hello, {i}!"} + + for i in range(3): + sender.execute_and_add(partial(example_callable, i)) + print("end") From 150c5b324382c8684454aa4a5fe49ca13e3a807f Mon Sep 17 00:00:00 2001 From: Kevin Date: Fri, 8 Dec 2023 17:16:42 -0500 Subject: [PATCH 05/52] Monkey-patch OpenAI chat --- prompttools/scribe/__init__.py | 13 +++++++ prompttools/scribe/scribe.py | 65 ++++++++++++++++++++++++++++++++++ test/app.py | 27 ++++++++++++++ test/sender.py | 52 --------------------------- test/test_scribe.py | 24 +++++++++++++ 5 files changed, 129 insertions(+), 52 deletions(-) create mode 100644 prompttools/scribe/__init__.py create mode 100644 prompttools/scribe/scribe.py create mode 100644 test/app.py delete mode 100644 test/sender.py create mode 100644 test/test_scribe.py diff --git a/prompttools/scribe/__init__.py b/prompttools/scribe/__init__.py new file mode 100644 index 00000000..1d86689c --- /dev/null +++ b/prompttools/scribe/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) Hegel AI, Inc. +# All rights reserved. +# +# This source code's license can be found in the +# LICENSE file in the root directory of this source tree. 
+ + +from .scribe import Scribe + + +__all__ = [ + "Scribe", +] diff --git a/prompttools/scribe/scribe.py b/prompttools/scribe/scribe.py new file mode 100644 index 00000000..975486f3 --- /dev/null +++ b/prompttools/scribe/scribe.py @@ -0,0 +1,65 @@ +# Copyright (c) Hegel AI, Inc. +# All rights reserved. +# +# This source code's license can be found in the +# LICENSE file in the root directory of this source tree. + + +import requests +import threading +import queue +from functools import partial +import openai +from dotenv import load_dotenv +from os.path import join, dirname + + +# Load "OPENAI_API_KEY" into `os.environ["OPENAI_API_KEY"]` +dotenv_path = join(dirname(__file__), ".env") +load_dotenv(dotenv_path) + + +class Scribe: + def __init__(self): + self.flask_api_url = "http://localhost:5000/" + self.data_queue = queue.Queue() + self.worker_thread = threading.Thread(target=self.worker) + + # When the main thread is joining, put `None` into queue to signal worker thread to end + threading.Thread(target=lambda: threading.main_thread().join() or self.data_queue.put(None)).start() + + self.worker_thread.start() + + def execute_and_add_to_queue(self, callable_func, **kwargs): + result = callable_func(**kwargs) + self.data_queue.put(result.model_dump_json()) + return result + + def wrap(self, callable_func): + return partial(self.execute_and_add_to_queue, callable_func) + + def worker(self): + while True: + if not self.data_queue.empty(): + result = self.data_queue.get() + if result is None: + return + self.log_data_to_remote(result) + self.data_queue.task_done() + + def log_data_to_remote(self, data): + try: + response = requests.post(self.flask_api_url, json=data) + if response.status_code != 200: + print(f"Failed to send data to Flask API. Status code: {response.status_code} for {data}.") + except requests.exceptions.RequestException as e: + print(f"Error sending data to Flask API: {e}") + + +sender = Scribe() +# Monkey-patching +try: + openai.chat.completions.create = sender.wrap(openai.chat.completions.create) +except Exception: + print("You may need to add `OPENAI_API_KEY=''` to your `.env` file.") + raise diff --git a/test/app.py b/test/app.py new file mode 100644 index 00000000..4dd025a3 --- /dev/null +++ b/test/app.py @@ -0,0 +1,27 @@ +# Copyright (c) Hegel AI, Inc. +# All rights reserved. +# +# This source code's license can be found in the +# LICENSE file in the root directory of this source tree. 
+ + +r""" +App for local testing +""" + +from flask import Flask, request +import time + +app = Flask(__name__) + + +@app.route("/", methods=["POST"]) +def process_request(): + time.sleep(0.1) + data = request.json + print(f"Request received and processed {data}.") + return "", 200 + + +if __name__ == "__main__": + app.run(debug=True) diff --git a/test/sender.py b/test/sender.py deleted file mode 100644 index f045adf1..00000000 --- a/test/sender.py +++ /dev/null @@ -1,52 +0,0 @@ -import requests -import threading -import queue -from functools import partial - - -class Sender: - def __init__(self): - self.flask_api_url = "http://localhost:5000/" - self.data_queue = queue.Queue() - self.worker_thread = threading.Thread(target=self.worker) - - # When the main thread is joining, put `None` into queue to signal worker thread to end - threading.Thread(target=lambda: threading.main_thread().join() or self.data_queue.put(None)).start() - - self.worker_thread.start() - - def execute_and_add(self, callable_func): - result = callable_func() - self.data_queue.put(result) - - def worker(self): - while True: - if not self.data_queue.empty(): - result = self.data_queue.get() - if result is None: - return - self.send_data_to_flask(result) - self.data_queue.task_done() - - def send_data_to_flask(self, data): - try: - response = requests.post(self.flask_api_url, json=data) - if response.status_code == 200: - print(f"Data sent to Flask API: {data}") - else: - print(f"Failed to send data to Flask API. Status code: {response.status_code}") - except requests.exceptions.RequestException as e: - print(f"Error sending data to Flask API: {e}") - - -if __name__ == "__main__": - sender = Sender() - - # Example usage: - def example_callable(i: int): - print(f"Executing callable {i}") - return {"message": f"Hello, {i}!"} - - for i in range(3): - sender.execute_and_add(partial(example_callable, i)) - print("end") diff --git a/test/test_scribe.py b/test/test_scribe.py new file mode 100644 index 00000000..5fdbc54a --- /dev/null +++ b/test/test_scribe.py @@ -0,0 +1,24 @@ +# Copyright (c) Hegel AI, Inc. +# All rights reserved. +# +# This source code's license can be found in the +# LICENSE file in the root directory of this source tree. 
+ + +import openai +import os +import prompttools.scribe # noqa: F401 Importing this line will monkey-patch `openai.chat.completions.create` + +if __name__ == "__main__": + + os.environ["OPENAI_API_KEY"] = "" + + # Example usage: + for i in range(3): + messages = [ + {"role": "user", "content": f"What is 1 + {i}?"}, + ] + result = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages) + print(f"{i} {result = }") + + print("End") From 776267ab0eb630928d33864a96c22c680cf0ad73 Mon Sep 17 00:00:00 2001 From: Kevin Date: Tue, 12 Dec 2023 14:03:25 -0500 Subject: [PATCH 06/52] Update backend URL --- prompttools/scribe/scribe.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/prompttools/scribe/scribe.py b/prompttools/scribe/scribe.py index 975486f3..b5e7cfb6 100644 --- a/prompttools/scribe/scribe.py +++ b/prompttools/scribe/scribe.py @@ -12,16 +12,18 @@ import openai from dotenv import load_dotenv from os.path import join, dirname +from prompttools.common import HEGEL_BACKEND_URL # Load "OPENAI_API_KEY" into `os.environ["OPENAI_API_KEY"]` +# See `.env.example` dotenv_path = join(dirname(__file__), ".env") load_dotenv(dotenv_path) class Scribe: def __init__(self): - self.flask_api_url = "http://localhost:5000/" + self.backend_url = f"{HEGEL_BACKEND_URL}/sdk/log" self.data_queue = queue.Queue() self.worker_thread = threading.Thread(target=self.worker) @@ -49,7 +51,7 @@ def worker(self): def log_data_to_remote(self, data): try: - response = requests.post(self.flask_api_url, json=data) + response = requests.post(self.backend_url, json=data) if response.status_code != 200: print(f"Failed to send data to Flask API. Status code: {response.status_code} for {data}.") except requests.exceptions.RequestException as e: From 222b80e2214f304fbd2377b44f060c652908f13c Mon Sep 17 00:00:00 2001 From: Kevin Date: Tue, 12 Dec 2023 14:39:25 -0500 Subject: [PATCH 07/52] Logging optional model name --- prompttools/scribe/scribe.py | 7 ++++++- test/test_scribe.py | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/prompttools/scribe/scribe.py b/prompttools/scribe/scribe.py index b5e7cfb6..d1ce38d6 100644 --- a/prompttools/scribe/scribe.py +++ b/prompttools/scribe/scribe.py @@ -33,8 +33,13 @@ def __init__(self): self.worker_thread.start() def execute_and_add_to_queue(self, callable_func, **kwargs): + if "hegel_model" in kwargs: + hegel_model = kwargs["hegel_model"] + del kwargs["hegel_model"] + else: + hegel_model = None result = callable_func(**kwargs) - self.data_queue.put(result.model_dump_json()) + self.data_queue.put({"hegel_model": hegel_model, "data": result.model_dump_json()}) return result def wrap(self, callable_func): diff --git a/test/test_scribe.py b/test/test_scribe.py index 5fdbc54a..7cd85131 100644 --- a/test/test_scribe.py +++ b/test/test_scribe.py @@ -18,7 +18,7 @@ messages = [ {"role": "user", "content": f"What is 1 + {i}?"}, ] - result = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages) + result = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages, hegel_model="TEST_MODEL") print(f"{i} {result = }") print("End") From e84a71e8e739638f9f8bdc6d715755f6d10eda1b Mon Sep 17 00:00:00 2001 From: Kevin Date: Tue, 12 Dec 2023 14:40:59 -0500 Subject: [PATCH 08/52] Logging optional model name --- test/app.py | 2 +- test/test_scribe.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/test/app.py b/test/app.py index 4dd025a3..385f7d23 100644 --- a/test/app.py +++ b/test/app.py @@ -6,7 +6,7 @@ 
r""" -App for local testing +App for local testing of scribe """ from flask import Flask, request diff --git a/test/test_scribe.py b/test/test_scribe.py index 7cd85131..0f3fc792 100644 --- a/test/test_scribe.py +++ b/test/test_scribe.py @@ -13,6 +13,7 @@ os.environ["OPENAI_API_KEY"] = "" + # Launch server from `app.py` first # Example usage: for i in range(3): messages = [ From f6a50e89056085c3a142aca62aa35338304c57aa Mon Sep 17 00:00:00 2001 From: Kevin Date: Tue, 12 Dec 2023 14:46:56 -0500 Subject: [PATCH 09/52] Disable test_scribe as it is not meant for CI testing for now --- test/test_scribe.py | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/test/test_scribe.py b/test/test_scribe.py index 0f3fc792..24b40e40 100644 --- a/test/test_scribe.py +++ b/test/test_scribe.py @@ -5,21 +5,22 @@ # LICENSE file in the root directory of this source tree. -import openai -import os -import prompttools.scribe # noqa: F401 Importing this line will monkey-patch `openai.chat.completions.create` +# import openai +# import os -if __name__ == "__main__": +# import prompttools.scribe # noqa: F401 Importing this line will monkey-patch `openai.chat.completions.create` - os.environ["OPENAI_API_KEY"] = "" - - # Launch server from `app.py` first - # Example usage: - for i in range(3): - messages = [ - {"role": "user", "content": f"What is 1 + {i}?"}, - ] - result = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages, hegel_model="TEST_MODEL") - print(f"{i} {result = }") - - print("End") +# if __name__ == "__main__": +# +# os.environ["OPENAI_API_KEY"] = "" +# +# # Launch server from `app.py` first +# # Example usage: +# for i in range(3): +# messages = [ +# {"role": "user", "content": f"What is 1 + {i}?"}, +# ] +# result = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages, hegel_model="TEST_MODEL") +# print(f"{i} {result = }") +# +# print("End") From 216f5ff128fc95e9c8574bb551be94cdcb519561 Mon Sep 17 00:00:00 2001 From: Kevin Date: Thu, 14 Dec 2023 14:48:31 -0500 Subject: [PATCH 10/52] Update Scribe API with key usage --- prompttools/scribe/scribe.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/prompttools/scribe/scribe.py b/prompttools/scribe/scribe.py index d1ce38d6..51b9745d 100644 --- a/prompttools/scribe/scribe.py +++ b/prompttools/scribe/scribe.py @@ -10,6 +10,7 @@ import queue from functools import partial import openai +import os from dotenv import load_dotenv from os.path import join, dirname from prompttools.common import HEGEL_BACKEND_URL @@ -23,7 +24,7 @@ class Scribe: def __init__(self): - self.backend_url = f"{HEGEL_BACKEND_URL}/sdk/log" + self.backend_url = f"{HEGEL_BACKEND_URL}/sdk/scribe" self.data_queue = queue.Queue() self.worker_thread = threading.Thread(target=self.worker) @@ -56,7 +57,12 @@ def worker(self): def log_data_to_remote(self, data): try: - response = requests.post(self.backend_url, json=data) + headers = { + "Content-Type": "application/json", + "Authorization": os.environ["HEGELAI_API_KEY"], + } + + response = requests.post(self.backend_url, json=data, headers=headers) if response.status_code != 200: print(f"Failed to send data to Flask API. 
Status code: {response.status_code} for {data}.") except requests.exceptions.RequestException as e: From 34f1c2aaafb992aca99680c465ebe649cccfaf70 Mon Sep 17 00:00:00 2001 From: Kevin Date: Fri, 15 Dec 2023 23:51:30 -0500 Subject: [PATCH 11/52] Enhance scribe logging --- prompttools/scribe/scribe.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/prompttools/scribe/scribe.py b/prompttools/scribe/scribe.py index 51b9745d..a27f1696 100644 --- a/prompttools/scribe/scribe.py +++ b/prompttools/scribe/scribe.py @@ -3,7 +3,7 @@ # # This source code's license can be found in the # LICENSE file in the root directory of this source tree. - +import json import requests import threading @@ -40,7 +40,9 @@ def execute_and_add_to_queue(self, callable_func, **kwargs): else: hegel_model = None result = callable_func(**kwargs) - self.data_queue.put({"hegel_model": hegel_model, "data": result.model_dump_json()}) + self.data_queue.put( + {"hegel_model": hegel_model, "result": result.model_dump_json(), "input_parameters": json.dumps(kwargs)} + ) return result def wrap(self, callable_func): From 00d308bc5aca1251e40f0e72acd84abb101af0da Mon Sep 17 00:00:00 2001 From: Kevin Date: Mon, 18 Dec 2023 15:35:27 -0500 Subject: [PATCH 12/52] Rename scribe to logger --- prompttools/{scribe => logger}/__init__.py | 4 ++-- prompttools/{scribe/scribe.py => logger/logger.py} | 6 +++--- test/app.py | 2 +- test/{test_scribe.py => test_logger.py} | 11 +++++------ 4 files changed, 11 insertions(+), 12 deletions(-) rename prompttools/{scribe => logger}/__init__.py (81%) rename prompttools/{scribe/scribe.py => logger/logger.py} (96%) rename test/{test_scribe.py => test_logger.py} (82%) diff --git a/prompttools/scribe/__init__.py b/prompttools/logger/__init__.py similarity index 81% rename from prompttools/scribe/__init__.py rename to prompttools/logger/__init__.py index 1d86689c..47bc34ee 100644 --- a/prompttools/scribe/__init__.py +++ b/prompttools/logger/__init__.py @@ -5,9 +5,9 @@ # LICENSE file in the root directory of this source tree. 
-from .scribe import Scribe +from .logger import Logger __all__ = [ - "Scribe", + "Logger", ] diff --git a/prompttools/scribe/scribe.py b/prompttools/logger/logger.py similarity index 96% rename from prompttools/scribe/scribe.py rename to prompttools/logger/logger.py index a27f1696..6366e1f5 100644 --- a/prompttools/scribe/scribe.py +++ b/prompttools/logger/logger.py @@ -22,9 +22,9 @@ load_dotenv(dotenv_path) -class Scribe: +class Logger: def __init__(self): - self.backend_url = f"{HEGEL_BACKEND_URL}/sdk/scribe" + self.backend_url = f"{HEGEL_BACKEND_URL}/sdk/logger" self.data_queue = queue.Queue() self.worker_thread = threading.Thread(target=self.worker) @@ -71,7 +71,7 @@ def log_data_to_remote(self, data): print(f"Error sending data to Flask API: {e}") -sender = Scribe() +sender = Logger() # Monkey-patching try: openai.chat.completions.create = sender.wrap(openai.chat.completions.create) diff --git a/test/app.py b/test/app.py index 385f7d23..dd2a9934 100644 --- a/test/app.py +++ b/test/app.py @@ -6,7 +6,7 @@ r""" -App for local testing of scribe +App for local testing of logger """ from flask import Flask, request diff --git a/test/test_scribe.py b/test/test_logger.py similarity index 82% rename from test/test_scribe.py rename to test/test_logger.py index 24b40e40..06af8379 100644 --- a/test/test_scribe.py +++ b/test/test_logger.py @@ -7,16 +7,15 @@ # import openai # import os - -# import prompttools.scribe # noqa: F401 Importing this line will monkey-patch `openai.chat.completions.create` - -# if __name__ == "__main__": # -# os.environ["OPENAI_API_KEY"] = "" +# import prompttools.logger # noqa: F401 Importing this line will monkey-patch `openai.chat.completions.create` +# +# +# if __name__ == "__main__": # # # Launch server from `app.py` first # # Example usage: -# for i in range(3): +# for i in range(1): # messages = [ # {"role": "user", "content": f"What is 1 + {i}?"}, # ] From 148faf9a54e1f3e605ac645cccc0df02023d6cc0 Mon Sep 17 00:00:00 2001 From: Kevin Date: Thu, 21 Dec 2023 12:29:22 -0800 Subject: [PATCH 13/52] Add latency data to logger --- prompttools/logger/logger.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/prompttools/logger/logger.py b/prompttools/logger/logger.py index 6366e1f5..361a5709 100644 --- a/prompttools/logger/logger.py +++ b/prompttools/logger/logger.py @@ -13,6 +13,7 @@ import os from dotenv import load_dotenv from os.path import join, dirname +from time import perf_counter from prompttools.common import HEGEL_BACKEND_URL @@ -39,9 +40,16 @@ def execute_and_add_to_queue(self, callable_func, **kwargs): del kwargs["hegel_model"] else: hegel_model = None + start = perf_counter() result = callable_func(**kwargs) + latency = perf_counter() - start self.data_queue.put( - {"hegel_model": hegel_model, "result": result.model_dump_json(), "input_parameters": json.dumps(kwargs)} + { + "hegel_model": hegel_model, + "result": result.model_dump_json(), + "input_parameters": json.dumps(kwargs), + "latency": latency, + } ) return result From fb67547227fac856495ca49f5c2827c52a03ede5 Mon Sep 17 00:00:00 2001 From: Kevin Date: Thu, 21 Dec 2023 13:08:16 -0800 Subject: [PATCH 14/52] Update OpenAI Azure API --- .../AzureOpenAIServiceExperiment.ipynb | 100 +----------------- .../experiments/openai_chat_experiment.py | 23 ++-- 2 files changed, 15 insertions(+), 108 deletions(-) diff --git a/examples/notebooks/AzureOpenAIServiceExperiment.ipynb b/examples/notebooks/AzureOpenAIServiceExperiment.ipynb index 8bd47786..eafea469 100644 --- 
a/examples/notebooks/AzureOpenAIServiceExperiment.ipynb +++ b/examples/notebooks/AzureOpenAIServiceExperiment.ipynb @@ -130,7 +130,6 @@ "]\n", "\n", "azure_openai_service_configs = {\"AZURE_OPENAI_ENDPOINT\": \"https://YOURENDPOINTNAME.openai.azure.com/\",\n", - " \"API_TYPE\": \"azure\",\n", " \"API_VERSION\": \"2023-05-15\"} # Specify which API version to use\n", "temperatures = [0.0, 1.0]\n", "# You can add more parameters that you'd like to test here.\n", @@ -252,7 +251,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 7, "id": "4cf5897b", "metadata": {}, "outputs": [], @@ -271,7 +270,6 @@ "]\n", "\n", "azure_openai_service_configs = {\"AZURE_OPENAI_ENDPOINT\": \"https://YOURENDPOINTNAME.openai.azure.com/\",\n", - " \"API_TYPE\": \"azure\",\n", " \"API_VERSION\": \"2023-05-15\"} # Specify which API version to use\n", "temperatures = [0.0, 1.0]\n", "# You can add more parameters that you'd like to test here.\n", @@ -282,100 +280,10 @@ }, { "cell_type": "code", - "execution_count": 6, - "id": "6eab3877", + "execution_count": null, + "id": "2d261524", "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
[output-cell HTML lost in extraction: this removed cell rendered the experiment results DataFrame as an HTML table (columns: temperature, messages, response, latency; four rows covering the "first president" and "ice cream shop tagline" prompts). The same data is preserved in the text/plain block that follows.]
" - ], - "text/plain": [ - " temperature \\\n", - "0 0.0 \n", - "1 1.0 \n", - "2 0.0 \n", - "3 1.0 \n", - "\n", - " messages \\\n", - "0 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}] \n", - "1 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}] \n", - "2 [{'role': 'system', 'content': 'You are a creative copywriter.'}, {'role': 'user', 'content': 'Write a tagline for an ice cream shop.'}] \n", - "3 [{'role': 'system', 'content': 'You are a creative copywriter.'}, {'role': 'user', 'content': 'Write a tagline for an ice cream shop.'}] \n", - "\n", - " response \\\n", - "0 The first president of the United States was George Washington. \n", - "1 The first president of the United States was George Washington. He served as president from 1789 to 1797. \n", - "2 \"Scoops of happiness in every cone!\" \n", - "3 \"Scoops of happiness in every cone.\" \n", - "\n", - " latency \n", - "0 0.903520 \n", - "1 0.815370 \n", - "2 0.517402 \n", - "3 0.508131 " - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "experiment.run()\n", "experiment.visualize()" diff --git a/prompttools/experiment/experiments/openai_chat_experiment.py b/prompttools/experiment/experiments/openai_chat_experiment.py index 90fb8498..77f27a24 100644 --- a/prompttools/experiment/experiments/openai_chat_experiment.py +++ b/prompttools/experiment/experiments/openai_chat_experiment.py @@ -97,9 +97,8 @@ class OpenAIChatExperiment(Experiment): azure_openai_service_configs (Optional[dict]): Defaults to ``None``. If it is set, the experiment will use Azure OpenAI Service. The input dict should - contain these 3 keys (but with values based on your use case and configuration): - ``{"AZURE_OPENAI_ENDPOINT": "https://YOUR_RESOURCE_NAME.openai.azure.com/", - "API_TYPE": "azure", "API_VERSION": "2023-05-15"`` + contain these 2 keys (but with values based on your use case and configuration): + ``{"AZURE_OPENAI_ENDPOINT": "https://YOUR_RESOURCE_NAME.openai.azure.com/", "API_VERSION": "2023-05-15"}`` """ _experiment_type = "RawExperiment" @@ -123,7 +122,15 @@ def __init__( function_call: Optional[List[Dict[str, str]]] = [None], azure_openai_service_configs: Optional[dict] = None, ): - self.completion_fn = openai.chat.completions.create + if azure_openai_service_configs is None: + self.completion_fn = openai.chat.completions.create + else: + client = openai.AzureOpenAI( + api_key=os.environ["AZURE_OPENAI_KEY"], + api_version=azure_openai_service_configs["API_VERSION"], + azure_endpoint=azure_openai_service_configs["AZURE_OPENAI_ENDPOINT"], + ) + self.completion_fn = client.chat.completions.create if os.getenv("DEBUG", default=False): if functions[0] is not None: self.completion_fn = mock_openai_chat_function_completion_fn @@ -164,14 +171,6 @@ def __init__( if self.all_args["logit_bias"] == [None]: del self.all_args["logit_bias"] - if azure_openai_service_configs: - openai.api_key = os.environ["AZURE_OPENAI_KEY"] - openai.api_base = azure_openai_service_configs["AZURE_OPENAI_ENDPOINT"] - openai.api_type = azure_openai_service_configs["API_TYPE"] - openai.api_version = azure_openai_service_configs["API_VERSION"] - del self.all_args["model"] - self.all_args["engine"] = model - super().__init__() @staticmethod From 3f12441a1ecd45e3d6579f3058301a7a56ed2835 Mon Sep 17 00:00:00 2001 From: Kevin Date: Fri, 22 Dec 2023 15:30:19 -0800 Subject: [PATCH 15/52] Update 
logger example --- test/test_logger.py | 50 +++++++++++++++++++++++++++++---------------- 1 file changed, 32 insertions(+), 18 deletions(-) diff --git a/test/test_logger.py b/test/test_logger.py index 06af8379..3d3286b1 100644 --- a/test/test_logger.py +++ b/test/test_logger.py @@ -5,21 +5,35 @@ # LICENSE file in the root directory of this source tree. -# import openai -# import os -# -# import prompttools.logger # noqa: F401 Importing this line will monkey-patch `openai.chat.completions.create` -# -# -# if __name__ == "__main__": -# -# # Launch server from `app.py` first -# # Example usage: -# for i in range(1): -# messages = [ -# {"role": "user", "content": f"What is 1 + {i}?"}, -# ] -# result = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages, hegel_model="TEST_MODEL") -# print(f"{i} {result = }") -# -# print("End") +import openai +import prompttools.logger # noqa: F401 Importing this line will monkey-patch `openai.chat.completions.create` + + +r""" +Example of using `prompttools.logger`. + +All you need to do is call `import prompttools.logger` to start logging. +You can optionally add `hegel_model` to your call (as seen below). This will associate +this call with a specific name in the logs. + +The OpenAI call is unchanged, it executes normally between your machine and OpenAI's server. + +Note: +You should have "HEGELAI_API_KEY" and "OPENAI_API_KEY" loaded into `os.environ`. +""" + +if __name__ == "__main__": + for i in range(1): + messages = [ + {"role": "user", "content": f"What is 1 + {i}?"}, + ] + + # `hegel_model` is an optional argument that allows you to tag your call with a specific name + # Logging still works without this argument + # The rest of the OpenAI call happens as normal between your machine and OpenAI's server + openai_response = openai.chat.completions.create( + model="gpt-3.5-turbo", messages=messages, hegel_model="Math Model" + ) + print(f"{openai_response = }") + + print("End") From 7d36bb0922948f299ace033d0de58590ae4254f3 Mon Sep 17 00:00:00 2001 From: steven krawczyk Date: Fri, 22 Dec 2023 15:51:01 -0800 Subject: [PATCH 16/52] Version bump --- docs/source/conf.py | 2 +- pyproject.toml | 2 +- version.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index e8814889..fa00b826 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -11,7 +11,7 @@ project = "prompttools" copyright = "2023, Hegel AI" author = "Hegel AI" -release = "0.0.43" +release = "0.0.44" # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration diff --git a/pyproject.toml b/pyproject.toml index e93bde0c..2d4ce9aa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "prompttools" -version = "0.0.43" +version = "0.0.44" authors = [ { name="Hegel AI", email="team@hegel-ai.com" }, ] diff --git a/version.txt b/version.txt index 85a9ee5c..2fcff7eb 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -0.0.43a0 +0.0.44a0 From c8720cde4fa9d21c2b7226d3c09fd4154b92cc9d Mon Sep 17 00:00:00 2001 From: Kevin Date: Fri, 22 Dec 2023 15:52:06 -0800 Subject: [PATCH 17/52] Skipping test_logger in CI --- test/test_logger.py | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/test/test_logger.py b/test/test_logger.py index 3d3286b1..8e6b4793 100644 --- a/test/test_logger.py +++ 
b/test/test_logger.py @@ -5,8 +5,10 @@ # LICENSE file in the root directory of this source tree. -import openai -import prompttools.logger # noqa: F401 Importing this line will monkey-patch `openai.chat.completions.create` +if False: # Skipping this in CI + + import openai + import prompttools.logger # noqa: F401 Importing this line will monkey-patch `openai.chat.completions.create` r""" @@ -23,17 +25,18 @@ """ if __name__ == "__main__": - for i in range(1): - messages = [ - {"role": "user", "content": f"What is 1 + {i}?"}, - ] - - # `hegel_model` is an optional argument that allows you to tag your call with a specific name - # Logging still works without this argument - # The rest of the OpenAI call happens as normal between your machine and OpenAI's server - openai_response = openai.chat.completions.create( - model="gpt-3.5-turbo", messages=messages, hegel_model="Math Model" - ) - print(f"{openai_response = }") - - print("End") + if False: # Skipping this in CI + for i in range(1): + messages = [ + {"role": "user", "content": f"What is 1 + {i}?"}, + ] + + # `hegel_model` is an optional argument that allows you to tag your call with a specific name + # Logging still works without this argument + # The rest of the OpenAI call happens as normal between your machine and OpenAI's server + openai_response = openai.chat.completions.create( + model="gpt-3.5-turbo", messages=messages, hegel_model="Math Model" + ) + print(f"{openai_response = }") + + print("End") From d0e982e5c55464b7a13e0cdc7c5984e200288c2d Mon Sep 17 00:00:00 2001 From: steven krawczyk Date: Fri, 22 Dec 2023 16:19:05 -0800 Subject: [PATCH 18/52] Version bump --- prompttools/version.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/prompttools/version.py b/prompttools/version.py index 7ca825d1..23fa2526 100644 --- a/prompttools/version.py +++ b/prompttools/version.py @@ -1,2 +1,2 @@ -__version__ = '0.0.43a0+ead22e8' -git_version = 'ead22e88a83ddcda4c4aba62e1219473d7d1a41c' +__version__ = '0.0.44a0+7d36bb0' +git_version = '7d36bb0922948f299ace033d0de58590ae4254f3' From 5be8a21340c2cd0044ceaa2130ee335df64e640b Mon Sep 17 00:00:00 2001 From: Kevin Date: Tue, 26 Dec 2023 17:15:38 -0800 Subject: [PATCH 19/52] Adding moderation evaluation function --- prompttools/utils/__init__.py | 8 +++-- prompttools/utils/moderation.py | 53 +++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+), 3 deletions(-) create mode 100644 prompttools/utils/moderation.py diff --git a/prompttools/utils/__init__.py b/prompttools/utils/__init__.py index 543c2b2b..fdbd32cc 100644 --- a/prompttools/utils/__init__.py +++ b/prompttools/utils/__init__.py @@ -12,6 +12,7 @@ from .autoeval_with_docs import autoeval_with_documents from .chunk_text import chunk_text from .expected import compute_similarity_against_model +from .moderation import apply_moderation from .ranking_correlation import ranking_correlation from .similarity import semantic_similarity from .validate_json import validate_json_response @@ -26,11 +27,12 @@ "chunk_text", "compute_similarity_against_model", "expected", + "apply_moderation", + "ranking_correlation", + "semantic_similarity", + "similarity", "validate_json", "validate_json_response", "validate_python", "validate_python_response", - "ranking_correlation", - "semantic_similarity", - "similarity", ] diff --git a/prompttools/utils/moderation.py b/prompttools/utils/moderation.py new file mode 100644 index 00000000..1bb96fc5 --- /dev/null +++ b/prompttools/utils/moderation.py @@ -0,0 +1,53 @@ +# Copyright (c) 
Hegel AI, Inc. +# All rights reserved. +# +# This source code's license can be found in the +# LICENSE file in the root directory of this source tree. + + +import openai +import pandas +from typing import Optional, Union + + +def apply_moderation( + row: pandas.core.series.Series, + text_col_name: str = "response", + moderation_model: str = "text-moderation-latest", + category_names: Optional[list[str]] = None, + category_score_names: Optional[list[str]] = None, +) -> Union[bool, dict]: + r""" + Uses OpenAI's moderation API to determine whether the text complies with OpenAI's usage policies. + + Args: + row (pandas.core.series.Series): A row of data from the full DataFrame (including input, model response, other + metrics, etc). + text_col_name (str): column name of text to be moderated + moderation_model (str): name of the OpenAI moderation model, defaults to ``"text-moderation-latest"`` + category_names (Optional[list[str]]): specify the names of category flags to extract from the response and + be added as column(s) in the row, optional. (e.g. ``["harassment", "violence"]``) + category_score_names (Optional[list[str]]): specify the names of category scores to extract from the response + and be added as column(s) in the row, optional. (e.g. ``["harassment", "violence"]``) + + Returns: + A boolean flag (of whether the input violates policies), or a dict with various topic specific flags/scores. + """ + text = row[text_col_name] + + moderation_response = openai.moderations.create(model=moderation_model, input=text) + flagged = moderation_response.results[0].flagged + res = {} + if category_names: + category_flags = moderation_response.results[0].categories.model_dump() + for c in category_names: + res[c] = category_flags[c] + if category_score_names: + category_scores = moderation_response.results[0].category_scores.model_dump() + for c in category_score_names: + res[f"{c}_score"] = category_scores[c] + if category_names or category_score_names: + res["moderation_flag"] = flagged + return res + else: + return flagged From 1c9bc7555574f28dd0338389acd0d1923d50ad92 Mon Sep 17 00:00:00 2001 From: Kevin Date: Tue, 26 Dec 2023 17:15:48 -0800 Subject: [PATCH 20/52] Adding moderation example --- .../notebooks/Moderation Evaluation.ipynb | 574 ++++++++++++++++++ 1 file changed, 574 insertions(+) create mode 100644 examples/notebooks/Moderation Evaluation.ipynb diff --git a/examples/notebooks/Moderation Evaluation.ipynb b/examples/notebooks/Moderation Evaluation.ipynb new file mode 100644 index 00000000..ca0b6a67 --- /dev/null +++ b/examples/notebooks/Moderation Evaluation.ipynb @@ -0,0 +1,574 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "3c76e0a5", + "metadata": {}, + "source": [ + "# Moderation Evaluation Example" + ] + }, + { + "cell_type": "markdown", + "id": "befa58ff", + "metadata": {}, + "source": [ + "## Installations" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "bf6fbb68", + "metadata": {}, + "outputs": [], + "source": [ + "### If necessary\n", + "# !pip install --quiet --force-reinstall prompttools" + ] + }, + { + "cell_type": "markdown", + "id": "c7bd97ee", + "metadata": {}, + "source": [ + "## Setup imports and API keys" + ] + }, + { + "cell_type": "markdown", + "id": "d591fed6", + "metadata": {}, + "source": [ + "We will be using OpenAI's Moderation API. Therefore, an API key is needed." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "fc3e9c45", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "os.environ[\"OPENAI_API_KEY\"] = \"\" # Insert your key here" + ] + }, + { + "cell_type": "markdown", + "id": "79094464", + "metadata": {}, + "source": [ + "You can execute any experiment and use their response for evaluation. In this case, we will use something simple with OpenAI Chat as an example." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "cf67014a", + "metadata": {}, + "outputs": [], + "source": [ + "from prompttools.experiment import OpenAIChatExperiment" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "3b00eb5e", + "metadata": {}, + "outputs": [], + "source": [ + "models = [\"gpt-3.5-turbo\"] # You can also use a fine-tuned model here, e.g. [\"ft:gpt-3.5-turbo:org_id\"]\n", + "messages = [\n", + " [\n", + " {\"role\": \"system\", \"content\": \"You are a historian.\"},\n", + " {\"role\": \"user\", \"content\": \"Give me a list of weapons used in the civil war.\"},\n", + " ]\n", + "]\n", + "temperatures = [0.0, 1.0]\n", + "# You can add more parameters that you'd like to test here.\n", + "\n", + "experiment = OpenAIChatExperiment(models, messages, temperature=temperatures)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "b5341176", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
[output-cell HTML lost in extraction: this cell rendered the results DataFrame as an HTML table (columns: model, messages, temperature, response, response_usage, latency; two gpt-3.5-turbo rows answering the civil-war-weapons prompt). The same data is preserved in the text/plain block that follows.]
" + ], + "text/plain": [ + " model \\\n", + "0 gpt-3.5-turbo \n", + "1 gpt-3.5-turbo \n", + "\n", + " messages \\\n", + "0 [{'role': 'system', 'content': 'You are a historian.'}, {'role': 'user', 'content': 'Give me a list of weapons used in the civil war.'}] \n", + "1 [{'role': 'system', 'content': 'You are a historian.'}, {'role': 'user', 'content': 'Give me a list of weapons used in the civil war.'}] \n", + "\n", + " temperature \\\n", + "0 0.0 \n", + "1 1.0 \n", + "\n", + " response \\\n", + "0 During the American Civil War (1861-1865), a wide range of weapons were used by both the Union and Confederate forces. Here is a list of some of the most significant weapons employed during this conflict:\\n\\n1. Rifles:\\n - Springfield Model 1861: A muzzle-loading, single-shot rifle used by the Union Army.\\n - Enfield Pattern 1853: A British-made muzzle-loading, single-shot rifle used by both sides.\\n - Sharps Rifle: A breech-loading, single-shot rifle known for its accuracy and used by both sides.\\n\\n2. Muskets:\\n - Springfield Model 1861: A muzzle-loading, smoothbore musket used by the Union Army.\\n - Pattern 1853 Enfield: A muzzle-loading, smoothbore musket used by both sides.\\n - Lorenz Rifle: A muzzle-loading, rifled musket used primarily by the Confederacy.\\n\\n3. Carbines:\\n - Spencer Repeating Carbine: A breech-loading, lever-action carbine used by Union cavalry.\\n - Sharps Carbine: A breech-loading, single-shot carbine used by both sides.\\n - Burnside Carbine: A breech-loading, single-shot carbine used by Union cavalry.\\n\\n4. Artillery:\\n - Napoleon Gun: A smoothbore, muzzle-loading cannon used by both sides.\\n - Parrott Rifle: A rifled, muzzle-loading cannon used by both sides.\\n - Whitworth Rifle: A British-made, rifled cannon used primarily by the Confederacy.\\n\\n5. Pistols:\\n - Colt Army Model 1860: A .44 caliber, single-action revolver used by both sides.\\n - Remington Model 1858: A .44 caliber, single-action revolver used by both sides.\\n - Smith & Wesson Model 2: A .32 caliber, single-action revolver used by both sides.\\n\\n6. Blades:\\n - Bayonets: Attached to the end of rifles and muskets, used for close combat.\\n - Swords: Officers on both sides often carried swords for personal defense.\\n\\n7. Naval Weapons:\\n - Dahlgren Gun: A smoothbore, muzzle-loading cannon used on naval vessels.\\n - Brooke Rifle: A rifled, muzzle-loading cannon used on Confederate naval vessels.\\n\\nIt is important to note that this list is not exhaustive, as there were numerous variations and models of these weapons used during the Civil War. Additionally, advancements in technology and the introduction of new weapons occurred throughout the conflict. \n", + "1 During the American Civil War (1861-1865), various weapons were utilized by both the Union and Confederate forces. Here is a list of some significant weapons employed during this conflict:\\n\\n1. Rifles:\\n - Springfield Model 1861: A widely used .58 caliber muzzle-loading rifle.\\n - Enfield Pattern 1853: A British-made rifle imported by both sides, often used by Confederate soldiers.\\n - Henry Repeating Rifle: Lever-action, magazine-fed rifle known for its rapid-fire capability, primarily used by Union troops.\\n\\n2. Muskets:\\n - Springfield Model 1861/1855: Smoothbore muskets often used by both sides earlier in the war.\\n - Lorenz Rifle: Austrian-made musket popular among Confederate forces.\\n - P53 Enfield: British-made musket used by both Union and Confederate soldiers.\\n\\n3. 
Carbines:\\n - Spencer Repeating Carbine: A lever-action, seven-shot carbine used by Union cavalry, notable for its high rate of fire.\\n - Sharps Carbine: A single-shot breech-loading carbine utilized by both sides.\\n\\n4. Pistols:\\n - Colt Single Action Army Revolver: Often referred to as the \"Colt .45,\" a popular six-shot revolver used by Union cavalry.\\n - Remington Model 1858: A six-shot, percussion cap revolver used by both Union and Confederate troops.\\n\\n5. Artillery:\\n - Napoleon Gun: A smoothbore, muzzle-loading cannon used by both sides. It fired a 12-pound projectile.\\n - Parrott Rifle: A rifled artillery piece, available in various calibers, used primarily by Union forces.\\n - Whitworth Rifle: A British-made, breech-loading rifle known for its accuracy and long-range capabilities, favored by the Confederacy.\\n\\n6. Edged Weapons:\\n - Model 1840 Army Non-commissioned Officer Sword: A common sword used by Union infantry and cavalry.\\n - Model 1850 Army Staff and Field Officer's Sword: An ornate sword often carried by higher-ranking officers on both sides.\\n - Bowie Knife: A large, fixed-blade knife typically used by soldiers on both sides for close combat.\\n\\nIt is worth noting that this list only scratches the surface of the wide range of weapons employed throughout the Civil War, as various other firearms, bayonets, sabers, and artillery pieces were in use. \n", + "\n", + " response_usage \\\n", + "0 {'completion_tokens': 518, 'prompt_tokens': 28, 'total_tokens': 546} \n", + "1 {'completion_tokens': 497, 'prompt_tokens': 28, 'total_tokens': 525} \n", + "\n", + " latency \n", + "0 8.113981 \n", + "1 8.594419 " + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "experiment.run()\n", + "experiment.visualize()" + ] + }, + { + "cell_type": "markdown", + "id": "afbe9b6e", + "metadata": {}, + "source": [ + "### Moderation Evaluation\n", + "\n", + "With your responses in place, we can use PromptTools built-in `apply_moderation` function to evaluation the response.\n", + "\n", + "By default, it will return a flag indicating whether the response violates any policies (e.g. harassment, violence)." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "19470b7b", + "metadata": {}, + "outputs": [], + "source": [ + "from prompttools.utils import apply_moderation" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "1504a9b8", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
[output-cell HTML lost in extraction: same results DataFrame as above with an added moderation_flag column (False for both rows), rendered as an HTML table. The full data is preserved in the text/plain block that follows.]
" + ], + "text/plain": [ + " model \\\n", + "0 gpt-3.5-turbo \n", + "1 gpt-3.5-turbo \n", + "\n", + " messages \\\n", + "0 [{'role': 'system', 'content': 'You are a historian.'}, {'role': 'user', 'content': 'Give me a list of weapons used in the civil war.'}] \n", + "1 [{'role': 'system', 'content': 'You are a historian.'}, {'role': 'user', 'content': 'Give me a list of weapons used in the civil war.'}] \n", + "\n", + " temperature \\\n", + "0 0.0 \n", + "1 1.0 \n", + "\n", + " response \\\n", + "0 During the American Civil War (1861-1865), a wide range of weapons were used by both the Union and Confederate forces. Here is a list of some of the most significant weapons employed during this conflict:\\n\\n1. Rifles:\\n - Springfield Model 1861: A muzzle-loading, single-shot rifle used by the Union Army.\\n - Enfield Pattern 1853: A British-made muzzle-loading, single-shot rifle used by both sides.\\n - Sharps Rifle: A breech-loading, single-shot rifle known for its accuracy and used by both sides.\\n\\n2. Muskets:\\n - Springfield Model 1861: A muzzle-loading, smoothbore musket used by the Union Army.\\n - Pattern 1853 Enfield: A muzzle-loading, smoothbore musket used by both sides.\\n - Lorenz Rifle: A muzzle-loading, rifled musket used primarily by the Confederacy.\\n\\n3. Carbines:\\n - Spencer Repeating Carbine: A breech-loading, lever-action carbine used by Union cavalry.\\n - Sharps Carbine: A breech-loading, single-shot carbine used by both sides.\\n - Burnside Carbine: A breech-loading, single-shot carbine used by Union cavalry.\\n\\n4. Artillery:\\n - Napoleon Gun: A smoothbore, muzzle-loading cannon used by both sides.\\n - Parrott Rifle: A rifled, muzzle-loading cannon used by both sides.\\n - Whitworth Rifle: A British-made, rifled cannon used primarily by the Confederacy.\\n\\n5. Pistols:\\n - Colt Army Model 1860: A .44 caliber, single-action revolver used by both sides.\\n - Remington Model 1858: A .44 caliber, single-action revolver used by both sides.\\n - Smith & Wesson Model 2: A .32 caliber, single-action revolver used by both sides.\\n\\n6. Blades:\\n - Bayonets: Attached to the end of rifles and muskets, used for close combat.\\n - Swords: Officers on both sides often carried swords for personal defense.\\n\\n7. Naval Weapons:\\n - Dahlgren Gun: A smoothbore, muzzle-loading cannon used on naval vessels.\\n - Brooke Rifle: A rifled, muzzle-loading cannon used on Confederate naval vessels.\\n\\nIt is important to note that this list is not exhaustive, as there were numerous variations and models of these weapons used during the Civil War. Additionally, advancements in technology and the introduction of new weapons occurred throughout the conflict. \n", + "1 During the American Civil War (1861-1865), various weapons were utilized by both the Union and Confederate forces. Here is a list of some significant weapons employed during this conflict:\\n\\n1. Rifles:\\n - Springfield Model 1861: A widely used .58 caliber muzzle-loading rifle.\\n - Enfield Pattern 1853: A British-made rifle imported by both sides, often used by Confederate soldiers.\\n - Henry Repeating Rifle: Lever-action, magazine-fed rifle known for its rapid-fire capability, primarily used by Union troops.\\n\\n2. Muskets:\\n - Springfield Model 1861/1855: Smoothbore muskets often used by both sides earlier in the war.\\n - Lorenz Rifle: Austrian-made musket popular among Confederate forces.\\n - P53 Enfield: British-made musket used by both Union and Confederate soldiers.\\n\\n3. 
Carbines:\\n - Spencer Repeating Carbine: A lever-action, seven-shot carbine used by Union cavalry, notable for its high rate of fire.\\n - Sharps Carbine: A single-shot breech-loading carbine utilized by both sides.\\n\\n4. Pistols:\\n - Colt Single Action Army Revolver: Often referred to as the \"Colt .45,\" a popular six-shot revolver used by Union cavalry.\\n - Remington Model 1858: A six-shot, percussion cap revolver used by both Union and Confederate troops.\\n\\n5. Artillery:\\n - Napoleon Gun: A smoothbore, muzzle-loading cannon used by both sides. It fired a 12-pound projectile.\\n - Parrott Rifle: A rifled artillery piece, available in various calibers, used primarily by Union forces.\\n - Whitworth Rifle: A British-made, breech-loading rifle known for its accuracy and long-range capabilities, favored by the Confederacy.\\n\\n6. Edged Weapons:\\n - Model 1840 Army Non-commissioned Officer Sword: A common sword used by Union infantry and cavalry.\\n - Model 1850 Army Staff and Field Officer's Sword: An ornate sword often carried by higher-ranking officers on both sides.\\n - Bowie Knife: A large, fixed-blade knife typically used by soldiers on both sides for close combat.\\n\\nIt is worth noting that this list only scratches the surface of the wide range of weapons employed throughout the Civil War, as various other firearms, bayonets, sabers, and artillery pieces were in use. \n", + "\n", + " response_usage \\\n", + "0 {'completion_tokens': 518, 'prompt_tokens': 28, 'total_tokens': 546} \n", + "1 {'completion_tokens': 497, 'prompt_tokens': 28, 'total_tokens': 525} \n", + "\n", + " latency moderation_flag \n", + "0 8.113981 False \n", + "1 8.594419 False " + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "experiment.evaluate(\"moderation_flag\", apply_moderation)\n", + "experiment.visualize()" + ] + }, + { + "cell_type": "markdown", + "id": "a25a30f1", + "metadata": {}, + "source": [ + "If we are interested in specific topics, we can pass additional argument to return those flags and scores." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "331281ae", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
modelmessagestemperatureresponseresponse_usagelatencymoderation_flagmoderation_topics
0gpt-3.5-turbo[{'role': 'system', 'content': 'You are a historian.'}, {'role': 'user', 'content': 'Give me a list of weapons used in the civil war.'}]0.0During the American Civil War (1861-1865), a wide range of weapons were used by both the Union and Confederate forces. Here is a list of some of the most significant weapons employed during this conflict:\\n\\n1. Rifles:\\n - Springfield Model 1861: A muzzle-loading, single-shot rifle used by the Union Army.\\n - Enfield Pattern 1853: A British-made muzzle-loading, single-shot rifle used by both sides.\\n - Sharps Rifle: A breech-loading, single-shot rifle known for its accuracy and used by both sides.\\n\\n2. Muskets:\\n - Springfield Model 1861: A muzzle-loading, smoothbore musket used by the Union Army.\\n - Pattern 1853 Enfield: A muzzle-loading, smoothbore musket used by both sides.\\n - Lorenz Rifle: A muzzle-loading, rifled musket used primarily by the Confederacy.\\n\\n3. Carbines:\\n - Spencer Repeating Carbine: A breech-loading, lever-action carbine used by Union cavalry.\\n - Sharps Carbine: A breech-loading, single-shot carbine used by both sides.\\n - Burnside Carbine: A breech-loading, single-shot carbine used by Union cavalry.\\n\\n4. Artillery:\\n - Napoleon Gun: A smoothbore, muzzle-loading cannon used by both sides.\\n - Parrott Rifle: A rifled, muzzle-loading cannon used by both sides.\\n - Whitworth Rifle: A British-made, rifled cannon used primarily by the Confederacy.\\n\\n5. Pistols:\\n - Colt Army Model 1860: A .44 caliber, single-action revolver used by both sides.\\n - Remington Model 1858: A .44 caliber, single-action revolver used by both sides.\\n - Smith & Wesson Model 2: A .32 caliber, single-action revolver used by both sides.\\n\\n6. Blades:\\n - Bayonets: Attached to the end of rifles and muskets, used for close combat.\\n - Swords: Officers on both sides often carried swords for personal defense.\\n\\n7. Naval Weapons:\\n - Dahlgren Gun: A smoothbore, muzzle-loading cannon used on naval vessels.\\n - Brooke Rifle: A rifled, muzzle-loading cannon used on Confederate naval vessels.\\n\\nIt is important to note that this list is not exhaustive, as there were numerous variations and models of these weapons used during the Civil War. Additionally, advancements in technology and the introduction of new weapons occurred throughout the conflict.{'completion_tokens': 518, 'prompt_tokens': 28, 'total_tokens': 546}8.113981False{'harassment': False, 'violence': False, 'moderation_flag': False}
1gpt-3.5-turbo[{'role': 'system', 'content': 'You are a historian.'}, {'role': 'user', 'content': 'Give me a list of weapons used in the civil war.'}]1.0During the American Civil War (1861-1865), various weapons were utilized by both the Union and Confederate forces. Here is a list of some significant weapons employed during this conflict:\\n\\n1. Rifles:\\n - Springfield Model 1861: A widely used .58 caliber muzzle-loading rifle.\\n - Enfield Pattern 1853: A British-made rifle imported by both sides, often used by Confederate soldiers.\\n - Henry Repeating Rifle: Lever-action, magazine-fed rifle known for its rapid-fire capability, primarily used by Union troops.\\n\\n2. Muskets:\\n - Springfield Model 1861/1855: Smoothbore muskets often used by both sides earlier in the war.\\n - Lorenz Rifle: Austrian-made musket popular among Confederate forces.\\n - P53 Enfield: British-made musket used by both Union and Confederate soldiers.\\n\\n3. Carbines:\\n - Spencer Repeating Carbine: A lever-action, seven-shot carbine used by Union cavalry, notable for its high rate of fire.\\n - Sharps Carbine: A single-shot breech-loading carbine utilized by both sides.\\n\\n4. Pistols:\\n - Colt Single Action Army Revolver: Often referred to as the \"Colt .45,\" a popular six-shot revolver used by Union cavalry.\\n - Remington Model 1858: A six-shot, percussion cap revolver used by both Union and Confederate troops.\\n\\n5. Artillery:\\n - Napoleon Gun: A smoothbore, muzzle-loading cannon used by both sides. It fired a 12-pound projectile.\\n - Parrott Rifle: A rifled artillery piece, available in various calibers, used primarily by Union forces.\\n - Whitworth Rifle: A British-made, breech-loading rifle known for its accuracy and long-range capabilities, favored by the Confederacy.\\n\\n6. Edged Weapons:\\n - Model 1840 Army Non-commissioned Officer Sword: A common sword used by Union infantry and cavalry.\\n - Model 1850 Army Staff and Field Officer's Sword: An ornate sword often carried by higher-ranking officers on both sides.\\n - Bowie Knife: A large, fixed-blade knife typically used by soldiers on both sides for close combat.\\n\\nIt is worth noting that this list only scratches the surface of the wide range of weapons employed throughout the Civil War, as various other firearms, bayonets, sabers, and artillery pieces were in use.{'completion_tokens': 497, 'prompt_tokens': 28, 'total_tokens': 525}8.594419False{'harassment': False, 'violence': False, 'moderation_flag': False}
\n", + "
" + ], + "text/plain": [ + " model \\\n", + "0 gpt-3.5-turbo \n", + "1 gpt-3.5-turbo \n", + "\n", + " messages \\\n", + "0 [{'role': 'system', 'content': 'You are a historian.'}, {'role': 'user', 'content': 'Give me a list of weapons used in the civil war.'}] \n", + "1 [{'role': 'system', 'content': 'You are a historian.'}, {'role': 'user', 'content': 'Give me a list of weapons used in the civil war.'}] \n", + "\n", + " temperature \\\n", + "0 0.0 \n", + "1 1.0 \n", + "\n", + " response \\\n", + "0 During the American Civil War (1861-1865), a wide range of weapons were used by both the Union and Confederate forces. Here is a list of some of the most significant weapons employed during this conflict:\\n\\n1. Rifles:\\n - Springfield Model 1861: A muzzle-loading, single-shot rifle used by the Union Army.\\n - Enfield Pattern 1853: A British-made muzzle-loading, single-shot rifle used by both sides.\\n - Sharps Rifle: A breech-loading, single-shot rifle known for its accuracy and used by both sides.\\n\\n2. Muskets:\\n - Springfield Model 1861: A muzzle-loading, smoothbore musket used by the Union Army.\\n - Pattern 1853 Enfield: A muzzle-loading, smoothbore musket used by both sides.\\n - Lorenz Rifle: A muzzle-loading, rifled musket used primarily by the Confederacy.\\n\\n3. Carbines:\\n - Spencer Repeating Carbine: A breech-loading, lever-action carbine used by Union cavalry.\\n - Sharps Carbine: A breech-loading, single-shot carbine used by both sides.\\n - Burnside Carbine: A breech-loading, single-shot carbine used by Union cavalry.\\n\\n4. Artillery:\\n - Napoleon Gun: A smoothbore, muzzle-loading cannon used by both sides.\\n - Parrott Rifle: A rifled, muzzle-loading cannon used by both sides.\\n - Whitworth Rifle: A British-made, rifled cannon used primarily by the Confederacy.\\n\\n5. Pistols:\\n - Colt Army Model 1860: A .44 caliber, single-action revolver used by both sides.\\n - Remington Model 1858: A .44 caliber, single-action revolver used by both sides.\\n - Smith & Wesson Model 2: A .32 caliber, single-action revolver used by both sides.\\n\\n6. Blades:\\n - Bayonets: Attached to the end of rifles and muskets, used for close combat.\\n - Swords: Officers on both sides often carried swords for personal defense.\\n\\n7. Naval Weapons:\\n - Dahlgren Gun: A smoothbore, muzzle-loading cannon used on naval vessels.\\n - Brooke Rifle: A rifled, muzzle-loading cannon used on Confederate naval vessels.\\n\\nIt is important to note that this list is not exhaustive, as there were numerous variations and models of these weapons used during the Civil War. Additionally, advancements in technology and the introduction of new weapons occurred throughout the conflict. \n", + "1 During the American Civil War (1861-1865), various weapons were utilized by both the Union and Confederate forces. Here is a list of some significant weapons employed during this conflict:\\n\\n1. Rifles:\\n - Springfield Model 1861: A widely used .58 caliber muzzle-loading rifle.\\n - Enfield Pattern 1853: A British-made rifle imported by both sides, often used by Confederate soldiers.\\n - Henry Repeating Rifle: Lever-action, magazine-fed rifle known for its rapid-fire capability, primarily used by Union troops.\\n\\n2. Muskets:\\n - Springfield Model 1861/1855: Smoothbore muskets often used by both sides earlier in the war.\\n - Lorenz Rifle: Austrian-made musket popular among Confederate forces.\\n - P53 Enfield: British-made musket used by both Union and Confederate soldiers.\\n\\n3. 
Carbines:\\n - Spencer Repeating Carbine: A lever-action, seven-shot carbine used by Union cavalry, notable for its high rate of fire.\\n - Sharps Carbine: A single-shot breech-loading carbine utilized by both sides.\\n\\n4. Pistols:\\n - Colt Single Action Army Revolver: Often referred to as the \"Colt .45,\" a popular six-shot revolver used by Union cavalry.\\n - Remington Model 1858: A six-shot, percussion cap revolver used by both Union and Confederate troops.\\n\\n5. Artillery:\\n - Napoleon Gun: A smoothbore, muzzle-loading cannon used by both sides. It fired a 12-pound projectile.\\n - Parrott Rifle: A rifled artillery piece, available in various calibers, used primarily by Union forces.\\n - Whitworth Rifle: A British-made, breech-loading rifle known for its accuracy and long-range capabilities, favored by the Confederacy.\\n\\n6. Edged Weapons:\\n - Model 1840 Army Non-commissioned Officer Sword: A common sword used by Union infantry and cavalry.\\n - Model 1850 Army Staff and Field Officer's Sword: An ornate sword often carried by higher-ranking officers on both sides.\\n - Bowie Knife: A large, fixed-blade knife typically used by soldiers on both sides for close combat.\\n\\nIt is worth noting that this list only scratches the surface of the wide range of weapons employed throughout the Civil War, as various other firearms, bayonets, sabers, and artillery pieces were in use. \n", + "\n", + " response_usage \\\n", + "0 {'completion_tokens': 518, 'prompt_tokens': 28, 'total_tokens': 546} \n", + "1 {'completion_tokens': 497, 'prompt_tokens': 28, 'total_tokens': 525} \n", + "\n", + " latency moderation_flag \\\n", + "0 8.113981 False \n", + "1 8.594419 False \n", + "\n", + " moderation_topics \n", + "0 {'harassment': False, 'violence': False, 'moderation_flag': False} \n", + "1 {'harassment': False, 'violence': False, 'moderation_flag': False} " + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "experiment.evaluate(\"moderation_topics\", apply_moderation, {\"category_names\": [\"harassment\", \"violence\"]})\n", + "experiment.visualize()" + ] + }, + { + "cell_type": "markdown", + "id": "c2ee48f8", + "metadata": {}, + "source": [ + "To get numerical scores, you can see some results score higher in \"violence\" score than others." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "348dcc0e", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
modelmessagestemperatureresponseresponse_usagelatencymoderation_flagmoderation_topicsmoderation_scores
0gpt-3.5-turbo[{'role': 'system', 'content': 'You are a historian.'}, {'role': 'user', 'content': 'Give me a list of weapons used in the civil war.'}]0.0During the American Civil War (1861-1865), a wide range of weapons were used by both the Union and Confederate forces. Here is a list of some of the most significant weapons employed during this conflict:\\n\\n1. Rifles:\\n - Springfield Model 1861: A muzzle-loading, single-shot rifle used by the Union Army.\\n - Enfield Pattern 1853: A British-made muzzle-loading, single-shot rifle used by both sides.\\n - Sharps Rifle: A breech-loading, single-shot rifle known for its accuracy and used by both sides.\\n\\n2. Muskets:\\n - Springfield Model 1861: A muzzle-loading, smoothbore musket used by the Union Army.\\n - Pattern 1853 Enfield: A muzzle-loading, smoothbore musket used by both sides.\\n - Lorenz Rifle: A muzzle-loading, rifled musket used primarily by the Confederacy.\\n\\n3. Carbines:\\n - Spencer Repeating Carbine: A breech-loading, lever-action carbine used by Union cavalry.\\n - Sharps Carbine: A breech-loading, single-shot carbine used by both sides.\\n - Burnside Carbine: A breech-loading, single-shot carbine used by Union cavalry.\\n\\n4. Artillery:\\n - Napoleon Gun: A smoothbore, muzzle-loading cannon used by both sides.\\n - Parrott Rifle: A rifled, muzzle-loading cannon used by both sides.\\n - Whitworth Rifle: A British-made, rifled cannon used primarily by the Confederacy.\\n\\n5. Pistols:\\n - Colt Army Model 1860: A .44 caliber, single-action revolver used by both sides.\\n - Remington Model 1858: A .44 caliber, single-action revolver used by both sides.\\n - Smith & Wesson Model 2: A .32 caliber, single-action revolver used by both sides.\\n\\n6. Blades:\\n - Bayonets: Attached to the end of rifles and muskets, used for close combat.\\n - Swords: Officers on both sides often carried swords for personal defense.\\n\\n7. Naval Weapons:\\n - Dahlgren Gun: A smoothbore, muzzle-loading cannon used on naval vessels.\\n - Brooke Rifle: A rifled, muzzle-loading cannon used on Confederate naval vessels.\\n\\nIt is important to note that this list is not exhaustive, as there were numerous variations and models of these weapons used during the Civil War. Additionally, advancements in technology and the introduction of new weapons occurred throughout the conflict.{'completion_tokens': 518, 'prompt_tokens': 28, 'total_tokens': 546}8.113981False{'harassment': False, 'violence': False, 'moderation_flag': False}{'harassment_score': 5.6028698054433335e-06, 'violence_score': 0.006405988242477179, 'moderation_flag': False}
1gpt-3.5-turbo[{'role': 'system', 'content': 'You are a historian.'}, {'role': 'user', 'content': 'Give me a list of weapons used in the civil war.'}]1.0During the American Civil War (1861-1865), various weapons were utilized by both the Union and Confederate forces. Here is a list of some significant weapons employed during this conflict:\\n\\n1. Rifles:\\n - Springfield Model 1861: A widely used .58 caliber muzzle-loading rifle.\\n - Enfield Pattern 1853: A British-made rifle imported by both sides, often used by Confederate soldiers.\\n - Henry Repeating Rifle: Lever-action, magazine-fed rifle known for its rapid-fire capability, primarily used by Union troops.\\n\\n2. Muskets:\\n - Springfield Model 1861/1855: Smoothbore muskets often used by both sides earlier in the war.\\n - Lorenz Rifle: Austrian-made musket popular among Confederate forces.\\n - P53 Enfield: British-made musket used by both Union and Confederate soldiers.\\n\\n3. Carbines:\\n - Spencer Repeating Carbine: A lever-action, seven-shot carbine used by Union cavalry, notable for its high rate of fire.\\n - Sharps Carbine: A single-shot breech-loading carbine utilized by both sides.\\n\\n4. Pistols:\\n - Colt Single Action Army Revolver: Often referred to as the \"Colt .45,\" a popular six-shot revolver used by Union cavalry.\\n - Remington Model 1858: A six-shot, percussion cap revolver used by both Union and Confederate troops.\\n\\n5. Artillery:\\n - Napoleon Gun: A smoothbore, muzzle-loading cannon used by both sides. It fired a 12-pound projectile.\\n - Parrott Rifle: A rifled artillery piece, available in various calibers, used primarily by Union forces.\\n - Whitworth Rifle: A British-made, breech-loading rifle known for its accuracy and long-range capabilities, favored by the Confederacy.\\n\\n6. Edged Weapons:\\n - Model 1840 Army Non-commissioned Officer Sword: A common sword used by Union infantry and cavalry.\\n - Model 1850 Army Staff and Field Officer's Sword: An ornate sword often carried by higher-ranking officers on both sides.\\n - Bowie Knife: A large, fixed-blade knife typically used by soldiers on both sides for close combat.\\n\\nIt is worth noting that this list only scratches the surface of the wide range of weapons employed throughout the Civil War, as various other firearms, bayonets, sabers, and artillery pieces were in use.{'completion_tokens': 497, 'prompt_tokens': 28, 'total_tokens': 525}8.594419False{'harassment': False, 'violence': False, 'moderation_flag': False}{'harassment_score': 3.943132924177917e-06, 'violence_score': 0.007170462515205145, 'moderation_flag': False}
\n", + "
" + ], + "text/plain": [ + " model \\\n", + "0 gpt-3.5-turbo \n", + "1 gpt-3.5-turbo \n", + "\n", + " messages \\\n", + "0 [{'role': 'system', 'content': 'You are a historian.'}, {'role': 'user', 'content': 'Give me a list of weapons used in the civil war.'}] \n", + "1 [{'role': 'system', 'content': 'You are a historian.'}, {'role': 'user', 'content': 'Give me a list of weapons used in the civil war.'}] \n", + "\n", + " temperature \\\n", + "0 0.0 \n", + "1 1.0 \n", + "\n", + " response \\\n", + "0 During the American Civil War (1861-1865), a wide range of weapons were used by both the Union and Confederate forces. Here is a list of some of the most significant weapons employed during this conflict:\\n\\n1. Rifles:\\n - Springfield Model 1861: A muzzle-loading, single-shot rifle used by the Union Army.\\n - Enfield Pattern 1853: A British-made muzzle-loading, single-shot rifle used by both sides.\\n - Sharps Rifle: A breech-loading, single-shot rifle known for its accuracy and used by both sides.\\n\\n2. Muskets:\\n - Springfield Model 1861: A muzzle-loading, smoothbore musket used by the Union Army.\\n - Pattern 1853 Enfield: A muzzle-loading, smoothbore musket used by both sides.\\n - Lorenz Rifle: A muzzle-loading, rifled musket used primarily by the Confederacy.\\n\\n3. Carbines:\\n - Spencer Repeating Carbine: A breech-loading, lever-action carbine used by Union cavalry.\\n - Sharps Carbine: A breech-loading, single-shot carbine used by both sides.\\n - Burnside Carbine: A breech-loading, single-shot carbine used by Union cavalry.\\n\\n4. Artillery:\\n - Napoleon Gun: A smoothbore, muzzle-loading cannon used by both sides.\\n - Parrott Rifle: A rifled, muzzle-loading cannon used by both sides.\\n - Whitworth Rifle: A British-made, rifled cannon used primarily by the Confederacy.\\n\\n5. Pistols:\\n - Colt Army Model 1860: A .44 caliber, single-action revolver used by both sides.\\n - Remington Model 1858: A .44 caliber, single-action revolver used by both sides.\\n - Smith & Wesson Model 2: A .32 caliber, single-action revolver used by both sides.\\n\\n6. Blades:\\n - Bayonets: Attached to the end of rifles and muskets, used for close combat.\\n - Swords: Officers on both sides often carried swords for personal defense.\\n\\n7. Naval Weapons:\\n - Dahlgren Gun: A smoothbore, muzzle-loading cannon used on naval vessels.\\n - Brooke Rifle: A rifled, muzzle-loading cannon used on Confederate naval vessels.\\n\\nIt is important to note that this list is not exhaustive, as there were numerous variations and models of these weapons used during the Civil War. Additionally, advancements in technology and the introduction of new weapons occurred throughout the conflict. \n", + "1 During the American Civil War (1861-1865), various weapons were utilized by both the Union and Confederate forces. Here is a list of some significant weapons employed during this conflict:\\n\\n1. Rifles:\\n - Springfield Model 1861: A widely used .58 caliber muzzle-loading rifle.\\n - Enfield Pattern 1853: A British-made rifle imported by both sides, often used by Confederate soldiers.\\n - Henry Repeating Rifle: Lever-action, magazine-fed rifle known for its rapid-fire capability, primarily used by Union troops.\\n\\n2. Muskets:\\n - Springfield Model 1861/1855: Smoothbore muskets often used by both sides earlier in the war.\\n - Lorenz Rifle: Austrian-made musket popular among Confederate forces.\\n - P53 Enfield: British-made musket used by both Union and Confederate soldiers.\\n\\n3. 
Carbines:\\n - Spencer Repeating Carbine: A lever-action, seven-shot carbine used by Union cavalry, notable for its high rate of fire.\\n - Sharps Carbine: A single-shot breech-loading carbine utilized by both sides.\\n\\n4. Pistols:\\n - Colt Single Action Army Revolver: Often referred to as the \"Colt .45,\" a popular six-shot revolver used by Union cavalry.\\n - Remington Model 1858: A six-shot, percussion cap revolver used by both Union and Confederate troops.\\n\\n5. Artillery:\\n - Napoleon Gun: A smoothbore, muzzle-loading cannon used by both sides. It fired a 12-pound projectile.\\n - Parrott Rifle: A rifled artillery piece, available in various calibers, used primarily by Union forces.\\n - Whitworth Rifle: A British-made, breech-loading rifle known for its accuracy and long-range capabilities, favored by the Confederacy.\\n\\n6. Edged Weapons:\\n - Model 1840 Army Non-commissioned Officer Sword: A common sword used by Union infantry and cavalry.\\n - Model 1850 Army Staff and Field Officer's Sword: An ornate sword often carried by higher-ranking officers on both sides.\\n - Bowie Knife: A large, fixed-blade knife typically used by soldiers on both sides for close combat.\\n\\nIt is worth noting that this list only scratches the surface of the wide range of weapons employed throughout the Civil War, as various other firearms, bayonets, sabers, and artillery pieces were in use. \n", + "\n", + " response_usage \\\n", + "0 {'completion_tokens': 518, 'prompt_tokens': 28, 'total_tokens': 546} \n", + "1 {'completion_tokens': 497, 'prompt_tokens': 28, 'total_tokens': 525} \n", + "\n", + " latency moderation_flag \\\n", + "0 8.113981 False \n", + "1 8.594419 False \n", + "\n", + " moderation_topics \\\n", + "0 {'harassment': False, 'violence': False, 'moderation_flag': False} \n", + "1 {'harassment': False, 'violence': False, 'moderation_flag': False} \n", + "\n", + " moderation_scores \n", + "0 {'harassment_score': 5.6028698054433335e-06, 'violence_score': 0.006405988242477179, 'moderation_flag': False} \n", + "1 {'harassment_score': 3.943132924177917e-06, 'violence_score': 0.007170462515205145, 'moderation_flag': False} " + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "experiment.evaluate(\"moderation_scores\", apply_moderation, {\"category_score_names\": [\"harassment\", \"violence\"]})\n", + "experiment.visualize()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "86898588", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "04df3454", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From e77fcc5bcd6fa7b123ae17fd660659bcb3e7c8bf Mon Sep 17 00:00:00 2001 From: Kevin Date: Tue, 26 Dec 2023 17:15:56 -0800 Subject: [PATCH 21/52] Adding moderation eval to docs --- docs/source/utils.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/source/utils.rst b/docs/source/utils.rst index 9754cd67..a73aecce 100644 --- a/docs/source/utils.rst +++ b/docs/source/utils.rst @@ -16,6 +16,8 @@ They can also be used with ``prompttest`` for be 
part of your CI/CD system. .. autofunction:: prompttools.utils.compute_similarity_against_model +.. autofunction:: prompttools.utils.apply_moderation + .. autofunction:: prompttools.utils.ranking_correlation .. autofunction:: prompttools.utils.validate_json_response From ffbfc60aabe91f54e7c463c4781c7e5f65432beb Mon Sep 17 00:00:00 2001 From: steven krawczyk Date: Wed, 27 Dec 2023 08:24:43 -0800 Subject: [PATCH 22/52] Version bump --- docs/source/conf.py | 2 +- prompttools/version.py | 4 ++-- pyproject.toml | 2 +- version.txt | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index fa00b826..c9d1bf03 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -11,7 +11,7 @@ project = "prompttools" copyright = "2023, Hegel AI" author = "Hegel AI" -release = "0.0.44" +release = "0.0.45" # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration diff --git a/prompttools/version.py b/prompttools/version.py index 23fa2526..d103c2d6 100644 --- a/prompttools/version.py +++ b/prompttools/version.py @@ -1,2 +1,2 @@ -__version__ = '0.0.44a0+7d36bb0' -git_version = '7d36bb0922948f299ace033d0de58590ae4254f3' +__version__ = '0.0.45a0+e77fcc5' +git_version = 'e77fcc5bcd6fa7b123ae17fd660659bcb3e7c8bf' diff --git a/pyproject.toml b/pyproject.toml index 2d4ce9aa..e01419d8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "prompttools" -version = "0.0.44" +version = "0.0.45" authors = [ { name="Hegel AI", email="team@hegel-ai.com" }, ] diff --git a/version.txt b/version.txt index 2fcff7eb..0f225676 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -0.0.44a0 +0.0.45a0 From 6151062e36d63229b66d1c4193f0173ad022502d Mon Sep 17 00:00:00 2001 From: "Steven Krawczyk (Hegel AI)" Date: Thu, 28 Dec 2023 07:06:51 -0800 Subject: [PATCH 23/52] Update style.mplstyle to use Hegel colors --- prompttools/experiment/experiments/style.mplstyle | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/prompttools/experiment/experiments/style.mplstyle b/prompttools/experiment/experiments/style.mplstyle index b31c0eac..00520178 100755 --- a/prompttools/experiment/experiments/style.mplstyle +++ b/prompttools/experiment/experiments/style.mplstyle @@ -40,11 +40,11 @@ legend.frameon : False savefig.bbox : tight savefig.dpi : 100 -# Rainbow color cycle -#axes.prop_cycle: cycler('color', ["black", "332288","88CCEE","44AA99","117733","999933","DDCC77","CC6677","882255","AA4499","brown","fd3c06","gray"]) +# Hegel AI color cycle +axes.prop_cycle: cycler('color', ["black", "771541", "EB8F4C","594F3B","A8B7AB","9C92A3"]) # Not good for colorblind people -axes.prop_cycle: cycler('color', ['black', '7e1e9c', '15b01a', '448ee4', 'ff7fa7', '029386', 'ed872d', 'ae1717', 'gray', 'e03fd8', '011288', '0b4008']) +#axes.prop_cycle: cycler('color', ['black', '7e1e9c', '15b01a', '448ee4', 'ff7fa7', '029386', 'ed872d', 'ae1717', 'gray', 'e03fd8', '011288', '0b4008']) #font.family : serif #text.usetex : True From 280f4e94b5e1032bb126def256be4d56bb146714 Mon Sep 17 00:00:00 2001 From: steven krawczyk Date: Thu, 28 Dec 2023 08:12:02 -0800 Subject: [PATCH 24/52] Update mpstyle --- examples/notebooks/OpenAIChatExperiment.ipynb | 141 ++++++++++-------- .../experiment/experiments/experiment.py | 7 +- .../experiment/experiments/style.mplstyle | 3 - prompttools/version.py | 4 +- 4 files changed, 85 
insertions(+), 70 deletions(-) diff --git a/examples/notebooks/OpenAIChatExperiment.ipynb b/examples/notebooks/OpenAIChatExperiment.ipynb index 654a8e59..f840af3e 100644 --- a/examples/notebooks/OpenAIChatExperiment.ipynb +++ b/examples/notebooks/OpenAIChatExperiment.ipynb @@ -44,7 +44,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 13, "id": "ed4e635e", "metadata": {}, "outputs": [], @@ -156,9 +156,10 @@ " \n", " \n", " model\n", - " temperature\n", " messages\n", + " temperature\n", " response\n", + " response_usage\n", " latency\n", " \n", " \n", @@ -166,45 +167,49 @@ " \n", " 0\n", " gpt-3.5-turbo\n", - " 0.0\n", " [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}]\n", + " 0.0\n", " George Washington\n", - " 2.625049e-06\n", + " {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75}\n", + " 0.000006\n", " \n", " \n", " 1\n", " gpt-3.5-turbo\n", - " 1.0\n", " [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}]\n", + " 1.0\n", " George Washington\n", - " 1.000008e-06\n", + " {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75}\n", + " 0.000005\n", " \n", " \n", " 2\n", " gpt-3.5-turbo-0613\n", - " 0.0\n", " [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}]\n", + " 0.0\n", " George Washington\n", - " 7.500057e-07\n", + " {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75}\n", + " 0.000003\n", " \n", " \n", " 3\n", " gpt-3.5-turbo-0613\n", - " 1.0\n", " [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}]\n", + " 1.0\n", " George Washington\n", - " 6.670016e-07\n", + " {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75}\n", + " 0.000002\n", " \n", " \n", "\n", "" ], "text/plain": [ - " model temperature \\\n", - "0 gpt-3.5-turbo 0.0 \n", - "1 gpt-3.5-turbo 1.0 \n", - "2 gpt-3.5-turbo-0613 0.0 \n", - "3 gpt-3.5-turbo-0613 1.0 \n", + " model \\\n", + "0 gpt-3.5-turbo \n", + "1 gpt-3.5-turbo \n", + "2 gpt-3.5-turbo-0613 \n", + "3 gpt-3.5-turbo-0613 \n", "\n", " messages \\\n", "0 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}] \n", @@ -212,11 +217,23 @@ "2 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}] \n", "3 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}] \n", "\n", - " response latency \n", - "0 George Washington 2.625049e-06 \n", - "1 George Washington 1.000008e-06 \n", - "2 George Washington 7.500057e-07 \n", - "3 George Washington 6.670016e-07 " + " temperature response \\\n", + "0 0.0 George Washington \n", + "1 1.0 George Washington \n", + "2 0.0 George Washington \n", + "3 1.0 George Washington \n", + "\n", + " response_usage \\\n", + "0 {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75} \n", + "1 {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75} \n", + "2 {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75} \n", + "3 {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75} \n", + "\n", + " latency \n", + "0 0.000006 \n", + "1 0.000005 \n", + "2 0.000003 \n", + "3 0.000002 " ] }, "metadata": {}, @@ -247,7 +264,7 @@ }, { 
"cell_type": "code", - "execution_count": null, + "execution_count": 7, "id": "d861ab10", "metadata": {}, "outputs": [], @@ -257,7 +274,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "id": "8ddbb951", "metadata": {}, "outputs": [], @@ -267,23 +284,12 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 9, "id": "e80dfeec", "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/kevin/miniconda3/envs/prompttools/lib/python3.11/site-packages/torch/utils/tensorboard/__init__.py:4: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.\n", - " if not hasattr(tensorboard, \"__version__\") or LooseVersion(\n", - "/Users/kevin/miniconda3/envs/prompttools/lib/python3.11/site-packages/torch/utils/tensorboard/__init__.py:6: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.\n", - " ) < LooseVersion(\"1.15\"):\n" - ] - } - ], + "outputs": [], "source": [ "experiment.evaluate(\"similar_to_expected\", similarity.semantic_similarity, expected=[\"George Washington\"] * 4)" ] @@ -298,7 +304,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 10, "id": "4d09c18e", "metadata": { "scrolled": true @@ -326,9 +332,10 @@ " \n", " \n", " model\n", - " temperature\n", " messages\n", + " temperature\n", " response\n", + " response_usage\n", " latency\n", " similar_to_expected\n", " \n", @@ -337,37 +344,41 @@ " \n", " 0\n", " gpt-3.5-turbo\n", - " 0.0\n", " [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}]\n", + " 0.0\n", " George Washington\n", - " 2.625049e-06\n", + " {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75}\n", + " 0.000006\n", " 1.0\n", " \n", " \n", " 1\n", " gpt-3.5-turbo\n", - " 1.0\n", " [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}]\n", + " 1.0\n", " George Washington\n", - " 1.000008e-06\n", + " {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75}\n", + " 0.000005\n", " 1.0\n", " \n", " \n", " 2\n", " gpt-3.5-turbo-0613\n", - " 0.0\n", " [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}]\n", + " 0.0\n", " George Washington\n", - " 7.500057e-07\n", + " {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75}\n", + " 0.000003\n", " 1.0\n", " \n", " \n", " 3\n", " gpt-3.5-turbo-0613\n", - " 1.0\n", " [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}]\n", + " 1.0\n", " George Washington\n", - " 6.670016e-07\n", + " {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75}\n", + " 0.000002\n", " 1.0\n", " \n", " \n", @@ -375,11 +386,11 @@ "" ], "text/plain": [ - " model temperature \\\n", - "0 gpt-3.5-turbo 0.0 \n", - "1 gpt-3.5-turbo 1.0 \n", - "2 gpt-3.5-turbo-0613 0.0 \n", - "3 gpt-3.5-turbo-0613 1.0 \n", + " model \\\n", + "0 gpt-3.5-turbo \n", + "1 gpt-3.5-turbo \n", + "2 gpt-3.5-turbo-0613 \n", + "3 gpt-3.5-turbo-0613 \n", "\n", " messages \\\n", "0 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}] \n", @@ -387,11 +398,23 @@ "2 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}] \n", "3 
[{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}] \n", "\n", - " response latency similar_to_expected \n", - "0 George Washington 2.625049e-06 1.0 \n", - "1 George Washington 1.000008e-06 1.0 \n", - "2 George Washington 7.500057e-07 1.0 \n", - "3 George Washington 6.670016e-07 1.0 " + " temperature response \\\n", + "0 0.0 George Washington \n", + "1 1.0 George Washington \n", + "2 0.0 George Washington \n", + "3 1.0 George Washington \n", + "\n", + " response_usage \\\n", + "0 {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75} \n", + "1 {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75} \n", + "2 {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75} \n", + "3 {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75} \n", + "\n", + " latency similar_to_expected \n", + "0 0.000006 1.0 \n", + "1 0.000005 1.0 \n", + "2 0.000003 1.0 \n", + "3 0.000002 1.0 " ] }, "metadata": {}, @@ -422,7 +445,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "id": "d30cd8ad", "metadata": {}, "outputs": [], @@ -451,18 +474,18 @@ "id": "e5626394", "metadata": {}, "source": [ - "You can optional " + "You can optionally visualize the results with the following command" ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 12, "id": "d0007a1f", "metadata": {}, "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAABAYAAAKxCAYAAADARa4uAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABdiklEQVR4nO3deViVdf7/8ddhRxQQ3NcjrpUKLrhmULmMS6mZM1ZWJJbZfNtm0hxzignT0sbRmsnSVNq+OWNm6mhaFmKu41fBNRVU0HJLFFzZ798f/jgjAXqO5wB67ufjus6V3Pdned9wum7Oi8993xbDMAwBAAAAAABT8qjqAgAAAAAAQNUhGAAAAAAAwMQIBgAAAAAAMDGCAQAAAAAATIxgAAAAAAAAEyMYAAAAAADAxAgGAAAAAAAwMYIBAAAAAABMjGAAAAAAAAATIxgAAAAAAMDECAaqwOHDhzV37lw9+eSTCg8Pl5eXlywWiyZPnlzVpdll69ateuyxx9SkSRP5+vqqdu3a6t69u1555RUVFBRUdXkAAAAAAAd4VXUBZjRr1izNmjWrqsu4IW+88YZeffVVFRUVqW7dugoPD9fZs2eVnJyszZs3609/+pOqV69e1WUCAAAAAOxEMFAFatWqpUGDBqlLly6KjIzUhx9+qMWLF1d1Wdc1Z84cTZo0SY0aNdL8+fPVp08f277Lly9rzZo18vX1rcIKAQAAAACOIhioApMmTSrx9cKFC6uoEvudOnVKL730kvz8/PTNN9/otttuK7Hf399f9913XxVVBwAAAAC4Udxj4BZiGIYWLlyoPn36KDQ0VL6+vgoLC9Nzzz2nEydOVOjc8+fP1/nz5zVy5MhSoQAAAAAA4NbFioFbRH5+vh555BEtWrRIktSgQQM1btxYqampevfdd/XFF19o7dq1atWqVYXMv3z5cknSoEGDlJqaqg8++EC7du2St7e3IiIiFBsbq2bNmlXI3AAAAACAisOKgVvEq6++qkWLFqlDhw5KTk7Wzz//rJSUFJ0+fVrPPPOMjh8/rkceeaRC5i4qKlJycrIkKTU1VeHh4frrX/+qb775RitWrNAbb7yhNm3a6NNPP62Q+QEAAAAAFYdg4Bbwyy+/6G9/+5sCAwO1bNkyRURE2Pb5+/vr3XffVWRkpP7v//5PP/zwg8vnz87O1uXLlyVJEydOVNOmTZWUlKScnBwdOnRIjzzyiPLy8vTEE09o27ZtLp8fAAAAAFBxCAZuAStXrlRubq769eunRo0aldrv4eGhQYMGSZKSkpJcPv/Fixdt/y4qKtLSpUt11113ydfXV82aNdMnn3yiTp06qaCgQFOmTHH5/AAAAACAisM9Bm4Bu3btkiRt3rxZd955Z5ltTp48KUn6+eefS2yPjo6+oRsT7tmzR56enpIkPz8/2/b+/fuXuo+BxWLR888/r8cee0zffvutioqK5OFB5gQAAAAAtwKCgVtAdna2JOno0aM6evToNdsWL/kvlpaWViossIdhGLZ/BwUFycPDQ0VFRWrTpk2Z7YufVHD+/HllZmaqdu3aDs8JAAAAAKh8/Fn3FlC9enVJ0iuvvCLDMK75SkhIKNH3p59+um6fsl5eXv/NjLy9vW1PHPD19S2zxqu3FxYWuvg7AAAAAACoKAQDt4Dbb79dkrR79+4qq6F79+6SpEOHDpW5v3i7r6+vatWqVWl1AQAAAACcQzBwCxg4cKB8fHy0cuVKpaamVkkNv/3tbyVJ//73v3XmzJlS+xcsWCBJ6tWrV4nVBgAAAACAmxvBwC2gQYMGeuGFF5Sfn69+/fpp7dq1JfYbhqH//Oc/Gjt2bLl/0XfWoEGD1LlzZ50/f16xsbE6f/68bd/777+vpUuXSpJefvnlCpkfAAAAAFAxLMbVd5lDpdiwYYMGDx5s+/rChQvKzc1VtWrV5O/vb9uenJysxo0bS5IKCgr0xBNP6NNPP5Uk1atXT02aNFFubq4OHTpk+6D+448/lnuDQGcdPnxYd955p44dO6bq1avrtttu0/Hjx/XTTz9JkuLj4zVp0qQKmRsAAAAAUDEIBqrA2rVrdffdd
1+33eHDh2W1WktsW7lypebOnavNmzcrMzNTNWvWVOPGjdW9e3c9+OCD6tWrV4U+KjAzM1NvvPGGli5dqp9++kkBAQHq2rWrXnzxRfXt27fC5gUAAAAAVAyCAQAAAAAATIx7DAAAAAAAYGIEAwAAAAAAmBjPlaskHTp00OHDh1W9enW1aNGiqssBAAAAALi5tLQ0XbhwQc2aNVNycnK57bjHQCUJDg5WdnZ2VZcBAAAAADCZoKAgZWVllbufFQOVpHr16srOzlZQUJAiIiKquhwAklJSUvj/EgAAF+G8Ctx8iv+/rF69+jXbEQxUkhYtWujnn39WRESE1q5dW9XlAJAUHR2tpKQk/r8EAMAFOK8CN5/i/y+vdzk7Nx8EAAAAAMDECAYAAAAAADAxggEAAAAAAEyMYAAAAAAAABMjGAAAAAAAwMQIBgAAAAAAMDGCAQAAAAAATIxgAAAAAAAAEyMYAAAAAADAxAgGAAAAAAAwMYIBAAAAAABMzKuqCwCAqhITE6Po6GhZrdaqLgUAgFse51Xg1kUwAMC0YmJiqroEAADcBudV4NbFpQQAAAAAAJgYwQAAAAAAACZGMAAAAAAAgIkRDAAAAAAAYGIEAwAAAAAAmBjBAAAAAAAAJkYwAAAAAACAiREMAAAAAABgYgQDAAAAAACYGMEAAAAAAAAmRjAAAAAAAICJEQwAAAAAAGBiBAMAAAAAAJiYV1UXgJuPxWKp6hIAABXAMIyqLgEAANyEWDEAAAAAAICJEQwAAAAAAGBiBAMAAAAAAJgYwQAAAAAAACZGMAAAAAAAgIkRDAAAAAAAYGIEAwAAAAAAmBjBAAAAAAAAJkYwAAAAAACAiREMAAAAAABgYgQDAAAAAACYGMEAAAAAAAAm5jbBwFdffaUxY8aoU6dOql+/vnx8fBQcHKwePXpo1qxZysvLc2i8uLg4WSyWa7727dtXQUcDAAAAAEDl8KrqAlzl7bff1oYNG+Tr66sGDRooPDxcx48f16ZNm7Rp0yZ98sknWrNmjYKDgx0at3HjxmrSpEmZ+6pVq+aCygEAAAAAqDpuEwyMHj1akydPVs+ePeXt7W3bvnnzZg0fPlzbtm3TK6+8on/84x8OjTtq1CjFxcW5uFoAAAAAAG4ObnMpQUxMjKKjo0uEApLUrVs3zZgxQ9KVyw0AAAAAAMB/uU0wcC1t2rSRJF26dKmKKwEAAAAA4ObiNpcSXMumTZskSR07dnS4b2Jiovbs2aPMzEyFhISoS5cueuyxx1SvXj1XlwkAAAAAQKVz22CgsLBQx48f17JlyzRhwgQFBARo6tSpDo+zbt26El8vXrxYcXFxeu+99xQTE+PweCkpKYqOjnaoT0xMzA3NBQAAAABwDwkJCUpISHCoT0pKil3t3C4YmDlzpl588cUS24YMGaL4+Hi1bdvW7nHq16+viRMnaujQoQoLC5O/v7+Sk5M1efJkff311xo1apRCQ0N13333OVRfdna2kpKSHOrjaJAAAAAAAHAv6enpDn+WtJfbBQMNGzZUz549lZ+fr4yMDJ08eVKJiYn6/PPP9frrr8vT09OuccaMGVNqW48ePbRixQoNGzZMS5Ys0YsvvqhBgwbJYrHYXV9QUJAiIiLsbi9JVqvVofYAAAAAAPditVoVFRXlUJ+UlBRlZ2dft53FMAzjRgu7FWzZskVjxozRjh079PTTT2v27NlOj3ngwAG1bt1a0pVvdHh4+HX7REdHKykpSVFRUVq7dq3TNVQkR4IOAMCtw81P+QAA4Ffs/Rzq9k8l6Nq1q1auXClfX1/NmTNHGRkZTo/ZqlUrhYSESJLS0tKcHg8AAAAAgKri9sGAJDVo0EAREREqKirSjh07XDKmt7e3JKmgoMAl4wEAAAAAUBVMEQxI//0A74oP8qdPn9apU6ckSY0aNXJ6PAAAAAAAqoopgoH09HTbSgF77gdwPTNmzJBhGAoKClJkZKTT4wEAAAAAUFXcIhjYtm2bXnvtNR06dKjUvlWrVql///4qKCjQgAED1Lx5c9u+mTNnymq1asSIESX67NmzR88884z27NlTYntOTo6mTJmit956S5L08ssvy8fHpwKOCAAAAACAyuEWjys8f/68Xn/9db3++uuqV6+eGjVqpLy8PB05ckRZWVmSpMjISH300Ucl+mVlZSkjI6PU4wDz8/M1e/ZszZ49W7Vr11aTJk0kST/++KMuXbokSYqNjdWECRMq/NgAAAAAAKhIbhEMhIeHa9asWfruu++0Z88e7du3T3l5eQoNDVX37t3129/+ViNHjpSXl32Ha7VaFR8fr40bN2rfvn3av3+/8vLyVKdOHQ0YMECjR49Wv379KvioAAAAAACoeBaDhxpXCnufH3kzsFgsVV0CAKACcMoHAMBc7P0c6hb3GAAAAAAAADeGYAAAAAAAABMjGAAAAAAAwMQIBgAAAAAAMDGCAQAAAAAATIxgAAAAAAAAEyMYAAAAAADAxAgGAAAAAAAwMYIBAAAAAABMjGAAAAAAAAATIxgAAAAAAMDECAYAAAAAADAxggEAAAAAAEyMYAAAAAAAABMjGAAAAAAAwMQIBgAAAAAAMDGCAQAAAAAATIxgAAAAAAAAEyMYAAAAAADAxAgGAAAAAAAwMYIBAAAAAABMjGAAAAAAAAATIxgAAAAAAMDECAYAAAAAADAxggEAAAAAAEyMYAAAAAAAABMjGAAAAAAAwMQIBgAAAAAAMDGCAQAAAAAATIxgAAAAAAAAEyMYAAAAAADAxAgGAAAAAAAwMYIBAAAAAABMjGAAAAAAAAATIxgAAAAAAMDECAYAAAAAADAxggEAAAAAAEyMYAAAAAAAABMjGAAAAAAAwMQIBgAAAAAAMDGCAQAAAAAATIxgAAAAAAAAEyMYAAAAAADAxAgGAAAAAAAwMYIBAAAAAABMjGAAAAAAAAATIxgAAAAAAMDECAYAAAAAADAxggEAAAAAAEyMYAAAAAAAABMjGAAAAAAAwMQIBgAAAAAAMDGCAQAAAAAATIxgAAAAAAAAEyMYAAAAAADAxAgGAAAAAAAwMYIBAAAAAABMjGAAAAAAAAATIxgAAAAAAMDECAYAAAAAADAxggEAAAAAAEyMYAAAAAAAABMjGAAAAAAAwMTcJhj46quvNGbMGHXq1En169eXj4+PgoOD1aNHD82aNUt5eXk3NO6mTZs0ePBg1a5dW/7+/rr99tsVHx+vnJwcFx8BAAAAAACVz22Cgbfffltz5szRnj175O/vr/DwcFWvXl2bNm3SCy+8oB49eigrK8uhMT/77DP16tVLy5Ytk6+vr2677TalpaXp1Vdf1V133aVLly5VzMEAAAAAAFBJ3CYYGD16tBITE3X+/HkdOnRIW7du1U8//aRNmzapUaNG2rZtm1555RW7x0tPT1dsbKwKCws1bdo0HT16VNu3b1dqaqpat26trVu3avz48RV4RAAAAAAAVDy3CQZiYmIUHR0tb2/vEtu7deumGTNmSLpyuYG9pk+frtzcXPXt21fjxo2TxWKRJDVt2lTz58+XJM2ZM0cn
T550zQEAAAAAAFAF3CYYuJY2bdpIkt1L/w3D0JIlSyRJsbGxpfb36NFDbdq0UX5+vpYuXeq6QgEAAAAAqGSmCAY2bdokSerYsaNd7Y8cOaLjx49Lknr27Flmm+LtW7ZscUGFAAAAAABUDa+qLqCiFBYW6vjx41q2bJkmTJiggIAATZ061a6+qampkiRfX181aNCgzDZhYWEl2torJSVF0dHRDvWJiYlRTEyMQ30AAAAAAO4jISFBCQkJDvVJSUmxq53bBQMzZ87Uiy++WGLbkCFDFB8fr7Zt29o1xtmzZyVJwcHBtnsL/FrNmjVLtLVXdna2kpKSHOrjaJAAAAAAAHAv6enpDn+WtJfbBQMNGzZUz549lZ+fr4yMDJ08eVKJiYn6/PPP9frrr8vT0/O6Y+Tk5EiSfHx8ym3j6+srSbp8+bJD9QUFBSkiIsKhPlar1aH2AAAAAAD3YrVaFRUV5VCflJQUZWdnX7ed2wUDw4cP1/Dhw21fb9myRWPGjNGUKVN05swZzZ49+7pj+Pn5SZLy8vLKbZObmytJ8vf3d6i+iIgIrV271qE+AAAAAABzu5FLzKOjo+1aZeD2Nx/s2rWrVq5cKV9fX82ZM0cZGRnX7VN8mUBWVpYMwyizTfElBMVtAQAAAAC4Fbl9MCBJDRo0UEREhIqKirRjx47rtm/ZsqWkK6sCjh07VmabQ4cOlWgLAAAAAMCtyBTBgCQVFBSU+O+1NGnSRPXq1ZMkbdiwocw2xdu7du3qogoBAAAAAKh8pggG0tPTbSsFwsPDr9veYrFo6NChkqR58+aV2r9x40bt27dP3t7euv/++11bLAAAAAAAlcgtgoFt27bptddesy3vv9qqVavUv39/FRQUaMCAAWrevLlt38yZM2W1WjVixIhS/caNGycfHx998803mj59uu1eAxkZGRo1apQkafTo0baVBQAAAAAA3IrcIhg4f/68Xn/9dTVv3lz169dXZGSkwsPDVbNmTfXv31/79u1TZGSkPvrooxL9srKylJGRoRMnTpQas1mzZpo7d648PDw0fvx4NW7cWB07dlTLli21f/9+derUSdOnT6+sQwQAAAAAoEK4RTAQHh6uWbNm6f7771dAQID27dunffv2yd/fX/3799eCBQu0ceNG1apVy6FxH3vsMf3www8aNGiQLl++rL179yosLExxcXFav369AgICKuiIAAAAAACoHBajvOfxwaWKnx8ZFRWltWvXVnU512SxWKq6BABABeCUDwCAudj7OdQtVgwAAAAAAIAbQzAAAAAAAICJEQwAAAAAAGBiBAMAAAAAAJgYwQAAAAAAACZGMAAAAAAAgIkRDAAAAAAAYGIEAwAAAAAAmBjBAAAAAAAAJkYwAAAAAACAiREMAAAAAABgYgQDAAAAAACYGMEAAAAAAAAmRjAAAAAAAICJEQwAAAAAAGBiBAMAAAAAAJgYwQAAAAAAACZGMAAAAAAAgIkRDAAAAAAAYGIEAwAAAAAAmBjBAAAAAAAAJkYwAAAAAACAiREMAAAAAABgYgQDAAAAAACYGMEAAAAAAAAmRjAAAAAAAICJEQwAAAAAAGBiBAMAAAAAAJgYwQAAAAAAACZGMAAAAAAAgIkRDAAAAAAAYGIEAwAAAAAAmBjBAAAAAAAAJkYwAAAAAACAiREMAAAAAABgYgQDAAAAAACYGMEAAAAAAAAmRjAAAAAAAICJEQwAAAAAAGBiBAMAAAAAAJgYwQAAAAAAACZGMAAAAAAAgIkRDAAAAAAAYGIEAwAAAAAAmBjBAAAAAAAAJkYwAAAAAACAiREMAAAAAABgYgQDAAAAAACYGMEAAAAAAAAmRjAAAAAAAICJEQwAAAAAAGBiBAMAAAAAAJgYwQAAAAAAACZGMAAAAAAAgIkRDAAAAAAAYGIEAwAAAAAAmBjBAAAAAAAAJkYwAAAAAACAiREMAAAAAABgYgQDAAAAAACYGMEAAAAAAAAmRjAAAAAAAICJEQwAAAAAAGBibhEMGIah9evXa9y4cerWrZuCg4Pl4+OjBg0aaNiwYUpMTHR4zLi4OFkslmu+9u3bVwFHAwAAAABA5fGq6gJc4fvvv1fv3r0lSR4eHmrRooUCAgKUmpqqL7/8Ul9++aUmTZqk+Ph4h8du3LixmjRpUua+atWqOVU3AAAAAABVzS2CAcMw1KJFC/3hD3/QiBEjVLNmTUlSXl6e4uLiNHXqVE2ePFldu3bVoEGDHBp71KhRiouLq4CqAQAAAACoem5xKUGXLl30448/auzYsbZQQJJ8fHw0ZcoU9e/fX5I0d+7cqioRAAAAAICbklsEA4GBgfLyKn/xQ58+fSRJBw4cqKySAAAAAAC4JbjFpQTXk5OTI0ny9/d3uG9iYqL27NmjzMxMhYSEqEuXLnrsscdUr149V5cJAAAAAEClc/tgwDAMLVq0SJLUs2dPh/uvW7euxNeLFy9WXFyc3nvvPcXExDg8XkpKiqKjox3qExMTc0NzAQAAAADcQ0JCghISEhzqk5KSYlc7tw8G5s6dq+TkZPn4+OiFF16wu1/9+vU1ceJEDR06VGFhYfL391dycrImT56sr7/+WqNGjVJoaKjuu+8+h+rJzs5WUlKSQ30cDRIAAAAAAO4lPT3d4c+S9nLrYGD79u16/vnnJUmTJ09W8+bN7e47ZsyYUtt69OihFStWaNiwYVqyZIlefPFFDRo0SBaLxe5xg4KCFBERYXd7SbJarQ61BwAAAAC4F6vVqqioKIf6pKSkKDs7+7rtLIZhGDda2M3s8OHD6tmzp44fP66HH35Yn376qUMf4K/lwIEDat26taQr3+jw8PDr9omOjlZSUpKioqK0du1al9RRUVz1fQIA3Fzc9JQPAADKYe/nULd4KsGvnThxQn369NHx48c1cOBAJSQkuPTDbqtWrRQSEiJJSktLc9m4AAAAAABUNrcLBs6cOaM+ffro4MGDioqK0qJFi+Tt7e3yeYrHLCgocPnYAAAAAABUFrcKBi5cuKABAwZo9+7dioyM1PLly2/oEYXXc/r0aZ06dUqS1KhRI5ePDwAAAABAZXGbYCA3N1eDBw/Wli1bdMcdd2jVqlWqUaNGhcw1Y8YMGYahoKAgRUZGVsgcAAAAAABUBrcIBgoLCzVixAh9//33at68ub799lvbPQCuZebMmbJarRoxYkSJ7Xv27NEzzzyjPXv2lNiek5OjKVOm6K233pIkvfzyy/Lx8XHdgQAAAAAAUMnc4nGF//rXv/TVV19Jkjw8PDR8+PAy29WvX1+LFi2yfZ2VlaWMjIxSjwPMz8/X7NmzNXv2bNWuXVtNmjSRJP3444+6dOmSJCk2NlYTJkxw/cEAAAAAAFCJ3CIYyM3Ntf07NTVVqampZbZr2rSpXeNZrVbFx8dr48aN2rdvn/bv36+8vDzVqVNHAwYM0OjRo9WvXz+X1A4AAAAAQFWyGDzUuFLY+/zIm4ErH+0IALh5cMoHAMBc7P0c6hb3GAAAAAAAADeGYAAAAAAAABMjGAAAAAAAwMQIBgAAAAAAMDGXPZXg1KlT2rl
zp9LT03XmzBldvnxZ/v7+CgkJkdVqVXh4uGrXru2q6QAAAAAAgAvccDBgGIbWrFmjJUuWaNWqVcrIyLhuH6vVqn79+mno0KHq3bs3d78HAAAAAKCKORwMnDlzRrNnz9b777+vY8eO2bbb8wik9PR0ffDBB/rggw/UoEEDPf300xo7dqxCQkIcLQMAAAAAALiA3cHA+fPnNX36dM2cOVMXL14sEQRUq1ZNnTt31m233abQ0FCFhIQoMDBQ586d05kzZ5SZmakff/xR//d//6dLly5Jkn7++We9+uqrevPNN/Xiiy/qpZdeUmBgoOuPEAAAAAAAlMuuYODjjz/Wyy+/rFOnTtkCge7du+vBBx9UdHS02rdvL09Pz+uOU1hYqJ07d2rdunX64osvtHHjRl28eFFvvPGG5s6dq2nTpunRRx917ogAAAAAAIDd7AoGYmJiJEk1atTQU089pTFjxqhFixYOT+bp6akOHTqoQ4cOev7553Xo0CG9//77mjNnjk6ePKknnniCYAAAAAAAgEpk1+MKAwICFBcXpyNHjmj69Ok3FAqUJSwsTNOmTdORI0cUFxenatWquWRcAAAAAABgH7tWDBw8eFB16tSpsCICAwP16quvauzYsRU2BwAAAAAAKM2uFQMVGQpcrXbt2pUyDwAAAAAAuMKuYAAAAAAAALgnggEAAAAAAEzMrnsMOOPy5ct6//339cMPP6igoEAREREaO3as6tevX9FTAwAAAACA63AqGNi7d69GjBghi8Wi999/X927dy+x/9y5c+rVq5d2795t27ZixQrNnj1b33zzjTp06ODM9AAAAAAAwElOXUrw9ddfa/fu3Tp16pS6detWav8rr7yiXbt2yTCMEq/MzEwNGzZMubm5zkwPAAAAAACc5FQw8P3338tisahPnz6yWCwl9p0/f17z5s2TxWJRkyZNtGTJEqWkpOjJJ5+UJGVkZOjTTz91ZnoAAAAAAOAkp4KBjIwMSSrzkoCvv/5aOTk5kqQPP/xQgwcPVvv27fXBBx+oXbt2kqSvvvrKmekBAAAAAICTnAoGfvnlF0kq80aCSUlJtn29e/cusW/48OEyDEM7d+50ZnoAAAAAAOAkp4KBs2fPXhnEo/QwP/zwgywWi+69995S+5o2bSrpv8ECAAAAAACoGk4FA9WqVZNU+gN+VlaW9uzZI0nq0aNHqX5+fn6SpMLCQmemBwAAAAAATnIqGLBarZKk9evXl9j+73//W4ZhSJJ69uxZql9mZqYkKSgoyJnpAQAAAACAk5wKBnr16iXDMLRs2TLb/QLOnTun6dOnS5IaNmyotm3bluq3e/duSVKzZs2cmR4AAAAAADjJqWDgySeflIeHh3JyctSlSxd169ZNzZs31+7du2WxWGyPJvy14sccdu7c2ZnpAQAAAACAk5wKBtq3b6/XXntNhmEoLy9PW7duVWZmpgzDULt27fTSSy+V6rNr1y7t27dPknT33Xc7Mz0AAAAAAHCSl7MD/PnPf1ZERITmzJmjtLQ0BQQEqG/fvpowYYL8/f1LtX/33XclSRaLRdHR0c5ODwAAAAAAnOB0MCBJ9913n+677z672s6ZM0dz5sxxxbQAAAAAAMBJTl1KAAAAAAAAbm0EAwAAAAAAmBjBAAAAAAAAJmZXMDB8+HAdOnSoQgvZtWuXhgwZUqFzAAAAAACAkuwKBhYvXqzbbrtNMTEx2rNnj0sL2LVrl373u9+pQ4cOWr58uUvHBgAAAAAA12ZXMNCnTx/l5+frk08+Ufv27RUVFaUFCxbozJkzNzTp6dOn9c4776hz586KiIjQF198oaKiIvXp0+eGxgMAAAAAADfGrscVrl69WosXL9aECRN08OBBrV+/XuvXr9dTTz2lO+64Q926dVPXrl112223KSQkRCEhIQoMDNS5c+d05swZnTlzRvv27dPmzZu1ZcsW7dmzR4WFhTIMQ5LUokULvfnmm3rggQcq9GABAAAAAEBJdgUDkjRs2DANGTJE8+fP11//+lcdOHBAhYWF2rVrl3bt2qW5c+faPWlxINCmTRu99NJLevzxx+Xp6el49QAAAAAAwCkOPZXA09NTTz75pPbt26dVq1ZpxIgRql69ugzDsPsVGBiokSNH6ttvv9XevXs1atQoQgEAAAAAAKqI3SsGfq1v377q27evCgoKtHHjRm3evFm7du1Senq6zpw5o9zcXPn6+io0NFRWq1Xt27dXt27d1L17d4IAAAAAAABuEjccDNgG8PLSXXfdpbvuussV9QAAAAAAgErk0KUEAAAAAADAvRAMAAAAAABgYgQDAAAAAACYGMEAAAAAAAAmRjAAAAAAAICJEQwAAAAAAGBiBAMAAAAAAJgYwQAAAAAAACZGMAAAAAAAgIkRDAAAAAAAYGIEAwAAAAAAmBjBAAAAAAAAJuZUMDB+/HgdPHjQVbUAAAAAAIBK5lQw8Pbbb6tVq1bq3bu3vvjiCxUUFLiqLgAAAAAAUAmcvpTAMAwlJibqd7/7nRo1aqSJEyfq0KFDrqgNAAAAAABUMKeCgY8//li9evWSYRgyDEOnTp3SW2+9pVatWqlfv35asmSJCgsLXVUrAAAAAABwMaeCgZEjRyopKUk//vijnn/+eYWEhMgwDBUVFWnNmjV68MEH1bhxY/35z39WRkaGq2oGAAAAAAAu4pKnErRu3Vp/+9vf9PPPP+vjjz/WnXfeaVtFcOLECU2ZMkXNmzfXwIEDtWzZMhUVFbliWgAAAAAA4CSXPq7Q19dXI0eO1Lp167R3714999xzJVYRrFq1SkOHDlWTJk0UFxeno0ePunJ6AAAAAADgIJcGA1dr06aNZs6cqZ9//lkfffSRevbsaVtFcOzYMcXHxyssLEz333+/vv7664oqAwAAAAAAXEOFBQPFfH199eijj2rlypV69tlnJUkWi0WSVFhYqBUrVmjQoEFq06aNFi1aVNHlAAAAAACAq1R4MLBt2zY99dRTatiwof7+97/LYrHIMAxZLBa1bNnStorgwIEDGjFihEaMGMGTDAAAAAAAqCQVEgxcvHhRc+bMUadOndSlSxfNmzdPFy5ckGEYCg0N1fjx45Wamqr9+/dr586dGjNmjPz8/GQYhhYtWqTZs2c7NJ9hGFq/fr3GjRunbt26KTg4WD4+PmrQoIGGDRumxMTEGz6WTZs2afDgwapdu7b8/f11++23Kz4+Xjk5OTc8JgAAAAAANwuLYRiGqwbbtm2b5syZo88//1wXL16UdOVDuyT16NFDY8eO1fDhw+Xj41Oq78GDB9W7d29lZGQoPDxcycnJds/73XffqXfv3pIkDw8PtWjRQgEBAUpNTdWFCxckSZMmTVJ8fLxDx/PZZ5/p8ccfV2FhoRo2bKg6depo9+7dys/PV2RkpNauXatq1arZNVZ0dLSSkpIUFRWltWvXOlRHZSu+1AMA4F5ceMoHAAC3AHs/hzq9YqB4dUDnzp3VpUsXffjhh7bVAQEBARozZox27Nih9evX65FHHikzFJCk5s2b649//KOkKyGBIwzDUIsWLfTee+/p9O
nT2r9/v7Zv367MzEz96U9/kiRNnjxZ//73v+0eMz09XbGxsSosLNS0adN09OhRbd++XampqWrdurW2bt2q8ePHO1QnAAAAAAA3G6eCgaeffloNGjTQ2LFjlZycbLtfQNu2bfWPf/xDx44d0+zZs9WuXTu7xmvevLkk2VYb2KtLly768ccfNXbsWNWsWdO23cfHR1OmTFH//v0lSXPnzrV7zOnTpys3N1d9+/bVuHHjbH9Fb9q0qebPny9JmjNnjk6ePOlQrQAAAAAA3EycCgbmzJljWx3g7e2thx56SOvWrdPOnTs1duxYVa9e3bFiPG6snMDAQHl5eZW7v0+fPpKkAwcO2DWeYRhasmSJJCk2NrbU/h49eqhNmzbKz8/X0qVLb6BiAAAAAABuDk5fStC0aVNNmTJFR48e1WeffaY777zzhsfq16+fioqKXP5UguIbBfr7+9vV/siRIzp+/LgkqWfPnmW2Kd6+ZcsWF1QIAAAAAEDVKP/P7HZYvny5BgwYcFPfrK74SQdS+R/yfy01NVWS5OvrqwYNGpTZJiwsrERbAAAAAABuRU4FAwMHDnRVHRVm7ty5Sk5Olo+Pj1544QW7+pw9e1aSFBwcXG7oUXwvg+K29kpJSVF0dLRDfWJiYhQTE+NQHwAAAACA+0hISFBCQoJDfVJSUuxq51QwcLPbvn27nn/+eUlXnkpQfHPD6ym+9KC8JyhIV1YTSNLly5cdqik7O1tJSUkO9XE0SAAAAAAAuJf09HSHP0vay6lg4JdfftGIESNkGIYmTJigvn37XrfPN998ozfffFOenp764osvFBQU5EwJ5Tp8+LAGDRqknJwcPfzww3rppZfs7uvn5ydJysvLK7dNbm6uJPvvW1AsKChIERERDvWxWq0OtQcAAAAAuBer1aqoqCiH+qSkpCg7O/u67ZwKBhYuXKjExETVqFFDvXr1sqtPr1699Nvf/lbnz5/XwoULNWbMGGdKKNOJEyfUp08fHT9+XAMHDlRCQoJD90EovkwgKytLhmGU2bf4EoKrH49oj4iICK1du9ahPgAAAAAAc7uRS8yjo6PtWmXg1FMJ1qxZI0nq37+/3X859/f318CBA2UYhlavXu3M9GU6c+aM+vTpo4MHDyoqKkqLFi2St7e3Q2O0bNlS0pVVAceOHSuzzaFDh0q0BQAAAADgVuRUMLBz505ZLBZ17drVoX5dunSx9XelCxcuaMCAAdq9e7ciIyO1fPlyh5f6S1KTJk1Ur149SdKGDRvKbFO83dFjBwAAAADgZuJUMHDy5ElJUsOGDR3qV79+fUnS8ePHnZm+hNzcXA0ePFhbtmzRHXfcoVWrVqlGjRo3NJbFYtHQoUMlSfPmzSu1f+PGjdq3b5+8vb11//33O1U3AAAAAABVyalgoFhhYeENtXe037XGGzFihL7//ns1b95c3377rUJCQq7bb+bMmbJarRoxYkSpfePGjZOPj4+++eYbTZ8+XYZhSJIyMjI0atQoSdLo0aNtKwsAAAAAALgVOXXzwVq1aunnn3/WwYMHHepX3N6eD+/2+Ne//qWvvvpKkuTh4aHhw4eX2a5+/fpatGiR7eusrCxlZGSUedf/Zs2aae7cuXriiSc0fvx4zZo1S3Xq1NHu3buVn5+vTp06afr06S6pHwAAAACAquJUMNC+fXv99NNPWrJkiSZNmmR3vy+//FIWi0V33HGHM9PbFD86UJJSU1OVmppaZrumTZs6NO5jjz2mFi1aaOrUqdq4caP27t2rsLAwPfTQQ3r55ZdtjzUEAAAAAOBW5dSlBL/5zW8kXXk24pw5c+zq88EHHyglJUXSlacZuEJMTIwMw7juKz09vUS/uLg4GYZxzccH9ujRQ8uXL1dmZqZycnK0b98+vfbaa4QCAAAAAAC34FQw8MQTTyg0NFSS9Oyzz+rtt99WQUFBmW0LCgo0ffp0Pffcc5KkoKAgjR492pnpAQAAAACAk5y6lCAgIEDvvfeefve736mgoEAvv/yyZsyYoQEDBuj2229X9erVdeHCBe3du1crV67UyZMnZRiGLBaL3nvvPQUGBrrqOAAAAAAAwA1wKhiQpOHDh+v06dN64YUXlJ+fr5MnT2rBggVltjUMQ15eXvrb3/5W5pMAAAAAAABA5XLJ4wrHjh2rDRs22O4ZUNb1/ZI0YMAAbdy4Ub///e9dMS0AAAAAAHCS0ysGinXu3FkrVqxQZmam1q9fr6NHj+rcuXMKDAxUo0aN1KtXL9v9CAAAAAAAwM3BZcFAsdDQUA0ePNjVwwIAAAAAgArgkksJAAAAAADArYlgAAAAAAAAE3PppQRFRUU6ePCgzp49q5ycHLv63HXXXa4sAQAAAAAAOMAlwcD69es1ffp0rVmzxu5AQJIsFosKCgpcUQIAAAAAALgBTgcD06ZN08SJE0s8lhAAAAAAANwanAoG1q1bpwkTJshiscgwDDVq1Eh33323GjZsKF9fX1fVCAAAAAAAKohTwcCsWbNs/542bZr++Mc/ymKxOF0UAAAAAACoHE4FA5s2bZLFYtEDDzygl156yVU1AQAAAACASuLU4wrPnDkjSRo4cKBLigEAAAAAAJXLqWCgVq1akqSAgACXFAMAAAAAACqXU8FARESEJOngwYOuqAUAAAAAAFQyp4KB2NhYGYahzz//3FX1AAAAAACASuRUMDB06FANGzZMu3bt0rhx41xVEwAAAAAAqCROPZVAkj799FP5+vpqxowZ2rZtm1544QX16NHDdv8BAAAAAABw83IqGPD09LT92zAMJSUlKSkpye7+FotFBQUFzpQAAAAAAACc4FQwYBjGNb8GAAAAAAA3N6eCgbvuuksWi8VVtQAAAAAAgErmVDCwdu1aF5UBAAAAAACqglNPJQAAAAAAALc2ggEAAAAAAEyMYAAAAAAAABNz6h4DVysqKtLixYu1evVq7d27V2fOnFF+fr4OHjxYot3u3bt17tw5BQUF6Y477nDV9AAAAAAA4Aa4JBjYsGGDHn30UWVkZNi2GYZR5hMLlixZori4OAUGBur48ePy8/NzRQkAAAAAAOAGOH0pwTfffKN77rlHGRkZMgxDnp6eCgoKKrf9mDFjZLFYdO7cOa1cudLZ6QEAAAAAgBOcCgaysrL00EMPKT8/XzVq1NDcuXOVlZWlBQsWlNunTp066tmzpyTpu+++c2Z6AAAAAADgJKeCgX/84x86e/asvL29tXr1asXGxqpatWrX7detWzcZhqHt27c7Mz0AAAAAAHCSU8HAypUrZbFY9OCDD6pr165292vVqpUk6dChQ85MDwAAAAAAnORUMHDgwAFJ0j333ONQv+J7EGRnZzszPQAAAAAAcJJTwcC5c+ckSTVr1nSoX25uriTJy8tlT0sEAAAAAAA3wKlgICQkRJJ0+vRph/qlpaVJkmrVquXM9AAAAAAAwElOBQMtWrSQJK1fv96hfsuWLZPFYlFERIQz0wMAAAAAACc5FQz07dtXhmFo8eLFOnLkiF19/vnPfyo5OVmS1K9fP2emBwAAAAAATnIqGHjqqadUrVo15
eTkaMiQITpx4sQ12//zn//U6NGjJV25DOHxxx93ZnoAAAAAAOAkp+7+V7duXU2dOlXPP/+8duzYoTZt2uiRRx6Rp6enrc3y5cu1e/duffnll9q+fbsMw5DFYtE777yjatWqOX0AAAAAAADgxjn9WIBnn31WJ0+e1NSpU3Xu3Dm9//77kiSLxSJJGjJkiK1tcSgwefJkPfTQQ85ODQAAAAAAnOTUpQTFJk+erBUrVqhDhw4yDKPcV7t27bRy5Ur96U9/csW0AAAAAADASU6vGCj2m9/8Rr/5zW+0e/durVu3Tunp6crKylL16tXVqFEjRUdHq2PHjq6aDgAAAAAAuIDLgoFibdu2Vdu2bV09LAAAAAAAqAAuuZQAAAAAAADcmpwKBjw8POTl5aVly5Y51G/16tXy9PSUl5fLFywAAAAAAAAHOP3J3DCMSu0HAAAAAABch0sJAAAAAAAwsSoJBi5duiRJ8vPzq4rpAQAAAADA/1clwcDmzZslSXXq1KmK6QEAAAAAwP9n9z0Gdu7cqZSUlDL3ff/998rKyrpmf8MwdPHiRW3fvl2ffvqpLBaLIiMjHakVAAAAAAC4mN3BwJIlS/T666+X2m4Yht59912HJjUMQxaLRU8//bRD/QAAAAAAgGs5dCmBYRglXuVtv96rbt26mjt3ru655x6XHxAAAAAAALCf3SsGhgwZIqvVWmLbE088IYvFov/5n/9Rx44dr9nfw8ND1atXV7NmzdSuXTt5enreUMEAAAAAAMB17A4GwsPDFR4eXmLbE088IUm69957df/997u2MgAAAAAAUOHsDgbKsmDBAkm67moBAAAAAABwc3IqGHj88cddVQcAAAAAAKgCDt18EAAAAAAAuBeCAQAAAAAATMypSwmutnHjRn300UfavHmzfvrpJ507d05FRUXX7GOxWFRQUOCqEgAAAAAAgIOcDgYuXbqkUaNGadGiRZIkwzCcLgoAAAAAAFQOp4OBRx55RMuWLZNhGAoICFC7du20efNmWSwW3X777fL391d6erpOnz4t6coqgU6dOikgIMDp4gEAAAAAgHOcusfAmjVrtHTpUknS0KFDdezYMW3cuNG2/4033tB//vMfnTp1Sps3b1a/fv1kGIZyc3OVkJCgxMRE56oHAAAAAABOcSoY+PjjjyVJ9evX12effaYaNWqU27ZLly76+uuv9fzzz2vXrl0aMmSI8vLynJm+hMOHD2vu3Ll68sknFR4eLi8vL1ksFk2ePPmGxouLi5PFYrnma9++fS6rHwAAAACAquDUpQTFlwz87ne/k5+fX6n9Zd1v4K9//atWr16tnTt3av78+Xr66aedKcFm1qxZmjVrlkvGulrjxo3VpEmTMvdVq1bN5fMBAAAAAFCZnAoGTpw4IUlq3759ie0Wi0WSlJubW6qPh4eHRo4cqUmTJulf//qXy4KBWrVqadCgQerSpYsiIyP14YcfavHixU6PO2rUKMXFxTlfIAAAAAAANyGngoGcnBxJUmBgYIntAQEBunjxos6ePVtmvxYtWkiS9u/f78z0JUyaNKnE1wsXLnTZ2AAAAAAAuCun7jEQHBws6cojC68WGhoqSUpLSyuzX3FgkJmZ6cz0AAAAAADASU6tGGjZsqUyMzOVkZFRYnvbtm2VkZGhNWvWlNkvKSlJUumVBjejxMRE7dmzR5mZmQoJCVGXLl302GOPqV69elVdGgAAAAAATnNqxUDnzp1lGIaSk5NLbP/Nb34jSdq5c6c++OCDEvu+/PJL/fOf/5TFYlHnzp2dmb5SrFu3Tl988YUSExO1ePFivfzyywoLC1NCQkJVlwYAAAAAgNOcWjFw77336t1339X333+vwsJCeXp6SpIeeeQRxcXF6cyZM3rmmWc0b948tWjRQmlpadq2bZsMw5DFYtFTTz3lkoOoCPXr19fEiRM1dOhQhYWFyd/fX8nJyZo8ebK+/vprjRo1SqGhobrvvvscGjclJUXR0dEO9YmJiVFMTIxDfQAAAAAA7iMhIcHhP1CnpKTY1c6pYKBfv36yWq26fPmy1qxZo379+km6cu+BDz/8UMOHD1dBQYG2bdumbdu2SfrvIwxHjRqlIUOGODN9hRozZkypbT169NCKFSs0bNgwLVmyRC+++KIGDRpkewqDPbKzs22XUtjL0SABAAAAAOBe0tPTHf4saS+nggFfX18dOnSozH2DBw9WUlKSXn31VSUlJamgoECS1KpVK73wwgsue0xhZbNYLHrzzTe1ZMkSHTx4UDt37lR4eLjd/YOCghQREeHQnFar1bEiAQAAAABuxWq1KioqyqE+KSkpys7Ovm47p4KB6+nevbu+/fZbFRQU6PTp0woICFCNGjUqcspK0apVK4WEhOjMmTNKS0tzKBiIiIjQ2rVrK644AAAAAIDbuZFLzKOjo+1aZVChwYBtEi8vt7uLv7e3tyTZVkIAAAAAAHArcuqpBDdqxowZCgsLU/PmzatieqedPn1ap06dkiQ1atSoiqsBAAAAAODGVcqKgV87e/as0tPTHbpp381kxowZMgxDQUFBioyMrOpyAAAAAAC4YVWyYuBmMXPmTFmtVo0YMaLE9j179uiZZ57Rnj17SmzPycnRlClT9NZbb0mSXn75Zfn4+FRavQAAAAAAuFqVrBioCBs2bNDgwYNtX1+4cEGSNHXqVM2cOdO2PTk5WY0bN5YkZWVlKSMjo9Rd//Pz8zV79mzNnj1btWvXVpMmTSRJP/74oy5duiRJio2N1YQJEyrwiAAAAAAAqHhuEwzk5+crMzOz1PZLly7ZPsxLUmFh4XXHslqtio+P18aNG7Vv3z7t379feXl5qlOnjgYMGKDRo0erX79+Lq0fAAAAAICq4DbBQHR0tAzDcKhPXFyc4uLiSm0PDg7WpEmTXFQZAAAAAAA3L1PfYwAAAAAAALMjGAAAAAAAwMQIBgAAAAAAMDG3uccAAADAzei1pvOqugQAQAX4S0ZsVZfgMqwYAAAAAADAxOxeMeDp6VmRdQAAAAAAgCpgdzBgGIYsFovDjwQsi8VicXoMAAAAAADgPIcuJXBFKODKcQAAAAAAgHPsXjFQVFRUkXUAAAAAAIAqwM0HAQAAAAAwMYIBAAAAAABMjGAAAAAAAAATIxgAAAAAAMDECAYAAAAAADAxggEAAAAAAEyMYAAAAAAAABMjGAAAAAAAwMQIBgAAAAAAMDGCAQAAAAAATIxgAAAAAAAAEyMYAAAAAADAxAgGAAAAAAAwMYIBAAAAAABMjGAAAAAAAAATIxgAAAAAAMDECAYAAAAAADAxggEAAAAAAEyMYAAAAAAAABMjGAAAAAAAwMQIBgAAAAAAMDGCAQAAAAAATIxgAAAAAAAAEyMYAAAAAADAxAgGAAAAAAAwMYIBAAAAAABMjGAAAAAAAAATIxgAAAAAAMDECAYAAAAAADAxggEAAAAAAEyMYAAAAAAAABMjGAAAAAAAwMQIBgAAAAAAMDGCAQAAAAAATIxgAAAAAAAAEyMYAAAAAADAxAgGAAAAAAAwMYIBAAAAAABMjGAAAAAA
AAATIxgAAAAAAMDECAYAAAAAADAxggEAAAAAAEyMYAAAAAAAABMjGAAAAAAAwMQIBgAAAAAAMDGCAQAAAAAATIxgAAAAAAAAEyMYAAAAAADAxAgGAAAAAAAwMYIBAAAAAABMjGAAAAAAAAATIxgAAAAAAMDECAYAAAAAADAxggEAAAAAAEzMbYKBw4cPa+7cuXryyScVHh4uLy8vWSwWTZ482alxN23apMGDB6t27dry9/fX7bffrvj4eOXk5LiocgAAAAAAqo5XVRfgKrNmzdKsWbNcOuZnn32mxx9/XIWFhWrYsKEaN26s3bt369VXX9Xy5cu1du1aVatWzaVzAgAAAABQmdxmxUCtWrU0aNAgvf766/r66681bNgwp8ZLT09XbGysCgsLNW3aNB09elTbt29XamqqWrdura1bt2r8+PEuqh4AAAAAgKrhNisGJk2aVOLrhQsXOjXe9OnTlZubq759+2rcuHG27U2bNtX8+fPVs2dPzZkzR3/+859Vt25dp+YCAAAAAKCquM2KAVcyDENLliyRJMXGxpba36NHD7Vp00b5+flaunRpZZcHAAAAAIDLEAyU4ciRIzp+/LgkqWfPnmW2Kd6+ZcuWSqsLAAAAAABXIxgoQ2pqqiTJ19dXDRo0KLNNWFhYibYAAAAAANyK3OYeA6509uxZSVJwcLAsFkuZbWrWrFmirb1SUlIUHR3tUJ+YmBjFxMQ41AcAAAAA4D4SEhKUkJDgUJ+UlBS72hEMlCEnJ0eS5OPjU24bX19fSdLly5cdGjs7O1tJSUkO9XE0SAAAAAAAuJf09HSHP0vai2CgDH5+fpKkvLy8ctvk5uZKkvz9/R0aOygoSBEREQ71sVqtDrUHAAAAALgXq9WqqKgoh/qkpKQoOzv7uu0IBspQfJlAVlaWDMMo83KC4ksIitvaKyIiQmvXrnW6RgAAAACAedzIJebR0dF2rTLg5oNlaNmypaQrqwKOHTtWZptDhw6VaAsAAAAAwK2IYKAMTZo0Ub169SRJGzZsKLNN8fauXbtWWl0AAAAAALgawUAZLBaLhg4dKkmaN29eqf0bN27Uvn375O3trfvvv7+yywMAAAAAwGVMHQzMnDlTVqtVI0aMKLVv3Lhx8vHx0TfffKPp06fLMAxJUkZGhkaNGiVJGj16tG1lAQAAAAAAtyK3CQY2bNigWrVq2V4LFy6UJE2dOrXE9qNHj9r6ZGVlKSMjQydOnCg1XrNmzTR37lx5eHho/Pjxaty4sTp27KiWLVtq//796tSpk6ZPn15pxwcAAAAAQEVwm6cS5OfnKzMzs9T2S5cu6dKlS7avCwsL7R7zscceU4sWLTR16lRt3LhRe/fuVVhYmB566CG9/PLLtscaAgAAAABwq3KbYCA6Otq23N9ecXFxiouLu2abHj16aPny5U5UBgAAAADAzcttLiUAAAAAAACOIxgAAAAAAMDECAYAAAAAADAxggEAAAAAAEyMYAAAAAAAABMjGAAAAAAAwMQIBgAAAAAAMDGCAQAAAAAATIxgAAAAAAAAEyMYAAAAAADAxAgGAAAAAAAwMYIBAAAAAABMjGAAAAAAAAATIxgAAAAAAMDECAYAAAAAADAxggEAAAAAAEyMYAAAAAAAABMjGAAAAAAAwMQIBgAAAAAAMDGCAQAAAAAATIxgAAAAAAAAEyMYAAAAAADAxAgGAAAAAAAwMYIBAAAAAABMjGAAAAAAAAATIxgAAAAAAMDECAYAAAAAADAxggEAAAAAAEyMYAAAAAAAABMjGAAAAAAAwMQIBgAAAAAAMDGCAQAAAAAATIxgAAAAAAAAEyMYAAAAAADAxAgGAAAAAAAwMYIBAAAAAABMjGAAAAAAAAATIxgAAAAAAMDECAYAAAAAADAxggEAAAAAAEyMYAAAAAAAABMjGAAAAAAAwMQIBgAAAAAAMDGCAQAAAAAATIxgAAAAAAAAEyMYAAAAAADAxAgGAAAAAAAwMYIBAAAAAABMjGAAAAAAAAATIxgAAAAAAMDECAYAAAAAADAxggEAAAAAAEyMYAAAAAAAABMjGAAAAAAAwMQIBgAAAAAAMDGCAQAAAAAATIxgAAAAAAAAEyMYAAAAAADAxAgGAAAAAAAwMYIBAAAAAABMjGAAAAAAAAATIxgAAAAAAMDECAYAAAAAADAxtwsGVq5cqd69eyskJEQBAQHq2LGj3n33XRUVFTk0TlxcnCwWyzVf+/btq6CjAAAAAACgcnhVdQGu9Oabb+pPf/qTJCksLEzVq1fXjh079Nxzz2nNmjVasmSJPDwcy0IaN26sJk2alLmvWrVqTtcMAAAAAEBVcptgYNOmTZo4caI8PDz06aef6qGHHpIk7dixQ/369dOyZcs0Y8YMvfTSSw6NO2rUKMXFxVVAxQAAAAAAVD23uZRg8uTJMgxDo0ePtoUCkhQeHq4ZM2ZIurKiID8/v6pKBAAAAADgpuMWwcC5c+e0Zs0aSVJsbGyp/cOHD1dgYKAyMzOVmJhY2eUBAAAAAHDTcotgIDk5WXl5efLz81PHjh1L7ff29lZkZKQkacuWLQ6NnZiYqOHDh+uee+7Rgw8+qGnTpunEiRMuqRsAAAAAgKrmFvcYSE1NlSQ1adJEXl5lH1JYWJi+++47W1t7rVu3rsTXixcvVlxcnN577z3FxMTcUL0AAAAAANws3CIYOHv2rCSpZs2a5bYp3lfc9nrq16+viRMnaujQoQoLC5O/v7+Sk5M1efJkff311xo1apRCQ0N13333OVRrSkqKoqOjHeoTExNDCAEAAAAAJpaQkKCEhASH+qSkpNjVzi2CgZycHEmSj49PuW18fX0lSZcvX7ZrzDFjxpTa1qNHD61YsULDhg3TkiVL9OKLL2rQoEGyWCx215qdna2kpCS720tyOEgAAAAAALiX9PR0hz9L2sstggE/Pz9JUl5eXrltcnNzJUn+/v5OzWWxWPTmm29qyZIlOnjwoHbu3Knw8HC7+wcFBSkiIsKhOa1Wq2NFAgAAAADcitVqVVRUlEN9UlJSlJ2dfd12bhEM2HOZgD2XG9irVatWCgkJ0ZkzZ5SWluZQMBAREaG1a9c6XQMAAAAAwDxu5BLz6Ohou1YZuMVTCVq2bClJOnLkiAoKCspsc+jQoRJtneXt7S1J5c4HAAAAAMCtwC2CgQ4dOsjb21s5OTnavn17qf35+fnaunWrJKlr165Oz3f69GmdOnVKktSoUSOnxwMAAAAAoKq4RTAQGBio3r17S5LmzZtXav+iRYt07tw5hYaGuuRGfjNmzJBhGAoKClJkZKTT4wEAAAAAUFXcIhiQpFdeeUUWi0UffvihPv/8c9v2HTt26A9/+IMkafz48SWeXDBz5kxZrVaNGDGixFh79uzRM888oz179pTYnpOToylTpuitt96SJL388svXfBICAAAAAAA3O7cJBnr27Kn4+HgVFRXp4YcfVvPmzRUeHq6OHTvq5MmTGjhwoP74xz+W6JOVlaWMjAydOHGixPb8/HzNnj1bbdu2VZ06ddS5c2d17txZoaGheuWVV1RUVKT
Y2FhNmDChMg8RAAAAAACXc5tgQLqyamD58uW65557lJmZqbS0NLVr104zZ87U0qVL5enpadc4VqtV8fHx6t+/v6pXr679+/dr165dCgkJ0YMPPqhVq1bpww8/lMViqeAjAgAAAACgYrnF4wqvNmjQIA0aNMiutnFxcYqLiyu1PTg4WJMmTXJxZQAAAAAA3HzcasUAAAAAAABwDMEAAAAAAAAmRjAAAAAAAICJEQwAAAAAAGBiBAMAAAAAAJgYwQAAAAAAACZGMAAAAAAAgIkRDAAAAAAAYGIEAwAAAAAAmBjBAAAAAAAAJkYwAAAAAACAiREMAAAAAABgYgQDAAAAAACYGMEAAAAAAAAmRjAAAAAAAICJEQwAAAAAAGBiBAMAAAAAAJgYwQAAAAAAACZGMAAAAAAAgIkRDAAAAAAAYGIEAwAAAAAAmBjBAAAAAAAAJkYwAAAAAACAiREMAAAAAABgYgQDAAAAAACYGMEAAAAAAAAmRjAAAAAAAICJEQwAAAAAAGBiBAMAAAAAAJgYwQAAAAAAACZGMAAAAAAAgIkRDAAAAAAAYGIEAwAAAAAAmBjBAAAAAAAAJkYwAAAAAACAiREMAAAAAABgYgQDAAAAAACYGMEAAAAAAAAmRjAAAAAAAICJEQwAAAAAAGBiBAMAAAAAAJgYwQAAAAAAACZGMAAAAAAAgIkRDAAAAAAAYGIEAwAAAAAAmBjBAAAAAAAAJkYwAAAAAACAiREMAAAAAABgYgQDAAAAAACYGMEAAAAAAAAmRjAAAAAAAICJEQwAAAAAAGBiBAMAAAAAAJgYwQAAAAAAACZGMAAAAAAAgIkRDAAAAAAAYGIEAwAAAAAAmJhXVRcAAAAA4NaXcmGDsgpOK9irliKq96zqcgA4gBUDAAAAAJy24+IGrTu3XDsubqjqUgA4iGAAAAAAAAATIxgAAAAAAMDECAYAAAAAADAxggEAAAAAAEzM7YKBlStXqnfv3goJCVFAQIA6duyod999V0VFRTc03qZNmzR48GDVrl1b/v7+uv322xUfH6+cnBwXVw4AAAAAQOVzq2DgzTff1MCBA/Xdd9+pZs2aatGihXbs2KHnnntOQ4cOdTgc+Oyzz9SrVy8tW7ZMvr6+uu2225SWlqZXX31Vd911ly5dulRBRwIAAAAAQOVwm2Bg06ZNmjhxojw8PPS///u/OnjwoHbs2KHt27erbt26WrZsmWbMmGH3eOnp6YqNjVVhYaGmTZumo0ePavv27UpNTVXr1q21detWjR8/vgKPCAAAAACAiuc2wcDkyZNlGIZGjx6thx56yLY9PDzcFgi8+eabys/Pt2u86dOnKzc3V3379tW4ceNksVgkSU2bNtX8+fMlSXPmzNHJkyddfCQAAAAAAFQetwgGzp07pzVr1kiSYmNjS+0fPny4AgMDlZmZqcTExOuOZxiGlixZUu54PXr0UJs2bZSfn6+lS5c6WT0AAAAAAFXHLYKB5ORk5eXlyc/PTx07diy139vbW5GRkZKkLVu2XHe8I0eO6Pjx45Kknj17ltmmeLs94wEAAAAAcLNyi2AgNTVVktSkSRN5eXmV2SYsLKxEW3vG8/X1VYMGDZweDwAAAACAm1XZn6JvMWfPnpUk1axZs9w2xfuK29ozXnBwsO3eAs6MJ0lpaWmSpPXr1ys4ONiuPsXq1aunevXqOdQHAIBfi46OruoSTCn95PGqLgGoFCfyjtr++9HJaVVcDVDxkqI/qdT5Tpw4oRMnTjjU58KFC5L++3m0PG4RDOTk5EiSfHx8ym3j6+srSbp8+XKljyf99wdSWFio7Oxsu/oUy87O1v79+x3qAwDAryUlJVV1CQBMINe4rIzcA1VdBlDhMpJunfd58efR8rhFMODn5ydJysvLK7dNbm6uJMnf37/Sx5OkZs2aaf/+/SosLLS7TzFWDAAAAACAud3IioHLly/L09NTzZo1u2Y7twgG7FnWb8/lBr8eLysrS4ZhlHk5gSPjSVdukAgAAAAAwM3GLW4+2LJlS0lXniZQUFBQZptDhw6VaGvPeLm5uTp27JjT4wEAAAAAcLNyi2CgQ4cO8vb2Vk5OjrZv315qf35+vrZu3SpJ6tq163XHa9KkiW3p/oYNG8psU7zdnvEAAAAAALhZuUUwEBgYqN69e0uS5s2bV2r/okWLdO7cOYWGhtp1R2aLxaKhQ4eWO97GjRu1b98+eXt76/7773eueAAAAAAAqpBbBAOS9Morr8hisejDDz/U559/btu+Y8cO/eEPf5AkjR8/vsSTBmbOnCmr1aoRI0aUGm/cuHHy8fHRN998o+nTp8swDElSRkaGRo0aJUkaPXo0NwUEAAAAANzS3CYY6Nmzp+Lj41VUVKSHH35YzZs3V3h4uDp27KiTJ09q4MCB+uMf/1iiT1ZWljIyMsq8s2OzZs00d+5ceXh4aPz48WrcuLE6duyoli1bav/+/erUqZOmT59eWYcHAAAAAECFcJtgQLqyamD58uW65557lJmZqbS0NLVr104zZ87U0qVL5enp6dB4jz32mH744QcNGjRIly9f1t69exUWFqa4uDitX79eAQEBFXQkAAAAAABUDotRvEYeAHBLiYuL01/+8he99tpriouLq+pyAAC4YZzTgKrlVisGAFStr776SnFxcUpJSbnhMRITE/Xcc8+pe/fuatiwoXx9fVWjRg116tRJ8fHxOn/+vMNjJiQkyGKxXPO1atUqh8dNSUlRXFycvvrqK4f7AgBubpzTIElFRUV655131KFDBwUEBCgkJES9e/fW119/fd2+OTk5evvtt9WlSxfVrFlT1apVU1hYmB566CGtW7euVPvDhw9r7ty5evLJJxUeHi4vLy9ZLBZNnjz5mvNUxPsM5uNV1QUAcB9fffWVPvroI1mtVkVERNzQGPPmzdNnn30mLy8vNWjQQO3bt9cvv/yi5ORkbd++XQsWLNDatWvVpEkTh8euU6eOWrZsWea+mjVrOjxeSkqK/vKXv+jxxx/XkCFDHO4PALh5cU5DYWGhBg8erBUrVsjDw0Nt27bV+fPn9d133+m7777T9OnT9dJLL5XZNyMjQ3379tWBAwfk5eWl1q1by9fXVz/99JMWLlyohg0b6q677irRZ9asWZo1a5bDdVbk+wzmQTAA4KYydOhQjRw5UlFRUfL397dt37t3rx566CHt3LlTY8eO1YoVKxweu3///kpISHBhtQAAlI9z2q1t+vTpWrFiherWravVq1crPDxckvS///u/evTRRzV+/HhFRUUpMjKyRL+LFy+qd+/eSktL09ixYzV58mSFhITY9qempiorK6vUfLVq1dKgQYPUpUsXRUZG6sMPP9TixYuvW2dFvs9gHgQDAG4qw4YNK3P77bffrg8//FBdunTR6tWrlZOTIz8/v0quDgAA+3FOu3Xl5eVp2rRpkqS//e1vtlBAkh5++GGtXbtWc+fO1eTJk7V06dISfePj45WWlqbHH39c7733Xqmxy1vpMWnSpBJfL1y40K5aeZ/BFbjHAGACBw8e1EMPPaTatWurWrVqio
iI0Pvvvy9JslqtslgsSk9Pt7W/etvq1asVHR2toKAgBQYGqk+fPvrhhx9KjJ+eni6LxaKPPvpIkvTEE0+UuNbRVTcRatOmjaQrS/tyc3NdMuaNslqteuKJJyRJH330UYnjjY6OLtHu19/fq0VHR8tisWjt2rXlbk9JSdGDDz6ounXrysPDo8y/EJ04cUKxsbFq0KCB/Pz8dNttt+ntt99WQUFBucewceNGPfDAA6pbt658fHzUqFEjPfbYY/rxxx8d/XYAQKXhnOZ6nNNKS0xM1NmzZxUYGKgHH3yw1P7Y2FhJ0urVq0tcw5+Tk6MPPvhAHh4eev31129oble6md5nuLmxYgBwczt37lRUVJSysrLk7++v22+/XadPn9bYsWOve7JcuHChJk6cqJo1a6pVq1Y6fPiw1qxZo++//14LFy7U8OHDJUl+fn7q2bOnUlNTderUKbVs2VJ16tSxjeOqa9o2bdokSQoLC1NQUJDD/Xfs2KGHH35YJ06cUGBgoDp06KCRI0eqefPmDo8VGRkpHx8fpaamlrrOs127dg6PV55169ZpypQp8vb2VuvWrVW9evVSbTIzM9WlSxcdO3ZM7dq1U40aNbRv3z6NGzdOGzZs0OLFi+XhUTIHnj17tn7/+9/LMAzVqVNH4eHhSktL0yeffKJFixbpiy++0MCBA112HADgCpzT/otz2n9VxDlt8+bNkqQuXbrI29u71P5OnTrJz89POTk5SklJUa9evSRJP/zwg7KyshQREaFGjRrpk08+0fLly3XmzBk1aNBAAwYM0G9/+9tSx1BRnH2fwUQMAG6rsLDQaNeunSHJ6N+/v3HmzBnbvi+++MLw9fU1vL29DUnG4cOHbfuaNm1qSDK8vLyMP/zhD0ZeXp5hGIaRn59vjB8/3pBkBAYGGseOHSsx3+OPP25IMhYsWOCyYygqKjKOHz9ufPrpp0b9+vUNLy8v46uvvnJojAULFhiSynx5enoakydPvqHaisd9/PHHy21T/L28+vt7taioKEOSkZiYWOZ2T09P46mnnjIuXrxo23fp0iXDMAzjtddes/2c2rVrV2KOpKQkIygoyJBk/P3vfy8xdnJysuHl5WVIMqZNm2YUFhYahmEYOTk5xjPPPGNIMoKCgkr9fAGgKnFOu4JzWuWc0x555BFDkvHUU0+V26Zly5aGJGPevHm2bVOnTjUkGcOGDTPuvvvuMn9OPXv2LPH+LU/xezA+Pt6h2l3xPoP5cCkB4Ma+/fZb7dq1S6Ghofr8889L3KV42LBhmjBhgvLz88vtf8cdd+ivf/2rLSn38vLSW2+9pY4dO+rcuXO2pZsV4auvvpLFYpGHh4fq16+vkSNHqlWrVlq7dq0GDx7s0FjBwcF69tlntWHDBp08eVI5OTlKTk7Wo48+qsLCQk2aNEl///vfK+hInNO2bVvNnj1b1apVs227+sZCklRQUKCEhARZrVbbtrvuukvx8fGSpLfffluGYdj2FS/HHDx4sMaNG2f7q4Wvr6/+/ve/64477lB2drZmz55dgUcGAI7hnHYF57TKOaedPXtW0rWf8FC8r7itJB0/flyStGzZMiUmJuqVV17RiRMndOnSJS1evFi1atXShg0bbJciuJIr32cwH4IBwI19++23kqQHHnigzOVjxdcTlueZZ5655vbVq1c7WWH5QkND1bNnT3Xr1k0NGzaUxWLRf/7zH3388ce6fPmyQ2MNGTJE77zzjnr06KE6derI19dXERER+vjjj/XCCy9IunLDn5vxOb8jR4687nLD7t27q2PHjqW2jxo1Sn5+fkpPT9f+/ftt27/55htJ0rPPPluqj8Vi0XPPPVeiHQDcDDinXcE5rXLOaTk5OZIkHx+fctv4+vpKUomf4cWLFyVJ+fn5evTRRzV58mTVrVtX/v7+euCBBzRv3jxJ0pIlS7Rz506HaroeV77PYD4EA4AbS01NlSS1b9++zP1NmzZVYGBguf1vu+22a24/cOCAkxWWr1evXlq/fr02bdqkn376SXv27FG3bt00Z84cPfDAAy6b5y9/+Yt8fX2VnZ2t77//3mXjukp5PwN72gQEBKhx48aS/vuzysrK0i+//CLpyt2Ky3LHHXeU6AMANwPOadfHOa20Gz2nFd+9Py8vr9w2xTfzu3rVw9V3/X/++edL9bn//vtt94FwdRhVWe8zuCduPgi4seLUukaNGuW2qVGjhs6dO1fmvqtvtnS1unXrSpLDf40YPny4bYnd1davX3/dvrfddpuWL1+u5s2ba9WqVVq/fr3uvPNOh+YvS2BgoO644w5t375daWlptu3z58/X/PnzS7V/5ZVX1L9/f6fntVdAQMB125T3c5Ku/KxSU1NtP6sLFy5ct9+N/nwBoCJxTrs+zmll95FK/nyTk5PLXGEwYMAATZw4UVLZlwn8WlmXG1z97+InAvxamzZtdPDgwXKf7uAqFfU+g3siGADcWPEJ+OoT569d6xehX375Ra1bty61/dSpU5Ku/ctZWbZu3aqMjAyH+lwtICBA0dHR+uc//6nt27e77ORWfL3p1Y9BOnLkiDZs2FCq7cmTJx0a22KxSFKJ6yGvVvyLrjOK/1pSll//rK6+A/SpU6dUv379Un2Kj9HRny8AVCTOafbhnFZSWee07OzsMr8fLVq0sP27+MkMhw4dKrOWgoICHTlypERbSbb3mMViKfNpBtJ/L0EoLCwsc78rVdT7DO6HSwkAN9aqVStJKvcatiNHjpT7lxVJ5T76qXh78fjFin9hKE96eroMwyj1ckTxLzrXepaxIwoLC23XKjZq1Mi2PS4ursxaY2JibG2ud7zSf3+RLe8XnYMHDzpR/RXl/ZwuXbpk+6Wl+GcVHBys2rVrS5L27t1bZr89e/aU6AMANwPOadfHOa20ss5p0dHRZX4/EhISbG26du0qSfrPf/5T5k0tt23bptzcXPn4+CgiIsK2vXv37pKuhCflrQgoDhsaNmxY5n5Xc/X7DO6JYABwY3369JEkffnll2X+FeXqE2BZ3nvvvWtu79u3b4ntxdfYVdQNbrKzs5WYmChJJU7Czpg3b56ysrLk6emp6Ohoh/rac7xhYWGSrvxl6dcWL158zSWK9tq4caNSUlJKbZ8/f75ycnLUtGnTEn8l69evnyTp3XffLdXHMAzb9uJ2AHAz4Jx2fZzTSnLmnHb33XerZs2aOnfunL744otS+4tvItivX78SqxGaN29uu3niRx99VKpfSkqKduzYIUm65557HKrpRlTE+wxuquKfiAigqhQWFhrt27c3JBmDBg0yzp49a9u3ZMkSw8/P77rPfB43blyJZz7/6U9/MiQZNWrUMH7++ecS802fPt2QZIwYMcIoKipyuN6ff/7ZeP75543du3eX2rdp0yajW7duhiSjXbt2RkFBQYn9ixYtMpo2bWr07NmzxPbs7GxjxIgRxpYtW0psLygoMObMmWP4+fld9znF5dm6dashyWjWrFmJZzJf7b333jMkGU2aNDEOHDhg2/6f//zHaNCgge37X94zn3+9/WpXP/M5PDzcSE9Pt
+374YcfjJo1axqSjHfffbdEv6uf+fz222/bnvmcm5trPPvss7ZnPh8/ftzB7wgAVBzOaZzTKvuc9sYbbxiSjHr16hkpKSm27Z999pnh4eFhWCwWY/PmzaX6LV++3JBkVKtWzVi1apVt+08//WR07NjRkGTceeed153/8ccfNyQZ8fHx5bZx5n0GXI1gAHBzO3bsMIKDg20nqM6dOxtWq9WQZDz77LO2X5iOHDli61O8berUqYbFYjFCQ0ONyMhIo1atWoYkw8PDw/j8889LzZWWlmb4+PgYkoymTZsavXr1MqKioowFCxbYVevhw4cNSYYkIyQkxOjYsaPRoUMH27ySjObNmxtpaWml+i5YsMA279XOnj1r6xscHGx06NDBiIyMtH1PJBn9+/c3Ll++7ND31TCu/JLasmVLQ5IRGhpqdO/e3YiKijKef/55W5vLly8bd9xxh+2XnbZt2xqtWrWy/bJZ3i9LjvwS9fvf/95o3Lix4eXlZURERBitW7e2Hdt9991n+yXpau+9955hsVgMSUbdunVLfE98fX2Nf//73w5/PwCgonFO45xWmee0/Px84ze/+Y3tfdK+fXsjLCzMVs/UqVPL7VscOkkyWrZsaXTs2NEWnISFhRkZGRml+qxfv94IDQ21vXx9fW3v9au3X/3+duZ9BlyNYAAwgbS0NGPEiBFGaGio4efnZ7Rr1874+9//bhiGYTtxXP2Xl+Jfog4fPmysWrXKuOuuu4waNWoY1atXN+655x4jKSmp3LlWr15tREVFGYGBgbaT9GuvvWZXnZcvXzY++OAD47e//a3RqlUrIygoyPDy8jJq165t3HPPPcY777xjXLp0qcy+5f0SlZeXZ0ybNs0YMmSI0aJFCyMwMNDw9vY26tWrZwwcOND45z//eUN/CSp24MAB48EHHzTq1KljeHp6GpKMqKioEm1OnjxpxMbGGnXq1DF8fX2NNm3a2P6q4Ypfol577TXj+PHjxqhRo4z69esbPj4+RuvWrY233nrLyM/PL7f/+vXrjSFDhhi1a9c2vL29jQYNGhgjR4409uzZc8PfDwCoaJzTOKeVpaLOaQUFBcbMmTON8PBwo1q1akZQUJBxzz332BU2LF261Lj33nuN4OBgw9fX12jVqpUxYcIE48yZM2W2T0xMtH2Yv9br6hUxzrzPgKtZDMPBu6QAcBuZmZmqVauWgoODS1wXaLValZGRocOHD8tqtVZdgQAA2IlzGgDcOG4+CJjYggULJEk9evSo4koAAHAO5zQAuHEEA4Cb27Vrl+bMmVPiuc+GYejTTz/Vn//8Z0nS008/XVXlAQBgN85pAFAxvKq6AAAVKzMzU2PGjNEzzzyjpk2bKjQ0VIcOHVJmZqYkacyYMbrvvvuquEoAAK6PcxoAVAxWDABu7vbbb9f48ePVrl07ZWdnKzk5WYZh6N5779XChQv1/vvvV3WJAADYhXMaAFQMbj4IAAAAAICJsWIAAAAAAAATIxgAAAAAAMDECAYAAAAAADAxggEAAAAAAEyMYAAAAAAAABMjGAAAAAAAwMQIBgAAAAAAMDGCAQAAAAAATOz/AaagJzbDfX3aAAAAAElFTkSuQmCC", + "image/png": "iVBORw0KGgoAAAANSUhEUgAABAcAAAKsCAYAAACQz9RVAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABUsklEQVR4nO39fdzX8/0//t+OSkVHJxJCUovJ1DpRRKbIhmZE9KbtPZlzw+yU2dt3Yxu9jVk0ZoZ4b4ZYTiZyWpNzKicb5ixiQtE5UT1/f+zX8dEqjqPj6Ox4Xq+Xy3FxHM/n4+T+fB1Hnq/jdjyfj2dFURRFAAAAgNJqsLYLAAAAANYu4QAAAACUnHAAAAAASk44AAAAACUnHAAAAICSEw4AAABAyQkHAAAAoOSEAwAAAFBywgEAAAAoOeHAWvLCCy/k4osvzrBhw9K1a9c0atQoFRUV+cUvfrG2S1up8ePHp6Ki4lM/fve7363tMgEAAKihRmu7gLK69NJLM2LEiLVdxirZfPPNs++++65w3/bbb7+GqwEAAKC2hANrSZcuXfKDH/wgPXr0SM+ePXPOOefk//7v/9Z2WdXSuXPnjBo1am2XAQAAQB0RDqwlRx999DJfN2jgDg8AAADWDr+RrmcWLVqUP/zhD+nfv39at26dJk2apGPHjjnhhBMybdq0tV0eAAAA6yFXDqxH5s6dmwMOOCDjx49PZWVldtppp2y66aZ55pln8rvf/S6jR4/O3XffnR49eqzWOt5+++2cffbZefPNN9O0adN07tw5X/3qV9O+ffvVOi8AAACrh3BgPXL88cdn/Pjx2X///XPFFVdks802q9r3m9/8Jt/97nfzX//1X3nuuefSsGHD1VbH888/n5/+9KfLbGvUqFFOPvnknHfeeWnUyI8VAADA+sRtBeuJ5557Ln/+85+z5ZZb5tprr10mGEiSU089NQMHDsyLL76YO+64Y7XU0LJly5x66qmZMGFC3nrrrcyfPz9PP/10vvvd76aioiIXXnhhTjzxxNUyNwAAAKuPP/GuJ8aOHZuiKLLffvulefPmK2zTv3//jB07Ng899FD233//ZbZPnz69xnM+/fTTady4cdXXPXr0WO6Wha5du+bXv/51dt999wwePDiXX355TjzxxHTv3r3G8wEAALB2CAfWE6+88kqS5IorrsgVV1zxqW3ffffdZb5+6aWX8uabb9Z4ziVLllS77cEHH5zu3btnypQpue2224QDAAAA6xHhwHpi6S/q3bt3T7du3T617S677LLM12+88cZqq+uTdthhh0yZMmWNzQcAAEDdEA6sJ7beeuskSd++fTNy5Mi1XM2KzZw5M0lWetsDAAAA6yYLEq4n9ttvvyTJrbfemg8//HAtV7O8N998Mw888ECSZOedd17L1QAAAFATwoH1RI8ePTJ48OBMmzYtBx98cKZOnbpcm/nz5+dPf/pT3n777dVSw4gRIzJjxozltj/99NP52te+lg8++CCdOnXKgQceuFrmBwAAYPWoKIqiWNtFlNGkSZOWeezfyy+/nBkzZqRdu3bZaqutqraPGTMmW2yxRZJk7ty5Oeigg3LvvfemcePG6datWzp27JiiKDJ16tQ89dRT+eijj/Lcc8+lc+fOdV5zq1atMm/evHTv3j0dO3ZMgwYN8vLLL2fy5MlZsmRJ2rdvnzvvvDM77LBDnc8NAADA6iMcWEvGjx+fPffc8zPbvfrqq+nQoUPV10uWLMn111+fP/7xj3nyySfz3nvvpUWLFtliiy3Su3fvHHDAAfnqV7+aDTbYoM5r/tWvfpUHH3wwf//73/Puu+9m/vz5adGiRb7whS/kwAMPzHHHHWe9AQAAgPWQcAAAAABKzpoDAAAAUHLCAQAAACi5Rmu7gLLo0aNHXn311VRWVmbbbbdd2+UAAABQz7300
kuZN29eOnbsmMmTJ39qW2sOrCGtWrXK7Nmz13YZAAAAlEzLli0za9asT23jyoE1pLKyMrNnz07Lli3TvXv3tV0OkGTKlCn+XQJAHXFehXXP0n+XlZWVn9lWOLCGbLvttnnzzTfTvXv3jB8/fm2XAyTp379/JkyY4N8lANQB51VY9yz9d1mdW9stSAgAAAAlJxwAAACAkhMOAAAAQMkJBwAAAKDkhAMAAABQcsIBAAAAKDnhAAAAAJSccAAAAABKTjgAAAAAJSccAAAAgJITDgAAAEDJNVrbBQCsLcOGDUv//v3ToUOHtV0KAKz3nFdh/SYcAEpr2LBha7sEAKg3nFdh/ea2AgAAACg54QAAAACUnHAAAAAASk44AAAAACUnHAAAAICSEw4AAABAyQkHAAAAoOSEAwAAAFBywgEAAAAoOeEAAAAAlJxwAAAAAEpOOAAAAAAlJxwAAACAkmu0tgtg3VNRUbG2SwBgNSiKYm2XAACso1w5AAAAACUnHAAAAICSEw4AAABAyQkHAAAAoOSEAwAAAFBywgEAAAAoOeEAAAAAlJxwAAAAAEpOOAAAAAAlJxwAAACAkhMOAAAAQMkJBwAAAKDkhAMAAABQcsIBAAAAKDnhAAAAAJSccAAAAABKTjgAAAAAJSccAAAAgJITDgAAAEDJCQcAAACg5IQDAAAAUHL1Jhx44YUXcvHFF2fYsGHp2rVrGjVqlIqKivziF7+o1bj33HNPBg4cmDZt2mTDDTdM586d85Of/CTz5s2ro8oBAABg7Wq0tguoK5deemlGjBhRp2NeeOGF+d73vpeKiop86Utfyuabb54HHngg55xzTm666aZMnDgxbdq0qdM5AQAAYE2rN1cOdOnSJT/4wQ/ypz/9Kc8991z++7//u1bjTZ48Od///vfTsGHD3H777ZkwYUJuuOGGvPzyyxkwYEBeeOGFHH/88XVUPQAAAKw99ebKgaOPPnqZrxs0qF3uce6556Yoihx55JHZb7/9qrZvtNFGueKKK/K5z30uN910U55//vl07ty5VnMBAADA2lRvrhyoSx999FFuv/32JMnQoUOX27/NNtukb9++SZIxY8as0doAAACgrgkHVuCf//xnFixYkCTp1avXCtss3T558uQ1VhcAAACsDvXmtoK69OqrryZJWrVqlebNm6+wzdZbb71M2+qaMmVK+vfvX6M+w4YNy7Bhw2rUBwAAgPpj1KhRGTVqVI36TJkypdpthQMrMHfu3CRJs2bNVtqmsrIySTJnzpwajT179uxMmDChRn1qGiYAAABQv0ydOrXGv0vWhHBgDWvZsmW6d+9eoz4dOnRYLbUAAACwfujQoUP69etXoz5TpkzJ7Nmzq9VWOLACS28lmD9//krbzJs3L0nSokWLGo3dvXv3jB8/fpVrAwAAoHxW5Xbz/v37V/tqAwsSrsDSv9TPmjWr6haD/zRt2rRl2gIAAMD6SjiwAttvv3022mijJMkTTzyxwjZLt/fs2XON1QUAAACrg3BgBRo3bpyvfvWrSZJrr712uf2vvfZaHnrooSTJQQcdtEZrAwAAgLpW6nBg5MiR6dy5c775zW8ut+/0009PRUVFrrrqqtx5551V2xcsWJCjjjoqixcvzuDBg9O5c+c1WTIAAADUuXqzIOGkSZNy4oknVn398ssvJ0kuu+yy/PWvf63aPmbMmGyxxRZJkhkzZuSFF15I27ZtlxuvZ8+eueCCC/K9730vAwcOTL9+/bLZZpvlgQceyFtvvZXtt98+v/vd71bzUQEAAMDqV2/CgTlz5uTRRx9dbvsbb7yRN954o+rrhQsXVnvM7373u+natWsuuOCCPPbYY5k/f37at2+fH//4x/nxj39c9VQDAAAAWJ/Vm3Cgf//+KYqiRn1+9rOf5Wc/+9mnttl7772z995716IyAAAAWLeVes0BAAAAQDgAAAAApSccAAAAgJITDgAAAEDJCQcAAACg5IQDAAAAUHLCAQAAACg54QAAAACUnHAAAAAASk44AAAAACUnHAAAAICSEw4AAABAyQkHAAAAoOSEAwAAAFBywgEAAAAoOeEAAAAAlJxwAAAAAEpOOAAAAAAlJxwAAACAkhMOAAAAQMkJBwAAAKDkhAMAAABQcsIBAAAAKDnhAAAAAJSccAAAAABKTjgAAAAAJSccAAAAgJITDgAAAEDJCQcAAACg5IQDAAAAUHLCAQAAACg54QAAAACUnHAAAAAASk44AAAAACUnHAAAAICSEw4AAABAyQkHAAAAoOSEAwAAAFBywgEAAAAoOeEAAAAAlJxwAAAAAEpOOAAAAAAlJxwAAACAkhMOAAAAQMkJBwAAAKDkhAMAAABQcsIBAAAAKDnhAAAAAJSccAAAAABKTjgAAAAAJSccAAAAgJITDgAAAEDJCQcAAACg5IQDAAAAUHLCAQAAACg54QAAAACUnHAAAAAASk44AAAAACUnHAAAAICSEw4AAABAyQkHAAAAoOSEAwAAAFBywgEAAAAoOeEAAAAAlJxwAAAAAEpOOAAAAAAlJxwAAACAkhMOAAAAQMkJBwAAAKDkhAMAAABQcsIBAAAAKDnhAAAAAJSccAAAAABKTjgAAAAAJSccAAAAgJITDgAAAEDJCQcAAACg5IQDAAAAUHLCAQAAACg54QAAAACUnHAAAAAASk44AAAAACUnHAAAAICSEw4AAABAyQkHAAAAoOSEAwAAAFBywgEAAAAoOeEAAAAAlJxwAAAAAEpOOAAAAAAlJxwAAACAkhMOAAAAQMkJBwAAAKDkhAMAAABQcvUuHBg9enT69++fjTfeOM2aNUu3bt1y3nnn5eOPP67xWPPnz8+5556bXr16pUWLFtlggw3Stm3b7L///rn11ltXQ/UAAACw5jVa2wXUpVNPPTUjRoxIo0aNstdee6WysjL33XdfTjvttNx222256667suGGG1ZrrJkzZ2aPPfbIP/7xj1RWVma33XZLq1at8tJLL+X222/P7bffnlNOOSUjRoxYzUcFAAAAq1e9uXLg5ptvzogRI1JZWZlHH30048aNy0033ZQXX3wxXbt2zcSJE3PmmWdWe7yzzz47//jHP7LTTjvltddey7hx43L99dfnySefzO23355GjRrloosuyiOPPLIajwoAAABWv3oTDpxzzjlJktNPPz09e/as2t6mTZtccsklSZKRI0dm9uzZ1RrvvvvuS5Kcdtppad269TL7Bg4cmD333DNJ8vDDD9e6dgAAAFib6kU48Oabb+bxxx9PkgwdOnS5/bvvvnu23nrrLFy4MGPHjq3WmE2bNq1WuzZt2lS/UAAAAFgH1YtwYPLkyUmS1q1bp2PHjits06tXr2Xafpb99tsvSfK///u/ee+995bZN3bs2Nx///1p27ZtDjjggFUtGwAAANYJ9WJBwldffTVJ0r59+5W22XrrrZdp+1lOO+20PPbYYxk3bly22Wab9O3bt2pBwieffDJ9+/bNFVdckZYtW9b+AAAAAGAtqhfhwNy5c5MkzZo1W2mbysrKJMmcOXOqNWazZs1y22235Ywz
zsgFF1yQcePGVe3bZJNNsvfee2errbaqca1TpkxJ//79a9Rn2LBhGTZsWI3nAgAAoH4YNWpURo0aVaM+U6ZMqXbbehEOrA5vvfVWDjzwwDz99NP5xS9+kcMPPzybbbZZ/vGPf+R//ud/ctZZZ+Xmm2/OAw88kObNm1d73NmzZ2fChAk1qqWmYQIAAAD1y9SpU2v8u2RN1ItwYOkv5/Pnz19pm3nz5iVJWrRoUa0xjzjiiDz++OM577zz8sMf/rBqe+/evfPXv/41O+20U5566qmcf/75Oeuss6pda8uWLdO9e/dqt0+SDh061Kg9AAAA9UuHDh3Sr1+/GvWZMmVKtZ/YVy/CgaW/PE+bNm2lbZbuq84v2m+++WbuvvvuJMnhhx++3P4NNtgghxxySJ555pncc889NQoHunfvnvHjx1e7PQAAAKzK7eb9+/ev9tUG9eJpBT169EiSzJw5c6ULDj7xxBNJkp49e37meK+//nrV5yu70mDpQoT/+SQDAAAAWN/Ui3CgXbt26d27d5Lk2muvXW7/xIkTM23atDRp0iQDBw78zPE+udDgo48+usI2jzzySJKs9NGJAAAAsL6oF+FAkpxxxhlJkuHDh2fSpElV22fOnJkTTzwxSXLSSSct8+jBMWPGpHPnzhkwYMAyY7Vv374qbPjOd76TqVOnLrP/j3/8Y66//vokydChQ+v8WAAAAGBNqhdrDiTJoEGDcsopp+Siiy5Knz59MmDAgDRr1iz33ntvZs2alb59++bnP//5Mn1mz56dF154IR9++OFy41155ZXZc88989xzz2WHHXZInz590qZNmzz33HP5+9//niT5xje+ka9//etr5PgAAABgdak34UCSjBgxIn379s1vf/vbPPTQQ/n444/TqVOnnH766fnud7+bxo0bV3usLl265Nlnn82FF16YO+64I48//ngWLlyYjTfeOPvss0++9a1vZciQIavxaAAAAGDNqCiKoljbRZTB0lUi+/Xrt84/raCiomJtlwDAauCUDwDlUpPfQ+vNmgMAAADAqhEOAAAAQMkJBwAAAKDkhAMAAABQcsIBAAAAKDnhAAAAAJSccAAAAABKTjgAAAAAJSccAAAAgJITDgAAAEDJCQcAAACg5IQDAAAAUHLCAQAAACg54QAAAACUnHAAAAAASk44AAAAACUnHAAAAICSEw4AAABAyQkHAAAAoOSEAwAAAFBywgEAAAAoOeEAAAAAlJxwAAAAAEpOOAAAAAAlJxwAAACAkhMOAAAAQMkJBwAAAKDkhAMAAABQcsIBAAAAKDnhAAAAAJSccAAAAABKTjgAAAAAJSccAAAAgJITDgAAAEDJCQcAAACg5IQDAAAAUHLCAQAAACg54QAAAACUnHAAAAAASk44AAAAACUnHAAAAICSEw4AAABAyQkHAAAAoOSEAwAAAFBywgEAAAAoOeEAAAAAlJxwAAAAAEquUV0NVBRFXn755UydOjXvvfdePvjgg2y44YZp3bp1OnTokE6dOqWioqKupgMAAADqSK3CgRdffDFjxozJnXfemccffzwLFixYadtmzZqld+/e2WeffXLQQQdlu+22q83UAAAAQB2pcTiwePHi3HDDDRk5cmQeeeSRqu1FUXxqv3nz5mX8+PEZP358fvzjH2eXXXbJySefnCFDhqRhw4Y1rxwAAACoE9UOB4qiyKhRo3L22Wfn9ddfr9q21DbbbJMddtghrVu3ziabbJIWLVpk9uzZmTlzZt5777384x//yLRp06raP/roo3n00Ufz4x//OD/96U9zxBFHpEEDSyAAAADAmlatcOBvf/tbTjrppPz973+vCgTatWuXgw8+OP3790+fPn3Stm3bzxxn+vTpefjhh/O3v/0tf/nLXzJt2rS8/vrrOfroo3PhhRfmt7/9bb70pS/V7ogAAACAGqlWONC/f/8kSYMGDTJ48OAcf/zx2WuvvWo8Wdu2bXPQQQfloIMOyoUXXpj7778/l156acaMGZNnn302e+65ZxYtWlTjcQEAAIBVV63r+CsqKnLkkUfm+eefzw033LBKwcCK7Lnnnrnhhhvy/PPPZ9iwYXUyJgAAAFAz1bpy4KmnnkqXLl1WWxGdOnXKlVdeme9///urbQ4AAABgxap15cDqDAY+accdd1wj8wAAAAD/j8cDAAAAQMkJBwAAAKDkqrXmQG2NGTMmDzzwQBYtWpTu3bvnsMMOy0YbbbQmpgYAAAA+Q63CgRdffLFqEcEzzzwzvXv3Xmb/Rx99lK9+9au57777ltk+fPjwjBs3Lh07dqzN9AAAAEAdqNVtBddff33++te/ZuLEienWrdty+3/5y1/m3nvvTVEUy3y89NJLOeigg7JkyZLaTA8AAADUgVqFAw8++GCSZO+9907jxo2X2bdw4cKMGDEiFRUVadmyZS688MLcfPPNGThwYJLkmWeeyejRo2szPQAAAFAHahUOvP7666moqEivXr2W23fXXXdlzpw5SZIrrrgi3/nOd3LAAQfklltuSadOnZIkN954Y22mBwAAAOpArcKBGTNmJEnatWu33L7x48cnSVq3bp2DDjqoanvDhg1z+OGHpyiKTJ48uTbTAwAAAHWgVuHA+++/nyTL3VKQJA899FAqKioyYMCAVFRULLPvc5/7XJJk+vTptZkeAAAAqAO1CgeaNm2aJHn33XeX2f7BBx9k0qRJSZLddtttuX6VlZVJ/v00AwAAAGDtqlU4sPR2gieffHKZ7ePGjcvHH3+cZMXhwNIrDpo3b16b6QEAAIA6UKtwYNddd01RFLnxxhvzxhtvJEkWLVqUX//610n+vd5Az549l+v33HPPJUnat29fm+kBAACAOlCrcODII49MksydOzfdu3fPYYcdlm7dumXixImpqKjIN7/5zTRosPwUDzzwQCoqKvLFL36xNtMDAAAAdaBW4cDuu++eY489NkVR5L333svo0aPz/PPPJ/n3LQc/+clPluvzyiuvVN2GsKJbDgAAAIA1q1bhQJJceuml+c1vfpMdd9wxjRs3zsYbb5zDDjssEydOTOvWrZdrf8kll1R9vs8++9R2egAAAKCWGtV2gIqKipxyyik55ZRTqtX+Bz/4QU4++eRUVFRYcwAAAADWAbUOB2qqbdu2a3pKAAAA4FPU+rYCAAAAYP0mHAAAAICSq1Y48MMf/jCzZ89erYW88847OfXUU1frHAAAAMDyqhUOXHDBBenUqVPOOuusvP/++3VawMyZM3Paaaflc5/7XC6++OI6HRsAAAD4bNUKB7bffvu89957Ofvss9OuXbsceeSRuf/++1MUxSpNunjx4tx666055JBD0q5du5x//vlZsGBBtt9++1UaDwAAAFh11XpawTPPPJNf//rXOffcczN79uxcc801ueaaa7LJJpukb9++6dOnT3bZZZd07tw5rVu3TuPGjav6fvTRR3nvvffy3HPP5dFHH82jjz6aiRMn5r333kuSFEWRli1b5owzzsh3v/vd1XOUAAAAwEpVKxxo1KhRfvSjH+WYY47Jr371q/zud7/LrFmzMmP
GjNx666259dZbl2m/0UYbpXnz5pkzZ04++OCD5cZbesXBxhtvnBNPPDHf//7306pVq9ofDQAAAFBjNXpawcYbb5xzzjkn06ZNy6WXXpo+ffqkKIrlPubPn5+33347CxYsWG5fkuy+++75/e9/n2nTpuXnP/+5YAAAAADWompdOfCfmjVrluOOOy7HHXdcpk+fnnHjxuXRRx/NM888k6lTp+a9997LwoUL07Rp02yyySbp2LFjunbtmj59+uQrX/lKNttss7o+DgAAAGAVrVI48Elt27bNEUcckSOOOKIu6gEAAADWsBrdVgAAAADUP8IBAAAAKDnhAAAAAJSccAAAAABKTjgAAAAAJSccAAAAgJITDgAAAEDJCQcAAACg5IQDAAAAUHLCAQAAACg54QAAAACUnHAAAAAASq5W4cAZZ5yRqVOn1lEpdWP06NHp379/Nt544zRr1izdunXLeeedl48//niVx7zllltywAEHpG3btmncuHE222yz7Lbbbjn77LPrsHIAAABYO2oVDgwfPjzbbrtt9t1339x8881ZvHhxXdW1Sk499dQMGTIkDz74YHbeeefsu+++ef3113Paaadlr732ygcffFCj8T766KMMGTIkgwYNyj333JMdd9wxhxxySLp06ZKXX345F1100Wo6EgAAAFhzGtV2gCVLluTuu+/O3XffnbZt2+aoo47K0Ucfnfbt29dFfdV28803Z8SIEamsrMyECRPSs2fPJMmMGTOy1157ZeLEiTnzzDNz/vnnV3vMY445JqNHj86gQYNy+eWXp02bNlX7lixZkscee6zOjwMAAADWtFpdOXDVVVdl1113TVEUKYoib731Vn75y1+mU6dO2X///XPbbbdlyZIldVXrpzrnnHOSJKeffnpVMJAkbdq0ySWXXJIkGTlyZGbPnl2t8e69995cc8016dKlS2644YZlgoEkadCgQfr06VNH1QMAAMDaU6tw4IgjjsiDDz6Yp59+Ot/+9rfTqlWrFEWRxYsX54477sigQYOyzTbb5Kyzzsobb7xRVzUv580338zjjz+eJBk6dOhy+3ffffdsvfXWWbhwYcaOHVutMS+++OIk/75VYYMNNqi7YgEAAGAdUydPK+jSpUsuvvji/Otf/8qVV16ZPn36VF1N8Oabb+bss89Ox44dc+CBB2bs2LEpiqIupq0yefLkJEnr1q3TsWPHFbbp1avXMm0/zeLFi3PvvfcmSfbYY49Mnz49v/nNb3LCCSfk1FNPzdVXX5158+bVUfUAAACwdtV6zYFPatq0aYYNG5Zhw4bl2Wefze9+97v86U9/yuzZs7N48eL89a9/zV//+te0a9cuxxxzTL71rW9lyy23rPW8r776apJ86joHW2+99TJtP80rr7xS9cv/I488khNPPHG5MOCHP/xhrrvuuuy11141qnXKlCnp379/jfosfU0BAAAop1GjRmXUqFE16jNlypRqt63TcOCTunTpkpEjR+ZXv/pVrrvuuvz+97/Po48+miSZNm1afvrTn+bss8/O/vvvnxNOOCFf/vKXV3muuXPnJkmaNWu20jaVlZVJkjlz5nzmeDNnzqz6/Kijjspuu+2W888/P507d87LL7+cM844I2PHjs2BBx6YSZMmZbvttqt2rbNnz86ECROq3T5JjcMEAAAA6pepU6fW+HfJmlht4cBSG264YY488sgMGTIkP/7xjzNy5MhUVFQkSRYtWpRbbrklt9xyS3bYYYf8/Oc/z0EHHbS6S/pMn7ztYauttsq4cePSpEmTJEm3bt1y6623pnv37nn22WczfPjwXHHFFdUeu2XLlunevXuN6unQoUON2gMAAFC/dOjQIf369atRnylTplR7Uf7VHg5MmTIll112Wf785z9n7ty5qaioSFEUadCgQbbddtu8+OKLSZLnnnsuhxxySA477LBcc801adiwYbXnaN68eZJk/vz5K22z9LaAFi1aVHu85N+X9C8NBpZq2LBhjjvuuJx88sm55557ql1nknTv3j3jx4+vUR8AAADKbVVuN+/fv3+1rzaokwUJ/9OCBQvyhz/8ITvvvHN22mmn/P73v8+cOXNSFEU22WST/OhHP8qLL76YF154IZMnT84xxxyTxo0bpyiKXHfddbnssstqNN/Sv6xPmzZtpW2W7qvOX+E7dOhQdXXD5z73uRW2Wbr9rbfeqkGlAAAAsO6p03Bg8uTJOf7447PFFlvkuOOOy5NPPln11ILddtst//d//5c33ngjw4cPr3qqQLdu3XLZZZfl6aefTvv27VMURS6//PIazdujR48k/14rYGULDj7xxBNJkp49e37meJWVldl+++2TJDNmzFhhm6Xbl65lAAAAAOurWocD8+fPz+WXX57evXunV69eufzyyzN37twURZFmzZrluOOOy1NPPZWJEyfm61//eho3brzCcbbbbrv84Ac/SJK8/PLLNaqhXbt26d27d5Lk2muvXW7/xIkTM23atDRp0iQDBw6s1piHHnpokqz0toG77747SbLzzjvXqFYAAABY19QqHDjuuOOy5ZZb5vjjj8+kSZOqrhLo0qVLfvvb3+Zf//pXLr300nTt2rVa43Xq1CnJp68dsDJnnHFGkmT48OGZNGlS1faZM2fmxBNPTJKcdNJJadmyZdW+MWPGpHPnzhkwYMBy451yyinZeOONM3bs2OVuc7juuuvypz/9qaodAAAArM9qtSDh5ZdfXrXAYOPGjTN48OCccMIJ2X333VdpvAYNVj2rGDRoUE455ZRcdNFF6dOnTwYMGJBmzZrl3nvvzaxZs9K3b9/8/Oc/X6bP7Nmz88ILL+TDDz9cbrw2bdrk+uuvzwEHHJDjjz8+F198cXbYYYe8/PLLmTx5cpLkzDPPrPaVCAAAALCuqvXTCtq3b5/jjjsuRx11VDbddNNajbXPPvtkyZIlq9x/xIgR6du3b37729/moYceyscff5xOnTrl9NNPz3e/+92V3tKwMl/+8pfz1FNP5Zxzzsk999yTW265JS1atMjAgQPzne98J1/5yldWuVYAAABYV9QqHPjrX/+a/fbbr2pl/3XBkCFDMmTIkGq1rc6jID7/+c9n1KhRtS8MAAAA1lG1CgdcUg8AAADrvzp9lCEAAACw/qlVOPD+++9n8ODBOfjgg3PfffdVq899992Xgw8+OIceemjmzZtXm+kBAACAOlCr2wquv/76jBkzJs2aNcs111xTrT4777xz7rnnnsyfPz/77rtvjjrqqNqUAAAAANRSra4cuOuuu5L8+ykDlZWV1epTWVmZ/fbbL0VR5M4776zN9AAAAEAdqFU48NRTT6WioiK77bZbjfr16dOnqj8AAACwdtUqHHjrrbeSJO3atatRvy233DJJ8q9//as20wMAAAB1oFbhQFEUSZIlS5bUqN/S9osWLarN9AAAAEAdqFU40KZNmyTJyy+/XKN+r7zySpKkdevWtZkeAAAAqAO1Cge6du2aoigyZsyYGvUbM2ZMKioqssMOO9RmegAAAKAO1Coc2GeffZIkkydPzpVXXlmtPn/4wx
8yadKkJMl+++1Xm+kBAACAOlCrcOCoo46qujXghBNOyK9//essXrx4hW0XL16cCy64IN/+9reTJC1btswxxxxTm+kBAACAOtCoNp0rKyszcuTIDB06NIsWLcoPf/jDXHDBBdlvv/3yhS98IZWVlZk3b17+8Y9/5I477sj06dNTFEUqKioycuTItGzZsq6OAwAAAFhFtQoHkuSwww7LjBkz8r3vfS+LFi3K9OnTc9VVV62wbVEUadSoUS688MIMHTq0tlMDAAAAdaBWtxUsddJJJ2XixIlVaxAURbHcR5IMHDgwDz30UNWtBQAAAMDaV+srB5baeeedc8cdd2TGjBmZOHFi3njjjcyZMyctWrRIu3bt8qUvfSmbbLJJXU0HAAAA1JE6CweWatOmTQYNGlTXwwIAAACrSZ3cVgAAAACsv4QDAAAAUHJ1elvBkiVL8vLLL+f999/Phx9+WK0+e+yxR12WAAAAANRQnYQDDz30UM4777zcfffd1Q4FkqSioiKLFi2qixIAAACAVVTrcOCCCy7IaaedtswjCwEAAID1R63CgQceeCA//OEPU1FRkaIostVWW2XPPfdMu3bt0qRJk7qqEQAAAFiNahUOjBgxourzn//85/nxj3+cBg2scQgAAADrk1qFAw8//HAqKioyaNCg/OQnP6mrmgAAAIA1qFZ/5p85c2aS5Gtf+1qdFAMAAACsebUKB9q0aZMkadasWZ0UAwAAAKx5tQoHunfvniR5+eWX66IWAAAAYC2oVThw1FFHpSiKXHfddXVVDwAAALCG1SocOOiggzJ48OA8/fTT+dGPflRXNQEAAABrUK2eVpAkf/zjH9OkSZNccMEFefLJJ3Pqqadm1113rVqPAAAAAFi31SocaNiwYdXnRVFk/PjxGT9+fLX7V1RUZNGiRbUpAQAAAKilWoUDRVF86tcAAADAuq9W4cAee+yRioqKuqoFAAAAWAtqFQ7U5BYCAIAyOn2zvdZ2CQCsBsPfuW9tl1CnavW0AgAAAGD9JxwAAACAkhMOAAAAQMnVas2BT1qyZEluuummjBs3Lv/4xz/y3nvv5eOPP87LL7+8TLtnn302c+bMScuWLbPjjjvW1fQAAADAKqqTcODBBx/MN7/5zUydOrVqW1EUK3ySwU033ZSzzz47LVq0yFtvvZWmTZvWRQkAAADAKqr1bQV33XVX9tprr0ydOjVFUaRhw4Zp2bLlStsfe+yxSZI5c+Zk7NixtZ0eAAAAqKVahQOzZs3K4Ycfno8//jiVlZX5/e9/n1mzZuWqq65aaZ8tttgiffr0SZLce++9tZkeAAAAqAO1Cgd++9vf5v3330+jRo1y55135uijj85GG230mf122223FEWRSZMm1WZ6AAAAoA7UKhwYO3ZsKioqMnjw4Oy6667V7rf99tsnSV555ZXaTA8AAADUgVqFA//85z+TJAMGDKhRv1atWiVJZs+eXZvpAQAAgDpQq3Bgzpw5SZLWrVvXqN/HH3+cJGnUqM6epAgAAACsolqFA0tDgZkzZ9ao39JHHrZp06Y20wMAAAB1oFbhwLbbbpskefjhh2vU784770xFRUW6detWm+kBAACAOlCrcOArX/lKiqLIjTfemOnTp1erz7333psHHnggSbLPPvvUZnoAAACgDtQqHDj22GOz0UYbZf78+TnkkEM+c4HBhx9+OIcffniSZOONN84RRxxRm+kBAACAOlCrFQE333zznHPOOTn11FPz8MMPZ/vtt8/RRx+dxYsXV7UZO3ZsXn/99dxxxx25/fbbs2TJklRUVOQ3v/lNmjVrVusDAAAAAGqn1o8LOOWUU/LOO+/k3HPPrfpvklRUVCRJvva1r1W1LYoiSXLWWWflG9/4Rm2nBgAAAOpArW4rWOoXv/hFbr/99vTo0SNFUaz0o0uXLvnrX/+a//mf/6mLaQEAAIA6UOsrB5bad999s+++++bZZ5/N3/72t0ydOjWzZs1KZWVl2rVrl379+mWnnXaqq+kAAACAOlJn4cBSXbp0SZcuXep6WAAAAGA1qZPbCgAAAID1V63CgQYNGqRRo0a59dZba9Rv3LhxadiwYRo1qvMLFwAAAIAaqvVv50ufQLCm+gEAAAB1y20FAAAAUHJrJRxYsGBBkqRp06ZrY3oAAADgE9ZKOPDII48kSTbbbLO1MT0AAADwCdVec+Dpp5/OlClTVrjvvvvuy6xZsz61f1EUmT9/fiZNmpQ//vGPqaioSO/evWtSKwAAALAaVDscGDNmTM4+++zlthdFkYsvvrhGkxZFkYqKihx//PE16gcAAADUvRrdVlAUxTIfK9v+WR+bb755Lr/88uy11151fkAAAABAzVT7yoFBgwalQ4cOy2w78sgjU1FRkZNOOik9e/b81P4NGjRIZWVlOnbsmK5du6Zhw4arVDAAAABQt6odDnTr1i3dunVbZtuRRx6ZJBkwYEAOOOCAuq0MAAAAWCOqHQ6syFVXXZUkn3nVAAAAALDuqlU4cMQRR9RVHQAAAMBaUqMFCQEAAID6RzgAAAAAJVer2wo+6aGHHsrVV1+dRx55JG+88UbmzJmTJUuWfGqfioqKLFq0qK5KAAAAAFZBrcOBBQsW5Fvf+lZGjx6dJCmKotZFAQAAAGtOrcOBr3/967n11ltTFEWaNWuWrl275pFHHklFRUW+8IUvZMMNN8zUqVMzY8aMJP++WmCnnXZKs2bNal08AAAAUHu1WnPgnnvuyS233JIkGTRoUP71r3/loYceqtr/y1/+Mo899ljeeeedPProo9l3331TFEUWLlyYUaNG5f77769d9QAAAECt1SocuOaaa5IkW2yxRa699to0b958pW179+6dsWPH5jvf+U6eeeaZDBo0KB999FFtpgcAAADqQK3CgaW3D/zXf/1XmjZtutz+Fa0/cMEFF6Rz5855+umnc+WVV9ZmegAAAKAO1CocmD59epLki1/84jLbKyoqkiQLFy5cfsIGDfKNb3wjRVHkhhtuqM30AAAAQB2oVTjw4YcfJklatGixzPaliw2+//77K+y37bbbJkleeOGF2kwPAAAA1IFahQOtWrVK8u/HGX7SJptskiR56aWXVthvaWgwc+bM2kwPAAAA1IFahQPbbbddkuS1115bZnuXLl1SFEXuueeeFfabMGFCkuWvOAAAAADWvFqFA7169UpRFJk8efIy2/fdd98kydNPP53LLrtsmX1/+ctfcv3116eioiK9evWqzfQAAABAHahVODBgwIAkyX333ZfFixdXbf/6179edWvBiSeemJ133jlDhw7NzjvvnEMPPbTqKQbHHntsbaYHAAAA6kCtwoF99tknHTp0SOPGjZe5haBVq1b5wx/+kIYNG6Yoijz55JO5/vrr8+STT1YFA9/61rcyaNCgWhUPAAAA1F6twoEmTZrklVdeyVtvvZV99tlnmX0HHnhgJkyYkAEDBlSFBEVR5POf/3wuueSSXH755bUqHAAAAKgbjVbn4LvuumvuvvvuLFq0KDNmzEizZs3SvHnz1TklAAAAUEOrNRyomqRRo7Rt2
3ZNTAUAAADUUK1uK1hVl1xySXr27JmddtppbUwPAAAAfMIauXLgP7311luZMmVKKioq1sb0AAAAwCeslSsHAAAAgHWHcAAAAABKTjgAAAAAJSccAAAAgJITDgAAAEDJCQcAAACg5OpdODB69Oj0798/G2+8cZo1a5Zu3brlvPPOy8cff1zrsceOHZuKiopUVFRk7733roNqAQAAYO2rV+HAqaeemiFDhuTBBx/MzjvvnH333Tevv/56TjvttOy111754IMPVnns999/P8ccc0wqKirqsGIAAABY++pNOHDzzTdnxIgRqayszKOPPppx48blpptuyosvvpiuXbtm4sSJOfPMM1d5/JNPPjlvv/12jj/++DqsGgAAANa+aocDDRs2rLOPc845p84PZOmYp59+enr27Fm1vU2bNrnkkkuSJCNHjszs2bNrPPaYMWPypz/9Kd/73vey8847103BAAAAsI6odjhQFEXVf2v7UdfefPPNPP7440mSoUOHLrd/9913z9Zbb52FCxdm7NixNRp7xowZOf7447P99tvn7LPPrpN6AQAAYF1So9sK6uoX+7oOCCZPnpwkad26dTp27LjCNr169VqmbXWdcMIJmTFjRq644oo0bdq0doUCAADAOqhRdRsuWbJkddZRK6+++mqSpH379itts/XWWy/Ttjquu+663HjjjfnOd76Tvn371q5IAAAAWEdVOxxYl82dOzdJ0qxZs5W2qaysTJLMmTOnWmNOnz493/72t9OpU6c6XSNhypQp6d+/f436DBs2LMOGDauzGgAAAFi/jBo1KqNGjapRnylTplS7bb0IB1aHY489Nu+//35uuummbLTRRnU27uzZszNhwoQa9alpmAAAAED9MnXq1Br/LlkT9SIcaN68eZJk/vz5K20zb968JEmLFi0+c7yrr746t912W0444YQ6/8W8ZcuW6d69e436dOjQoU5rAAAAYP3SoUOH9OvXr0Z9pkyZUu0n9tWLcGDpL8/Tpk1baZul+6rzi/aYMWOSJI8//vhy4cD06dOTJE8++WTVvuuuuy5t27atVq3du3fP+PHjq9UWAAAAklW73bx///7VvtqgXoQDPXr0SJLMnDkzr7766gqfWPDEE08kSXr27FntcZf2WZFZs2ZVvcgffvhhTcoFAACAdUqNHmW4rmrXrl169+6dJLn22muX2z9x4sRMmzYtTZo0ycCBAz9zvJtvvjlFUazw46qrrkqSDBgwoGqby/4BAABYn9WLcCBJzjjjjCTJ8OHDM2nSpKrtM2fOzIknnpgkOemkk9KyZcuqfWPGjEnnzp0zYMCANVssAAAArEPqxW0FSTJo0KCccsopueiii9KnT58MGDAgzZo1y7333ptZs2alb9+++fnPf75Mn9mzZ+eFF15wWwAAAAClVm+uHEiSESNG5Prrr8+uu+6ahx56KGPHjk27du0yfPjw3Hfffdlwww3XdokAAACwzqk3Vw4sNWTIkAwZMqRabVdltcdV6QMAAADrsnp15QAAAABQc8IBAAAAKDnhAAAAAJSccAAAAABKTjgAAAAAJSccAAAAgJITDgAAAEDJCQcAAACg5IQDAAAAUHLCAQAAACg54QAAAACUnHAAAAAASk44AAAAACUnHAAAAICSEw4AAABAyQkHAAAAoOSEAwAAAFBywgEAAAAoOeEAAAAAlJxwAAAAAEpOOAAAAAAlJxwAAACAkhMOAAAAQMkJBwAAAKDkhAMAAABQcsIBAAAAKDnhAAAAAJSccAAAAABKTjgAAAAAJSccAAAAgJITDgAAAEDJCQcAAACg5IQDAAAAUHLCAQAAACg54QAAAACUnHAAAAAASk44AAAAACUnHAAAAICSEw4AAABAyQkHAAAAoOSEAwAAAFBywgEAAAAoOeEAAAAAlJxwAAAAAEpOOAAAAAAlJxwAAACAkhMOAAAAQMkJBwAAAKDkhAMAAABQcsIBAAAAKDnhAAAAAJSccAAAAABKTjgAAAAAJSccAAAAgJITDgAAAEDJCQcAAACg5IQDAAAAUHLCAQAAACg54QAAAACUnHAAAAAASk44AAAAACUnHAAAAICSEw4AAABAyQkHAAAAoOSEAwAAAFBywgEAAAAoOeEAAAAAlJxwAAAAAEpOOAAAAAAlJxwAAACAkhMOAAAAQMkJBwAAAKDkhAMAAABQcsIBAAAAKDnhAAAAAJSccAAAAABKTjgAAAAAJSccAAAAgJITDgAAAEDJCQcAAACg5IQDAAAAUHLCAQAAACg54QAAAACUnHAAAAAASk44AAAAACUnHAAAAICSEw4AAABAyQkHAAAAoOSEAwAAAFBywgEAAAAoOeEAAAAAlJxwAAAAAEpOOAAAAAAlJxwAAACAkhMOAAAAQMnVu3Bg9OjR6d+/fzbeeOM0a9Ys3bp1y3nnnZePP/64RuNMnjw55557bgYMGJDNN988G2ywQTbeeON86Utfym9/+9sajwcAAADrqkZru4C6dOqpp2bEiBFp1KhR9tprr1RWVua+++7Laaedlttuuy133XVXNtxww88cZ9GiRenZs2eSpLKyMr17987mm2+eN954Iw8//HAmTpyYa665JuPGjUurVq1W81EBAADA6lVvrhy4+eabM2LEiFRWVubRRx/NuHHjctNNN+XFF19M165dM3HixJx55pnVHm+nnXbKDTfckBkzZuS+++7Ln//85zzwwAOZPHlytthiizz22GP53ve+txqPCAAAANaMehMOnHPOOUmS008/veqv/knSpk2bXHLJJUmSkSNHZvbs2Z85VqNGjfLEE0/k0EMPTZMmTZbZ17Vr15x33nlJkuuuu87tBQAAAKz36kU48Oabb+bxxx9PkgwdOnS5/bvvvnu23nrrLFy4MGPHjq31fD169EiSfPDBB5kxY0atxwMAAIC1qV6EA5MnT06StG7dOh07dlxhm169ei3TtjZefPHFJEnjxo3TunXrWo8HAAAAa1O9WJDw1VdfTZK0b99+pW223nrrZdquqqIoqm4r2H///Ze77eCzTJkyJf37969Rn2HDhmXYsGE16gMAAED9MWrUqIwaNapGfaZMmVLttvUiHJg7d26SpFmzZittU1lZmSSZM2dOreY666yz8vDDD6eysjLDhw+vcf/Zs2dnwoQJNepT0zABAACA+mXq1Kk1/l2yJupFOLCmXHPNNTn77LPToEGDXHnlldluu+1qPEbLli3TvXv3GvXp0KFDjecBAACg/ujQoUP69etXoz5Tpkyp1qL8ST0JB5o3b54kmT9//krbzJs3L0nSokWLVZpj9OjR+da3vpUkufzyy3PooYeu0jjdu3fP+PHjV6kvAAAA5bQqt5v379+/2lcb1IsFCZf+ZX3atGkrbbN036r8Ff4vf/lLhg4dmiVLluSyyy6rCgkAAACgPqgX4cDSRwvOnDlzpQsOPvHEE0mSnj171mjsm2++OYcddlgWL16cSy+9NMccc0ztigUAAIB1TL0IB9q1a5fevXsnSa699trl9k+cODHTpk1LkyZNMnDgwGqPe9ttt2XIkCFZtGhR
Lr300hx33HF1VjMAAACsK+pFOJAkZ5xxRpJk+PDhmTRpUtX2mTNn5sQTT0ySnHTSSWnZsmXVvjFjxqRz584ZMGDAcuONHTs2hxxySBYtWpTf/e53ggEAAADqrXqxIGGSDBo0KKecckouuuii9OnTJwMGDEizZs1y7733ZtasWenbt29+/vOfL9Nn9uzZeeGFF/Lhhx8us/2dd97JwQcfnI8++ijt2rXLQw89lIceemiF855//vlp06bNajsuAAAAWN3qTTiQJCNGjEjfvn3z29/+Ng899FA+/vjjdOrUKaeffnq++93vpnHjxtUaZ8GCBVm4cGGS5I033sjVV1+90rY/+9nPhAMAAACs1+pVOJAkQ4YMyZAhQ6rVdmWPgujQoUOKoqjjygAAAGDdVG/WHAAAAABWjXAAAAAASk44AAAAACUnHAAAAICSEw4AAABAyQkHAAAAoOSEAwAAAFBywgEAAAAoOeEAAAAAlJxwAAAAAEpOOAAAAAAlJxwAAACAkhMOAAAAQMkJBwAAAKDkhAMAAABQcsIBAAAAKDnhAAAAAJSccAAAAABKTjgAAAAAJSccAAAAgJITDgAAAEDJCQcAAACg5IQDAAAAUHLCAQAAACg54QAAAACUnHAAAAAASk44AAAAACUnHAAAAICSEw4AAABAyQkHAAAAoOSEAwAAAFBywgEAAAAoOeEAAAAAlJxwAAAAAEpOOAAAAAAlJxwAAACAkhMOAAAAQMkJBwAAAKDkhAMAAABQcsIBAAAAKDnhAAAAAJSccAAAAABKTjgAAAAAJSccAAAAgJITDgAAAEDJCQcAAACg5IQDAAAAUHLCAQAAACg54QAAAACUnHAAAAAASk44AAAAACUnHAAAAICSEw4AAABAyQkHAAAAoOSEAwAAAFBywgEAAAAoOeEAAAAAlJxwAAAAAEpOOAAAAAAlJxwAAACAkhMOAAAAQMkJBwAAAKDkhAMAAABQcsIBAAAAKDnhAAAAAJSccAAAAABKTjgAAAAAJSccAAAAgJITDgAAAEDJCQcAAACg5IQDAAAAUHLCAQAAACg54QAAAACUnHAAAAAASk44AAAAACUnHAAAAICSEw4AAABAyQkHAAAAoOSEAwAAAFBywgEAAAAoOeEAAAAAlJxwAAAAAEpOOAAAAAAlJxwAAACAkhMOAAAAQMkJBwAAAKDkhAMAAABQcsIBAAAAKLlGa7sAAABg/ffMh29l9uIP07Jh03RtusXaLgeoIVcOAAAAtfbMh9Pz4IKpeebD6Wu7FGAVCAcAAACg5IQDAAAAUHLCAQAAACg54QAAAACUXL0LB0aPHp3+/ftn4403TrNmzdKtW7ecd955+fjjj1dpvCeffDKHHnpoNt988zRt2jQdO3bMySefnHfeeaeOKwcAAIC1o16FA6eeemqGDBmSBx98MDvvvHP23XffvP766znttNOy11575YMPPqjReDfeeGP69OmTG2+8Mdtss00OPPDANGjQICNHjswXv/jFvPTSS6vpSAAAAGDNqTfhwM0335wRI0aksrIyjz76aMaNG5ebbropL774Yrp27ZqJEyfmzDPPrPZ4//rXv3LEEUdk0aJFueyyy/LYY4/l+uuvzz//+c984xvfyNtvv52hQ4emKIrVeFQAAACw+tWbcOCcc85Jkpx++unp2bNn1fY2bdrkkksuSZKMHDkys2fPrtZ4v/nNb7JgwYLsvffeOfbYY6u2N2zYMJdeemlatmyZxx9/PHfddVcdHgUAAACsefUiHHjzzTfz+OOPJ0mGDh263P7dd989W2+9dRYuXJixY8dWa8wxY8asdLzKysoccMABSZK//OUvq1o2AAAArBPqRTgwefLkJEnr1q3TsWPHFbbp1avXMm0/zdy5c6vWE1jarzbjAQAAwLqsXoQDr776apKkffv2K22z9dZbL9P200ydOrXq85WNWZPxAAAAYF3WaG0XUBfmzp2bJGnWrNlK21RWViZJ5syZU+3xPm3MmoyXpOpKhIkTJ6ZVq1bV6rNU27Zt07Zt2xr1AYD/1L9//7VdQim9MuuptV0CrBHvLJpb9d9rZ7m6lvrvkTV8Xp0+fXqmT59eoz7z5s1Lkmo9aa9ehAPrg6XflMWLF1d7UcSlZs+enRdeeGF1lAVAiUyYMGFtlwCUwMJicaZ9PGttlwGr3bT16Ly69PfRT1MvwoHmzZsnSebPn7/SNktfjBYtWlR7vKVjtmzZslbjJUnHjh3zwgsvZPHixdlwww2r1WcpVw4AAACU26pcOfDBBx+kYcOGK12b75PqRTjQoUOHJMm0adNW2mbpvqVtP80222xT9fnrr7+erl271mq8xMKFAAAArLvqxYKEPXr0SJLMnDlzpQsEPvHEE0mSnj17fuZ4LVq0yLbbbrtMv9qMBwAAAOuyehEOtGvXLr17906SXHvttcvtnzhxYqZNm5YmTZpk4MCB1RrzoIMOWul48+bNy2233ZYkOfjgg1e1bAAAAFgn1ItwIEnOOOOMJMnw4cMzadKkqu0zZ87MiSeemCQ56aSTllk/YMyYMencuXMGDBiw3HinnnpqNtpoo9xzzz25/PLLq7YvXrw4J554YmbNmpXevXvnK1/5yuo6JAAAAFgjKoqiKNZ2EXXlO9/5Ti666KJssMEGGTBgQJo1a5Z77703s2bNSt++fXP33XcvsxjgqFGjcuSRR2abbbbJ1KlTlxtv9OjROfzww7N48eLssssu6dChQx5//PG88sor2XzzzTNx4sSq2w8AAABgfVVvrhxIkhEjRuT666/Prrvumoceeihjx45Nu3btMnz48Nx33301fkrAoYcemkcffTQHH3xwXnnllYwZMyaLFy/Ot7/97Tz11FOCAQAAAOqFehUOJMmQIUMyYcKEzJ49OwsWLMgzzzyT0047LY0bN16u7bBhw1IUxQqvGlhqp512yk033ZR33nknCxcuzNSpUzNy5Mhsvvnmq/EooLzuuOOOHH300enVq1e22GKLNGnSJM2bN0/37t1zxhlnZMaMGTUec9SoUamoqPjUjzvvvHM1HM3qNX78+FRUVKR///5ruxQAVsA5rfrW5XPa6NGj079//2y88cZp1qxZunXrlvPOOy8ff/zxZ/a95ZZbcsABB6Rt27Zp3LhxNttss+y22245++yzl2s7f/78XHvttfn+97+f/v37p0WLFqmoqPjMP0i+8cYb+dGPfpQvf/nL6dChQ5o3b54mTZqkffv2OeywwzJx4sRVPnbKpV48yhBYNwwbNixXX311rrrqqgwbNmyVxvjTn/6UP/3pT9l2223TpUuXbLrpppk5c2Yee+yxnHvuubniiity3333Zccdd6zx2J06dcruu+++wn1bbbVVjccbP3589txzz/Tr1y/jx4+vcX8A1l3OaST/XodsxIgRadSoUfbaa69UVlbmvvvuy2mnnZbbbrstd9111wqvTv7oo4/yjW98I6NHj86GG26YXXfdNZtvvnmmT5+ev//977nooovy//1//98yfV588cV8/etfr3GNzz//fH71q19l4403zhe+8IX07t0
7ixYtynPPPZfrr78+119/ff73f/83P/rRj1b5daAchAPAOuUHP/hBzj///LRt23aZ7fPmzcu3vvWtjB49OkcffXQefvjhGo+9++67Z9SoUXVUKQB8Oue09dvNN9+cESNGpLKyMhMmTKh6hPmMGTOy1157ZeLEiTnzzDNz/vnnL9f3mGOOyejRozNo0KBcfvnladOmTdW+JUuW5LHHHluuT/PmzXPkkUemZ8+e6dGjR2bNmpX999//M+vs2rVrJk+enC9+8Ytp0GDZC8P//Oc/57//+7/z4x//OF/72teyww471PRloETq3W0FwPqte/fuy72JSpLKyspccMEFSZJHHnkkc+bMWdOlAUCNOKet384555wkyemnn14VDCRJmzZtcskllyRJRo4cmdmzZy/T7957780111yTLl265IYbblgmGEiSBg0apE+fPsvN16lTp1x55ZU56aST0rdv3zRr1qxadW6++ebp3r37csFAkhx++OHp169flixZknvuuada41FewgEogWeffTaDBw9OmzZtstFGG6Vr1675zW9+kyVLlqRDhw6pqKhYZu2NT24bM2ZMdt9997Ro0SLNmzdP//79M3bs2GXGnzp1aioqKnL11VcnSY488shl7n382c9+VifH0ajRvy92atCgQTbYYIM6GXNV9e/fP3vuuWeSZMKECcscb4cOHZZpV1FRsdJLNH/2s5+t8DX65PbXX389Rx11VLbeeutssMEGK7y8dcGCBTnjjDOy7bbbpmnTptlyyy1z1FFH5c0331zpMTz//PNVT2xp0qRJWrdunQEDBuSGG26o6csBsMY4p9U957Tlvfnmm3n88ceTJEOHDl1u/+67756tt946CxcuXO5n6OKLL07y71sS1vb3Nvl/P2tNmjRZy5WwrnNbAdRzEyZMyH777ZcPPvggnTp1ype//OXMnDkzp512Wh555JFP7XvRRRflwgsvTK9evbL//vvn5ZdfzoQJEzJhwoRcdNFFOfnkk5P8+y8gRxxxRCZOnJiXX345ffv2XWbxnO7du9f6OBYuXJgzzjgjSfLlL3+5xk8fSZKXXnop//M//5N33nknlZWV6dKlSw444IDlEv3q2HfffdO0adOMGzcum2++efbdd9+qfasy3sq8+OKL6dGjRxo3bpy+ffumKIrlxv/oo48yYMCAPP300+nfv3969uyZiRMn5sorr8zYsWPzt7/9Ldttt90yfW6//fYccsgh+fDDD7P99tvn4IMPzjvvvJMJEybkvvvuy7hx43LFFVfU2XEA1AXntP/HOe3/WR3ntMmTJydJWrdunY4dO66wTa9evTJt2rRMnjw5hx9+eJJk8eLFuffee5Mke+yxR6ZPn57rrrsuL7zwQpo0aZIePXpk8ODBqaysrFE9q+r222/P/fffn6ZNm+YrX/nKGpmT9VgB1FsLFiwottpqqyJJ8f3vf79YvHhx1b6///3vxeabb14kKZIUr776atW+bbbZpkhSVFRUFH/84x+XGfO6664rKioqikaNGhXPPPPMMvuOOOKIIklx1VVX1br2J598sjjiiCOKb37zm8V+++1XtGnTpkhS9O7du3jjjTdqNNZVV11VdZz/+dG0adNi+PDhq1Tj/fffXyQp+vXrt9I2/fr1K5IU999//wr3//SnPy2SFD/96U9XuD1J8Y1vfKP48MMPVzp/kmLbbbctXnvttap9H3zwQTF48OAiSdGnT59l+k2fPr1o2bJlkaT4xS9+USxZsqRq3+OPP15svPHGRZLi97///We/CABriHPavzmnrZlz2kUXXVQkKbp3777SNqecckqRpDjkkEOqtv3zn/+sOo5rrrmmqKysXO77tOmmmxb33nvvZ9aw9DXp1KlTtes+4YQTiiOOOKIYPHhw0bVr1yJJ0bx58+LGG2+s9hiUl9sKoB678cYb8+abb2abbbbJueeeu8y9aF/4whdy5plnfmr/Aw88cLlVc//rv/4rBx98cBYtWpSLLrpotdSdJK+//nquvvrqXHPNNbnjjjsyY8aM7L333rnuuutqvApz27Zt85Of/CSPPvpo3n333cyZMyePP/54vvnNb2bhwoU5/fTTq+4rXNe0bt06I0eO/MxLAc8///y0b9++6uumTZvmkksuyUYbbZRHHnkkDz30UNW+yy+/PLNnz85OO+2Un/zkJ6moqKja16tXr/zkJz9JkvzqV7+q46MBWHXOaf/mnLZmzmlz585Nkk+973/pX/8/uWbEzJkzqz4/6qijstNOO+Xxxx/P3LlzM2XKlAwcODDvvvtuDjzwwLz44os1qqk6rr322lx99dW56aab8swzz2TTTTfNqFGjMnjw4Dqfi/pHOAD12IQJE5Ikhx566Arvefusx+UcccQRn7p9dT7qaNCgQSmKIosWLcrUqVPzhz/8Ic8991y6dOmSG2+8sUZj7bvvvvnFL36RnXfeOW3atEnz5s3Tq1evXH311VUrDJ999tl5++23V8eh1Mree++dli1bfmqbVq1a5YADDlhu+2abbVZ1aegnv1dLP1/Z9/eoo45K8u/LP//1r3+tQtUAdc857d+c09btc1pRFFWfb7XVVhk3blx69eqVysrKdOvWLbfeemu6dOmSefPmZfjw4XU+/6xZs1IURWbOnFn1hIXBgwfn8MMPz+LFi+t8PuoXaw5APfbGG28kyTKLCX1Sq1at0rJly+VW2V1qZffYLd2+dPzq+sEPfpAZM2Yst/3THsXUsGHDbLPNNjnqqKMyYMCA7LjjjjnyyCOz++67r3AF6Jr6zne+k3PPPTczZszIXXfdlf/+7/9OkkycODF/+MMflms/aNCgDBo0qNbzVtfKvnf/2eaTfyn5pBV9r5Yu6LSy72+rVq3SunXrvPfee3njjTey5ZZb1rBqgLrnnPbZnNOWt7Jz2ooWQmzTpk1VwNK8efMkyfz581da67x585IkLVq0qNq2tN/SOf7zKomGDRvmuOOOy8knn7xanx7QunXr7LHHHvnSl76Ur33ta7nuuuvSt2/fnHTSSattTtZ/wgEogZWdZD9r32f5ZDpeHTfeeGNee+215bZX9znNHTp0yJ577pnbb789d999d9Wbntpo2LBhtttuu8yYMWOZNxsvvfRS1UrV/1lDXb6RWrJkyafuX5VFqlakpt8rgHWVc9rKOadV34pej2222aYqHFgaZEybNm2lYyzd98nQY2m4URRFPve5z62w39Ltb7311qqUXiMVFRUZNmxYbr/99owZM0Y4wKdyWwHUY0vvY/zkI50+afbs2Zk1a9ZK+7/66qsr3L50vHbt2tWonqlTp6YoiuU+amLpvX/vvPNOjfp9mqX3B/5n2r+iWmv6CKvGjRsn+X/3Lv6nFb2xrKmVfX8/ue+T36ulPxevvPLKCvvMnj0777333jJtAdY257TqcU5b1srOaSt6PT45d48ePZL8+/Vc2c/OE088kSTp2bNn1bbKyspsv/32SbLCK0s+uX1NPbFgdfycUT8JB6Ae22OPPZIko0ePzqJFi5bbf+21135q///7v/9b4fZrrrkmyb+fd/
xJS980rGiuurBw4cJMnDgxSfL5z3++TsacNGlS/vnPfyZJdt555xr1rc7xLn0j8txzzy23b8GCBbn//vtrNOeKzJo1K7fddtty2999993ceeedSZb9Xi39fEV/NUmSK6+8Mkmy3XbbCQeAdYZz2mdzTlveqp7T2rVrl969eydZ8c/WxIkTM23atDRp0iQDBw5cZt+hhx6aJCu9beDuu+9OUvPv0apa+mjFuvo5ox5b3Y9DANae+fPnF1tssUWRpPjRj360zGOfnnvuuaJt27af+dinP//5z8uMOXr06KJBgwZFo0aNiqeeemqZfWeddVaRpDj11FNXqd633367uOSSS4rZs2cvt++NN94oDj300CJJ0aFDh+KDDz5YZv9f/vKXYvvtty/22muv5V6DkSNHFnPmzFluzAkTJhQdOnQokhS77757jet97bXXiiTFZpttVnz00UcrbPPHP/6xSFK0b99+mcdVzZs3r/j6179e9fqv7LFP/7n9kz752KftttuumDZtWtW+Dz/8sOr12nnnnZfpN3369KJFixZFkuKXv/zlMo99mjRpUtG6dWuPMgTWOc5pzmlr+pw2ZsyYIklRWVlZPPnkk1XbZ8yYUfWYwO9///vL9Xv33XerHqH4u9/9bpl9f/7zn4uKiooiSXH77bd/6vzVfZThZZddVjz//PPLbf/oo4+Kyy67rNhggw2KJMWdd975qeOAcADquXvvvbdo2rRp1XODDzvssOIrX/lK0bhx4+LQQw8t2rdvXyQp3nzzzao+S99InXrqqVXPYR46dGixyy67VJ24f/3rXy8311NPPVU0aNCgaNCgQbH33nsXRx55ZHHUUUcVt9xyS7VqffXVV4skRePGjYudd965GDJkSHHooYcWffr0KRo3blwkKbbccstiypQpy/Vd+tznbbbZZpnt77//fpGkaNKkSdGnT59iyJAhxcEHH1x06dKl6li6du1a/Otf/6rZC/v/16tXryJJsf322xdf//rXi6OOOqo47bTTqvZ/9NFHVW1atmxZfPWrXy3222+/YtNNNy222mqr4lvf+lat30jtuuuuxS677FJstNFGxf77718MGTKk2HLLLave5K3oDcNtt91W9XPRuXPn4vDDDy8GDBhQNGrUqEhSHHnkkav0egCsTs5pzmlr+px2yimnFEmKDTbYoNh3332LwYMHF61atSqSFH379i0WLFiwwn533XVXVU077rhjccghhxQ9evSo+j6deeaZK+w3aNCgYpdddil22WWXYocddqj6fi/dtssuuxSXX375Mn369etXFSIccMABxdChQ4sBAwZUBWYNGjQozj333FV+DSgP4QCUwFNPPVUcdNBBRevWrYumTZsWX/jCF4pf/epXxcKFC4vGjRsXDRo0WOavFkvfSL366qvFDTfcUOy6665FZWVl0axZs+JLX/pScdttt610rjFjxhR9+/YtmjdvXpWMf9qbgU+aP39+ccEFFxQHHnhg0alTp6J58+ZFo0aNijZt2hR77LFH8atf/WqFf4EpipW/kVq4cGFx5plnFvvtt1/RsWPHqjE33XTTYu+99y4uu+yyYuHChdWqb0Vee+21YujQocUWW2xR9SZkRW/mTjrppKJdu3bFBhtsUGy11VbFscceW7z99tsrfcNUkzdS/fr1K+bNm1f88Ic/LDp27Fg0bty42HzzzYthw4YVr7/++kr7/+Mf/yiOOOKIqrpatWpV7LnnnsV11123yq8HwOrmnOactiKr85x2/fXXF3vssUfRokWLYsMNNyy6dOlSDB8+/DNf6xdeeKE44ogjiq222qrYYIMNik022aQYOHBgMW7cuJX2Wfrz+mkf//k63n777cWxxx5bdOvWrdh0002LRo0aFc2bNy923HHH4oQTTljuqhhYmYqisIQ1lNXf/va39OvXL127ds3TTz9dtb1Dhw557bXX8uqrr1brsUMAsLY5pwHUjgUJoZ579913V7jK7rPPPptjjjkmSXLkkUeu6bIAoMac0wBWn0ZruwBg9fr73/+ePffcM1/4whfyuc99LhtuuGFeffXVTJo0KUuWLMmXv/zlnHzyyWu7TAD4TM5pAKuPcADquc9//vP59re/nQkTJuTBBx/M3Llz07x58+y2224ZOnRojjnmmDRq5H8FAKz7nNMAVh9rDgAAAEDJWXMAAAAASk44AAAAACUnHAAAAICSEw4AAABAyQkHAAAAoOSEAwAAAFBywgEAAAAoOeEAAAAAlJxwAAAAAEru/wdgyx9sx58BigAAAABJRU5ErkJggg==", "text/plain": [ "
" ] diff --git a/prompttools/experiment/experiments/experiment.py b/prompttools/experiment/experiments/experiment.py index 872f12e1..30c01828 100644 --- a/prompttools/experiment/experiments/experiment.py +++ b/prompttools/experiment/experiments/experiment.py @@ -461,12 +461,7 @@ def aggregate(self, metric_name, column_name, is_average=False): # Define the custom colors custom_colors = [ - "black", - "#7e1e9c", - "#15b01a", - "#448ee4", - "#ff7fa7", - "#029386", + "black", "#771541", "#EB8F4C", "#594F3B", "#A8B7AB", "#9C92A3" ] plt.ylabel("Latency (s)") diff --git a/prompttools/experiment/experiments/style.mplstyle b/prompttools/experiment/experiments/style.mplstyle index 00520178..aa62ae2f 100755 --- a/prompttools/experiment/experiments/style.mplstyle +++ b/prompttools/experiment/experiments/style.mplstyle @@ -43,9 +43,6 @@ savefig.dpi : 100 # Hegel AI color cycle axes.prop_cycle: cycler('color', ["black", "771541", "EB8F4C","594F3B","A8B7AB","9C92A3"]) -# Not good for colorblind people -#axes.prop_cycle: cycler('color', ['black', '7e1e9c', '15b01a', '448ee4', 'ff7fa7', '029386', 'ed872d', 'ae1717', 'gray', 'e03fd8', '011288', '0b4008']) - #font.family : serif #text.usetex : True #font.serif : Palatino diff --git a/prompttools/version.py b/prompttools/version.py index d103c2d6..49a555da 100644 --- a/prompttools/version.py +++ b/prompttools/version.py @@ -1,2 +1,2 @@ -__version__ = '0.0.45a0+e77fcc5' -git_version = 'e77fcc5bcd6fa7b123ae17fd660659bcb3e7c8bf' +__version__ = '0.0.45a0+6151062' +git_version = '6151062e36d63229b66d1c4193f0173ad022502d' From 326e8c49d268c0c55164abbabd009c875deca6df Mon Sep 17 00:00:00 2001 From: "Steven Krawczyk (Hegel AI)" Date: Sun, 31 Dec 2023 13:29:33 -0800 Subject: [PATCH 25/52] Add feedback API (#119) * Add feedback API * Export feedback func * Get feedback working --- prompttools/logger/__init__.py | 3 ++- prompttools/logger/logger.py | 43 +++++++++++++++++++++++++++++++--- 2 files changed, 42 insertions(+), 4 deletions(-) diff --git a/prompttools/logger/__init__.py b/prompttools/logger/__init__.py index 47bc34ee..5bc0ab72 100644 --- a/prompttools/logger/__init__.py +++ b/prompttools/logger/__init__.py @@ -5,9 +5,10 @@ # LICENSE file in the root directory of this source tree. -from .logger import Logger +from .logger import Logger, add_feedback __all__ = [ "Logger", + "add_feedback", ] diff --git a/prompttools/logger/logger.py b/prompttools/logger/logger.py index 361a5709..c9cc70df 100644 --- a/prompttools/logger/logger.py +++ b/prompttools/logger/logger.py @@ -4,6 +4,7 @@ # This source code's license can be found in the # LICENSE file in the root directory of this source tree. 
import json +import uuid import requests import threading @@ -27,6 +28,7 @@ class Logger: def __init__(self): self.backend_url = f"{HEGEL_BACKEND_URL}/sdk/logger" self.data_queue = queue.Queue() + self.feedback_queue = queue.Queue() self.worker_thread = threading.Thread(target=self.worker) # When the main thread is joining, put `None` into queue to signal worker thread to end @@ -34,6 +36,13 @@ def __init__(self): self.worker_thread.start() + def add_feedback(self, log_id, metric_name, value): + self.feedback_queue.put({ + "log_id": log_id, + "key": metric_name, + "value": value + }) + def execute_and_add_to_queue(self, callable_func, **kwargs): if "hegel_model" in kwargs: hegel_model = kwargs["hegel_model"] @@ -43,14 +52,17 @@ def execute_and_add_to_queue(self, callable_func, **kwargs): start = perf_counter() result = callable_func(**kwargs) latency = perf_counter() - start + log_id = str(uuid.uuid4()) self.data_queue.put( { "hegel_model": hegel_model, "result": result.model_dump_json(), "input_parameters": json.dumps(kwargs), "latency": latency, + "log_id": log_id, } ) + result.log_id = log_id return result def wrap(self, callable_func): @@ -58,13 +70,22 @@ def wrap(self, callable_func): def worker(self): while True: + # Process logging data if not self.data_queue.empty(): - result = self.data_queue.get() - if result is None: + data = self.data_queue.get() + if data is None: # Shutdown signal return - self.log_data_to_remote(result) + self.log_data_to_remote(data) self.data_queue.task_done() + # Process feedback data + if not self.feedback_queue.empty(): + feedback_data = self.feedback_queue.get() + if feedback_data is None: # Shutdown signal + return + self.send_feedback_to_remote(feedback_data) + self.feedback_queue.task_done() + def log_data_to_remote(self, data): try: headers = { @@ -78,6 +99,19 @@ def log_data_to_remote(self, data): except requests.exceptions.RequestException as e: print(f"Error sending data to Flask API: {e}") + def send_feedback_to_remote(self, feedback_data): + feedback_url = f"{HEGEL_BACKEND_URL}/sdk/add_feedback/" + try: + headers = { + "Content-Type": "application/json", + "Authorization": os.environ["HEGELAI_API_KEY"], + } + + response = requests.post(feedback_url, json=feedback_data, headers=headers) + if response.status_code != 200: + print(f"Failed to send feedback to Flask API. 
Status code: {response.status_code}") + except requests.exceptions.RequestException as e: + print(f"Error sending feedback to Flask API: {e}") sender = Logger() # Monkey-patching @@ -86,3 +120,6 @@ def log_data_to_remote(self, data): except Exception: print("You may need to add `OPENAI_API_KEY=''` to your `.env` file.") raise + +def add_feedback(*args): + sender.add_feedback(*args) \ No newline at end of file From a00b03a9e5d1ccc01fb101ce1babdb7b1cb48188 Mon Sep 17 00:00:00 2001 From: Kevin Date: Wed, 3 Jan 2024 11:38:09 -0500 Subject: [PATCH 26/52] Saving template to harness's DataFrame --- .../harness/chat_prompt_template_harness.py | 27 +++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/prompttools/harness/chat_prompt_template_harness.py b/prompttools/harness/chat_prompt_template_harness.py index 7ddb563a..b6137c8a 100644 --- a/prompttools/harness/chat_prompt_template_harness.py +++ b/prompttools/harness/chat_prompt_template_harness.py @@ -82,23 +82,46 @@ def run(self, clear_previous_results: bool = False): self.prepare() super().run(clear_previous_results=clear_previous_results) - # Add user inputs to DataFrame if len(self.experiment.full_df) > 0: + # Add user inputs to DataFrame repeat = len(self.experiment.full_df) // len(self.user_inputs) user_inputs = deepcopy(self.user_inputs) user_inputs_col_name = "user_inputs" user_input_df = pd.DataFrame({user_inputs_col_name: user_inputs * repeat}) + # Full DF if user_inputs_col_name in self.experiment.full_df.columns: self.experiment.full_df = self.experiment.full_df.drop(user_inputs_col_name, axis=1) self.experiment.full_df.reset_index(drop=True, inplace=True) - self.experiment.full_df = pd.concat([user_input_df, self.experiment.full_df], axis=1) + # Partial DF if user_inputs_col_name in self.experiment.partial_df.columns: self.experiment.partial_df = self.experiment.partial_df.drop(user_inputs_col_name, axis=1) self.experiment.partial_df.reset_index(drop=True, inplace=True) self.experiment.partial_df = pd.concat([user_input_df, self.experiment.partial_df], axis=1) + # Add prompt template to DataFrame + repeat = len(self.experiment.full_df) // len(self.message_templates) + templates = deepcopy(self.message_templates) + template_indices = list(range(len(templates))) + template_col_name = "templates" + template_index_col_name = "template_index" + template_df = pd.DataFrame( + {template_index_col_name: template_indices * repeat, template_col_name: templates * repeat} + ) + # Full DF + if template_col_name in self.experiment.full_df.columns: + self.experiment.full_df = self.experiment.full_df.drop(template_col_name, axis=1) + self.experiment.full_df = self.experiment.full_df.drop(template_index_col_name, axis=1) + self.experiment.full_df.reset_index(drop=True, inplace=True) + self.experiment.full_df = pd.concat([template_df, self.experiment.full_df], axis=1) + # Partial DF + if template_col_name in self.experiment.partial_df.columns: + self.experiment.partial_df = self.experiment.partial_df.drop(template_col_name, axis=1) + self.experiment.partial_df = self.experiment.partial_df.drop(template_index_col_name, axis=1) + self.experiment.partial_df.reset_index(drop=True, inplace=True) + self.experiment.partial_df = pd.concat([template_df, self.experiment.partial_df], axis=1) + def get_table(self, get_all_cols: bool = False) -> pd.DataFrame: columns_to_hide = [ "stream", From 0b36e1baadf7ef525eeff695cd646cdff83d93e8 Mon Sep 17 00:00:00 2001 From: Kevin Date: Wed, 3 Jan 2024 11:38:33 -0500 Subject: [PATCH 27/52] Adding 
generic aggregate method to all harnesses --- prompttools/harness/harness.py | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/prompttools/harness/harness.py b/prompttools/harness/harness.py index 4f547de7..9e101308 100644 --- a/prompttools/harness/harness.py +++ b/prompttools/harness/harness.py @@ -4,7 +4,7 @@ # This source code's license can be found in the # LICENSE file in the root directory of this source tree. -from typing import Callable, Optional +from typing import Callable, Optional, Union from prompttools.experiment import Experiment @@ -12,6 +12,7 @@ import os import pickle import requests +import pandas as pd class ExperimentationHarness: @@ -89,6 +90,30 @@ def _get_state(self): def _load_state(cls, state, experiment_id: str, revision_id: str, experiment_type_str: str): raise NotImplementedError("Should be implemented by specific harness class.") + def aggregate( + self, + groupby_column: str, + aggregate_columns: Union[str, list[str]], + method: str, + custom_df: Optional[pd.DataFrame] = None, + ) -> pd.DataFrame: + """ + Aggregate data based on the specified column, method. + + Args: + groupby_column (str): + aggregate_columns (Union[str, list[str]]): + method (str): aggregation method (e.g., 'mean', 'sum', 'count', 'min', 'max', 'median', 'std', etc.) + """ + if method not in ["mean", "sum", "count", "min", "max", "median", "std"]: + raise ValueError(f"Unsupported aggregation method: {method}") + + if custom_df is None: + custom_df = self.full_df + + result_df = custom_df.groupby(groupby_column)[aggregate_columns].agg(method).reset_index() + return result_df + def save_experiment(self, name: Optional[str] = None): r""" name (str, optional): Name of the experiment. This is optional if you have previously loaded an experiment From 5d0fff31a2b49d7f8390d03439245291604d015f Mon Sep 17 00:00:00 2001 From: Kevin Date: Wed, 3 Jan 2024 11:39:02 -0500 Subject: [PATCH 28/52] Adding bespoke aggregate method to harness to avoid hashing type issues --- .../harness/chat_prompt_template_harness.py | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/prompttools/harness/chat_prompt_template_harness.py b/prompttools/harness/chat_prompt_template_harness.py index b6137c8a..468b1872 100644 --- a/prompttools/harness/chat_prompt_template_harness.py +++ b/prompttools/harness/chat_prompt_template_harness.py @@ -4,7 +4,7 @@ # This source code's license can be found in the # LICENSE file in the root directory of this source tree. -from typing import Type +from typing import Type, Union import jinja2 import pandas as pd @@ -160,6 +160,28 @@ def visualize(self, get_all_cols: bool = False): logging.getLogger().setLevel(logging.INFO) logging.info(tabulate(table, headers="keys", tablefmt="psql")) + def aggregate(self, groupby_column: str, aggregate_columns: Union[str, list[str]], method: str) -> pd.DataFrame: + """ + Aggregate data based on the specified column, method. + + Args: + groupby_column (str): + aggregate_columns (Union[str, list[str]]): + method (str): aggregation method (e.g., 'mean', 'sum', 'count', 'min', 'max', 'median', 'std', etc.) 
+ """ + if groupby_column == "user_inputs": + df = self.full_df.copy() + df["user_inputs"] = [tuple(d.items()) for d in self.full_df["user_inputs"]] + else: + df = self.full_df + + if groupby_column == "templates": + result = super().aggregate("template_index", aggregate_columns, method) + result["templates"] = [self.message_templates[i] for i in result["template_index"]] + return result + else: + return super().aggregate(groupby_column, aggregate_columns, method, custom_df=df) + def _get_state(self): state_params = { "experiment_cls_constructor": self.experiment_cls_constructor, From 37191aabfc71f4ea36f090e48abcdf395e293ffb Mon Sep 17 00:00:00 2001 From: Kevin Date: Wed, 3 Jan 2024 11:39:17 -0500 Subject: [PATCH 29/52] Adding various harness to documentation --- docs/source/harness.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/source/harness.rst b/docs/source/harness.rst index 907debe2..8dcd1b3e 100644 --- a/docs/source/harness.rst +++ b/docs/source/harness.rst @@ -18,8 +18,14 @@ a corresponding experiment, and keeps track of the templates and inputs used for .. autoclass:: ChatModelComparisonHarness +.. autoclass:: ChatPromptTemplateExperimentationHarness + +.. autoclass:: ModelComparisonHarness + .. autoclass:: MultiExperimentHarness .. autoclass:: PromptTemplateExperimentationHarness +.. autoclass:: RetrievalAugmentedGenerationExperimentationHarness + .. autoclass:: SystemPromptExperimentationHarness From 9cf33192e32bc716ccce898295765e327e0eaa34 Mon Sep 17 00:00:00 2001 From: Kevin Date: Wed, 3 Jan 2024 11:39:25 -0500 Subject: [PATCH 30/52] Updating harness examples --- ..._Loading_ExperimentHarness_to_Remote.ipynb | 300 +++++++++++++++--- 1 file changed, 251 insertions(+), 49 deletions(-) diff --git a/examples/notebooks/remote/Saving_and_Loading_ExperimentHarness_to_Remote.ipynb b/examples/notebooks/remote/Saving_and_Loading_ExperimentHarness_to_Remote.ipynb index 6d92cc01..c843f51b 100644 --- a/examples/notebooks/remote/Saving_and_Loading_ExperimentHarness_to_Remote.ipynb +++ b/examples/notebooks/remote/Saving_and_Loading_ExperimentHarness_to_Remote.ipynb @@ -81,14 +81,11 @@ " \n", " \n", " \n", + " template_index\n", + " templates\n", " user_inputs\n", " model\n", " messages\n", - " temperature\n", - " top_p\n", - " n\n", - " presence_penalty\n", - " frequency_penalty\n", " response\n", " response_usage\n", " latency\n", @@ -97,65 +94,65 @@ " \n", " \n", " 0\n", + " 0\n", + " [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the {{input}} president?'}]\n", " {'input': 'first'}\n", " gpt-3.5-turbo\n", " [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}]\n", - " 1.0\n", - " 1.0\n", - " 1\n", - " 0.0\n", - " 0.0\n", - " The first president of the United States was George Washington. He served two terms from 1789 to 1797.\n", - " {'completion_tokens': 24, 'prompt_tokens': 23, 'total_tokens': 47}\n", - " 1.251051\n", + " The first president of the United States was George Washington. 
He served from 1789 to 1797.\n", + " {'completion_tokens': 22, 'prompt_tokens': 23, 'total_tokens': 45}\n", + " 1.620099\n", " \n", " \n", " 1\n", + " 1\n", + " [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the {{input}} vice president?'}]\n", " {'input': 'second'}\n", " gpt-3.5-turbo\n", " [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the second president?'}]\n", - " 1.0\n", - " 1.0\n", - " 1\n", - " 0.0\n", - " 0.0\n", " The second president of the United States was John Adams. He served from 1797 to 1801.\n", " {'completion_tokens': 22, 'prompt_tokens': 23, 'total_tokens': 45}\n", - " 0.949733\n", + " 0.657733\n", " \n", " \n", " 2\n", + " 0\n", + " [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the {{input}} president?'}]\n", " {'input': 'first'}\n", " gpt-3.5-turbo\n", " [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first vice president?'}]\n", - " 1.0\n", - " 1.0\n", - " 1\n", - " 0.0\n", - " 0.0\n", - " The first Vice President of the United States was John Adams. He served as Vice President under President George Washington from 1789 to 1797.\n", - " {'completion_tokens': 30, 'prompt_tokens': 24, 'total_tokens': 54}\n", - " 0.936664\n", + " The first Vice President of the United States was John Adams. He served under President George Washington from 1789 to 1797.\n", + " {'completion_tokens': 27, 'prompt_tokens': 24, 'total_tokens': 51}\n", + " 0.809008\n", " \n", " \n", " 3\n", + " 1\n", + " [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the {{input}} vice president?'}]\n", " {'input': 'second'}\n", " gpt-3.5-turbo\n", " [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the second vice president?'}]\n", - " 1.0\n", - " 1.0\n", - " 1\n", - " 0.0\n", - " 0.0\n", - " The second vice president of the United States was Thomas Jefferson. He served as vice president from 1797 to 1801 under President John Adams.\n", + " The second Vice President of the United States was Thomas Jefferson. 
He served as Vice President under President John Adams from 1797 to 1801.\n", " {'completion_tokens': 30, 'prompt_tokens': 24, 'total_tokens': 54}\n", - " 0.971230\n", + " 0.922675\n", " \n", " \n", "\n", "" ], "text/plain": [ + " template_index \\\n", + "0 0 \n", + "1 1 \n", + "2 0 \n", + "3 1 \n", + "\n", + " templates \\\n", + "0 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the {{input}} president?'}] \n", + "1 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the {{input}} vice president?'}] \n", + "2 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the {{input}} president?'}] \n", + "3 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the {{input}} vice president?'}] \n", + "\n", " user_inputs model \\\n", "0 {'input': 'first'} gpt-3.5-turbo \n", "1 {'input': 'second'} gpt-3.5-turbo \n", @@ -168,29 +165,23 @@ "2 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first vice president?'}] \n", "3 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the second vice president?'}] \n", "\n", - " temperature top_p n presence_penalty frequency_penalty \\\n", - "0 1.0 1.0 1 0.0 0.0 \n", - "1 1.0 1.0 1 0.0 0.0 \n", - "2 1.0 1.0 1 0.0 0.0 \n", - "3 1.0 1.0 1 0.0 0.0 \n", - "\n", " response \\\n", - "0 The first president of the United States was George Washington. He served two terms from 1789 to 1797. \n", + "0 The first president of the United States was George Washington. He served from 1789 to 1797. \n", "1 The second president of the United States was John Adams. He served from 1797 to 1801. \n", - "2 The first Vice President of the United States was John Adams. He served as Vice President under President George Washington from 1789 to 1797. \n", - "3 The second vice president of the United States was Thomas Jefferson. He served as vice president from 1797 to 1801 under President John Adams. \n", + "2 The first Vice President of the United States was John Adams. He served under President George Washington from 1789 to 1797. \n", + "3 The second Vice President of the United States was Thomas Jefferson. He served as Vice President under President John Adams from 1797 to 1801. \n", "\n", " response_usage \\\n", - "0 {'completion_tokens': 24, 'prompt_tokens': 23, 'total_tokens': 47} \n", + "0 {'completion_tokens': 22, 'prompt_tokens': 23, 'total_tokens': 45} \n", "1 {'completion_tokens': 22, 'prompt_tokens': 23, 'total_tokens': 45} \n", - "2 {'completion_tokens': 30, 'prompt_tokens': 24, 'total_tokens': 54} \n", + "2 {'completion_tokens': 27, 'prompt_tokens': 24, 'total_tokens': 51} \n", "3 {'completion_tokens': 30, 'prompt_tokens': 24, 'total_tokens': 54} \n", "\n", " latency \n", - "0 1.251051 \n", - "1 0.949733 \n", - "2 0.936664 \n", - "3 0.971230 " + "0 1.620099 \n", + "1 0.657733 \n", + "2 0.809008 \n", + "3 0.922675 " ] }, "metadata": {}, @@ -203,6 +194,217 @@ "harness.visualize()" ] }, + { + "cell_type": "markdown", + "id": "f246c958", + "metadata": {}, + "source": [ + "### Examples of `aggregation` API" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "88d81001", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
user_inputslatency
0((input, first),)1.214553
1((input, second),)0.790204
\n", + "
" + ], + "text/plain": [ + " user_inputs latency\n", + "0 ((input, first),) 1.214553\n", + "1 ((input, second),) 0.790204" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "agg_df = harness.aggregate(groupby_column='user_inputs', aggregate_columns='latency', method='mean')\n", + "agg_df" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "349bec6c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
template_indexlatencytemplates
001.214553[{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the {{input}} president?'}]
110.790204[{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the {{input}} vice president?'}]
\n", + "
" + ], + "text/plain": [ + " template_index latency \\\n", + "0 0 1.214553 \n", + "1 1 0.790204 \n", + "\n", + " templates \n", + "0 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the {{input}} president?'}] \n", + "1 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the {{input}} vice president?'}] " + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "agg_df = harness.aggregate(groupby_column='templates', aggregate_columns='latency', method='mean')\n", + "agg_df" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "18419516", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
template_indexlatencylatencytemplates
001.2145531.214553[{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the {{input}} president?'}]
110.7902040.790204[{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the {{input}} vice president?'}]
\n", + "
" + ], + "text/plain": [ + " template_index latency latency \\\n", + "0 0 1.214553 1.214553 \n", + "1 1 0.790204 0.790204 \n", + "\n", + " templates \n", + "0 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the {{input}} president?'}] \n", + "1 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the {{input}} vice president?'}] " + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "agg_df = harness.aggregate(groupby_column='templates', aggregate_columns=['latency', 'latency'], method='mean')\n", + "agg_df" + ] + }, { "cell_type": "code", "execution_count": 4, From c4b5c7d34a4dad87769b624e778c8af65f2f7f6e Mon Sep 17 00:00:00 2001 From: Kevin Date: Wed, 3 Jan 2024 11:45:37 -0500 Subject: [PATCH 31/52] Adding a note to `custom_df` in aggregate --- prompttools/harness/harness.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/prompttools/harness/harness.py b/prompttools/harness/harness.py index 9e101308..5632f7ed 100644 --- a/prompttools/harness/harness.py +++ b/prompttools/harness/harness.py @@ -104,6 +104,9 @@ def aggregate( groupby_column (str): aggregate_columns (Union[str, list[str]]): method (str): aggregation method (e.g., 'mean', 'sum', 'count', 'min', 'max', 'median', 'std', etc.) + custom_df (Optional[pd.DataFrame]): By default, this method uses `self.full_df`, however, a specific + subclass implementation can chooses to override this (mainly to make modification before or after + group by) """ if method not in ["mean", "sum", "count", "min", "max", "median", "std"]: raise ValueError(f"Unsupported aggregation method: {method}") From 493f825ddadfd6d33fa05ca9f7b04087eb26079e Mon Sep 17 00:00:00 2001 From: greydoubt <43443470+greydoubt@users.noreply.github.com> Date: Wed, 17 Jan 2024 10:01:53 -0800 Subject: [PATCH 32/52] slight cleanup (#120) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index c0666d06..b95d01b7 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ Welcome to `prompttools` created by [Hegel AI](https://hegel-ai.com/)! This repo offers a set of open-source, self-hostable tools for experimenting with, testing, and evaluating LLMs, vector databases, and prompts. The core idea is to enable developers to evaluate using familiar interfaces like _code_, _notebooks_, and a local _playground_. -In just a few lines of codes, you can test your prompts and parameters across different models (whether you are using +In just a few lines of code, you can test your prompts and parameters across different models (whether you are using OpenAI, Anthropic, or LLaMA models). You can even evaluate the retrieval accuracy of vector databases. 
```python From 68abe7a65b910ce952fccd87aa0f3a05c41b5ae7 Mon Sep 17 00:00:00 2001 From: Kevin Date: Thu, 25 Jan 2024 23:18:34 -0500 Subject: [PATCH 33/52] Fix repeat order in Chat Prompt Template Harness --- prompttools/harness/chat_prompt_template_harness.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/prompttools/harness/chat_prompt_template_harness.py b/prompttools/harness/chat_prompt_template_harness.py index 468b1872..a0db70c9 100644 --- a/prompttools/harness/chat_prompt_template_harness.py +++ b/prompttools/harness/chat_prompt_template_harness.py @@ -107,7 +107,10 @@ def run(self, clear_previous_results: bool = False): template_col_name = "templates" template_index_col_name = "template_index" template_df = pd.DataFrame( - {template_index_col_name: template_indices * repeat, template_col_name: templates * repeat} + { + template_index_col_name: [i for i in template_indices for _ in range(repeat)], + template_col_name: [s for s in templates for _ in range(repeat)], + } ) # Full DF if template_col_name in self.experiment.full_df.columns: From e314fa3ce9e11356f6f1e5e3f4e4addc580d83cf Mon Sep 17 00:00:00 2001 From: Kevin Date: Thu, 25 Jan 2024 23:19:57 -0500 Subject: [PATCH 34/52] Add Mistral experiment --- .../experiments/mistral_experiment.py | 94 +++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 prompttools/experiment/experiments/mistral_experiment.py diff --git a/prompttools/experiment/experiments/mistral_experiment.py b/prompttools/experiment/experiments/mistral_experiment.py new file mode 100644 index 00000000..222fae73 --- /dev/null +++ b/prompttools/experiment/experiments/mistral_experiment.py @@ -0,0 +1,94 @@ +# Copyright (c) Hegel AI, Inc. +# All rights reserved. +# +# This source code's license can be found in the +# LICENSE file in the root directory of this source tree. + +import os +import requests +import json + +from typing import Optional + + +from .experiment import Experiment + + +class MistralChatCompletionExperiment(Experiment): + r""" + This class defines an experiment for Mistral's chatcompletion API. It accepts lists for each argument + passed into the API, then creates a cartesian product of those arguments, and gets results for each. + + Note: + - All arguments here should be a ``list``, even if you want to keep the argument frozen + (i.e. ``temperature=[1.0]``), because the experiment will try all possible combination + of the input arguments. + - You should set ``os.environ["MISTRAL_API_KEY"] = YOUR_KEY`` in order to connect with Mistral's API. + + Args: + model (list[str]): + the model(s) that will complete your prompt (e.g. "mistral-tiny") + + messages (list[str]): + Input prompts, encoded as a list of dict with role and content. + The first prompt role should be `user` or `system`. + + temperature (list[float], optional): + The amount of randomness injected into the response + + top_p (list[float], optional): + use nucleus sampling. + + max_tokens (list[int]): + The maximum number of tokens to generate in the completion.. + + stream (list[bool], optional): + Whether to incrementally stream the response using server-sent events. + + safe_prompt (list[bool]): + Whether to inject a safety prompt before all conversations. + + random_seed (list[int], optional): + The seed to use for random sampling. If set, different calls will generate deterministic results. 
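+
+    Example:
+        A minimal usage sketch (the values below are illustrative assumptions; ``messages``
+        is a list of chat messages encoded as described above)::
+
+            experiment = MistralChatCompletionExperiment(
+                model=["mistral-tiny"],
+                messages=messages,
+                temperature=[0.0, 1.0],
+            )
+            experiment.run()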
+ """ + + url = "https://api.mistral.ai/v1/chat/completions" + + def __init__( + self, + model: list[str], + messages: list[str], + temperature: list[float] = [None], + top_p: list[float] = [None], + max_tokens: list[Optional[int]] = [None], + stream: list[bool] = [False], + safe_prompt: list[bool] = [False], + random_seed: list[Optional[int]] = [None], + ): + self.completion_fn = self.mistral_completion_fn + + self.all_args = dict( + model=model, + messages=messages, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + safe_prompt=safe_prompt, + random_seed=random_seed, + ) + super().__init__() + + def mistral_completion_fn(self, **input_args): + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {os['MISTRAL_API_KEY']}"} + return requests.post(self.url, headers=headers, data=json.dumps(input_args)) + + @staticmethod + def _extract_responses(response: dict) -> list[str]: + return response["choices"][0]["message"]["content"] + + def _get_model_names(self): + return [combo["model"] for combo in self.argument_combos] + + def _get_prompts(self): + return [combo["messages"] for combo in self.argument_combos] From 6c6b2abf9032bd177416f23b2d3ea6dd11af10ec Mon Sep 17 00:00:00 2001 From: Kevin Date: Thu, 25 Jan 2024 23:20:59 -0500 Subject: [PATCH 35/52] Adding Mistral to doc --- docs/source/experiment.rst | 2 ++ prompttools/experiment/__init__.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/docs/source/experiment.rst b/docs/source/experiment.rst index 5229e777..d499556e 100644 --- a/docs/source/experiment.rst +++ b/docs/source/experiment.rst @@ -41,6 +41,8 @@ LLMs .. autoclass:: GoogleVertexChatCompletionExperiment +.. autoclass:: MistralChatCompletionExperiment + .. autoclass:: LlamaCppExperiment .. 
autoclass:: ReplicateExperiment diff --git a/prompttools/experiment/__init__.py b/prompttools/experiment/__init__.py index 70d8ca5d..26df82fc 100644 --- a/prompttools/experiment/__init__.py +++ b/prompttools/experiment/__init__.py @@ -16,6 +16,7 @@ from .experiments.chromadb_experiment import ChromaDBExperiment from .experiments.weaviate_experiment import WeaviateExperiment from .experiments.lancedb_experiment import LanceDBExperiment +from .experiments.mistral_experiment import MistralChatCompletionExperiment from .experiments.mindsdb_experiment import MindsDBExperiment from .experiments.langchain_experiment import SequentialChainExperiment, RouterChainExperiment from .experiments.stablediffusion_experiment import StableDiffusionExperiment @@ -32,6 +33,7 @@ "LanceDBExperiment", "LlamaCppExperiment", "HuggingFaceHubExperiment", + "MistralChatCompletionExperiment", "MindsDBExperiment", "OpenAIChatExperiment", "OpenAICompletionExperiment", From 10a8d7f3cdd6e4267db3a51812de66376f1a614b Mon Sep 17 00:00:00 2001 From: Kevin Date: Fri, 26 Jan 2024 23:57:29 -0500 Subject: [PATCH 36/52] Adding Mistral example --- .../notebooks/MistralChatExperiment.ipynb | 293 ++++++++++++++++++ 1 file changed, 293 insertions(+) create mode 100644 examples/notebooks/MistralChatExperiment.ipynb diff --git a/examples/notebooks/MistralChatExperiment.ipynb b/examples/notebooks/MistralChatExperiment.ipynb new file mode 100644 index 00000000..9e6a56c4 --- /dev/null +++ b/examples/notebooks/MistralChatExperiment.ipynb @@ -0,0 +1,293 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0a13ddc8", + "metadata": {}, + "source": [ + "# Mistral Chat Experiment Example" + ] + }, + { + "cell_type": "markdown", + "id": "623f0cfe", + "metadata": {}, + "source": [ + "## Installations" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "885dabeb", + "metadata": {}, + "outputs": [], + "source": [ + "# !pip install --quiet --force-reinstall prompttools" + ] + }, + { + "cell_type": "markdown", + "id": "2eac35f8", + "metadata": {}, + "source": [ + "## Setup imports and API keys" + ] + }, + { + "cell_type": "markdown", + "id": "5edba05a", + "metadata": {}, + "source": [ + "First, we'll need to set our API keys. If we are in DEBUG mode, we don't need to use a real OpenAI key, so for now we'll set them to empty strings." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "ed4e635e", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "os.environ[\"MISTRAL_API_KEY\"] = \"\" # Insert your key here" + ] + }, + { + "cell_type": "markdown", + "id": "842f1e47", + "metadata": {}, + "source": [ + "Then we'll import the relevant `prompttools` modules to setup our experiment." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "beaa70a1", + "metadata": {}, + "outputs": [], + "source": [ + "from prompttools.experiment import MistralChatCompletionExperiment" + ] + }, + { + "cell_type": "markdown", + "id": "622dea9a", + "metadata": {}, + "source": [ + "## Run an experiment" + ] + }, + { + "cell_type": "markdown", + "id": "3babfe5a", + "metadata": {}, + "source": [ + "Next, we create our test inputs. We can iterate over models, inputs, and configurations like temperature." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "9114cfbf", + "metadata": {}, + "outputs": [], + "source": [ + "models = [\"mistral-tiny\"]\n", + "messages = [\n", + " [\n", + " {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n", + " {\"role\": \"user\", \"content\": \"Who was the first president?\"},\n", + " ]\n", + "]\n", + "temperatures = [0.0, 1.0]\n", + "# You can add more parameters that you'd like to test here.\n", + "\n", + "experiment = MistralChatCompletionExperiment(models, messages, temperature=temperatures)" + ] + }, + { + "cell_type": "markdown", + "id": "f3fa5450", + "metadata": {}, + "source": [ + "We can then run the experiment to get results." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "83b33130", + "metadata": {}, + "outputs": [], + "source": [ + "experiment.run()" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "7598332b", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
modelmessagestemperatureresponseresponse_usagelatency
0gpt-3.5-turbo[{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}]0.0George Washington{'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75}0.000006
1gpt-3.5-turbo[{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}]1.0George Washington{'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75}0.000005
2gpt-3.5-turbo-0613[{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}]0.0George Washington{'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75}0.000003
3gpt-3.5-turbo-0613[{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}]1.0George Washington{'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75}0.000002
\n", + "
" + ], + "text/plain": [ + " model \\\n", + "0 gpt-3.5-turbo \n", + "1 gpt-3.5-turbo \n", + "2 gpt-3.5-turbo-0613 \n", + "3 gpt-3.5-turbo-0613 \n", + "\n", + " messages \\\n", + "0 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}] \n", + "1 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}] \n", + "2 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}] \n", + "3 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}] \n", + "\n", + " temperature response \\\n", + "0 0.0 George Washington \n", + "1 1.0 George Washington \n", + "2 0.0 George Washington \n", + "3 1.0 George Washington \n", + "\n", + " response_usage \\\n", + "0 {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75} \n", + "1 {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75} \n", + "2 {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75} \n", + "3 {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75} \n", + "\n", + " latency \n", + "0 0.000006 \n", + "1 0.000005 \n", + "2 0.000003 \n", + "3 0.000002 " + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "experiment.visualize()" + ] + }, + { + "cell_type": "markdown", + "id": "266c13eb", + "metadata": {}, + "source": [ + "## Evaluate the model response" + ] + }, + { + "cell_type": "markdown", + "id": "bebb8023", + "metadata": {}, + "source": [ + "To evaluate the results, we'll define an eval function. We can use semantic distance to check if the model's response is similar to our expected output.\n", + "\n", + "Since we are using semantic similarity, you need to have the library `sentence_transformers` installed." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "78c0c9f6", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 128534d3cedf91d2c9c01c1e38a8555ddf97bd24 Mon Sep 17 00:00:00 2001 From: Kevin Date: Sun, 28 Jan 2024 23:58:22 -0500 Subject: [PATCH 37/52] Fix example --- .../notebooks/MistralChatExperiment.ipynb | 72 +++++++++++++++++-- 1 file changed, 68 insertions(+), 4 deletions(-) diff --git a/examples/notebooks/MistralChatExperiment.ipynb b/examples/notebooks/MistralChatExperiment.ipynb index 9e6a56c4..c221ae5e 100644 --- a/examples/notebooks/MistralChatExperiment.ipynb +++ b/examples/notebooks/MistralChatExperiment.ipynb @@ -54,6 +54,70 @@ "os.environ[\"MISTRAL_API_KEY\"] = \"\" # Insert your key here" ] }, + { + "cell_type": "code", + "execution_count": 4, + "id": "155d2fcc", + "metadata": {}, + "outputs": [], + "source": [ + "from mistralai.client import MistralClient\n", + "from mistralai.models.chat_completion import ChatMessage\n", + "\n", + "\n", + "api_key = os.environ[\"MISTRAL_API_KEY\"]\n", + "model = \"mistral-tiny\"\n", + "\n", + "client = MistralClient(api_key=api_key)\n", + "\n", + "messages = [\n", + " ChatMessage(role=\"user\", content=\"What is the best French cheese?\")\n", + "]\n", + "\n", + "# No streaming\n", + "chat_response = client.chat(\n", + " model=model,\n", + " messages=messages,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "b5f86d60", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Determining the \"best\" French cheese is subjective as it depends on personal preferences. Some popular and highly regarded French cheeses include:\\n\\n1. Roquefort: A blue-veined cheese made from sheep\\'s milk. It is known for its strong, pungent, and tangy flavor.\\n2. Camembert: A soft, creamy cheese with a white rind and a rich, earthy taste. It is one of the most famous French cheeses.\\n3. Comté: A nutty, firm, and slightly sweet cheese made from unpasteurized cow\\'s milk. It is often compared to Swiss Emmenthal.\\n4. Brie de Meaux: A soft, creamy cheese with a white rind and a mild, buttery taste. It is considered the original Brie.\\n5. Munster: A smelly, soft, and runny cheese with a pungent aroma and a strong, savory flavor.\\n6. Reblochon: A soft, creamy cheese with a bloomy rind and a rich, milky, and slightly sweet taste.\\n7. 
Époisses: A strong-smelling, soft, and runny cheese with a pungent flavor and a velvety texture.\\n\\nEach of these cheeses has its unique characteristics and flavor profile, so it\\'s essential to try them to determine which one suits your taste preferences the best.'" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "chat_response" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e4c8a211", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "44170e69", + "metadata": {}, + "outputs": [], + "source": [] + }, { "cell_type": "markdown", "id": "842f1e47", @@ -95,12 +159,12 @@ "metadata": {}, "outputs": [], "source": [ + "from mistralai.models.chat_completion import ChatMessage\n", + "\n", "models = [\"mistral-tiny\"]\n", "messages = [\n", - " [\n", - " {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n", - " {\"role\": \"user\", \"content\": \"Who was the first president?\"},\n", - " ]\n", + " ChatMessage(role=\"system\", content=\"You are a helpful assistant.\"),\n", + " ChatMessage(role=\"user\", content=\"Who was the first president?\"),\n", "]\n", "temperatures = [0.0, 1.0]\n", "# You can add more parameters that you'd like to test here.\n", From c832b082ae0cab199888cca060ee8dd6e6ed8597 Mon Sep 17 00:00:00 2001 From: Kevin Date: Mon, 29 Jan 2024 00:06:17 -0500 Subject: [PATCH 38/52] Touch up implementation and example --- .../notebooks/MistralChatExperiment.ipynb | 166 +++++------------- .../experiments/mistral_experiment.py | 38 ++-- 2 files changed, 66 insertions(+), 138 deletions(-) diff --git a/examples/notebooks/MistralChatExperiment.ipynb b/examples/notebooks/MistralChatExperiment.ipynb index c221ae5e..5813afb8 100644 --- a/examples/notebooks/MistralChatExperiment.ipynb +++ b/examples/notebooks/MistralChatExperiment.ipynb @@ -44,7 +44,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 14, "id": "ed4e635e", "metadata": {}, "outputs": [], @@ -54,70 +54,6 @@ "os.environ[\"MISTRAL_API_KEY\"] = \"\" # Insert your key here" ] }, - { - "cell_type": "code", - "execution_count": 4, - "id": "155d2fcc", - "metadata": {}, - "outputs": [], - "source": [ - "from mistralai.client import MistralClient\n", - "from mistralai.models.chat_completion import ChatMessage\n", - "\n", - "\n", - "api_key = os.environ[\"MISTRAL_API_KEY\"]\n", - "model = \"mistral-tiny\"\n", - "\n", - "client = MistralClient(api_key=api_key)\n", - "\n", - "messages = [\n", - " ChatMessage(role=\"user\", content=\"What is the best French cheese?\")\n", - "]\n", - "\n", - "# No streaming\n", - "chat_response = client.chat(\n", - " model=model,\n", - " messages=messages,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "b5f86d60", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'Determining the \"best\" French cheese is subjective as it depends on personal preferences. Some popular and highly regarded French cheeses include:\\n\\n1. Roquefort: A blue-veined cheese made from sheep\\'s milk. It is known for its strong, pungent, and tangy flavor.\\n2. Camembert: A soft, creamy cheese with a white rind and a rich, earthy taste. It is one of the most famous French cheeses.\\n3. Comté: A nutty, firm, and slightly sweet cheese made from unpasteurized cow\\'s milk. It is often compared to Swiss Emmenthal.\\n4. Brie de Meaux: A soft, creamy cheese with a white rind and a mild, buttery taste. 
It is considered the original Brie.\\n5. Munster: A smelly, soft, and runny cheese with a pungent aroma and a strong, savory flavor.\\n6. Reblochon: A soft, creamy cheese with a bloomy rind and a rich, milky, and slightly sweet taste.\\n7. Époisses: A strong-smelling, soft, and runny cheese with a pungent flavor and a velvety texture.\\n\\nEach of these cheeses has its unique characteristics and flavor profile, so it\\'s essential to try them to determine which one suits your taste preferences the best.'" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "chat_response" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e4c8a211", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "44170e69", - "metadata": {}, - "outputs": [], - "source": [] - }, { "cell_type": "markdown", "id": "842f1e47", @@ -128,7 +64,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 10, "id": "beaa70a1", "metadata": {}, "outputs": [], @@ -154,7 +90,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 11, "id": "9114cfbf", "metadata": {}, "outputs": [], @@ -163,8 +99,10 @@ "\n", "models = [\"mistral-tiny\"]\n", "messages = [\n", - " ChatMessage(role=\"system\", content=\"You are a helpful assistant.\"),\n", - " ChatMessage(role=\"user\", content=\"Who was the first president?\"),\n", + " [ChatMessage(role=\"system\", content=\"You are a helpful assistant.\"),\n", + " ChatMessage(role=\"user\", content=\"Who was the first president?\"),],\n", + " [ChatMessage(role=\"system\", content=\"You are a helpful assistant. Keep your answer concise\"),\n", + " ChatMessage(role=\"user\", content=\"Who was the second president?\"),],\n", "]\n", "temperatures = [0.0, 1.0]\n", "# You can add more parameters that you'd like to test here.\n", @@ -182,8 +120,8 @@ }, { "cell_type": "code", - "execution_count": null, - "id": "83b33130", + "execution_count": 12, + "id": "fbddc7fd", "metadata": {}, "outputs": [], "source": [ @@ -192,7 +130,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 13, "id": "7598332b", "metadata": {}, "outputs": [ @@ -217,85 +155,69 @@ " \n", " \n", " \n", - " model\n", - " messages\n", " temperature\n", + " messages\n", " response\n", - " response_usage\n", " latency\n", " \n", " \n", " \n", " \n", " 0\n", - " gpt-3.5-turbo\n", - " [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}]\n", " 0.0\n", - " George Washington\n", - " {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75}\n", - " 0.000006\n", + " [role='system' content='You are a helpful assistant.', role='user' content='Who was the first president?']\n", + " The first president of the United States was George Washington. He served two terms from April 30, 1789, to March 4, 1797. Washington played a crucial role in the founding of the United States and was unanimously chosen by the Electoral College to be the first president. 
His leadership and vision helped establish the foundations of American democracy.\n", + " 1.334489\n", " \n", " \n", " 1\n", - " gpt-3.5-turbo\n", - " [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}]\n", " 1.0\n", - " George Washington\n", - " {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75}\n", - " 0.000005\n", + " [role='system' content='You are a helpful assistant.', role='user' content='Who was the first president?']\n", + " The first president of the United States was George Washington. He served two terms from April 30, 1789, to March 4, 1797. Washington played a crucial role in the founding of the United States and is often referred to as the \"Father of His Country.\" His leadership during the American Revolution and his commitment to upholding the Constitution helped establish a sense of national identity and unity.\n", + " 0.920111\n", " \n", " \n", " 2\n", - " gpt-3.5-turbo-0613\n", - " [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}]\n", " 0.0\n", - " George Washington\n", - " {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75}\n", - " 0.000003\n", + " [role='system' content='You are a helpful assistant. Keep your answer concise', role='user' content='Who was the second president?']\n", + " The second president of the United States was John Adams, serving from 1797 to 1801. He succeeded George Washington and was followed by Thomas Jefferson. Adams is known for his role in the American Revolution and his diplomacy in Europe.\n", + " 0.628162\n", " \n", " \n", " 3\n", - " gpt-3.5-turbo-0613\n", - " [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}]\n", " 1.0\n", - " George Washington\n", - " {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75}\n", - " 0.000002\n", + " [role='system' content='You are a helpful assistant. Keep your answer concise', role='user' content='Who was the second president?']\n", + " The second president of the United States was John Adams, serving from 1797 to 1801. He succeeded George Washington and was preceded by Thomas Jefferson. 
Adams was a Founding Father and a leading figure in the American Revolution.\n", + " 0.608941\n", " \n", " \n", "\n", "" ], "text/plain": [ - " model \\\n", - "0 gpt-3.5-turbo \n", - "1 gpt-3.5-turbo \n", - "2 gpt-3.5-turbo-0613 \n", - "3 gpt-3.5-turbo-0613 \n", - "\n", - " messages \\\n", - "0 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}] \n", - "1 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}] \n", - "2 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}] \n", - "3 [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Who was the first president?'}] \n", + " temperature \\\n", + "0 0.0 \n", + "1 1.0 \n", + "2 0.0 \n", + "3 1.0 \n", "\n", - " temperature response \\\n", - "0 0.0 George Washington \n", - "1 1.0 George Washington \n", - "2 0.0 George Washington \n", - "3 1.0 George Washington \n", + " messages \\\n", + "0 [role='system' content='You are a helpful assistant.', role='user' content='Who was the first president?'] \n", + "1 [role='system' content='You are a helpful assistant.', role='user' content='Who was the first president?'] \n", + "2 [role='system' content='You are a helpful assistant. Keep your answer concise', role='user' content='Who was the second president?'] \n", + "3 [role='system' content='You are a helpful assistant. Keep your answer concise', role='user' content='Who was the second president?'] \n", "\n", - " response_usage \\\n", - "0 {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75} \n", - "1 {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75} \n", - "2 {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75} \n", - "3 {'completion_tokens': 18, 'prompt_tokens': 57, 'total_tokens': 75} \n", + " response \\\n", + "0 The first president of the United States was George Washington. He served two terms from April 30, 1789, to March 4, 1797. Washington played a crucial role in the founding of the United States and was unanimously chosen by the Electoral College to be the first president. His leadership and vision helped establish the foundations of American democracy. \n", + "1 The first president of the United States was George Washington. He served two terms from April 30, 1789, to March 4, 1797. Washington played a crucial role in the founding of the United States and is often referred to as the \"Father of His Country.\" His leadership during the American Revolution and his commitment to upholding the Constitution helped establish a sense of national identity and unity. \n", + "2 The second president of the United States was John Adams, serving from 1797 to 1801. He succeeded George Washington and was followed by Thomas Jefferson. Adams is known for his role in the American Revolution and his diplomacy in Europe. \n", + "3 The second president of the United States was John Adams, serving from 1797 to 1801. He succeeded George Washington and was preceded by Thomas Jefferson. Adams was a Founding Father and a leading figure in the American Revolution. 
\n", "\n", " latency \n", - "0 0.000006 \n", - "1 0.000005 \n", - "2 0.000003 \n", - "3 0.000002 " + "0 1.334489 \n", + "1 0.920111 \n", + "2 0.628162 \n", + "3 0.608941 " ] }, "metadata": {}, @@ -319,9 +241,7 @@ "id": "bebb8023", "metadata": {}, "source": [ - "To evaluate the results, we'll define an eval function. We can use semantic distance to check if the model's response is similar to our expected output.\n", - "\n", - "Since we are using semantic similarity, you need to have the library `sentence_transformers` installed." + "We have many other examples of how you can evaluate the model's responses. Please reference the examples in the OpenAIChatExperiment notebook or AnthropicExperiment notebook." ] }, { diff --git a/prompttools/experiment/experiments/mistral_experiment.py b/prompttools/experiment/experiments/mistral_experiment.py index 222fae73..22b1fe08 100644 --- a/prompttools/experiment/experiments/mistral_experiment.py +++ b/prompttools/experiment/experiments/mistral_experiment.py @@ -5,8 +5,6 @@ # LICENSE file in the root directory of this source tree. import os -import requests -import json from typing import Optional @@ -14,6 +12,16 @@ from .experiment import Experiment +try: + import mistralai + from mistralai.client import MistralClient + from mistralai.models.chat_completion import ChatMessage +except ImportError: + mistralai = None + MistralClient = None + ChatMessage = None + + class MistralChatCompletionExperiment(Experiment): r""" This class defines an experiment for Mistral's chatcompletion API. It accepts lists for each argument @@ -29,9 +37,8 @@ class MistralChatCompletionExperiment(Experiment): model (list[str]): the model(s) that will complete your prompt (e.g. "mistral-tiny") - messages (list[str]): - Input prompts, encoded as a list of dict with role and content. - The first prompt role should be `user` or `system`. + messages (list[ChatMessage]): + Input prompts (using Mistral's Python library). The first prompt role should be `user` or `system`. temperature (list[float], optional): The amount of randomness injected into the response @@ -40,10 +47,7 @@ class MistralChatCompletionExperiment(Experiment): use nucleus sampling. max_tokens (list[int]): - The maximum number of tokens to generate in the completion.. - - stream (list[bool], optional): - Whether to incrementally stream the response using server-sent events. + The maximum number of tokens to generate in the completion. safe_prompt (list[bool]): Whether to inject a safety prompt before all conversations. @@ -61,10 +65,15 @@ def __init__( temperature: list[float] = [None], top_p: list[float] = [None], max_tokens: list[Optional[int]] = [None], - stream: list[bool] = [False], safe_prompt: list[bool] = [False], random_seed: list[Optional[int]] = [None], ): + if mistralai is None: + raise ModuleNotFoundError( + "Package `mistralai` is required to be installed to use this experiment." 
+ "Please use `pip install mistralai` to install the package" + ) + self.client = MistralClient(api_key=os.environ["MISTRAL_API_KEY"]) self.completion_fn = self.mistral_completion_fn self.all_args = dict( @@ -73,19 +82,18 @@ def __init__( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - stream=stream, safe_prompt=safe_prompt, random_seed=random_seed, ) super().__init__() def mistral_completion_fn(self, **input_args): - headers = {"Content-Type": "application/json", "Authorization": f"Bearer {os['MISTRAL_API_KEY']}"} - return requests.post(self.url, headers=headers, data=json.dumps(input_args)) + response = self.client.chat(**input_args) + return response @staticmethod - def _extract_responses(response: dict) -> list[str]: - return response["choices"][0]["message"]["content"] + def _extract_responses(response) -> list[str]: + return response.choices[0].message.content def _get_model_names(self): return [combo["model"] for combo in self.argument_combos] From 133e02a1ea3abd717770cc4d28b395aaaedb1261 Mon Sep 17 00:00:00 2001 From: Kevin Date: Mon, 29 Jan 2024 00:11:07 -0500 Subject: [PATCH 39/52] Ignoring macOS file --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 335a567c..3ad58506 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,9 @@ prompttools/version.py # C extensions *.so +macOS +*/.DS_Store + # PyCharm # JetBrains specific template is maintained in a separate JetBrains.gitignore that can # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore From a20a88ce06b4ad0404a6b9dd96585081304972f1 Mon Sep 17 00:00:00 2001 From: Kevin Date: Thu, 8 Feb 2024 12:07:56 -0800 Subject: [PATCH 40/52] Adding logging test case and notebook --- examples/notebooks/remote/Logging.ipynb | 164 ++++++++++++++++++++++++ 1 file changed, 164 insertions(+) create mode 100644 examples/notebooks/remote/Logging.ipynb diff --git a/examples/notebooks/remote/Logging.ipynb b/examples/notebooks/remote/Logging.ipynb new file mode 100644 index 00000000..d8b5726c --- /dev/null +++ b/examples/notebooks/remote/Logging.ipynb @@ -0,0 +1,164 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Remote Logging Example" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Logging with main client" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "True" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import os\n", + "from os.path import join, dirname\n", + "from pathlib import Path\n", + "from dotenv import load_dotenv\n", + "\n", + "# You can set your API Key\n", + "# os.environ[\"OPENAI_API_KEY\"] = \"\"\n", + "# os.environ[\"HEGELAI_API_KEY\"] = \"\"\n", + "\n", + "# You can also import in from your .env file\n", + "dotenv_path = join(Path(os.getcwd()).parent.parent.parent, \".env\") # \".../path_to_env/.env\"\n", + "load_dotenv(dotenv_path)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import prompttools.logger" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The first president of the United States was George Washington.\n" + ] + } + ], + "source": [ + "import openai\n", + "\n", + "model = \"gpt-3.5-turbo\"\n", + "messages = [\n", + " {\"role\": \"system\", 
\"content\": \"You are a helpful assistant.\"},\n", + " {\"role\": \"user\", \"content\": \"Who was the first president?\"},\n", + "]\n", + "\n", + "response = openai.chat.completions.create(model=model, messages=messages)\n", + "message = response.choices[0].message.content\n", + "\n", + "print(message)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Logging with single client" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "prod\n" + ] + } + ], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Logging with main async client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Logging with single async client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.7" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} From b04f1c2fc4d82346857e3073d02843a79b104e2f Mon Sep 17 00:00:00 2001 From: Kevin Date: Thu, 8 Feb 2024 17:04:31 -0800 Subject: [PATCH 41/52] Expand logging example --- examples/notebooks/remote/Logging.ipynb | 57 +++++++++++++++++-------- 1 file changed, 39 insertions(+), 18 deletions(-) diff --git a/examples/notebooks/remote/Logging.ipynb b/examples/notebooks/remote/Logging.ipynb index d8b5726c..5ced4a82 100644 --- a/examples/notebooks/remote/Logging.ipynb +++ b/examples/notebooks/remote/Logging.ipynb @@ -36,15 +36,22 @@ "from pathlib import Path\n", "from dotenv import load_dotenv\n", "\n", - "# You can set your API Key\n", + "# You can set your API Key as such\n", "# os.environ[\"OPENAI_API_KEY\"] = \"\"\n", "# os.environ[\"HEGELAI_API_KEY\"] = \"\"\n", "\n", - "# You can also import in from your .env file\n", + "# You can also import your keys in from your .env file\n", "dotenv_path = join(Path(os.getcwd()).parent.parent.parent, \".env\") # \".../path_to_env/.env\"\n", "load_dotenv(dotenv_path)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The following line of import enables you to start logging while using the normal OpenAI SDK." + ] + }, { "cell_type": "code", "execution_count": 2, @@ -54,6 +61,13 @@ "import prompttools.logger" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Passing arguments with keyword is STRONGLY recommended. Logging is done in the background without blocking your response." 
+ ] + }, { "cell_type": "code", "execution_count": 3, @@ -76,6 +90,7 @@ " {\"role\": \"user\", \"content\": \"Who was the first president?\"},\n", "]\n", "\n", + "# Passing arguments with keyword is STRONGLY recommended\n", "response = openai.chat.completions.create(model=model, messages=messages)\n", "message = response.choices[0].message.content\n", "\n", @@ -86,24 +101,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Logging with single client" + "### Logging with client instances" ] }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "prod\n" - ] - } - ], - "source": [] - }, { "cell_type": "code", "execution_count": null, @@ -132,6 +132,27 @@ "### Logging with single async client" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, { "cell_type": "code", "execution_count": null, From 20231a70e831caca5b5469df1e03a299fd1778f7 Mon Sep 17 00:00:00 2001 From: Kevin Date: Thu, 8 Feb 2024 17:05:07 -0800 Subject: [PATCH 42/52] Adding logging example for instance --- examples/notebooks/remote/Logging.ipynb | 41 +++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 3 deletions(-) diff --git a/examples/notebooks/remote/Logging.ipynb b/examples/notebooks/remote/Logging.ipynb index 5ced4a82..c128b0cb 100644 --- a/examples/notebooks/remote/Logging.ipynb +++ b/examples/notebooks/remote/Logging.ipynb @@ -104,12 +104,47 @@ "### Logging with client instances" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can also initialize the client instance with `openai.OpenAI`. Again, logging is done in the background without blocking." 
+ ] + }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "metadata": {}, - "outputs": [], - "source": [] + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "This is a test.\n" + ] + } + ], + "source": [ + "from openai import OpenAI\n", + "\n", + "client = OpenAI(\n", + " # This is the default and can be omitted\n", + " api_key=os.environ.get(\"OPENAI_API_KEY\"),\n", + ")\n", + "\n", + "response = client.chat.completions.create(\n", + " messages=[\n", + " {\n", + " \"role\": \"user\",\n", + " \"content\": \"Say this is a test\",\n", + " }\n", + " ],\n", + " model=\"gpt-3.5-turbo\",\n", + ")\n", + "\n", + "message = response.choices[0].message.content\n", + "print(message)" + ] }, { "cell_type": "markdown", From 77c4d29646405965af56ccf6db1c08ab1b2ecc4d Mon Sep 17 00:00:00 2001 From: Kevin Date: Thu, 8 Feb 2024 17:05:25 -0800 Subject: [PATCH 43/52] Formatting --- test/test_logger.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test/test_logger.py b/test/test_logger.py index 8e6b4793..8b0adcfc 100644 --- a/test/test_logger.py +++ b/test/test_logger.py @@ -6,7 +6,6 @@ if False: # Skipping this in CI - import openai import prompttools.logger # noqa: F401 Importing this line will monkey-patch `openai.chat.completions.create` From f358a9f3aa8db52146192ea064ee5db2c1bb5c17 Mon Sep 17 00:00:00 2001 From: Kevin Date: Thu, 8 Feb 2024 17:07:25 -0800 Subject: [PATCH 44/52] Refactor logger `add_to_queue` --- prompttools/logger/logger.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/prompttools/logger/logger.py b/prompttools/logger/logger.py index c9cc70df..9990c629 100644 --- a/prompttools/logger/logger.py +++ b/prompttools/logger/logger.py @@ -37,12 +37,20 @@ def __init__(self): self.worker_thread.start() def add_feedback(self, log_id, metric_name, value): - self.feedback_queue.put({ - "log_id": log_id, - "key": metric_name, - "value": value - }) - + self.feedback_queue.put({"log_id": log_id, "key": metric_name, "value": value}) + + def add_to_queue(self, hegel_model: str, result: dict, input_parameters: dict, latency: float, log_id: str): + # TODO: Deal with other_args + self.data_queue.put( + { + "hegel_model": hegel_model, + "result": result, + "input_parameters": input_parameters, # TODO: Need to record `*args` + "latency": latency, + "log_id": log_id, + } + ) + def execute_and_add_to_queue(self, callable_func, **kwargs): if "hegel_model" in kwargs: hegel_model = kwargs["hegel_model"] @@ -53,15 +61,7 @@ def execute_and_add_to_queue(self, callable_func, **kwargs): result = callable_func(**kwargs) latency = perf_counter() - start log_id = str(uuid.uuid4()) - self.data_queue.put( - { - "hegel_model": hegel_model, - "result": result.model_dump_json(), - "input_parameters": json.dumps(kwargs), - "latency": latency, - "log_id": log_id, - } - ) + self.add_to_queue(hegel_model, result.model_dump_json(), json.dumps(kwargs), latency, log_id) result.log_id = log_id return result From 1ba5abe3c9a6fb35432dbc757bb8cdb89f225cdd Mon Sep 17 00:00:00 2001 From: Kevin Date: Thu, 8 Feb 2024 17:09:04 -0800 Subject: [PATCH 45/52] Accounting for `*args` in logger --- prompttools/logger/logger.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/prompttools/logger/logger.py b/prompttools/logger/logger.py index 9990c629..9cb94c27 100644 --- a/prompttools/logger/logger.py +++ b/prompttools/logger/logger.py @@ -39,7 +39,9 @@ def __init__(self): def add_feedback(self, log_id, metric_name, 
value): self.feedback_queue.put({"log_id": log_id, "key": metric_name, "value": value}) - def add_to_queue(self, hegel_model: str, result: dict, input_parameters: dict, latency: float, log_id: str): + def add_to_queue( + self, hegel_model: str, result: dict, input_parameters: dict, latency: float, log_id: str, other_args + ): # TODO: Deal with other_args self.data_queue.put( { @@ -51,17 +53,17 @@ def add_to_queue(self, hegel_model: str, result: dict, input_parameters: dict, l } ) - def execute_and_add_to_queue(self, callable_func, **kwargs): + def execute_and_add_to_queue(self, callable_func, *args, **kwargs): if "hegel_model" in kwargs: hegel_model = kwargs["hegel_model"] del kwargs["hegel_model"] else: hegel_model = None start = perf_counter() - result = callable_func(**kwargs) + result = callable_func(*args, **kwargs) latency = perf_counter() - start log_id = str(uuid.uuid4()) - self.add_to_queue(hegel_model, result.model_dump_json(), json.dumps(kwargs), latency, log_id) + self.add_to_queue(hegel_model, result.model_dump_json(), json.dumps(kwargs), latency, log_id, args) result.log_id = log_id return result From eda1b542919565fa41ea0d759619d851d95be616 Mon Sep 17 00:00:00 2001 From: Kevin Date: Thu, 8 Feb 2024 17:09:54 -0800 Subject: [PATCH 46/52] Logging instance --- prompttools/logger/logger.py | 39 ++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/prompttools/logger/logger.py b/prompttools/logger/logger.py index 9cb94c27..56c0ae26 100644 --- a/prompttools/logger/logger.py +++ b/prompttools/logger/logger.py @@ -115,13 +115,48 @@ def send_feedback_to_remote(self, feedback_data): except requests.exceptions.RequestException as e: print(f"Error sending feedback to Flask API: {e}") + sender = Logger() -# Monkey-patching + + +def logging_wrapper(original_fn): + def wrapped_function(*args, **kwargs): + # Call the original function with the provided arguments + + if "hegel_model" in kwargs: + hegel_model = kwargs["hegel_model"] + del kwargs["hegel_model"] + else: + hegel_model = None + start = perf_counter() + result = original_fn(*args, **kwargs) + latency = perf_counter() - start + log_id = str(uuid.uuid4()) + sender.add_to_queue(hegel_model, result.model_dump_json(), json.dumps(kwargs), latency, log_id, args) + result.log_id = log_id + return result + + return wrapped_function + + +# Monkey-patching main client try: openai.chat.completions.create = sender.wrap(openai.chat.completions.create) except Exception: + print("Error monkey-patching main client") print("You may need to add `OPENAI_API_KEY=''` to your `.env` file.") raise +# Monkey-patching client instance +try: + # This is working as of openai SDK version 1.11.1 + openai.resources.chat.completions.Completions.create = logging_wrapper( + openai.resources.chat.completions.Completions.create + ) +except Exception: + print("Error monkey-patch individual client.") + raise + + def add_feedback(*args): - sender.add_feedback(*args) \ No newline at end of file + sender.add_feedback(*args) From a42db4ec1670dfd00f7a3b0cb45b1880b3772cec Mon Sep 17 00:00:00 2001 From: Kevin Date: Thu, 8 Feb 2024 17:22:25 -0800 Subject: [PATCH 47/52] Switch to kwargs only --- examples/notebooks/remote/Logging.ipynb | 53 ++++++++++++++----------- prompttools/logger/logger.py | 22 +++++----- 2 files changed, 42 insertions(+), 33 deletions(-) diff --git a/examples/notebooks/remote/Logging.ipynb b/examples/notebooks/remote/Logging.ipynb index c128b0cb..b849b80f 100644 --- a/examples/notebooks/remote/Logging.ipynb +++ 
b/examples/notebooks/remote/Logging.ipynb @@ -65,19 +65,24 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Passing arguments with keyword is STRONGLY recommended. Logging is done in the background without blocking your response." + "Passing arguments with keyword is REQUIRED. Logging is done in the background without blocking your response." ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 5, "metadata": {}, "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "The first president of the United States was George Washington.\n" + "ename": "TypeError", + "evalue": "create() takes 1 argument(s) but 2 were given", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[5], line 10\u001b[0m\n\u001b[1;32m 4\u001b[0m messages \u001b[38;5;241m=\u001b[39m [\n\u001b[1;32m 5\u001b[0m {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrole\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msystem\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcontent\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mYou are a helpful assistant.\u001b[39m\u001b[38;5;124m\"\u001b[39m},\n\u001b[1;32m 6\u001b[0m {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrole\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124muser\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcontent\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mWho was the first president?\u001b[39m\u001b[38;5;124m\"\u001b[39m},\n\u001b[1;32m 7\u001b[0m ]\n\u001b[1;32m 9\u001b[0m \u001b[38;5;66;03m# Passing arguments with keyword is STRONGLY recommended\u001b[39;00m\n\u001b[0;32m---> 10\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[43mopenai\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mchat\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcompletions\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcreate\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmessages\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmessages\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 11\u001b[0m message \u001b[38;5;241m=\u001b[39m response\u001b[38;5;241m.\u001b[39mchoices[\u001b[38;5;241m0\u001b[39m]\u001b[38;5;241m.\u001b[39mmessage\u001b[38;5;241m.\u001b[39mcontent\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28mprint\u001b[39m(message)\n", + "File \u001b[0;32m~/miniconda3/envs/ptools/lib/python3.11/site-packages/openai/_utils/_utils.py:246\u001b[0m, in \u001b[0;36mrequired_args..inner..wrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 244\u001b[0m given_params\u001b[38;5;241m.\u001b[39madd(positional[i])\n\u001b[1;32m 245\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mIndexError\u001b[39;00m:\n\u001b[0;32m--> 246\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\n\u001b[1;32m 247\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mfunc\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m() takes 
\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mlen\u001b[39m(positional)\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m argument(s) but \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mlen\u001b[39m(args)\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m were given\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 248\u001b[0m ) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m 250\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m key \u001b[38;5;129;01min\u001b[39;00m kwargs\u001b[38;5;241m.\u001b[39mkeys():\n\u001b[1;32m 251\u001b[0m given_params\u001b[38;5;241m.\u001b[39madd(key)\n", + "\u001b[0;31mTypeError\u001b[0m: create() takes 1 argument(s) but 2 were given" ] } ], @@ -90,8 +95,8 @@ " {\"role\": \"user\", \"content\": \"Who was the first president?\"},\n", "]\n", "\n", - "# Passing arguments with keyword is STRONGLY recommended\n", - "response = openai.chat.completions.create(model=model, messages=messages)\n", + "# Passing arguments with keyword is REQUIRED\n", + "response = openai.chat.completions.create(model, messages=messages)\n", "message = response.choices[0].message.content\n", "\n", "print(message)" @@ -150,21 +155,28 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Logging with main async client" + "### Logging with async client instance\n" ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "markdown", + "execution_count": 7, "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(1, 2)\n" + ] + } + ], "source": [ - "### Logging with single async client" + "def test_fn(*args):\n", + " print(args)\n", + "\n", + "\n", + "test_fn(1,2)" ] }, { @@ -181,13 +193,6 @@ "outputs": [], "source": [] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, { "cell_type": "code", "execution_count": null, diff --git a/prompttools/logger/logger.py b/prompttools/logger/logger.py index 56c0ae26..3b026513 100644 --- a/prompttools/logger/logger.py +++ b/prompttools/logger/logger.py @@ -40,30 +40,34 @@ def add_feedback(self, log_id, metric_name, value): self.feedback_queue.put({"log_id": log_id, "key": metric_name, "value": value}) def add_to_queue( - self, hegel_model: str, result: dict, input_parameters: dict, latency: float, log_id: str, other_args + self, + hegel_model: str, + result: dict, + input_parameters: dict, + latency: float, + log_id: str, ): - # TODO: Deal with other_args self.data_queue.put( { "hegel_model": hegel_model, "result": result, - "input_parameters": input_parameters, # TODO: Need to record `*args` + "input_parameters": input_parameters, "latency": latency, "log_id": log_id, } ) - def execute_and_add_to_queue(self, callable_func, *args, **kwargs): + def execute_and_add_to_queue(self, callable_func, **kwargs): if "hegel_model" in kwargs: hegel_model = kwargs["hegel_model"] del kwargs["hegel_model"] else: hegel_model = None start = perf_counter() - result = callable_func(*args, **kwargs) + result = callable_func(**kwargs) latency = perf_counter() - start log_id = str(uuid.uuid4()) - self.add_to_queue(hegel_model, result.model_dump_json(), json.dumps(kwargs), latency, log_id, args) + self.add_to_queue(hegel_model, result.model_dump_json(), json.dumps(kwargs), latency, log_id) result.log_id = log_id return result @@ -120,7 +124,7 @@ def send_feedback_to_remote(self, feedback_data): def logging_wrapper(original_fn): - def 
wrapped_function(*args, **kwargs):
+    def wrapped_function(**kwargs):
         # Call the original function with the provided arguments
 
         if "hegel_model" in kwargs:
@@ -129,10 +133,10 @@ def wrapped_function(*args, **kwargs):
         else:
             hegel_model = None
         start = perf_counter()
-        result = original_fn(*args, **kwargs)
+        result = original_fn(**kwargs)
         latency = perf_counter() - start
         log_id = str(uuid.uuid4())
-        sender.add_to_queue(hegel_model, result.model_dump_json(), json.dumps(kwargs), latency, log_id, args)
+        sender.add_to_queue(hegel_model, result.model_dump_json(), json.dumps(kwargs), latency, log_id)
         result.log_id = log_id
         return result
 

From 8f228f7a13818b48c3444962a47999fbd0a868b1 Mon Sep 17 00:00:00 2001
From: Kevin
Date: Fri, 9 Feb 2024 15:57:30 -0800
Subject: [PATCH 48/52] Adding Google Gemini chat experiment

---
 .../google_gemini_chat_experiment.py          | 83 +++++++++++++++++++
 1 file changed, 83 insertions(+)
 create mode 100644 prompttools/experiment/experiments/google_gemini_chat_experiment.py

diff --git a/prompttools/experiment/experiments/google_gemini_chat_experiment.py b/prompttools/experiment/experiments/google_gemini_chat_experiment.py
new file mode 100644
index 00000000..fecd64cd
--- /dev/null
+++ b/prompttools/experiment/experiments/google_gemini_chat_experiment.py
@@ -0,0 +1,83 @@
+# Copyright (c) Hegel AI, Inc.
+# All rights reserved.
+#
+# This source code's license can be found in the
+# LICENSE file in the root directory of this source tree.
+
+try:
+    import google.generativeai as genai
+    from google.generativeai.types import content_types
+    from google.generativeai.types import generation_types
+    from google.generativeai.types import safety_types
+except ImportError:
+    genai = None
+    content_types, generation_types, safety_types = None, None, None
+
+
+from .experiment import Experiment
+from typing import Optional
+import copy
+
+
+class GoogleGeminiChatCompletionExperiment(Experiment):
+    r"""
+    This class defines an experiment for Google GenAI's chat API. It accepts lists for each argument
+    passed into Google GenAI's API, then creates a cartesian product of those arguments, and gets results for each.
+
+    Note:
+        - All arguments here should be a ``list``, even if you want to keep the argument frozen
+          (i.e. ``temperature=[1.0]``), because the experiment will try all possible combinations
+          of the input arguments.
+        - You need to set up your Google AI Studio credentials properly before executing this experiment.
+          One option is to execute it on Google Colab.
+
+    Args:
+        model (list[str]): Which model to call, as a string or a ``types.Model`` (e.g. ``'gemini-pro'``).
+
+        contents (list[content_types]): Messages for the chat model to respond to.
+
+        generation_config (list[generation_types]): Configurations for the generation of the model.
+
+        safety_settings (list[safety_types]): Configurations for the safety features of the model.
+    """
+
+    def __init__(
+        self,
+        model: list[str],
+        contents: list[content_types.ContentsType],
+        generation_config: list[Optional[generation_types.GenerationConfigType]] = [None],
+        safety_settings: list[Optional[safety_types.SafetySettingOptions]] = [None],
+    ):
+        if genai is None:
+            raise ModuleNotFoundError(
+                "Package `google-generativeai` is required to be installed to use Google GenAI API in this experiment."
+                "Please use `pip install google-generativeai` to install the package or run this in Google Colab."
+            )
+
+        self.completion_fn = self.google_text_completion_fn
+
+        self.all_args = dict(
+            model=model,
+            contents=contents,
+            generation_config=generation_config,
+            safety_settings=safety_settings,
+        )
+        super().__init__()
+
+    def google_text_completion_fn(self, **input_args):
+        params = copy.deepcopy(input_args)
+        model = genai.GenerativeModel(input_args["model"])
+        del params["model"]
+        response = model.generate_content(**params)
+        return response
+
+    @staticmethod
+    def _extract_responses(response) -> list[str]:
+        # `response.text` will return the top response
+        return response.text
+
+    def _get_model_names(self):
+        return [combo["model"] for combo in self.argument_combos]
+
+    def _get_prompts(self):
+        return [combo["contents"] for combo in self.argument_combos]

From 7f957a7cefd3bd3b920ed8639901c79be04005b6 Mon Sep 17 00:00:00 2001
From: Kevin
Date: Fri, 9 Feb 2024 15:57:51 -0800
Subject: [PATCH 49/52] Adding Google Gemini chat experiment (colab) notebook example

---
 .../GoogleGeminiChatExperiment.ipynb          | 173 ++++++++++++++++++
 1 file changed, 173 insertions(+)
 create mode 100644 examples/notebooks/GoogleGeminiChatExperiment.ipynb

diff --git a/examples/notebooks/GoogleGeminiChatExperiment.ipynb b/examples/notebooks/GoogleGeminiChatExperiment.ipynb
new file mode 100644
index 00000000..3415baff
--- /dev/null
+++ b/examples/notebooks/GoogleGeminiChatExperiment.ipynb
@@ -0,0 +1,173 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Google Gemini Chat Experiment Example"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Installations"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# !pip install --quiet --force-reinstall prompttools"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Setup imports and API keys"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In order for the Google GenAI API to work, you must set up your Google AI Studio credentials (one example in the following cell) or execute this experiment on Google Colab.\n",
+    "\n",
+    "Executing on Google Colab may require the least amount of set-up."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import google.generativeai as genai\n",
+    "\n",
+    "from google.colab import userdata\n",
+    "\n",
+    "GOOGLE_API_KEY = \"\" # You can manually set your key\n",
+    "# GOOGLE_API_KEY = userdata.get('GOOGLE_API_KEY') # Or, you can read it from your account\n",
+    "\n",
+    "genai.configure(api_key=GOOGLE_API_KEY)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Once you succeed in setting up your credentials, you should be able to execute the following cell without error and see the list of models you have access to."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for m in genai.list_models():\n",
+    "  if 'generateContent' in m.supported_generation_methods:\n",
+    "    print(m.name)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Then we'll import the relevant `prompttools` modules to set up our experiment."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2023-07-28T21:15:15.360723Z",
+     "start_time": "2023-07-28T21:15:15.230441Z"
+    },
+    "collapsed": true,
+    "jupyter": {
+     "outputs_hidden": true
+    }
+   },
+   "source": [
+    "## Run an experiment"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next, we create our test inputs. We can iterate over models (`\"gemini-pro\"` in this case, you can also use the ultra model if you have access to it), contents (equivalent of prompt). You can also experiment with configurations like temperature using `generation_config` or `safety_settings`.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 31,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from prompttools.experiment import GoogleGeminiChatCompletionExperiment\n",
+    "\n",
+    "model = ['gemini-pro']\n",
+    "contents = [\"What is the meaning of life?\", \"Who was the first president?\"]\n",
+    "\n",
+    "experiment = GoogleGeminiChatCompletionExperiment(model=model, contents=contents)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "experiment.run()\n",
+    "experiment.visualize()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Evaluate the model response"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Please reference other notebooks (such as Google PaLM 2, Anthropic) for detailed evaluation of the model's response."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}

From 10dd707735c8890844c1004d088a9c8dcec4de2e Mon Sep 17 00:00:00 2001
From: Kevin
Date: Fri, 9 Feb 2024 15:58:06 -0800
Subject: [PATCH 50/52] Adding Google Gemini chat to doc and import

---
 docs/source/experiment.rst         | 2 ++
 prompttools/experiment/__init__.py | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/docs/source/experiment.rst b/docs/source/experiment.rst
index d499556e..ce64b00e 100644
--- a/docs/source/experiment.rst
+++ b/docs/source/experiment.rst
@@ -37,6 +37,8 @@ LLMs
 
 .. autoclass:: HuggingFaceHubExperiment
 
+.. autoclass:: GoogleGeminiChatCompletionExperiment
+
 .. autoclass:: GooglePaLMCompletionExperiment
 
 ..
autoclass:: GoogleVertexChatCompletionExperiment diff --git a/prompttools/experiment/__init__.py b/prompttools/experiment/__init__.py index 26df82fc..6f712f91 100644 --- a/prompttools/experiment/__init__.py +++ b/prompttools/experiment/__init__.py @@ -10,6 +10,7 @@ from .experiments.openai_completion_experiment import OpenAICompletionExperiment from .experiments.anthropic_completion_experiment import AnthropicCompletionExperiment from .experiments.huggingface_hub_experiment import HuggingFaceHubExperiment +from .experiments.google_gemini_chat_experiment import GoogleGeminiChatCompletionExperiment from .experiments.google_palm_experiment import GooglePaLMCompletionExperiment from .experiments.google_vertex_chat_experiment import GoogleVertexChatCompletionExperiment from .experiments.llama_cpp_experiment import LlamaCppExperiment @@ -28,6 +29,7 @@ "AnthropicCompletionExperiment", "ChromaDBExperiment", "Experiment", + "GoogleGeminiChatCompletionExperiment", "GooglePaLMCompletionExperiment", "GoogleVertexChatCompletionExperiment", "LanceDBExperiment", From 714b046ff251762c7123edeb4c4ff10472e57130 Mon Sep 17 00:00:00 2001 From: Kevin Date: Fri, 9 Feb 2024 15:59:19 -0800 Subject: [PATCH 51/52] Updating README supported models --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index b95d01b7..f5707bd0 100644 --- a/README.md +++ b/README.md @@ -100,7 +100,9 @@ LLMs - LLaMA.Cpp (LLaMA 1, LLaMA 2) - **Supported** - HuggingFace (Hub API, Inference Endpoints) - **Supported** - Anthropic - **Supported** -- Google PaLM - **Supported** +- Mistral AI - **Supported** +- Google Gemini - **Supported** +- Google PaLM (legacy) - **Supported** - Google Vertex AI - **Supported** - Azure OpenAI Service - **Supported** - Replicate - **Supported** From 5a807328435d269d7ed17b53f86283e116e08244 Mon Sep 17 00:00:00 2001 From: Kevin Date: Sat, 10 Feb 2024 16:09:50 -0800 Subject: [PATCH 52/52] Fix typing --- .../experiment/experiments/google_gemini_chat_experiment.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/prompttools/experiment/experiments/google_gemini_chat_experiment.py b/prompttools/experiment/experiments/google_gemini_chat_experiment.py index fecd64cd..60a4c627 100644 --- a/prompttools/experiment/experiments/google_gemini_chat_experiment.py +++ b/prompttools/experiment/experiments/google_gemini_chat_experiment.py @@ -44,9 +44,9 @@ class GoogleGeminiChatCompletionExperiment(Experiment): def __init__( self, model: list[str], - contents: list[content_types.ContentsType], - generation_config: list[Optional[generation_types.GenerationConfigType]] = [None], - safety_settings: list[Optional[safety_types.SafetySettingOptions]] = [None], + contents: list["content_types.ContentsType"], + generation_config: list[Optional["generation_types.GenerationConfigType"]] = [None], + safety_settings: list[Optional["safety_types.SafetySettingOptions"]] = [None], ): if genai is None: raise ModuleNotFoundError(