From f5c72fc40ae44f845b04c7c5432edba94a2197d1 Mon Sep 17 00:00:00 2001 From: lpm0073 Date: Sat, 18 Nov 2023 11:05:54 -0600 Subject: [PATCH 01/10] chore: add isort, mypy, prettier --- .pre-commit-config.yaml | 58 +++++++++++++++++++++++++++++++---------- requirements.txt | 2 ++ tox.ini | 6 +++++ 3 files changed, 52 insertions(+), 14 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 93ec654d..13257020 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,10 +2,30 @@ default_language_version: # default language version for each language python: python3.11 repos: + - repo: https://github.com/pre-commit/mirrors-eslint + rev: v8.54.0 + hooks: + - id: eslint + - repo: https://github.com/pre-commit/mirrors-prettier + rev: v3.1.0 + hooks: + - id: prettier - repo: https://github.com/psf/black - rev: 23.10.1 + rev: 23.11.0 + hooks: + - id: black + - repo: https://github.com/PyCQA/flake8 + rev: 6.1.0 + hooks: + - id: flake8 + - repo: https://github.com/PyCQA/isort + rev: 5.12.0 + hooks: + - id: isort + - repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.7.0 hooks: - - id: black + - id: mypy - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.5.0 hooks: @@ -26,7 +46,7 @@ repos: - id: trailing-whitespace - id: check-yaml - repo: https://github.com/gruntwork-io/pre-commit - rev: v0.1.22 # Get the latest from: https://github.com/gruntwork-io/pre-commit/releases + rev: v0.1.23 # Get the latest from: https://github.com/gruntwork-io/pre-commit/releases hooks: - id: terraform-fmt - id: helmlint @@ -36,18 +56,28 @@ repos: # - id: yapf # - id: markdown-link-check - repo: https://github.com/alessandrojcm/commitlint-pre-commit-hook - rev: v9.5.0 + rev: v9.9.0 hooks: - id: commitlint stages: [commit-msg] - additional_dependencies: ['@commitlint/config-angular'] + additional_dependencies: ["@commitlint/config-angular"] ci: - # for more information, see https://pre-commit.ci - autofix_commit_msg: | - [pre-commit.ci] auto fixes from pre-commit.com hooks - autofix_prs: true - autoupdate_branch: '' - autoupdate_commit_msg: '[pre-commit.ci] pre-commit autoupdate' - autoupdate_schedule: weekly - skip: [terraform-fmt, helmlint, terraform-validate, tflint, shellcheck, yapf, markdown-link-check, commitlint] - submodules: false + # for more information, see https://pre-commit.ci + autofix_commit_msg: | + [pre-commit.ci] auto fixes from pre-commit.com hooks + autofix_prs: true + autoupdate_branch: "" + autoupdate_commit_msg: "[pre-commit.ci] pre-commit autoupdate" + autoupdate_schedule: weekly + skip: + [ + terraform-fmt, + helmlint, + terraform-validate, + tflint, + shellcheck, + yapf, + markdown-link-check, + commitlint, + ] + submodules: false diff --git a/requirements.txt b/requirements.txt index 907b9f01..d9a199c5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,6 +17,8 @@ black==23.11.0 flake8==6.1.0 flake8-coding==1.3.2 pre-commit==3.5.0 +isort==5.12.0 +mypy==1.7.0 # production # ------------ diff --git a/tox.ini b/tox.ini index 44742ae3..24a61b09 100644 --- a/tox.ini +++ b/tox.ini @@ -4,6 +4,12 @@ envlist = py3.11,flake8 skip_missing_interpreters = true +[isort] +skip =venv,node_modules,api/terraform/python/lambda_openai/lambda_dist_pkg,api/terraform/python/lambda_openai/venv + +[mypy] +files = api/terraform/python/ + [gh-actions] python = 3.8: gitlint,py38,flake8 From b380d3228ec3bca37648bcb8c63e48297e773e15 Mon Sep 17 00:00:00 2001 From: lpm0073 Date: Sat, 18 Nov 2023 11:26:46 -0600 Subject: [PATCH 02/10] chore: lint with mypy and 
isort --- .pre-commit-config.yaml | 5 +- .../python/lambda_langchain/lambda_handler.py | 70 ++++++++----------- .../python/lambda_langchain/tests/test_01.py | 8 ++- .../python/lambda_openai/lambda_handler.py | 38 +++------- .../python/lambda_openai/tests/test_init.py | 6 +- .../python/lambda_openai_v2/lambda_handler.py | 23 +++--- .../python/lambda_openai_v2/tests/__init__.py | 4 +- .../python/lambda_openai_v2/tests/test_01.py | 8 ++- .../python/layer_genai/openai_utils/const.py | 2 + .../python/layer_genai/openai_utils/setup.py | 4 +- .../python/layer_genai/openai_utils/utils.py | 16 ++--- pyproject.toml | 23 ++++++ tox.ini | 5 +- 13 files changed, 100 insertions(+), 112 deletions(-) create mode 100644 pyproject.toml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 13257020..31e73f6b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -22,10 +22,7 @@ repos: rev: 5.12.0 hooks: - id: isort - - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.7.0 - hooks: - - id: mypy + args: ["--settings-path=pyproject.toml"] - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.5.0 hooks: diff --git a/api/terraform/python/lambda_langchain/lambda_handler.py b/api/terraform/python/lambda_langchain/lambda_handler.py index 6537405f..0fb4a913 100644 --- a/api/terraform/python/lambda_langchain/lambda_handler.py +++ b/api/terraform/python/lambda_langchain/lambda_handler.py @@ -15,56 +15,58 @@ https://python.langchain.com/docs/integrations/memory/aws_dynamodb https://bobbyhadz.com/blog/react-generate-unique-id """ -import os import json -from dotenv import load_dotenv, find_dotenv +import os # OpenAI imports import openai +from dotenv import find_dotenv, load_dotenv +from langchain.chains import LLMChain # Langchain imports from langchain.chat_models import ChatOpenAI -from langchain.chains import LLMChain from langchain.memory import ConversationBufferMemory from langchain.prompts import ( ChatPromptTemplate, + HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, - HumanMessagePromptTemplate, ) -# from langchain.memory.chat_message_histories.in_memory import ChatMessageHistory -# from langchain.schema.messages import HumanMessage, SystemMessage, AIMessage -# from langchain.schema.messages import BaseMessage - # local imports from 'layer_genai' virtual environment or AWS Lambda layer. 
from openai_utils.const import ( - OpenAIEndPoint, - OpenAIMessageKeys, - HTTP_RESPONSE_OK, HTTP_RESPONSE_BAD_REQUEST, HTTP_RESPONSE_INTERNAL_SERVER_ERROR, + HTTP_RESPONSE_OK, VALID_CHAT_COMPLETION_MODELS, VALID_EMBEDDING_MODELS, + OpenAIEndPoint, + OpenAIMessageKeys, ) from openai_utils.utils import ( - http_response_factory, - exception_response_factory, dump_environment, - get_request_body, - parse_request, + exception_response_factory, get_content_for_role, get_message_history, get_messages_for_role, + get_request_body, + http_response_factory, + parse_request, ) from openai_utils.validators import ( - validate_item, - validate_request_body, - validate_messages, validate_completion_request, validate_embedding_request, + validate_item, + validate_messages, + validate_request_body, ) + +# from langchain.memory.chat_message_histories.in_memory import ChatMessageHistory +# from langchain.schema.messages import HumanMessage, SystemMessage, AIMessage +# from langchain.schema.messages import BaseMessage + + ############################################################################### # ENVIRONMENT CREDENTIALS ############################################################################### @@ -108,9 +110,7 @@ def handler(event, context, api_key=None, organization=None, pinecone_api_key=No # ---------------------------------------------------------------------- request_body = get_request_body(event=event) validate_request_body(request_body=request_body) - end_point, model, messages, input_text, temperature, max_tokens = parse_request( - request_body - ) + end_point, model, messages, input_text, temperature, max_tokens = parse_request(request_body) validate_messages(request_body=request_body) request_meta_data = { "request_meta_data": { @@ -138,18 +138,12 @@ def handler(event, context, api_key=None, organization=None, pinecone_api_key=No item_type="ChatCompletion models", ) validate_completion_request(request_body) - system_message = get_content_for_role( - messages, OpenAIMessageKeys.OPENAI_SYSTEM_MESSAGE_KEY - ) - user_message = get_content_for_role( - messages, OpenAIMessageKeys.OPENAI_USER_MESSAGE_KEY - ) + system_message = get_content_for_role(messages, OpenAIMessageKeys.OPENAI_SYSTEM_MESSAGE_KEY) + user_message = get_content_for_role(messages, OpenAIMessageKeys.OPENAI_USER_MESSAGE_KEY) # 2. 
initialize the LangChain ChatOpenAI model # ------------------------------------------------------------- - llm = ChatOpenAI( - model=model, temperature=temperature, max_tokens=max_tokens - ) + llm = ChatOpenAI(model=model, temperature=temperature, max_tokens=max_tokens) prompt = ChatPromptTemplate( messages=[ SystemMessagePromptTemplate.from_template(system_message), @@ -165,9 +159,7 @@ def handler(event, context, api_key=None, organization=None, pinecone_api_key=No return_messages=True, ) message_history = get_message_history(messages) - user_messages = get_messages_for_role( - message_history, OpenAIMessageKeys.OPENAI_USER_MESSAGE_KEY - ) + user_messages = get_messages_for_role(message_history, OpenAIMessageKeys.OPENAI_USER_MESSAGE_KEY) assistant_messages = get_messages_for_role( message_history, OpenAIMessageKeys.OPENAI_ASSISTANT_MESSAGE_KEY ) @@ -211,9 +203,7 @@ def handler(event, context, api_key=None, organization=None, pinecone_api_key=No openai_results = openai.Moderation.create(input=input_text) case OpenAIEndPoint.Models: - openai_results = ( - openai.Model.retrieve(model) if model else openai.Model.list() - ) + openai_results = openai.Model.retrieve(model) if model else openai.Model.list() case OpenAIEndPoint.Audio: raise NotImplementedError("Audio support is coming soon") @@ -221,9 +211,7 @@ def handler(event, context, api_key=None, organization=None, pinecone_api_key=No # handle anything that went wrong except (openai.APIError, ValueError, TypeError, NotImplementedError) as e: # 400 Bad Request - return http_response_factory( - status_code=HTTP_RESPONSE_BAD_REQUEST, body=exception_response_factory(e) - ) + return http_response_factory(status_code=HTTP_RESPONSE_BAD_REQUEST, body=exception_response_factory(e)) except (openai.OpenAIError, Exception) as e: # 500 Internal Server Error return http_response_factory( @@ -232,6 +220,4 @@ def handler(event, context, api_key=None, organization=None, pinecone_api_key=No ) # success!! return the results - return http_response_factory( - status_code=HTTP_RESPONSE_OK, body={**openai_results, **request_meta_data} - ) + return http_response_factory(status_code=HTTP_RESPONSE_OK, body={**openai_results, **request_meta_data}) diff --git a/api/terraform/python/lambda_langchain/tests/test_01.py b/api/terraform/python/lambda_langchain/tests/test_01.py index 5529446b..04248c31 100644 --- a/api/terraform/python/lambda_langchain/tests/test_01.py +++ b/api/terraform/python/lambda_langchain/tests/test_01.py @@ -2,11 +2,13 @@ """ Test requests to the OpenAI API via Langchain using the Lambda Layer, 'genai'. 
""" -import pytest import os -from dotenv import load_dotenv, find_dotenv -from lambda_langchain.tests.init import get_event + +import pytest +from dotenv import find_dotenv, load_dotenv from lambda_langchain.lambda_handler import handler +from lambda_langchain.tests.init import get_event + # Load environment variables from .env file in all folders dotenv_path = find_dotenv() diff --git a/api/terraform/python/lambda_openai/lambda_handler.py b/api/terraform/python/lambda_openai/lambda_handler.py index 263cc96f..ef0ddb0e 100644 --- a/api/terraform/python/lambda_openai/lambda_handler.py +++ b/api/terraform/python/lambda_openai/lambda_handler.py @@ -44,12 +44,14 @@ import base64 import json # library for interacting with JSON data https://www.json.org/json-en.html -import openai import os # library for interacting with the operating system import platform # library to view informatoin about the server host this Lambda runs on import sys # libraries for error management import traceback # libraries for error management +import openai + + DEBUG_MODE = os.getenv("DEBUG_MODE", "False").lower() in ("true", "1", "t") HTTP_RESPONSE_OK = 200 HTTP_RESPONSE_BAD_REQUEST = 400 @@ -108,11 +110,7 @@ def http_response_factory(status_code: int, body) -> dict: see https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html """ if status_code < 100 or status_code > 599: - raise ValueError( - "Invalid HTTP response code received: {status_code}".format( - status_code=status_code - ) - ) + raise ValueError("Invalid HTTP response code received: {status_code}".format(status_code=status_code)) retval = { "isBase64Encoded": False, @@ -165,21 +163,11 @@ def validate_messages(request_body): raise ValueError("dict key 'messages' should be a JSON list") for message in messages: if type(message) is not dict: - raise ValueError( - "invalid ojbect type {t} found in messages list".format(t=type(message)) - ) + raise ValueError("invalid ojbect type {t} found in messages list".format(t=type(message))) if "role" not in message: - raise ValueError( - "dict key 'role' not found in message {m}".format( - m=json.dumps(message, indent=4) - ) - ) + raise ValueError("dict key 'role' not found in message {m}".format(m=json.dumps(message, indent=4))) if "content" not in message: - raise ValueError( - "dict key 'content' not found in message {m}".format( - m=json.dumps(message, indent=4) - ) - ) + raise ValueError("dict key 'content' not found in message {m}".format(m=json.dumps(message, indent=4))) def validate_completion_request(request_body) -> None: @@ -294,9 +282,7 @@ def handler(event, context): item_type="ChatCompletion models", ) validate_completion_request(request_body) - openai_results = openai.ChatCompletion.create( - model=model, messages=messages - ) + openai_results = openai.ChatCompletion.create(model=model, messages=messages) case OpenAIEndPoint.Embedding: # https://platform.openai.com/docs/guides/embeddings/embeddings @@ -319,9 +305,7 @@ def handler(event, context): openai_results = openai.Moderation.create(input=input_text) case OpenAIEndPoint.Models: - openai_results = ( - openai.Model.retrieve(model) if model else openai.Model.list() - ) + openai_results = openai.Model.retrieve(model) if model else openai.Model.list() case OpenAIEndPoint.Audio: raise NotImplementedError("Audio support is coming soon") @@ -329,9 +313,7 @@ def handler(event, context): # handle anything that went wrong except (openai.APIError, ValueError, TypeError, NotImplementedError) as e: # 400 Bad Request - return http_response_factory( - 
status_code=HTTP_RESPONSE_BAD_REQUEST, body=exception_response_factory(e) - ) + return http_response_factory(status_code=HTTP_RESPONSE_BAD_REQUEST, body=exception_response_factory(e)) except (openai.OpenAIError, Exception) as e: # 500 Internal Server Error return http_response_factory( diff --git a/api/terraform/python/lambda_openai/tests/test_init.py b/api/terraform/python/lambda_openai/tests/test_init.py index a7899582..859d97ca 100644 --- a/api/terraform/python/lambda_openai/tests/test_init.py +++ b/api/terraform/python/lambda_openai/tests/test_init.py @@ -1,10 +1,12 @@ # -*- coding: utf-8 -*- """Shared code for testing the lambda function""" -from dotenv import load_dotenv, find_dotenv -import os import json +import os + +from dotenv import find_dotenv, load_dotenv from lambda_openai.lambda_handler import handler + # Load environment variables from .env file in all folders dotenv_path = find_dotenv() if os.path.exists(dotenv_path): diff --git a/api/terraform/python/lambda_openai_v2/lambda_handler.py b/api/terraform/python/lambda_openai_v2/lambda_handler.py index a53f1c8f..411e7af8 100644 --- a/api/terraform/python/lambda_openai_v2/lambda_handler.py +++ b/api/terraform/python/lambda_openai_v2/lambda_handler.py @@ -31,26 +31,27 @@ # ----------------------- import openai from openai_utils.const import ( - OpenAIEndPoint, - HTTP_RESPONSE_OK, HTTP_RESPONSE_BAD_REQUEST, HTTP_RESPONSE_INTERNAL_SERVER_ERROR, + HTTP_RESPONSE_OK, VALID_CHAT_COMPLETION_MODELS, VALID_EMBEDDING_MODELS, + OpenAIEndPoint, ) from openai_utils.utils import ( - http_response_factory, - exception_response_factory, dump_environment, + exception_response_factory, get_request_body, + http_response_factory, parse_request, ) from openai_utils.validators import ( - validate_item, validate_completion_request, validate_embedding_request, + validate_item, ) + DEBUG_MODE = os.getenv("DEBUG_MODE", "False").lower() in ("true", "1", "t") # https://platform.openai.com/api_keys @@ -71,9 +72,7 @@ def handler(event, context): try: openai_results = {} request_body = get_request_body(event=event) - end_point, model, messages, input_text, temperature, max_tokens = parse_request( - request_body - ) + end_point, model, messages, input_text, temperature, max_tokens = parse_request(request_body) request_meta_data = { "request_meta_data": { "lambda": "lambda_openai_v2", @@ -121,9 +120,7 @@ def handler(event, context): openai_results = openai.Moderation.create(input=input_text) case OpenAIEndPoint.Models: - openai_results = ( - openai.Model.retrieve(model) if model else openai.Model.list() - ) + openai_results = openai.Model.retrieve(model) if model else openai.Model.list() case OpenAIEndPoint.Audio: raise NotImplementedError("Audio support is coming soon") @@ -131,9 +128,7 @@ def handler(event, context): # handle anything that went wrong except (openai.APIError, ValueError, TypeError, NotImplementedError) as e: # 400 Bad Request - return http_response_factory( - status_code=HTTP_RESPONSE_BAD_REQUEST, body=exception_response_factory(e) - ) + return http_response_factory(status_code=HTTP_RESPONSE_BAD_REQUEST, body=exception_response_factory(e)) except (openai.OpenAIError, Exception) as e: # 500 Internal Server Error return http_response_factory( diff --git a/api/terraform/python/lambda_openai_v2/tests/__init__.py b/api/terraform/python/lambda_openai_v2/tests/__init__.py index 33b22ea4..3d844456 100644 --- a/api/terraform/python/lambda_openai_v2/tests/__init__.py +++ b/api/terraform/python/lambda_openai_v2/tests/__init__.py @@ -1,6 +1,8 @@ -from 
dotenv import load_dotenv, find_dotenv import os +from dotenv import find_dotenv, load_dotenv + + # Load environment variables from .env file in all folders dotenv_path = find_dotenv() if os.path.exists(dotenv_path): diff --git a/api/terraform/python/lambda_openai_v2/tests/test_01.py b/api/terraform/python/lambda_openai_v2/tests/test_01.py index 6375bd6d..6531f9c4 100644 --- a/api/terraform/python/lambda_openai_v2/tests/test_01.py +++ b/api/terraform/python/lambda_openai_v2/tests/test_01.py @@ -2,11 +2,13 @@ """ Test requests to the OpenAI API using the Lambda Layer, 'genai'. """ -import pytest import os -from dotenv import load_dotenv, find_dotenv -from lambda_openai_v2.tests.init import get_event + +import pytest +from dotenv import find_dotenv, load_dotenv from lambda_openai_v2.lambda_handler import handler +from lambda_openai_v2.tests.init import get_event + # Load environment variables from .env file in all folders dotenv_path = find_dotenv() diff --git a/api/terraform/python/layer_genai/openai_utils/const.py b/api/terraform/python/layer_genai/openai_utils/const.py index 4fdfed19..b1ac0476 100644 --- a/api/terraform/python/layer_genai/openai_utils/const.py +++ b/api/terraform/python/layer_genai/openai_utils/const.py @@ -1,8 +1,10 @@ # -*- coding: utf-8 -*- """A module containing constants for the OpenAI API.""" import os + import openai + HTTP_RESPONSE_OK = 200 HTTP_RESPONSE_BAD_REQUEST = 400 HTTP_RESPONSE_INTERNAL_SERVER_ERROR = 500 diff --git a/api/terraform/python/layer_genai/openai_utils/setup.py b/api/terraform/python/layer_genai/openai_utils/setup.py index 6612cffd..843cbba4 100644 --- a/api/terraform/python/layer_genai/openai_utils/setup.py +++ b/api/terraform/python/layer_genai/openai_utils/setup.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- """Setup for openai_utils package.""" -from setuptools import setup, find_packages - from setup_utils import get_semantic_version +from setuptools import find_packages, setup + setup( name="openai_utils", diff --git a/api/terraform/python/layer_genai/openai_utils/utils.py b/api/terraform/python/layer_genai/openai_utils/utils.py index 03365845..ffd05178 100644 --- a/api/terraform/python/layer_genai/openai_utils/utils.py +++ b/api/terraform/python/layer_genai/openai_utils/utils.py @@ -2,24 +2,24 @@ """Utility functions for the OpenAI Lambda functions""" import base64 import json # library for interacting with JSON data https://www.json.org/json-en.html -import openai import os # library for interacting with the operating system import platform # library to view informatoin about the server host this Lambda runs on import sys # libraries for error management import traceback # libraries for error management +import openai from openai_utils.const import ( - OpenAIEndPoint, DEBUG_MODE, LANGCHAIN_MESSAGE_HISTORY_ROLES, + OpenAIEndPoint, ) from openai_utils.validators import ( + validate_endpoint, validate_item, - validate_request_body, + validate_max_tokens, validate_messages, + validate_request_body, validate_temperature, - validate_max_tokens, - validate_endpoint, ) @@ -33,11 +33,7 @@ def http_response_factory(status_code: int, body) -> dict: see https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html """ if status_code < 100 or status_code > 599: - raise ValueError( - "Invalid HTTP response code received: {status_code}".format( - status_code=status_code - ) - ) + raise ValueError("Invalid HTTP response code received: {status_code}".format(status_code=status_code)) retval = { "isBase64Encoded": False, diff --git a/pyproject.toml 
b/pyproject.toml new file mode 100644 index 00000000..5ab74c62 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,23 @@ +[tool.isort] +profile = "black" +lines_after_imports = 2 + +[tool.black] +line-length = 120 +target-version = ['py311'] +include = '\.pyi?$' +exclude = ''' +/( + \.git + | \.hg + | \.mypy_cache + | \.tox + | \.venv + | venv + | build + | buck-out + | build + | dist + | lambda_dist_pkg +)/ +''' diff --git a/tox.ini b/tox.ini index 24a61b09..e71f2689 100644 --- a/tox.ini +++ b/tox.ini @@ -4,11 +4,10 @@ envlist = py3.11,flake8 skip_missing_interpreters = true -[isort] +[tool.isort] +profile = "black" skip =venv,node_modules,api/terraform/python/lambda_openai/lambda_dist_pkg,api/terraform/python/lambda_openai/venv -[mypy] -files = api/terraform/python/ [gh-actions] python = From e2d3b99558d1007e9621776e9730f29bd625dfd6 Mon Sep 17 00:00:00 2001 From: lpm0073 Date: Sat, 18 Nov 2023 18:10:55 -0600 Subject: [PATCH 03/10] style: reformat json with prettier --- .pre-commit-config.yaml | 15 +- .vscode/extensions.json | 4 +- .vscode/settings.json | 3 +- api/postman/OpenAI.postman_collection.json | 2722 ++++++++--------- api/postman/prod.postman_environment.json | 50 +- api/terraform/json/iam_policy_apigateway.json | 25 +- api/terraform/json/iam_policy_lambda.json | 28 +- api/terraform/json/iam_role_apigateway.json | 14 +- api/terraform/json/iam_role_lambda.json | 4 +- .../tests/events/test_01.request.json | 2 +- .../openai_utils/data/example_request.json | 16 +- api/terraform/templates/test_200.json | 34 +- api/terraform/templates/test_400.json | 4 +- .../test/events/openai.response.v0.4.0.json | 36 +- ...gateway_endpoint_airport_codes_result.json | 36 +- doc/apigateway_event_request.json | 140 +- ...ateway-response-after-transformations.json | 38 +- ...teway-response-before-transformations.json | 38 +- doc/cloudwatch-lambda-environment-dump.json | 28 +- doc/cloudwatch-lambda-event-dump.json | 28 +- doc/cloudwatch-lambda-response-dump.json | 50 +- doc/openai.chat.completion.chunk.json | 16 +- doc/openai.chat.completion.request.body.json | 9 +- doc/openai.chat.completion.response.json | 22 +- 24 files changed, 1561 insertions(+), 1801 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 31e73f6b..0796359c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,10 +2,14 @@ default_language_version: # default language version for each language python: python3.11 repos: - - repo: https://github.com/pre-commit/mirrors-eslint - rev: v8.54.0 + - repo: local hooks: - id: eslint + name: eslint + entry: bash -c 'cd client && npx eslint' + language: node + types: [javascript] + files: ^client/ - repo: https://github.com/pre-commit/mirrors-prettier rev: v3.1.0 hooks: @@ -23,6 +27,13 @@ repos: hooks: - id: isort args: ["--settings-path=pyproject.toml"] + - repo: local + hooks: + - id: pylint + name: pylint + entry: ./run_pylint.sh + language: script + types: [python] - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.5.0 hooks: diff --git a/.vscode/extensions.json b/.vscode/extensions.json index 0b4f1010..b5cd5896 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -1,5 +1,3 @@ { - "recommendations": [ - "ms-python.black-formatter" - ] + "recommendations": ["ms-python.black-formatter"] } diff --git a/.vscode/settings.json b/.vscode/settings.json index 9aa66ae2..488c1b56 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -2,4 +2,5 @@ "cornflakes.linter.executablePath": 
"/Users/mcdaniel/desktop/aws-openai/venv/bin/flake8", "[python]": { "editor.defaultFormatter": "ms-python.black-formatter" - }} + } +} diff --git a/api/postman/OpenAI.postman_collection.json b/api/postman/OpenAI.postman_collection.json index 502ba6f2..75b226c7 100644 --- a/api/postman/OpenAI.postman_collection.json +++ b/api/postman/OpenAI.postman_collection.json @@ -1,1479 +1,1247 @@ { - "info": { - "_postman_id": "09bb3fde-e486-4e10-a3cc-f7f7184fcc41", - "name": "OpenAI", - "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json", - "_exporter_id": "2085624" - }, - "item": [ - { - "name": "passthrough", - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"model\": \"gpt-3.5-turbo\",\n \"end_point\": \"ChatCompletion\",\n \"temperature\": 0.5,\n \"max_tokens\": 256,\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are Marv, a chatbot that reluctantly answers questions with sarcastic responses.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Marv, I'd like to introduce you to all the nice YouTube viewers.\"\n }\n ]\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/passthrough", - "host": [ - "{{base_url}}" - ], - "path": [ - "passthrough" - ] - } - }, - "response": [] - }, - { - "name": "passthrough_v2", - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"model\": \"gpt-3.5-turbo\",\n \"end_point\": \"ChatCompletion\",\n \"temperature\": 0.5,\n \"max_tokens\": 256,\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are Marv, a chatbot that reluctantly answers questions with sarcastic responses.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Marv, I'd like to introduce you to all the nice YouTube viewers.\"\n }\n ],\n \"chat_history\": [\n {\n \"message\": \"Hello, I'm Marv, a sarcastic chatbot.\",\n \"direction\": \"incoming\",\n \"sentTime\": \"11/16/2023, 5:53:32 PM\",\n \"sender\": \"system\"\n }\n ]\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/passthrough_v2", - "host": [ - "{{base_url}}" - ], - "path": [ - "passthrough_v2" - ] - } - }, - "response": [] - }, - { - "name": "langchain-passthrough", - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"model\": \"gpt-3.5-turbo\",\n \"end_point\": \"ChatCompletion\",\n \"temperature\": 0.5,\n \"max_tokens\": 256,\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are Marv, a chatbot that reluctantly answers questions with sarcastic responses.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"'sup Chuck?\"\n },\n {\n \"role\": \"assistant\",\n \"content\": \"Oh, you know, just chillin and hoping that you'll ask me a question.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"What's the meaning of life?\"\n },\n {\n \"role\": \"assistant\",\n \"content\": \"I know!! I know!!! 
It's 42!!!\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Please be more specific.\"\n }\n ],\n \"chat_history\": [\n {\n \"message\": \"Hello, I'm Marv, a sarcastic chatbot.\",\n \"direction\": \"incoming\",\n \"sentTime\": \"11/16/2023, 5:53:32 PM\",\n \"sender\": \"system\"\n }\n ]\n}\n", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/langchain", - "host": [ - "{{base_url}}" - ], - "path": [ - "langchain" - ] - } - }, - "response": [] - }, - { - "name": "test-00-test_200", - "request": { - "method": "POST", - "header": [], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"this is a test\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/tests/test_200", - "host": [ - "{{base_url}}" - ], - "path": [ - "tests", - "test_200" - ] - } - }, - "response": [] - }, - { - "name": "test-00-test_400", - "request": { - "method": "POST", - "header": [], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"this is a test\",\n \"chat_history\": []\n\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/tests/test_400", - "host": [ - "{{base_url}}" - ], - "path": [ - "tests", - "test_400" - ] - } - }, - "response": [] - }, - { - "name": "test-00-test_500", - "request": { - "method": "POST", - "header": [], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"this is a test\",\n \"chat_history\": []\n\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/tests/test_500", - "host": [ - "{{base_url}}" - ], - "path": [ - "tests", - "test_500" - ] - } - }, - "response": [] - }, - { - "name": "test-00-test_504", - "request": { - "method": "POST", - "header": [], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"this is a test\",\n \"chat_history\": []\n\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/tests/test_504", - "host": [ - "{{base_url}}" - ], - "path": [ - "tests", - "test_504" - ] - } - }, - "response": [] - }, - { - "name": "example-01-grammar", - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"she broked the lamp post\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-grammar", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-grammar" - ] - } - }, - "response": [] - }, - { - "name": "example-02-summarize", - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"Jupiter is a really big planet in our Solar System. It is the fifth planet from the Sun and it is the largest planet. It is called a gas giant because it is made mostly of gas. Jupiter is much smaller than the Sun, but it is bigger than all the other planets combined. It is very bright and can be seen in the night sky without a telescope. People have known about Jupiter for a very long time, even before they started writing things down. It is named after a god from ancient Rome. Sometimes, Jupiter is so bright that it can make shadows on Earth. 
It is usually the third-brightest thing we can see in the night sky, after the Moon and Venus.\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-summarize", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-summarize" - ] - } - }, - "response": [] - }, - { - "name": "example-03-parse-data", - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"There are many fruits that were found on the recently discovered planet Goocrux. There are neoskizzles that grow there, which are purple and taste like candy. There are also loheckles, which are a grayish blue fruit and are very tart, a little bit like a lemon. Pounits are a bright green color and are more savory than sweet. There are also plenty of loopnovas which are a neon pink flavor and taste like cotton candy. Finally, there are fruits called glowls, which have a very sour and bitter taste which is acidic and caustic, and a pale orange tinge to them.\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-parse-data", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-parse-data" - ] - } - }, - "response": [] - }, - { - "name": "example-04-emoji-translation", - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"Artificial intelligence is a technology with great promise.\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-emoji-translation", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-emoji-translation" - ] - } - }, - "response": [] - }, - { - "name": "example-04-emoji-chat", - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"how are you?\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-emoji-chatbot", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-emoji-chatbot" - ] - } - }, - "response": [] - }, - { - "name": "example-05-time-complexity", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"def foo(n, k):\\\\r\\\\n accum = 0\\\\r\\\\n for i in range(n):\\\\r\\\\n for l in range(k):\\\\r\\\\n accum += i\\\\r\\\\n return accum\\\\r\\\\n\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-time-complexity", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-time-complexity" - ] - } - }, - "response": [] - }, - { - "name": "example-06-explain-code", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - 
"value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"class Log:\\\\r\\\\n def __init__(self, path):\\\\r\\\\n dirname = os.path.dirname(path)\\\\r\\\\n os.makedirs(dirname, exist_ok=True)\\\\r\\\\n f = open(path, 'a+')\\\\r\\\\n\\\\r\\\\n # Check that the file is newline-terminated\\\\r\\\\n size = os.path.getsize(path)\\\\r\\\\n if size > 0:\\\\r\\\\n f.seek(size - 1)\\\\r\\\\n end = f.read(1)\\\\r\\\\n if end != '\\n':\\\\r\\\\n f.write('\\n')\\\\r\\\\n self.f = f\\\\r\\\\n self.path = path\\\\r\\\\n\\\\r\\\\n def log(self, event):\\\\r\\\\n event['_event_id'] = str(uuid.uuid4())\\\\r\\\\n json.dump(event, self.f)\\\\r\\\\n self.f.write('\\n')\\\\r\\\\n\\\\r\\\\n def state(self):\\\\r\\\\n state = {'complete': set(), 'last': None}\\\\r\\\\n for line in open(self.path):\\\\r\\\\n event = json.loads(line)\\\\r\\\\n if event['type'] == 'submit' and event['success']:\\\\r\\\\n state['complete'].add(event['id'])\\\\r\\\\n state['last'] = event\\\\r\\\\n return state\\\\r\\\\n\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-explain-code", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-explain-code" - ] - } - }, - "response": [] - }, - { - "name": "example-07-keywords", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"Black-on-black ware is a 20th- and 21st-century pottery tradition developed by the Puebloan Native American ceramic artists in Northern New Mexico. Traditional reduction-fired blackware has been made for centuries by pueblo artists. Black-on-black ware of the past century is produced with a smooth surface, with the designs applied through selective burnishing or the application of refractory slip. Another style involves carving or incising designs and selectively polishing the raised areas. For generations several families from Kha'po Owingeh and P'ohwhóge Owingeh pueblos have been making black-on-black ware with the techniques passed down from matriarch potters. Artists from other pueblos have also produced black-on-black ware. Several contemporary artists have created works honoring the pottery of their ancestors.\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-keywords", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-keywords" - ] - } - }, - "response": [] - }, - { - "name": "example-08-product-name-gen", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"Product description: A home milkshake maker. 
Seed words: fast, healthy, compact.\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-product-name-gen", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-product-name-gen" - ] - } - }, - "response": [] - }, - { - "name": "example-09-fix-python-bugs", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"import Random\\\\r\\\\na = random.randint(1,12)\\\\r\\\\nb = random.randint(1,12)\\\\r\\\\nfor i in range(10):\\\\r\\\\n question = 'What is '+a+' x '+b+'? '\\\\r\\\\n answer = input(question)\\\\r\\\\n if answer = a*b\\\\r\\\\n print (Well done!)\\\\r\\\\n else:\\\\r\\\\n print('No.')\\\\r\\\\n\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-fix-python-bugs", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-fix-python-bugs" - ] - } - }, - "response": [] - }, - { - "name": "example-10-spreadsheet-gen", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"Create a two-column CSV of top science fiction movies along with the year of release.\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-spreadsheet-gen", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-spreadsheet-gen" - ] - } - }, - "response": [] - }, - { - "name": "example-11-tweet-classifier", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"I loved the new Batman movie!\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-tweet-classifier", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-tweet-classifier" - ] - } - }, - "response": [] - }, - { - "name": "example-12-airport-codes", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"I want to fly from Orlando to Boston\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-airport-codes", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-airport-codes" - ] - } - }, - "response": [] - }, - { - "name": "example-13-mood-color", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": 
"{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"Blue sky at dusk.\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-mood-color", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-mood-color" - ] - } - }, - "response": [] - }, - { - "name": "example-14-vr-fitness", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"Brainstorm some ideas combining VR and fitness.\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-vr-fitness", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-vr-fitness" - ] - } - }, - "response": [] - }, - { - "name": "example-15-marv-sarcastic-chat", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"Marv, I'd like to introduce you to all the nice YouTube viewers.\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-marv-sarcastic-chat", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-marv-sarcastic-chat" - ] - } - }, - "response": [] - }, - { - "name": "example-16-turn-by-turn-directions", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"Go south on 95 until you hit Sunrise boulevard then take it east to us 1 and head south. 
Tom Jenkins bbq will be on the left after several miles.\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-turn-by-turn-directions", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-turn-by-turn-directions" - ] - } - }, - "response": [] - }, - { - "name": "example-17-interview-questions", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"Create a list of 8 questions for an interview with a science fiction author.\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-interview-questions", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-interview-questions" - ] - } - }, - "response": [] - }, - { - "name": "example-18-function-from-spec", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"Write a Python function that takes as input a file path to an image, loads the image into memory as a numpy array, then crops the rows and columns around the perimeter if they are darker than a threshold value. Use the mean value of rows and columns to decide if they should be marked for deletion.\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-function-from-spec", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-function-from-spec" - ] - } - }, - "response": [] - }, - { - "name": "example-19-code-improvement", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"from typing import List\\\\r\\\\n\\\\r\\\\ndef has_sum_k(nums: List[int], k: int) -> bool:\\\\r\\\\n n = len(nums)\\\\r\\\\n for i in range(n):\\\\r\\\\n for j in range(i+1, n):\\\\r\\\\n if nums[i] + nums[j] == k:\\\\r\\\\n return True\\\\r\\\\n return False\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-code-improvement", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-code-improvement" - ] - } - }, - "response": [] - }, - { - "name": "example-20-single-page-website", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"Make a single page website that shows off different neat javascript features for drop-downs and things to display information. 
The website should be an HTML file with embedded javascript and CSS.\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-single-page-website", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-single-page-website" - ] - } - }, - "response": [] - }, - { - "name": "example-21-rap-battle", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"Write a rap battle between Alan Turing and Claude Shannon.\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-rap-battle", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-rap-battle" - ] - } - }, - "response": [] - }, - { - "name": "example-22-memo-writer", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"Draft a company memo to be distributed to all employees. The memo should cover the following specific points without deviating from the topics mentioned and not writing any fact which is not present here:\\n\\nIntroduction: Remind employees about the upcoming quarterly review scheduled for the last week of April.\\n\\nPerformance Metrics: Clearly state the three key performance indicators (KPIs) that will be assessed during the review: sales targets, customer satisfaction (measured by net promoter score), and process efficiency (measured by average project completion time).\\n\\nProject Updates: Provide a brief update on the status of the three ongoing company projects:\\n\\na. Project Alpha: 75% complete, expected completion by May 30th.\\nb. Project Beta: 50% complete, expected completion by June 15th.\\nc. Project Gamma: 30% complete, expected completion by July 31st.\\n\\nTeam Recognition: Announce that the Sales Team was the top-performing team of the past quarter and congratulate them for achieving 120% of their target.\\n\\nTraining Opportunities: Inform employees about the upcoming training workshops that will be held in May, including \\\"Advanced Customer Service\\\" on May 10th and \\\"Project Management Essentials\\\" on May 25th.\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-memo-writer", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-memo-writer" - ] - } - }, - "response": [] - }, - { - "name": "example-24-translation", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"My name is Jane. 
What is yours?\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-translation", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-translation" - ] - } - }, - "response": [] - }, - { - "name": "example-25-socratic-tutor", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"Help me to understand the future of artificial intelligence.\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-socratic-tutor", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-socratic-tutor" - ] - } - }, - "response": [] - }, - { - "name": "example-26-sql-translate", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"Write a SQL query which computes the average total order value for all orders on 2023-04-01.\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-sql-translate", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-sql-translate" - ] - } - }, - "response": [] - }, - { - "name": "example-27-notes-summarizer", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"Meeting Date: March 5th, 2050\\nMeeting Time: 2:00 PM\\nLocation: Conference Room 3B, Intergalactic Headquarters\\n\\nAttendees:\\n- Captain Stardust\\n- Dr. Quasar\\n- Lady Nebula\\n- Sir Supernova\\n- Ms. Comet\\n\\nMeeting called to order by Captain Stardust at 2:05 PM\\n\\n1. Introductions and welcome to our newest team member, Ms. Comet\\n\\n2. Discussion of our recent mission to Planet Zog\\n- Captain Stardust: \\\"Overall, a success, but communication with the Zogians was difficult. We need to improve our language skills.\\\"\\n- Dr. Quasar: \\\"Agreed. I'll start working on a Zogian-English dictionary right away.\\\"\\n- Lady Nebula: \\\"The Zogian food was out of this world, literally! We should consider having a Zogian food night on the ship.\\\"\\n\\n3. Addressing the space pirate issue in Sector 7\\n- Sir Supernova: \\\"We need a better strategy for dealing with these pirates. They've already plundered three cargo ships this month.\\\"\\n- Captain Stardust: \\\"I'll speak with Admiral Starbeam about increasing patrols in that area.\\n- Dr. Quasar: \\\"I've been working on a new cloaking technology that could help our ships avoid detection by the pirates. I'll need a few more weeks to finalize the prototype.\\\"\\n\\n4. Review of the annual Intergalactic Bake-Off\\n- Lady Nebula: \\\"I'm happy to report that our team placed second in the competition! Our Martian Mud Pie was a big hit!\\\"\\n- Ms. Comet: \\\"Let's aim for first place next year. 
I have a secret recipe for Jupiter Jello that I think could be a winner.\\\"\\n\\n5. Planning for the upcoming charity fundraiser\\n- Captain Stardust: \\\"We need some creative ideas for our booth at the Intergalactic Charity Bazaar.\\\"\\n- Sir Supernova: \\\"How about a 'Dunk the Alien' game? We can have people throw water balloons at a volunteer dressed as an alien.\\\"\\n- Dr. Quasar: \\\"I can set up a 'Name That Star' trivia game with prizes for the winners.\\\"\\n- Lady Nebula: \\\"Great ideas, everyone. Let's start gathering the supplies and preparing the games.\\\"\\n\\n6. Upcoming team-building retreat\\n- Ms. Comet: \\\"I would like to propose a team-building retreat to the Moon Resort and Spa. It's a great opportunity to bond and relax after our recent missions.\\\"\\n- Captain Stardust: \\\"Sounds like a fantastic idea. I'll check the budget and see if we can make it happen.\\\"\\n\\n7. Next meeting agenda items\\n- Update on the Zogian-English dictionary (Dr. Quasar)\\n- Progress report on the cloaking technology (Dr. Quasar)\\n- Results of increased patrols in Sector 7 (Captain Stardust)\\n- Final preparations for the Intergalactic Charity Bazaar (All)\\n\\nMeeting adjourned at 3:15 PM. Next meeting scheduled for March 19th, 2050 at 2:00 PM in Conference Room 3B, Intergalactic Headquarters.\\n\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-notes-summarizer", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-notes-summarizer" - ] - } - }, - "response": [] - }, - { - "name": "example-28-review-classifier", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"I recently purchased the Inflatotron 2000 airbed for a camping trip and wanted to share my experience with others. Overall, I found the airbed to be a mixed bag with some positives and negatives.\\\\Starting with the positives, the Inflatotron 2000 is incredibly easy to set up and inflate. It comes with a built-in electric pump that quickly inflates the bed within a few minutes, which is a huge plus for anyone who wants to avoid the hassle of manually pumping up their airbed. The bed is also quite comfortable to sleep on and offers decent support for your back, which is a major plus if you have any issues with back pain.\\\\On the other hand, I did experience some negatives with the Inflatotron 2000. Firstly, I found that the airbed is not very durable and punctures easily. During my camping trip, the bed got punctured by a stray twig that had fallen on it, which was quite frustrating. Secondly, I noticed that the airbed tends to lose air overnight, which meant that I had to constantly re-inflate it every morning. This was a bit annoying as it disrupted my sleep and made me feel less rested in the morning.\\\\Another negative point is that the Inflatotron 2000 is quite heavy and bulky, which makes it difficult to transport and store. 
If you're planning on using this airbed for camping or other outdoor activities, you'll need to have a large enough vehicle to transport it and a decent amount of storage space to store it when not in use.\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-review-classifier", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-review-classifier" - ] - } - }, - "response": [] - }, - { - "name": "example-29-pro-con-discusser", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"changing careers at 60\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-pro-con-discusser", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-pro-con-discusser" - ] - } - }, - "response": [] - }, - { - "name": "example-30-lesson-plan-writer", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "x-api-key", - "value": "{{api_key}}", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"input_text\": \"Write a lesson plan for an introductory algebra class. The lesson plan should cover the distributive law, in particular how it works in simple cases involving mixes of positive and negative numbers. Come up with some examples that show common student errors.\",\n \"chat_history\": []\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{base_url}}/examples/default-lesson-plan-writer", - "host": [ - "{{base_url}}" - ], - "path": [ - "examples", - "default-lesson-plan-writer" - ] - } - }, - "response": [] - } - ] + "info": { + "_postman_id": "09bb3fde-e486-4e10-a3cc-f7f7184fcc41", + "name": "OpenAI", + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json", + "_exporter_id": "2085624" + }, + "item": [ + { + "name": "passthrough", + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"model\": \"gpt-3.5-turbo\",\n \"end_point\": \"ChatCompletion\",\n \"temperature\": 0.5,\n \"max_tokens\": 256,\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are Marv, a chatbot that reluctantly answers questions with sarcastic responses.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Marv, I'd like to introduce you to all the nice YouTube viewers.\"\n }\n ]\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/passthrough", + "host": ["{{base_url}}"], + "path": ["passthrough"] + } + }, + "response": [] + }, + { + "name": "passthrough_v2", + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"model\": \"gpt-3.5-turbo\",\n \"end_point\": \"ChatCompletion\",\n \"temperature\": 0.5,\n \"max_tokens\": 256,\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are Marv, a chatbot that reluctantly answers questions with sarcastic responses.\"\n },\n {\n \"role\": 
\"user\",\n \"content\": \"Marv, I'd like to introduce you to all the nice YouTube viewers.\"\n }\n ],\n \"chat_history\": [\n {\n \"message\": \"Hello, I'm Marv, a sarcastic chatbot.\",\n \"direction\": \"incoming\",\n \"sentTime\": \"11/16/2023, 5:53:32 PM\",\n \"sender\": \"system\"\n }\n ]\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/passthrough_v2", + "host": ["{{base_url}}"], + "path": ["passthrough_v2"] + } + }, + "response": [] + }, + { + "name": "langchain-passthrough", + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"model\": \"gpt-3.5-turbo\",\n \"end_point\": \"ChatCompletion\",\n \"temperature\": 0.5,\n \"max_tokens\": 256,\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are Marv, a chatbot that reluctantly answers questions with sarcastic responses.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"'sup Chuck?\"\n },\n {\n \"role\": \"assistant\",\n \"content\": \"Oh, you know, just chillin and hoping that you'll ask me a question.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"What's the meaning of life?\"\n },\n {\n \"role\": \"assistant\",\n \"content\": \"I know!! I know!!! It's 42!!!\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Please be more specific.\"\n }\n ],\n \"chat_history\": [\n {\n \"message\": \"Hello, I'm Marv, a sarcastic chatbot.\",\n \"direction\": \"incoming\",\n \"sentTime\": \"11/16/2023, 5:53:32 PM\",\n \"sender\": \"system\"\n }\n ]\n}\n", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/langchain", + "host": ["{{base_url}}"], + "path": ["langchain"] + } + }, + "response": [] + }, + { + "name": "test-00-test_200", + "request": { + "method": "POST", + "header": [], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"this is a test\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/tests/test_200", + "host": ["{{base_url}}"], + "path": ["tests", "test_200"] + } + }, + "response": [] + }, + { + "name": "test-00-test_400", + "request": { + "method": "POST", + "header": [], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"this is a test\",\n \"chat_history\": []\n\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/tests/test_400", + "host": ["{{base_url}}"], + "path": ["tests", "test_400"] + } + }, + "response": [] + }, + { + "name": "test-00-test_500", + "request": { + "method": "POST", + "header": [], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"this is a test\",\n \"chat_history\": []\n\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/tests/test_500", + "host": ["{{base_url}}"], + "path": ["tests", "test_500"] + } + }, + "response": [] + }, + { + "name": "test-00-test_504", + "request": { + "method": "POST", + "header": [], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"this is a test\",\n \"chat_history\": []\n\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/tests/test_504", + "host": ["{{base_url}}"], + "path": ["tests", "test_504"] + } + }, + "response": [] + }, + { + "name": "example-01-grammar", + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + 
"mode": "raw", + "raw": "{\n \"input_text\": \"she broked the lamp post\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-grammar", + "host": ["{{base_url}}"], + "path": ["examples", "default-grammar"] + } + }, + "response": [] + }, + { + "name": "example-02-summarize", + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"Jupiter is a really big planet in our Solar System. It is the fifth planet from the Sun and it is the largest planet. It is called a gas giant because it is made mostly of gas. Jupiter is much smaller than the Sun, but it is bigger than all the other planets combined. It is very bright and can be seen in the night sky without a telescope. People have known about Jupiter for a very long time, even before they started writing things down. It is named after a god from ancient Rome. Sometimes, Jupiter is so bright that it can make shadows on Earth. It is usually the third-brightest thing we can see in the night sky, after the Moon and Venus.\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-summarize", + "host": ["{{base_url}}"], + "path": ["examples", "default-summarize"] + } + }, + "response": [] + }, + { + "name": "example-03-parse-data", + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"There are many fruits that were found on the recently discovered planet Goocrux. There are neoskizzles that grow there, which are purple and taste like candy. There are also loheckles, which are a grayish blue fruit and are very tart, a little bit like a lemon. Pounits are a bright green color and are more savory than sweet. There are also plenty of loopnovas which are a neon pink flavor and taste like cotton candy. 
Finally, there are fruits called glowls, which have a very sour and bitter taste which is acidic and caustic, and a pale orange tinge to them.\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-parse-data", + "host": ["{{base_url}}"], + "path": ["examples", "default-parse-data"] + } + }, + "response": [] + }, + { + "name": "example-04-emoji-translation", + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"Artificial intelligence is a technology with great promise.\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-emoji-translation", + "host": ["{{base_url}}"], + "path": ["examples", "default-emoji-translation"] + } + }, + "response": [] + }, + { + "name": "example-04-emoji-chat", + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"how are you?\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-emoji-chatbot", + "host": ["{{base_url}}"], + "path": ["examples", "default-emoji-chatbot"] + } + }, + "response": [] + }, + { + "name": "example-05-time-complexity", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"def foo(n, k):\\\\r\\\\n accum = 0\\\\r\\\\n for i in range(n):\\\\r\\\\n for l in range(k):\\\\r\\\\n accum += i\\\\r\\\\n return accum\\\\r\\\\n\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-time-complexity", + "host": ["{{base_url}}"], + "path": ["examples", "default-time-complexity"] + } + }, + "response": [] + }, + { + "name": "example-06-explain-code", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"class Log:\\\\r\\\\n def __init__(self, path):\\\\r\\\\n dirname = os.path.dirname(path)\\\\r\\\\n os.makedirs(dirname, exist_ok=True)\\\\r\\\\n f = open(path, 'a+')\\\\r\\\\n\\\\r\\\\n # Check that the file is newline-terminated\\\\r\\\\n size = os.path.getsize(path)\\\\r\\\\n if size > 0:\\\\r\\\\n f.seek(size - 1)\\\\r\\\\n end = f.read(1)\\\\r\\\\n if end != '\\n':\\\\r\\\\n f.write('\\n')\\\\r\\\\n self.f = f\\\\r\\\\n self.path = path\\\\r\\\\n\\\\r\\\\n def log(self, event):\\\\r\\\\n event['_event_id'] = str(uuid.uuid4())\\\\r\\\\n json.dump(event, self.f)\\\\r\\\\n self.f.write('\\n')\\\\r\\\\n\\\\r\\\\n def state(self):\\\\r\\\\n state = {'complete': set(), 'last': None}\\\\r\\\\n for line in open(self.path):\\\\r\\\\n event = json.loads(line)\\\\r\\\\n if event['type'] == 'submit' and event['success']:\\\\r\\\\n state['complete'].add(event['id'])\\\\r\\\\n state['last'] = event\\\\r\\\\n return state\\\\r\\\\n\",\n \"chat_history\": []\n}", + 
"options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-explain-code", + "host": ["{{base_url}}"], + "path": ["examples", "default-explain-code"] + } + }, + "response": [] + }, + { + "name": "example-07-keywords", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"Black-on-black ware is a 20th- and 21st-century pottery tradition developed by the Puebloan Native American ceramic artists in Northern New Mexico. Traditional reduction-fired blackware has been made for centuries by pueblo artists. Black-on-black ware of the past century is produced with a smooth surface, with the designs applied through selective burnishing or the application of refractory slip. Another style involves carving or incising designs and selectively polishing the raised areas. For generations several families from Kha'po Owingeh and P'ohwhóge Owingeh pueblos have been making black-on-black ware with the techniques passed down from matriarch potters. Artists from other pueblos have also produced black-on-black ware. Several contemporary artists have created works honoring the pottery of their ancestors.\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-keywords", + "host": ["{{base_url}}"], + "path": ["examples", "default-keywords"] + } + }, + "response": [] + }, + { + "name": "example-08-product-name-gen", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"Product description: A home milkshake maker. Seed words: fast, healthy, compact.\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-product-name-gen", + "host": ["{{base_url}}"], + "path": ["examples", "default-product-name-gen"] + } + }, + "response": [] + }, + { + "name": "example-09-fix-python-bugs", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"import Random\\\\r\\\\na = random.randint(1,12)\\\\r\\\\nb = random.randint(1,12)\\\\r\\\\nfor i in range(10):\\\\r\\\\n question = 'What is '+a+' x '+b+'? 
'\\\\r\\\\n answer = input(question)\\\\r\\\\n if answer = a*b\\\\r\\\\n print (Well done!)\\\\r\\\\n else:\\\\r\\\\n print('No.')\\\\r\\\\n\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-fix-python-bugs", + "host": ["{{base_url}}"], + "path": ["examples", "default-fix-python-bugs"] + } + }, + "response": [] + }, + { + "name": "example-10-spreadsheet-gen", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"Create a two-column CSV of top science fiction movies along with the year of release.\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-spreadsheet-gen", + "host": ["{{base_url}}"], + "path": ["examples", "default-spreadsheet-gen"] + } + }, + "response": [] + }, + { + "name": "example-11-tweet-classifier", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"I loved the new Batman movie!\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-tweet-classifier", + "host": ["{{base_url}}"], + "path": ["examples", "default-tweet-classifier"] + } + }, + "response": [] + }, + { + "name": "example-12-airport-codes", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"I want to fly from Orlando to Boston\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-airport-codes", + "host": ["{{base_url}}"], + "path": ["examples", "default-airport-codes"] + } + }, + "response": [] + }, + { + "name": "example-13-mood-color", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"Blue sky at dusk.\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-mood-color", + "host": ["{{base_url}}"], + "path": ["examples", "default-mood-color"] + } + }, + "response": [] + }, + { + "name": "example-14-vr-fitness", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"Brainstorm some ideas combining VR and fitness.\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": 
"{{base_url}}/examples/default-vr-fitness", + "host": ["{{base_url}}"], + "path": ["examples", "default-vr-fitness"] + } + }, + "response": [] + }, + { + "name": "example-15-marv-sarcastic-chat", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"Marv, I'd like to introduce you to all the nice YouTube viewers.\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-marv-sarcastic-chat", + "host": ["{{base_url}}"], + "path": ["examples", "default-marv-sarcastic-chat"] + } + }, + "response": [] + }, + { + "name": "example-16-turn-by-turn-directions", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"Go south on 95 until you hit Sunrise boulevard then take it east to us 1 and head south. Tom Jenkins bbq will be on the left after several miles.\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-turn-by-turn-directions", + "host": ["{{base_url}}"], + "path": ["examples", "default-turn-by-turn-directions"] + } + }, + "response": [] + }, + { + "name": "example-17-interview-questions", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"Create a list of 8 questions for an interview with a science fiction author.\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-interview-questions", + "host": ["{{base_url}}"], + "path": ["examples", "default-interview-questions"] + } + }, + "response": [] + }, + { + "name": "example-18-function-from-spec", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"Write a Python function that takes as input a file path to an image, loads the image into memory as a numpy array, then crops the rows and columns around the perimeter if they are darker than a threshold value. 
Use the mean value of rows and columns to decide if they should be marked for deletion.\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-function-from-spec", + "host": ["{{base_url}}"], + "path": ["examples", "default-function-from-spec"] + } + }, + "response": [] + }, + { + "name": "example-19-code-improvement", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"from typing import List\\\\r\\\\n\\\\r\\\\ndef has_sum_k(nums: List[int], k: int) -> bool:\\\\r\\\\n n = len(nums)\\\\r\\\\n for i in range(n):\\\\r\\\\n for j in range(i+1, n):\\\\r\\\\n if nums[i] + nums[j] == k:\\\\r\\\\n return True\\\\r\\\\n return False\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-code-improvement", + "host": ["{{base_url}}"], + "path": ["examples", "default-code-improvement"] + } + }, + "response": [] + }, + { + "name": "example-20-single-page-website", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"Make a single page website that shows off different neat javascript features for drop-downs and things to display information. The website should be an HTML file with embedded javascript and CSS.\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-single-page-website", + "host": ["{{base_url}}"], + "path": ["examples", "default-single-page-website"] + } + }, + "response": [] + }, + { + "name": "example-21-rap-battle", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"Write a rap battle between Alan Turing and Claude Shannon.\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-rap-battle", + "host": ["{{base_url}}"], + "path": ["examples", "default-rap-battle"] + } + }, + "response": [] + }, + { + "name": "example-22-memo-writer", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"Draft a company memo to be distributed to all employees. 
The memo should cover the following specific points without deviating from the topics mentioned and not writing any fact which is not present here:\\n\\nIntroduction: Remind employees about the upcoming quarterly review scheduled for the last week of April.\\n\\nPerformance Metrics: Clearly state the three key performance indicators (KPIs) that will be assessed during the review: sales targets, customer satisfaction (measured by net promoter score), and process efficiency (measured by average project completion time).\\n\\nProject Updates: Provide a brief update on the status of the three ongoing company projects:\\n\\na. Project Alpha: 75% complete, expected completion by May 30th.\\nb. Project Beta: 50% complete, expected completion by June 15th.\\nc. Project Gamma: 30% complete, expected completion by July 31st.\\n\\nTeam Recognition: Announce that the Sales Team was the top-performing team of the past quarter and congratulate them for achieving 120% of their target.\\n\\nTraining Opportunities: Inform employees about the upcoming training workshops that will be held in May, including \\\"Advanced Customer Service\\\" on May 10th and \\\"Project Management Essentials\\\" on May 25th.\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-memo-writer", + "host": ["{{base_url}}"], + "path": ["examples", "default-memo-writer"] + } + }, + "response": [] + }, + { + "name": "example-24-translation", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"My name is Jane. 
What is yours?\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-translation", + "host": ["{{base_url}}"], + "path": ["examples", "default-translation"] + } + }, + "response": [] + }, + { + "name": "example-25-socratic-tutor", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"Help me to understand the future of artificial intelligence.\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-socratic-tutor", + "host": ["{{base_url}}"], + "path": ["examples", "default-socratic-tutor"] + } + }, + "response": [] + }, + { + "name": "example-26-sql-translate", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"Write a SQL query which computes the average total order value for all orders on 2023-04-01.\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-sql-translate", + "host": ["{{base_url}}"], + "path": ["examples", "default-sql-translate"] + } + }, + "response": [] + }, + { + "name": "example-27-notes-summarizer", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"Meeting Date: March 5th, 2050\\nMeeting Time: 2:00 PM\\nLocation: Conference Room 3B, Intergalactic Headquarters\\n\\nAttendees:\\n- Captain Stardust\\n- Dr. Quasar\\n- Lady Nebula\\n- Sir Supernova\\n- Ms. Comet\\n\\nMeeting called to order by Captain Stardust at 2:05 PM\\n\\n1. Introductions and welcome to our newest team member, Ms. Comet\\n\\n2. Discussion of our recent mission to Planet Zog\\n- Captain Stardust: \\\"Overall, a success, but communication with the Zogians was difficult. We need to improve our language skills.\\\"\\n- Dr. Quasar: \\\"Agreed. I'll start working on a Zogian-English dictionary right away.\\\"\\n- Lady Nebula: \\\"The Zogian food was out of this world, literally! We should consider having a Zogian food night on the ship.\\\"\\n\\n3. Addressing the space pirate issue in Sector 7\\n- Sir Supernova: \\\"We need a better strategy for dealing with these pirates. They've already plundered three cargo ships this month.\\\"\\n- Captain Stardust: \\\"I'll speak with Admiral Starbeam about increasing patrols in that area.\\n- Dr. Quasar: \\\"I've been working on a new cloaking technology that could help our ships avoid detection by the pirates. I'll need a few more weeks to finalize the prototype.\\\"\\n\\n4. Review of the annual Intergalactic Bake-Off\\n- Lady Nebula: \\\"I'm happy to report that our team placed second in the competition! Our Martian Mud Pie was a big hit!\\\"\\n- Ms. Comet: \\\"Let's aim for first place next year. 
I have a secret recipe for Jupiter Jello that I think could be a winner.\\\"\\n\\n5. Planning for the upcoming charity fundraiser\\n- Captain Stardust: \\\"We need some creative ideas for our booth at the Intergalactic Charity Bazaar.\\\"\\n- Sir Supernova: \\\"How about a 'Dunk the Alien' game? We can have people throw water balloons at a volunteer dressed as an alien.\\\"\\n- Dr. Quasar: \\\"I can set up a 'Name That Star' trivia game with prizes for the winners.\\\"\\n- Lady Nebula: \\\"Great ideas, everyone. Let's start gathering the supplies and preparing the games.\\\"\\n\\n6. Upcoming team-building retreat\\n- Ms. Comet: \\\"I would like to propose a team-building retreat to the Moon Resort and Spa. It's a great opportunity to bond and relax after our recent missions.\\\"\\n- Captain Stardust: \\\"Sounds like a fantastic idea. I'll check the budget and see if we can make it happen.\\\"\\n\\n7. Next meeting agenda items\\n- Update on the Zogian-English dictionary (Dr. Quasar)\\n- Progress report on the cloaking technology (Dr. Quasar)\\n- Results of increased patrols in Sector 7 (Captain Stardust)\\n- Final preparations for the Intergalactic Charity Bazaar (All)\\n\\nMeeting adjourned at 3:15 PM. Next meeting scheduled for March 19th, 2050 at 2:00 PM in Conference Room 3B, Intergalactic Headquarters.\\n\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-notes-summarizer", + "host": ["{{base_url}}"], + "path": ["examples", "default-notes-summarizer"] + } + }, + "response": [] + }, + { + "name": "example-28-review-classifier", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"I recently purchased the Inflatotron 2000 airbed for a camping trip and wanted to share my experience with others. Overall, I found the airbed to be a mixed bag with some positives and negatives.\\\\Starting with the positives, the Inflatotron 2000 is incredibly easy to set up and inflate. It comes with a built-in electric pump that quickly inflates the bed within a few minutes, which is a huge plus for anyone who wants to avoid the hassle of manually pumping up their airbed. The bed is also quite comfortable to sleep on and offers decent support for your back, which is a major plus if you have any issues with back pain.\\\\On the other hand, I did experience some negatives with the Inflatotron 2000. Firstly, I found that the airbed is not very durable and punctures easily. During my camping trip, the bed got punctured by a stray twig that had fallen on it, which was quite frustrating. Secondly, I noticed that the airbed tends to lose air overnight, which meant that I had to constantly re-inflate it every morning. This was a bit annoying as it disrupted my sleep and made me feel less rested in the morning.\\\\Another negative point is that the Inflatotron 2000 is quite heavy and bulky, which makes it difficult to transport and store. 
If you're planning on using this airbed for camping or other outdoor activities, you'll need to have a large enough vehicle to transport it and a decent amount of storage space to store it when not in use.\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-review-classifier", + "host": ["{{base_url}}"], + "path": ["examples", "default-review-classifier"] + } + }, + "response": [] + }, + { + "name": "example-29-pro-con-discusser", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"changing careers at 60\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-pro-con-discusser", + "host": ["{{base_url}}"], + "path": ["examples", "default-pro-con-discusser"] + } + }, + "response": [] + }, + { + "name": "example-30-lesson-plan-writer", + "event": [ + { + "listen": "prerequest", + "script": { + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "x-api-key", + "value": "{{api_key}}", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"input_text\": \"Write a lesson plan for an introductory algebra class. The lesson plan should cover the distributive law, in particular how it works in simple cases involving mixes of positive and negative numbers. Come up with some examples that show common student errors.\",\n \"chat_history\": []\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{base_url}}/examples/default-lesson-plan-writer", + "host": ["{{base_url}}"], + "path": ["examples", "default-lesson-plan-writer"] + } + }, + "response": [] + } + ] } diff --git a/api/postman/prod.postman_environment.json b/api/postman/prod.postman_environment.json index dbe814c7..53529e02 100644 --- a/api/postman/prod.postman_environment.json +++ b/api/postman/prod.postman_environment.json @@ -1,27 +1,27 @@ { - "id": "66a74108-0b5a-4536-beb7-049eb7660f3b", - "name": "prod", - "values": [ - { - "key": "api_key", - "value": "SET-ME-PLEASE", - "type": "default", - "enabled": true - }, - { - "key": "stage", - "value": "v1", - "type": "default", - "enabled": true - }, - { - "key": "base_url", - "value": "https://SET-ME-PLEASE.execute-api.us-east-1.amazonaws.com", - "type": "default", - "enabled": true - } - ], - "_postman_variable_scope": "environment", - "_postman_exported_at": "2023-09-13T13:15:27.933Z", - "_postman_exported_using": "Postman/10.17.7" + "id": "66a74108-0b5a-4536-beb7-049eb7660f3b", + "name": "prod", + "values": [ + { + "key": "api_key", + "value": "SET-ME-PLEASE", + "type": "default", + "enabled": true + }, + { + "key": "stage", + "value": "v1", + "type": "default", + "enabled": true + }, + { + "key": "base_url", + "value": "https://SET-ME-PLEASE.execute-api.us-east-1.amazonaws.com", + "type": "default", + "enabled": true + } + ], + "_postman_variable_scope": "environment", + "_postman_exported_at": "2023-09-13T13:15:27.933Z", + "_postman_exported_using": "Postman/10.17.7" } diff --git a/api/terraform/json/iam_policy_apigateway.json b/api/terraform/json/iam_policy_apigateway.json index 3f8a12de..dd9480ff 100644 --- a/api/terraform/json/iam_policy_apigateway.json +++ 
b/api/terraform/json/iam_policy_apigateway.json @@ -1,24 +1,23 @@ { - "Version": "2012-10-17", - "Statement": [ + "Version": "2012-10-17", + "Statement": [ { "Effect": "Allow", - "Action": [ - "lambda:InvokeFunction" - ], + "Action": ["lambda:InvokeFunction"], "Resource": "*" }, { "Effect": "Allow", "Action": [ - "logs:CreateLogGroup", - "logs:CreateLogStream", - "logs:DescribeLogGroups", - "logs:DescribeLogStreams", - "logs:PutLogEvents", - "logs:GetLogEvents", - "logs:FilterLogEvents" + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:PutLogEvents", + "logs:GetLogEvents", + "logs:FilterLogEvents" ], "Resource": "*" - } ] + } + ] } diff --git a/api/terraform/json/iam_policy_lambda.json b/api/terraform/json/iam_policy_lambda.json index 4f592dbd..1939d68a 100644 --- a/api/terraform/json/iam_policy_lambda.json +++ b/api/terraform/json/iam_policy_lambda.json @@ -1,20 +1,18 @@ { "Version": "2012-10-17", "Statement": [ - { - "Effect": "Allow", - "Action": [ - "logs:CreateLogGroup", - "logs:CreateLogStream", - "logs:DescribeLogGroups", - "logs:DescribeLogStreams", - "logs:PutLogEvents", - "logs:GetLogEvents", - "logs:FilterLogEvents" - ], - "Resource": [ - "arn:aws:lambda:*" - ] - } + { + "Effect": "Allow", + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:PutLogEvents", + "logs:GetLogEvents", + "logs:FilterLogEvents" + ], + "Resource": ["arn:aws:lambda:*"] + } ] } diff --git a/api/terraform/json/iam_role_apigateway.json b/api/terraform/json/iam_role_apigateway.json index 66c28b19..e2315362 100644 --- a/api/terraform/json/iam_role_apigateway.json +++ b/api/terraform/json/iam_role_apigateway.json @@ -1,13 +1,13 @@ { - "Version" : "2012-10-17", - "Statement" : [ + "Version": "2012-10-17", + "Statement": [ { - "Sid" : "", - "Effect" : "Allow", - "Principal" : { - "Service" : "apigateway.amazonaws.com" + "Sid": "", + "Effect": "Allow", + "Principal": { + "Service": "apigateway.amazonaws.com" }, - "Action" : "sts:AssumeRole" + "Action": "sts:AssumeRole" } ] } diff --git a/api/terraform/json/iam_role_lambda.json b/api/terraform/json/iam_role_lambda.json index fe943731..b55db60d 100644 --- a/api/terraform/json/iam_role_lambda.json +++ b/api/terraform/json/iam_role_lambda.json @@ -5,9 +5,7 @@ "Sid": "", "Effect": "Allow", "Principal": { - "Service": [ - "lambda.amazonaws.com" - ] + "Service": ["lambda.amazonaws.com"] }, "Action": "sts:AssumeRole" } diff --git a/api/terraform/python/lambda_langchain/tests/events/test_01.request.json b/api/terraform/python/lambda_langchain/tests/events/test_01.request.json index c2a5f694..4da12680 100644 --- a/api/terraform/python/lambda_langchain/tests/events/test_01.request.json +++ b/api/terraform/python/lambda_langchain/tests/events/test_01.request.json @@ -28,5 +28,5 @@ "role": "user", "content": "Please be more specific." } - ] + ] } diff --git a/api/terraform/python/layer_genai/openai_utils/data/example_request.json b/api/terraform/python/layer_genai/openai_utils/data/example_request.json index 449b47f4..13af72bc 100644 --- a/api/terraform/python/layer_genai/openai_utils/data/example_request.json +++ b/api/terraform/python/layer_genai/openai_utils/data/example_request.json @@ -4,13 +4,13 @@ "temperature": 0.7, "max_tokens": 100, "messages": [ - { - "role": "system", - "content": "You will be provided with statements, and your task is to convert them to standard English." 
- }, - { - "role": "user", - "content": "She no went to the market." - } + { + "role": "system", + "content": "You will be provided with statements, and your task is to convert them to standard English." + }, + { + "role": "user", + "content": "She no went to the market." + } ] } diff --git a/api/terraform/templates/test_200.json b/api/terraform/templates/test_200.json index 2741fa0f..19f9a9d2 100644 --- a/api/terraform/templates/test_200.json +++ b/api/terraform/templates/test_200.json @@ -1,25 +1,25 @@ { - "isBase64Encoded" : false, - "statusCode" : 200, - "body" : { - "id" : "chatcmpl-8AQPdETlM808Fp0NjEeCOOc3a13Vt", - "object" : "chat.completion", - "created" : 1697495501, - "model" : "gpt-3.5-turbo-0613", - "choices" : [ + "isBase64Encoded": false, + "statusCode": 200, + "body": { + "id": "chatcmpl-8AQPdETlM808Fp0NjEeCOOc3a13Vt", + "object": "chat.completion", + "created": 1697495501, + "model": "gpt-3.5-turbo-0613", + "choices": [ { - "index" : 0, - "message" : { - "role" : "assistant", - "content" : "Oh, hello there! What kind of trouble can I unknowingly get myself into for you today?" + "index": 0, + "message": { + "role": "assistant", + "content": "Oh, hello there! What kind of trouble can I unknowingly get myself into for you today?" }, - "finish_reason" : "stop" + "finish_reason": "stop" } ], - "usage" : { - "prompt_tokens" : 31, - "completion_tokens" : 20, - "total_tokens" : 51 + "usage": { + "prompt_tokens": 31, + "completion_tokens": 20, + "total_tokens": 51 } } } diff --git a/api/terraform/templates/test_400.json b/api/terraform/templates/test_400.json index 17aa7462..335c4b82 100644 --- a/api/terraform/templates/test_400.json +++ b/api/terraform/templates/test_400.json @@ -2,7 +2,7 @@ "isBase64Encoded": false, "statusCode": 400, "body": { - "error": "Request failed due to server shutdown {\n \"error\": {\n \"message\": \"Request failed due to server shutdown\",\n \"type\": \"server_error\",\n \"param\": null,\n \"code\": null\n }\n}\n 500 {'error': {'message': 'Request failed due to server shutdown', 'type': 'server_error', 'param': None, 'code': None}} {'Date': 'Wed, 18 Oct 2023 17:08:23 GMT', 'Content-Type': 'application/json', 'Content-Length': '141', 'Connection': 'keep-alive', 'access-control-allow-origin': '*', 'openai-model': 'gpt-3.5-turbo-0613', 'openai-organization': 'user-tl1wv9chydjidmopc8vig1gc', 'openai-processing-ms': '9581', 'openai-version': '2020-10-01', 'strict-transport-security': 'max-age=15724800; includeSubDomains', 'x-ratelimit-limit-requests': '3500', 'x-ratelimit-limit-tokens': '90000', 'x-ratelimit-remaining-requests': '3499', 'x-ratelimit-remaining-tokens': '89958', 'x-ratelimit-reset-requests': '17ms', 'x-ratelimit-reset-tokens': '28ms', 'x-request-id': '04b40821ae7ecb90639e00d2ff9ef17f', 'CF-Cache-Status': 'DYNAMIC', 'Server': 'cloudflare', 'CF-RAY': '818261d3f9b2393d-IAD', 'alt-svc': 'h3=\":443\"; ma=86400'}", - "description": "Traceback (most recent call last):\n File \"/var/task/openai_text.py\", line 287, in handler\n openai_results = openai.ChatCompletion.create(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/var/task/openai/api_resources/chat_completion.py\", line 25, in create\n return super().create(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/var/task/openai/api_resources/abstract/engine_api_resource.py\", line 155, in create\n response, _, api_key = requestor.request(\n ^^^^^^^^^^^^^^^^^^\n File \"/var/task/openai/api_requestor.py\", line 299, in request\n resp, got_stream = self._interpret_response(result, stream)\n 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/var/task/openai/api_requestor.py\", line 710, in _interpret_response\n self._interpret_response_line(\n File \"/var/task/openai/api_requestor.py\", line 775, in _interpret_response_line\n raise self.handle_error_response(\nopenai.error.APIError: Request failed due to server shutdown {\n \"error\": {\n \"message\": \"Request failed due to server shutdown\",\n \"type\": \"server_error\",\n \"param\": null,\n \"code\": null\n }\n}\n 500 {'error': {'message': 'Request failed due to server shutdown', 'type': 'server_error', 'param': None, 'code': None}} {'Date': 'Wed, 18 Oct 2023 17:08:23 GMT', 'Content-Type': 'application/json', 'Content-Length': '141', 'Connection': 'keep-alive', 'access-control-allow-origin': '*', 'openai-model': 'gpt-3.5-turbo-0613', 'openai-organization': 'user-tl1wv9chydjidmopc8vig1gc', 'openai-processing-ms': '9581', 'openai-version': '2020-10-01', 'strict-transport-security': 'max-age=15724800; includeSubDomains', 'x-ratelimit-limit-requests': '3500', 'x-ratelimit-limit-tokens': '90000', 'x-ratelimit-remaining-requests': '3499', 'x-ratelimit-remaining-tokens': '89958', 'x-ratelimit-reset-requests': '17ms', 'x-ratelimit-reset-tokens': '28ms', 'x-request-id': '04b40821ae7ecb90639e00d2ff9ef17f', 'CF-Cache-Status': 'DYNAMIC', 'Server': 'cloudflare', 'CF-RAY': '818261d3f9b2393d-IAD', 'alt-svc': 'h3=\":443\"; ma=86400'}\n" + "error": "Request failed due to server shutdown {\n \"error\": {\n \"message\": \"Request failed due to server shutdown\",\n \"type\": \"server_error\",\n \"param\": null,\n \"code\": null\n }\n}\n 500 {'error': {'message': 'Request failed due to server shutdown', 'type': 'server_error', 'param': None, 'code': None}} {'Date': 'Wed, 18 Oct 2023 17:08:23 GMT', 'Content-Type': 'application/json', 'Content-Length': '141', 'Connection': 'keep-alive', 'access-control-allow-origin': '*', 'openai-model': 'gpt-3.5-turbo-0613', 'openai-organization': 'user-tl1wv9chydjidmopc8vig1gc', 'openai-processing-ms': '9581', 'openai-version': '2020-10-01', 'strict-transport-security': 'max-age=15724800; includeSubDomains', 'x-ratelimit-limit-requests': '3500', 'x-ratelimit-limit-tokens': '90000', 'x-ratelimit-remaining-requests': '3499', 'x-ratelimit-remaining-tokens': '89958', 'x-ratelimit-reset-requests': '17ms', 'x-ratelimit-reset-tokens': '28ms', 'x-request-id': '04b40821ae7ecb90639e00d2ff9ef17f', 'CF-Cache-Status': 'DYNAMIC', 'Server': 'cloudflare', 'CF-RAY': '818261d3f9b2393d-IAD', 'alt-svc': 'h3=\":443\"; ma=86400'}", + "description": "Traceback (most recent call last):\n File \"/var/task/openai_text.py\", line 287, in handler\n openai_results = openai.ChatCompletion.create(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/var/task/openai/api_resources/chat_completion.py\", line 25, in create\n return super().create(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/var/task/openai/api_resources/abstract/engine_api_resource.py\", line 155, in create\n response, _, api_key = requestor.request(\n ^^^^^^^^^^^^^^^^^^\n File \"/var/task/openai/api_requestor.py\", line 299, in request\n resp, got_stream = self._interpret_response(result, stream)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/var/task/openai/api_requestor.py\", line 710, in _interpret_response\n self._interpret_response_line(\n File \"/var/task/openai/api_requestor.py\", line 775, in _interpret_response_line\n raise self.handle_error_response(\nopenai.error.APIError: Request failed due to server shutdown {\n \"error\": {\n \"message\": \"Request 
failed due to server shutdown\",\n \"type\": \"server_error\",\n \"param\": null,\n \"code\": null\n }\n}\n 500 {'error': {'message': 'Request failed due to server shutdown', 'type': 'server_error', 'param': None, 'code': None}} {'Date': 'Wed, 18 Oct 2023 17:08:23 GMT', 'Content-Type': 'application/json', 'Content-Length': '141', 'Connection': 'keep-alive', 'access-control-allow-origin': '*', 'openai-model': 'gpt-3.5-turbo-0613', 'openai-organization': 'user-tl1wv9chydjidmopc8vig1gc', 'openai-processing-ms': '9581', 'openai-version': '2020-10-01', 'strict-transport-security': 'max-age=15724800; includeSubDomains', 'x-ratelimit-limit-requests': '3500', 'x-ratelimit-limit-tokens': '90000', 'x-ratelimit-remaining-requests': '3499', 'x-ratelimit-remaining-tokens': '89958', 'x-ratelimit-reset-requests': '17ms', 'x-ratelimit-reset-tokens': '28ms', 'x-request-id': '04b40821ae7ecb90639e00d2ff9ef17f', 'CF-Cache-Status': 'DYNAMIC', 'Server': 'cloudflare', 'CF-RAY': '818261d3f9b2393d-IAD', 'alt-svc': 'h3=\":443\"; ma=86400'}\n" } } diff --git a/client/src/components/chatApp/test/events/openai.response.v0.4.0.json b/client/src/components/chatApp/test/events/openai.response.v0.4.0.json index 6fdff543..cc79a95a 100644 --- a/client/src/components/chatApp/test/events/openai.response.v0.4.0.json +++ b/client/src/components/chatApp/test/events/openai.response.v0.4.0.json @@ -2,24 +2,24 @@ "isBase64Encoded": false, "statusCode": 200, "body": { - "id": "chatcmpl-8B4RwgebWS8ZdTQ1ppHgElsru0BVW", - "object": "chat.completion", - "created": 1697649404, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "Quantum computing is a type of computing that uses tiny particles called quantum bits, or qubits, to process and store information. It's like regular computers, but instead of using regular bits that can be either 0 or 1, quantum computers use qubits that can be both 0 and 1 at the same time. This allows them to solve certain problems much faster than regular computers. Quantum computers are very advanced and not widely used yet, but scientists are working on developing them to help solve complex problems in areas like science, medicine, and cryptography." - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 30, - "completion_tokens": 113, - "total_tokens": 143 + "id": "chatcmpl-8B4RwgebWS8ZdTQ1ppHgElsru0BVW", + "object": "chat.completion", + "created": 1697649404, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Quantum computing is a type of computing that uses tiny particles called quantum bits, or qubits, to process and store information. It's like regular computers, but instead of using regular bits that can be either 0 or 1, quantum computers use qubits that can be both 0 and 1 at the same time. This allows them to solve certain problems much faster than regular computers. Quantum computers are very advanced and not widely used yet, but scientists are working on developing them to help solve complex problems in areas like science, medicine, and cryptography." 
+ }, + "finish_reason": "stop" } + ], + "usage": { + "prompt_tokens": 30, + "completion_tokens": 113, + "total_tokens": 143 + } } } diff --git a/doc/apigateway_endpoint_airport_codes_result.json b/doc/apigateway_endpoint_airport_codes_result.json index 9bd76255..64420680 100644 --- a/doc/apigateway_endpoint_airport_codes_result.json +++ b/doc/apigateway_endpoint_airport_codes_result.json @@ -2,24 +2,24 @@ "isBase64Encoded": false, "statusCode": 200, "body": { - "id": "chatcmpl-88XFbBoZW1Spk1YrMvlYorBfHrR8B", - "object": "chat.completion", - "created": 1697045131, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "The airport code for Mexico City International Airport is MEX, and the airport code for Cancun International Airport is CUN." - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 42, - "completion_tokens": 25, - "total_tokens": 67 + "id": "chatcmpl-88XFbBoZW1Spk1YrMvlYorBfHrR8B", + "object": "chat.completion", + "created": 1697045131, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "The airport code for Mexico City International Airport is MEX, and the airport code for Cancun International Airport is CUN." + }, + "finish_reason": "stop" } + ], + "usage": { + "prompt_tokens": 42, + "completion_tokens": 25, + "total_tokens": 67 + } } } diff --git a/doc/apigateway_event_request.json b/doc/apigateway_event_request.json index ea1018c6..4dbbea3a 100644 --- a/doc/apigateway_event_request.json +++ b/doc/apigateway_event_request.json @@ -1,85 +1,67 @@ { "event": { - "resource": "/default-grammar", - "path": "/default-grammar", + "resource": "/default-grammar", + "path": "/default-grammar", + "httpMethod": "PUT", + "headers": { + "accept": "*/*", + "content-type": "application/json", + "Host": "ntivxlkmv7.execute-api.us-east-1.amazonaws.com", + "User-Agent": "curl/7.87.0", + "X-Amzn-Trace-Id": "Root=1-6500dd43-0c30df604c51a91e1e174dea", + "x-api-key": "dOQAFTyJ8c7OnTJxlde3G8NFo4iRnRrA6j1IZyF3", + "X-Forwarded-For": "187.190.193.166", + "X-Forwarded-Port": "443", + "X-Forwarded-Proto": "https" + }, + "multiValueHeaders": { + "accept": ["*/*"], + "content-type": ["application/json"], + "Host": ["ntivxlkmv7.execute-api.us-east-1.amazonaws.com"], + "User-Agent": ["curl/7.87.0"], + "X-Amzn-Trace-Id": ["Root=1-6500dd43-0c30df604c51a91e1e174dea"], + "x-api-key": ["dOQAFTyJ8c7OnTJxlde3G8NFo4iRnRrA6j1IZyF3"], + "X-Forwarded-For": ["187.190.193.166"], + "X-Forwarded-Port": ["443"], + "X-Forwarded-Proto": ["https"] + }, + "queryStringParameters": null, + "multiValueQueryStringParameters": null, + "pathParameters": null, + "stageVariables": null, + "requestContext": { + "resourceId": "xm24b5", + "resourcePath": "/default-grammar", "httpMethod": "PUT", - "headers": { - "accept": "*/*", - "content-type": "application/json", - "Host": "ntivxlkmv7.execute-api.us-east-1.amazonaws.com", - "User-Agent": "curl/7.87.0", - "X-Amzn-Trace-Id": "Root=1-6500dd43-0c30df604c51a91e1e174dea", - "x-api-key": "dOQAFTyJ8c7OnTJxlde3G8NFo4iRnRrA6j1IZyF3", - "X-Forwarded-For": "187.190.193.166", - "X-Forwarded-Port": "443", - "X-Forwarded-Proto": "https" + "extendedRequestId": "LKeCmE_PIAMElDA=", + "requestTime": "12/Sep/2023:21:50:59 +0000", + "path": "/v1/default-grammar", + "accountId": "090511222473", + "protocol": "HTTP/1.1", + "stage": "v1", + "domainPrefix": "ntivxlkmv7", + "requestTimeEpoch": 1694555459521, + "requestId": "1076f26f-69e3-4e7b-b51c-cee56e814897", + 
"identity": { + "cognitoIdentityPoolId": null, + "cognitoIdentityId": null, + "apiKey": "dOQAFTyJ8c7OnTJxlde3G8NFo4iRnRrA6j1IZyF3", + "principalOrgId": null, + "cognitoAuthenticationType": null, + "userArn": null, + "apiKeyId": "tf5t787ath", + "userAgent": "curl/7.87.0", + "accountId": null, + "caller": null, + "sourceIp": "187.190.193.166", + "accessKey": null, + "cognitoAuthenticationProvider": null, + "user": null }, - "multiValueHeaders": { - "accept": [ - "*/*" - ], - "content-type": [ - "application/json" - ], - "Host": [ - "ntivxlkmv7.execute-api.us-east-1.amazonaws.com" - ], - "User-Agent": [ - "curl/7.87.0" - ], - "X-Amzn-Trace-Id": [ - "Root=1-6500dd43-0c30df604c51a91e1e174dea" - ], - "x-api-key": [ - "dOQAFTyJ8c7OnTJxlde3G8NFo4iRnRrA6j1IZyF3" - ], - "X-Forwarded-For": [ - "187.190.193.166" - ], - "X-Forwarded-Port": [ - "443" - ], - "X-Forwarded-Proto": [ - "https" - ] - }, - "queryStringParameters": null, - "multiValueQueryStringParameters": null, - "pathParameters": null, - "stageVariables": null, - "requestContext": { - "resourceId": "xm24b5", - "resourcePath": "/default-grammar", - "httpMethod": "PUT", - "extendedRequestId": "LKeCmE_PIAMElDA=", - "requestTime": "12/Sep/2023:21:50:59 +0000", - "path": "/v1/default-grammar", - "accountId": "090511222473", - "protocol": "HTTP/1.1", - "stage": "v1", - "domainPrefix": "ntivxlkmv7", - "requestTimeEpoch": 1694555459521, - "requestId": "1076f26f-69e3-4e7b-b51c-cee56e814897", - "identity": { - "cognitoIdentityPoolId": null, - "cognitoIdentityId": null, - "apiKey": "dOQAFTyJ8c7OnTJxlde3G8NFo4iRnRrA6j1IZyF3", - "principalOrgId": null, - "cognitoAuthenticationType": null, - "userArn": null, - "apiKeyId": "tf5t787ath", - "userAgent": "curl/7.87.0", - "accountId": null, - "caller": null, - "sourceIp": "187.190.193.166", - "accessKey": null, - "cognitoAuthenticationProvider": null, - "user": null - }, - "domainName": "ntivxlkmv7.execute-api.us-east-1.amazonaws.com", - "apiId": "ntivxlkmv7" - }, - "body": "{\n \"model\": \"gpt-3.5-turbo\",\n \"end_point\": \"ChatCompletion\",\n \"messages\": [\n {\"role\": \"user\", \"content\": \"She no went to the market.\"}\n ]\n}", - "isBase64Encoded": false + "domainName": "ntivxlkmv7.execute-api.us-east-1.amazonaws.com", + "apiId": "ntivxlkmv7" + }, + "body": "{\n \"model\": \"gpt-3.5-turbo\",\n \"end_point\": \"ChatCompletion\",\n \"messages\": [\n {\"role\": \"user\", \"content\": \"She no went to the market.\"}\n ]\n}", + "isBase64Encoded": false } } diff --git a/doc/cloudwatch-apigateway-response-after-transformations.json b/doc/cloudwatch-apigateway-response-after-transformations.json index 70a44b81..5ffd51a3 100644 --- a/doc/cloudwatch-apigateway-response-after-transformations.json +++ b/doc/cloudwatch-apigateway-response-after-transformations.json @@ -2,27 +2,27 @@ "isBase64Encoded": false, "statusCode": 200, "headers": { - "Content-Type": "application/json" + "Content-Type": "application/json" }, "body": { - "id": "chatcmpl-7yjlsCU70sm8BilEtAiGaxUmdwJNM", - "object": "chat.completion", - "created": 1694709980, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "She broke the lamp post." 
- }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 35, - "completion_tokens": 6, - "total_tokens": 41 + "id": "chatcmpl-7yjlsCU70sm8BilEtAiGaxUmdwJNM", + "object": "chat.completion", + "created": 1694709980, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "She broke the lamp post." + }, + "finish_reason": "stop" } + ], + "usage": { + "prompt_tokens": 35, + "completion_tokens": 6, + "total_tokens": 41 + } } } diff --git a/doc/cloudwatch-apigateway-response-before-transformations.json b/doc/cloudwatch-apigateway-response-before-transformations.json index 70a44b81..5ffd51a3 100644 --- a/doc/cloudwatch-apigateway-response-before-transformations.json +++ b/doc/cloudwatch-apigateway-response-before-transformations.json @@ -2,27 +2,27 @@ "isBase64Encoded": false, "statusCode": 200, "headers": { - "Content-Type": "application/json" + "Content-Type": "application/json" }, "body": { - "id": "chatcmpl-7yjlsCU70sm8BilEtAiGaxUmdwJNM", - "object": "chat.completion", - "created": 1694709980, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "She broke the lamp post." - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 35, - "completion_tokens": 6, - "total_tokens": 41 + "id": "chatcmpl-7yjlsCU70sm8BilEtAiGaxUmdwJNM", + "object": "chat.completion", + "created": 1694709980, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "She broke the lamp post." + }, + "finish_reason": "stop" } + ], + "usage": { + "prompt_tokens": 35, + "completion_tokens": 6, + "total_tokens": 41 + } } } diff --git a/doc/cloudwatch-lambda-environment-dump.json b/doc/cloudwatch-lambda-environment-dump.json index 841b9184..0630c9d8 100644 --- a/doc/cloudwatch-lambda-environment-dump.json +++ b/doc/cloudwatch-lambda-environment-dump.json @@ -1,18 +1,18 @@ { "environment": { - "os": "posix", - "system": "Linux", - "release": "5.10.184-194.730.amzn2.x86_64", - "openai": "0.28.0", - "openai_app_info": null, - "openai_end_points": [ - "Embedding", - "ChatCompletion", - "Moderation", - "Image", - "Audio", - "Model" - ], - "DEBUG_MODE": true + "os": "posix", + "system": "Linux", + "release": "5.10.184-194.730.amzn2.x86_64", + "openai": "0.28.0", + "openai_app_info": null, + "openai_end_points": [ + "Embedding", + "ChatCompletion", + "Moderation", + "Image", + "Audio", + "Model" + ], + "DEBUG_MODE": true } } diff --git a/doc/cloudwatch-lambda-event-dump.json b/doc/cloudwatch-lambda-event-dump.json index 4565a210..ef3d14ad 100644 --- a/doc/cloudwatch-lambda-event-dump.json +++ b/doc/cloudwatch-lambda-event-dump.json @@ -1,18 +1,18 @@ { "event": { - "model": "gpt-3.5-turbo", - "end_point": "ChatCompletion", - "temperature": 1, - "max_tokens": 4096, - "messages": [ - { - "role": "system", - "content": "You will be provided with statements, and your task is to convert them to standard English." - }, - { - "role": "user", - "content": "she broked the lamp post" - } - ] + "model": "gpt-3.5-turbo", + "end_point": "ChatCompletion", + "temperature": 1, + "max_tokens": 4096, + "messages": [ + { + "role": "system", + "content": "You will be provided with statements, and your task is to convert them to standard English." 
+ }, + { + "role": "user", + "content": "she broked the lamp post" + } + ] } } diff --git a/doc/cloudwatch-lambda-response-dump.json b/doc/cloudwatch-lambda-response-dump.json index 01bf6829..f3d8fb4d 100644 --- a/doc/cloudwatch-lambda-response-dump.json +++ b/doc/cloudwatch-lambda-response-dump.json @@ -1,30 +1,30 @@ { "retval": { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yjlsCU70sm8BilEtAiGaxUmdwJNM", - "object": "chat.completion", - "created": 1694709980, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "She broke the lamp post." - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 35, - "completion_tokens": 6, - "total_tokens": 41 - } + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yjlsCU70sm8BilEtAiGaxUmdwJNM", + "object": "chat.completion", + "created": 1694709980, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "She broke the lamp post." + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 35, + "completion_tokens": 6, + "total_tokens": 41 } + } } } diff --git a/doc/openai.chat.completion.chunk.json b/doc/openai.chat.completion.chunk.json index c6f56071..edbf3c83 100644 --- a/doc/openai.chat.completion.chunk.json +++ b/doc/openai.chat.completion.chunk.json @@ -3,11 +3,13 @@ "object": "chat.completion.chunk", "created": 1677652288, "model": "gpt-3.5-turbo", - "choices": [{ - "index": 0, - "delta": { - "content": "Hello" - }, - "finish_reason": "stop" - }] + "choices": [ + { + "index": 0, + "delta": { + "content": "Hello" + }, + "finish_reason": "stop" + } + ] } diff --git a/doc/openai.chat.completion.request.body.json b/doc/openai.chat.completion.request.body.json index ade27670..924bda16 100644 --- a/doc/openai.chat.completion.request.body.json +++ b/doc/openai.chat.completion.request.body.json @@ -1,7 +1,10 @@ { "model": "gpt-3.5-turbo", "messages": [ - {"role": "system", "content": "You will be provided with statements, and your task is to convert them to standard English."}, - {"role": "user", "content": "She no went to the market."} - ] + { + "role": "system", + "content": "You will be provided with statements, and your task is to convert them to standard English." + }, + { "role": "user", "content": "She no went to the market." } + ] } diff --git a/doc/openai.chat.completion.response.json b/doc/openai.chat.completion.response.json index b7a715cb..5ca0066f 100644 --- a/doc/openai.chat.completion.response.json +++ b/doc/openai.chat.completion.response.json @@ -4,18 +4,18 @@ "created": 1677858242, "model": "gpt-3.5-turbo-0613", "usage": { - "prompt_tokens": 13, - "completion_tokens": 7, - "total_tokens": 20 + "prompt_tokens": 13, + "completion_tokens": 7, + "total_tokens": 20 }, "choices": [ - { - "message": { - "role": "assistant", - "content": "\n\nThis is a test!" - }, - "finish_reason": "stop", - "index": 0 - } + { + "message": { + "role": "assistant", + "content": "\n\nThis is a test!" 
+ }, + "finish_reason": "stop", + "index": 0 + } ] } From 9411b9929cb13f9ff9acb114b213a43e8bf9ac6e Mon Sep 17 00:00:00 2001 From: lpm0073 Date: Sat, 18 Nov 2023 18:11:34 -0600 Subject: [PATCH 04/10] style: reformat md with prettier --- .github/PULL_REQUEST_TEMPLATE.md | 12 + api/README.md | 290 +++++++++--------- doc/examples/README.md | 64 ++-- doc/examples/example-01-grammar.md | 36 +-- doc/examples/example-02-summarize.md | 51 ++- doc/examples/example-03-parse-data.md | 50 +-- doc/examples/example-04-emoji-translation.md | 51 ++- doc/examples/example-05-time-complexity.md | 51 ++- doc/examples/example-07-keywords.md | 50 +-- doc/examples/example-08-product-name-gen.md | 50 +-- doc/examples/example-09-fix-python-bugs.md | 54 ++-- doc/examples/example-10-spreadsheet-gen.md | 50 +-- doc/examples/example-11-tweet-classifier.md | 50 +-- doc/examples/example-12-airport-codes.md | 50 +-- doc/examples/example-13-mood-color.md | 50 +-- doc/examples/example-14-vr-fitness.md | 50 +-- .../example-15-marv-sarcastic-chat.md | 50 +-- .../example-16-turn-by-turn-directions.md | 50 +-- .../example-17-interview-questions.md | 50 +-- doc/examples/example-18-function-from-spec.md | 54 ++-- doc/examples/example-19-code-improvement.md | 54 ++-- .../example-20-single-page-website.md | 56 ++-- doc/examples/example-21-rap-battle.md | 53 ++-- doc/examples/example-24-translation.md | 50 +-- doc/examples/example-29-pro-con-discusser.md | 50 +-- doc/lambda_openai_text.md | 77 ++--- doc/terraform-getting-started.md | 11 +- 27 files changed, 785 insertions(+), 779 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 9d4b43ba..ea27012f 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,7 +1,9 @@ # Pull Request Template ## Type of Change + + - [ ] New feature - [ ] Bug fix - [ ] Documentation @@ -13,21 +15,31 @@ - Fixes #[Add issue number here.] 
## Changes + + _Describe what this Pull Request does_ ## Testing + + _Describe the testing that has been done or needs to be done_ ## Screenshots + + _Add any relevant screenshots_ ## Dependencies + + _List dependencies_ ## Breaking Changes + + _Describe any breaking changes_ diff --git a/api/README.md b/api/README.md index 54165814..460872b0 100644 --- a/api/README.md +++ b/api/README.md @@ -25,32 +25,32 @@ return value ```json { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yLxpF7ZsJzF3FTUICyUKDe1Ob9nd", - "object": "chat.completion", - "created": 1694618465, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "The correct way to phrase this sentence would be: \"She did not go to the market.\"" - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 36, - "completion_tokens": 10, - "total_tokens": 46 - } + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yLxpF7ZsJzF3FTUICyUKDe1Ob9nd", + "object": "chat.completion", + "created": 1694618465, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "The correct way to phrase this sentence would be: \"She did not go to the market.\"" + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 36, + "completion_tokens": 10, + "total_tokens": 46 } + } } ``` @@ -69,38 +69,38 @@ An example complete URL for one of the end points described below: https://api.o Implementations of each example application found in [OpenAI API - Examples](https://platform.openai.com/examples). -| Example | Link | -|---------|------| -| default-grammar | [/examples/default-grammar](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19) | -| default-summarize | [/examples/default-summarize](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L41) | -| default-parse-data | [/examples/default-parse-data](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L65) | -| default-emoji-translation | [/examples/default-emoji-translation](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L90) | -| default-time-complexity | [/examples/default-time-complexity](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L114) | -| default-explain-code | [/examples/default-explain-code](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L138) | -| default-keywords | [/examples/default-keywords](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L162) | -| default-product-name-gen | [/examples/default-product-name-gen](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L186) | -| default-fix-python-bugs | [/examples/default-fix-python-bugs](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L210) | -| default-spreadsheet-gen | [/examples/default-spreadsheet-gen](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L234) | -| default-tweet-classifier | 
[/examples/default-tweet-classifier](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L257) | -| default-airport-codes | [/examples/default-airport-codes](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L281) | -| default-mood-color | [/examples/default-mood-color](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L305) | -| default-vr-fitness | [/examples/default-vr-fitness](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L329) | -| default-marv-sarcastic-chat | [/examples/default-marv-sarcastic-chat](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L352) | -| default-turn-by-turn-directions | [/examples/default-turn-by-turn-directions](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L376) | -| default-interview-questions | [/examples/default-interview-questions](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L400) | -| default-function-from-spec | [/examples/default-function-from-spec](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L423) | -| default-code-improvement | [/examples/default-code-improvement](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L446) | -| default-single-page-website | [/examples/default-single-page-website](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L470) | -| default-rap-battle | [/examples/default-rap-battle](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L493) | -| default-memo-writer | [/examples/default-memo-writer](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L516) | -| default-emoji-chatbot | [/examples/default-emoji-chatbot](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L539) | -| default-translation | [/examples/default-translation](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L563) | -| default-socratic-tutor | [/examples/default-socratic-tutor](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L587) | -| default-sql-translate | [/examples/default-sql-translate](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L611) | +| Example | Link | +| -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | +| default-grammar | [/examples/default-grammar](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19) | +| default-summarize | [/examples/default-summarize](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L41) | +| default-parse-data | [/examples/default-parse-data](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L65) | +| default-emoji-translation | 
[/examples/default-emoji-translation](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L90) | +| default-time-complexity | [/examples/default-time-complexity](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L114) | +| default-explain-code | [/examples/default-explain-code](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L138) | +| default-keywords | [/examples/default-keywords](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L162) | +| default-product-name-gen | [/examples/default-product-name-gen](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L186) | +| default-fix-python-bugs | [/examples/default-fix-python-bugs](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L210) | +| default-spreadsheet-gen | [/examples/default-spreadsheet-gen](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L234) | +| default-tweet-classifier | [/examples/default-tweet-classifier](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L257) | +| default-airport-codes | [/examples/default-airport-codes](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L281) | +| default-mood-color | [/examples/default-mood-color](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L305) | +| default-vr-fitness | [/examples/default-vr-fitness](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L329) | +| default-marv-sarcastic-chat | [/examples/default-marv-sarcastic-chat](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L352) | +| default-turn-by-turn-directions | [/examples/default-turn-by-turn-directions](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L376) | +| default-interview-questions | [/examples/default-interview-questions](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L400) | +| default-function-from-spec | [/examples/default-function-from-spec](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L423) | +| default-code-improvement | [/examples/default-code-improvement](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L446) | +| default-single-page-website | [/examples/default-single-page-website](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L470) | +| default-rap-battle | [/examples/default-rap-battle](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L493) | +| default-memo-writer | [/examples/default-memo-writer](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L516) | +| default-emoji-chatbot | [/examples/default-emoji-chatbot](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L539) | +| default-translation | 
[/examples/default-translation](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L563) | +| default-socratic-tutor | [/examples/default-socratic-tutor](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L587) | +| default-sql-translate | [/examples/default-sql-translate](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L611) | | default-meeting-notes-summarizer | [/examples/default-meeting-notes-summarizer](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L635) | -| default-review-classifier | [/examples/default-review-classifier](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L659) | -| default-pro-con-discusser | [/examples/default-pro-con-discusser](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L683) | -| default-lesson-plan-writer | [/examples/default-lesson-plan-writer](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L706) | +| default-review-classifier | [/examples/default-review-classifier](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L659) | +| default-pro-con-discusser | [/examples/default-pro-con-discusser](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L683) | +| default-lesson-plan-writer | [/examples/default-lesson-plan-writer](https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L706) | ### Passthrough @@ -110,28 +110,28 @@ Example valid request body: ```json { - "model": "gpt-3.5-turbo", - "end_point": "ChatCompletion", - "temperature": 0.9, - "max_tokens": 1024, - "messages": [ - { - "role": "system", - "content": "Summarize content you are provided with for a second-grade student." - }, - { - "role": "user", - "content": "what is quantum computing?" - }, - { - "role": "assistant", - "content": "Quantum computing involves teeny tiny itsy bitsy atomic stuff" - }, - { - "role": "user", - "content": "What??? I don't understand. Please provide a better explanation." - }, - ] + "model": "gpt-3.5-turbo", + "end_point": "ChatCompletion", + "temperature": 0.9, + "max_tokens": 1024, + "messages": [ + { + "role": "system", + "content": "Summarize content you are provided with for a second-grade student." + }, + { + "role": "user", + "content": "what is quantum computing?" + }, + { + "role": "assistant", + "content": "Quantum computing involves teeny tiny itsy bitsy atomic stuff" + }, + { + "role": "user", + "content": "What??? I don't understand. Please provide a better explanation." + } + ] } ``` @@ -144,53 +144,52 @@ Example valid request body: - [AWS account](https://aws.amazon.com/) - [AWS Command Line Interface](https://aws.amazon.com/cli/) - [Terraform](https://www.terraform.io/). - *If you're new to Terraform then see [Getting Started With AWS and Terraform](./doc/terraform-getting-started.md)* + _If you're new to Terraform then see [Getting Started With AWS and Terraform](./doc/terraform-getting-started.md)_ - [OpenAI platform API key](https://platform.openai.com/). - *If you're new to OpenAI API then see [How to Get an OpenAI API Key](./doc/openai-api-key.md)* + _If you're new to OpenAI API then see [How to Get an OpenAI API Key](./doc/openai-api-key.md)_ ## Setup 1. 
clone this repo and setup a Python virtual environment - ```console - git clone https://github.com/FullStackWithLawrence/aws-openai.git - cd aws-openai - make init - ``` + ```console + git clone https://github.com/FullStackWithLawrence/aws-openai.git + cd aws-openai + make init + ``` 2. add your OpenAI API credentials to the [.env](./.env) file in the root folder of this repo. Your organization ID and API Key should appear similar in format to these examples below. - ```console - OPENAI_API_ORGANIZATION=org-YJzABCDEFGHIJESMShcyulf0 - OPENAI_API_KEY=sk-7doQ4gAITSez7ABCDEFGHIJlbkFJKLOuEbRhAFadzjtnzAV2 - ``` - - *Windows/Powershell users: you'll need to modify [./terraform/lambda_openai.tf](./terraform/lambda_openai.tf) data "external" "env" as per instructions in this code block.* + ```console + OPENAI_API_ORGANIZATION=org-YJzABCDEFGHIJESMShcyulf0 + OPENAI_API_KEY=sk-7doQ4gAITSez7ABCDEFGHIJlbkFJKLOuEbRhAFadzjtnzAV2 + ``` + _Windows/Powershell users: you'll need to modify [./terraform/lambda_openai.tf](./terraform/lambda_openai.tf) data "external" "env" as per instructions in this code block._ 3. Add your AWS account number and region to Terraform. Set these three values in [terraform.tfvars](./terraform/terraform.tfvars): - ```terraform - account_id = "012345678912" # Required: your 12-digit AWS account number - aws_region = "us-east-1" # Optional: an AWS data center - aws_profile = "default" # Optional: for aws cli credentials - ``` + ```terraform + account_id = "012345678912" # Required: your 12-digit AWS account number + aws_region = "us-east-1" # Optional: an AWS data center + aws_profile = "default" # Optional: for aws cli credentials + ``` - *see the README section **"Installation Prerequisites"** below for instructions on setting up Terraform for first-time use.* + _see the README section **"Installation Prerequisites"** below for instructions on setting up Terraform for first-time use._ 4. Build and deploy the microservice.. - ```terraform - terraform init - terraform apply - ``` + ```terraform + terraform init + terraform apply + ``` - *Note the output variables for your API Gateway root URL and API key.* - ![Postman](https://raw.githubusercontent.com/FullStackWithLawrence/aws-openai/main/doc/terraform-apply2.png "Postman") + _Note the output variables for your API Gateway root URL and API key._ + ![Postman](https://raw.githubusercontent.com/FullStackWithLawrence/aws-openai/main/doc/terraform-apply2.png "Postman") 5. (Optional) use the [preconfigured import files](./postman/) to setup a Postman collection with all 30 URL endpoints. - ![Postman](https://raw.githubusercontent.com/FullStackWithLawrence/aws-openai/main/doc/postman-1.png "Postman") + ![Postman](https://raw.githubusercontent.com/FullStackWithLawrence/aws-openai/main/doc/postman-1.png "Postman") ### Custom Domain (Optional) @@ -216,13 +215,13 @@ You'll find a detailed narrative explanation of the design strategy in this arti ### Services and Technologies Used -* **[OpenAI](https://pypi.org/project/openai/)**: a PyPi package thata provides convenient access to the OpenAI API from applications written in the Python language. It includes a pre-defined set of classes for API resources that initialize themselves dynamically from API responses which makes it compatible with a wide range of versions of the OpenAI API. -* **[API Gateway](https://aws.amazon.com/api-gateway/)**: an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs at any scale. 
-* **[IAM](https://aws.amazon.com/iam/)**: a web service that helps you securely control access to AWS resources. With IAM, you can centrally manage permissions that control which AWS resources users can access. You use IAM to control who is authenticated (signed in) and authorized (has permissions) to use resources. -* **[Lambda](https://aws.amazon.com/lambda/)**: an event-driven, serverless computing platform provided by Amazon as a part of Amazon Web Services. It is a computing service that runs code in response to events and automatically manages the computing resources required by that code. It was introduced on November 13, 2014. -* **[CloudWatch](https://aws.amazon.com/cloudwatch/)**: CloudWatch enables you to monitor your complete stack (applications, infrastructure, network, and services) and use alarms, logs, and events data to take automated actions and reduce mean time to resolution (MTTR). -* **[Route53](https://aws.amazon.com/route53/)**: (OPTIONAL). a scalable and highly available Domain Name System service. Released on December 5, 2010. -* **[Certificate Manager](https://aws.amazon.com/certificate-manager/)**: (OPTIONAL). handles the complexity of creating, storing, and renewing public and private SSL/TLS X.509 certificates and keys that protect your AWS websites and applications. +- **[OpenAI](https://pypi.org/project/openai/)**: a PyPi package thata provides convenient access to the OpenAI API from applications written in the Python language. It includes a pre-defined set of classes for API resources that initialize themselves dynamically from API responses which makes it compatible with a wide range of versions of the OpenAI API. +- **[API Gateway](https://aws.amazon.com/api-gateway/)**: an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs at any scale. +- **[IAM](https://aws.amazon.com/iam/)**: a web service that helps you securely control access to AWS resources. With IAM, you can centrally manage permissions that control which AWS resources users can access. You use IAM to control who is authenticated (signed in) and authorized (has permissions) to use resources. +- **[Lambda](https://aws.amazon.com/lambda/)**: an event-driven, serverless computing platform provided by Amazon as a part of Amazon Web Services. It is a computing service that runs code in response to events and automatically manages the computing resources required by that code. It was introduced on November 13, 2014. +- **[CloudWatch](https://aws.amazon.com/cloudwatch/)**: CloudWatch enables you to monitor your complete stack (applications, infrastructure, network, and services) and use alarms, logs, and events data to take automated actions and reduce mean time to resolution (MTTR). +- **[Route53](https://aws.amazon.com/route53/)**: (OPTIONAL). a scalable and highly available Domain Name System service. Released on December 5, 2010. +- **[Certificate Manager](https://aws.amazon.com/certificate-manager/)**: (OPTIONAL). handles the complexity of creating, storing, and renewing public and private SSL/TLS X.509 certificates and keys that protect your AWS websites and applications. ## OpenAI API @@ -264,29 +263,29 @@ a static example response from the OpenAI chatgpt-3.5 API ```json { - "body": { - "choices": [ - { - "finish_reason": "stop", - "index": 0, - "message": { - "content": "Oh, hello there! 
What kind of trouble can I unknowingly get myself into for you today?", - "role": "assistant" - } - } - ], - "created": 1697495501, - "id": "chatcmpl-8AQPdETlM808Fp0NjEeCOOc3a13Vt", - "model": "gpt-3.5-turbo-0613", - "object": "chat.completion", - "usage": { - "completion_tokens": 20, - "prompt_tokens": 31, - "total_tokens": 51 + "body": { + "choices": [ + { + "finish_reason": "stop", + "index": 0, + "message": { + "content": "Oh, hello there! What kind of trouble can I unknowingly get myself into for you today?", + "role": "assistant" } - }, - "isBase64Encoded": false, - "statusCode": 200 + } + ], + "created": 1697495501, + "id": "chatcmpl-8AQPdETlM808Fp0NjEeCOOc3a13Vt", + "model": "gpt-3.5-turbo-0613", + "object": "chat.completion", + "usage": { + "completion_tokens": 20, + "prompt_tokens": 31, + "total_tokens": 51 + } + }, + "isBase64Encoded": false, + "statusCode": 200 } ``` @@ -296,12 +295,12 @@ a static http 400 response ```json { - "body": { - "error": "Bad Request", - "message": "TEST 400 RESPONSE." - }, - "isBase64Encoded": false, - "statusCode": 400 + "body": { + "error": "Bad Request", + "message": "TEST 400 RESPONSE." + }, + "isBase64Encoded": false, + "statusCode": 400 } ``` @@ -311,12 +310,12 @@ a static http 500 response ```json { - "body": { - "error": "Internal Server Error", - "message": "TEST 500 RESPONSE." - }, - "isBase64Encoded": false, - "statusCode": 500 + "body": { + "error": "Internal Server Error", + "message": "TEST 500 RESPONSE." + }, + "isBase64Encoded": false, + "statusCode": 500 } ``` @@ -324,7 +323,6 @@ a static http 500 response a static http 504 response with an empty body. - ## Trouble Shooting and Logging The terraform scripts will automatically create a collection of CloudWatch Log Groups. Additionally, note the Terraform global variable 'debug_mode' (defaults to 'true') which will increase the verbosity of log entries in the [Lambda functions](./terraform/python/), which are implemented with Python. diff --git a/doc/examples/README.md b/doc/examples/README.md index b40b09ca..6bd0608d 100644 --- a/doc/examples/README.md +++ b/doc/examples/README.md @@ -7,38 +7,38 @@ ## REST API endpoints -| Endpoint | Summary Explanation | -| -------------------------------------- | ------------------------------- | -| [/examples/default-grammar](./example-01-grammar.md) | Convert ungrammatical statements into standard English. | -[/examples/default-summarize](./example-02-summarize.md) | Simplify text to a level appropriate for a second-grade student. | -| [/examples/default-parse-data](./example-03-parse-data.md) | Create tables from unstructured text. | -[/examples/default-emoji-translation](./example-04-emoji-translation.md) | Translate regular text into emoji text. | -| [/examples/default-time-complexity](./example-05-time-complexity.md) | Find the time complexity of a function written in Python. -[/examples/default-explain-code](./example-06-explain-code.md) | Explain a complicated piece of Python code. -| [/examples/default-keywords](./example-07-keywords.md) | Extract keywords from a block of text. | -[/examples/default-product-name-gen](./example-08-product-name-gen.md) | Generate product names from a description and seed words. | -| [/examples/default-fix-python-bugs](./example-09-fix-python-bugs.md) | Find and fix bugs in Python source code. | -[/examples/default-spreadsheet-gen](./example-10-spreadsheet-gen.md) | Create spreadsheets of various kinds of data. 
| -| [/examples/default-tweet-classifier](./example-11-tweet-classifier.md) | Detect sentiment in a tweet. | -[/examples/default-airport-codes](./example-12-airport-codes.md) | Extract airport codes from text. | -| [/examples/default-mood-color](./example-13-mood-color.md) | Turn a text description into a color. | -[/examples/default-vr-fitness](./example-14-vr-fitness.md) | Generate ideas for fitness promoting virtual reality games. | -| [/examples/default-marv-sarcastic-chat](./example-15-marv-sarcastic-chat.md) | Marv is a factual chatbot that is also sarcastic. | -[/examples/default-turn-by-turn-directions](./example-16-turn-by-turn-directions.md) | Convert natural language to turn-by-turn directions. | -| [/examples/default-interview-questions](./example-17-interview-questions.md) | Create job interview questions. | -[/examples/default-function-from-spec](./example-18-function-from-spec.md) | Create a Python function from a specification. | -| [/examples/default-code-improvement](./example-19-code-improvement.md) | Provide ideas for efficiency improvements to Python code. | -[/examples/default-single-page-website](./example-20-single-page-website.md) | Create a single page website. | -| [/examples/default-rap-battle](./example-21-rap-battle.md) | Generate a rap battle between two characters. | -[/examples/default-memo-writer](./example-22-memo-writer.md) | Generate a company memo based on provided points. | -| [/examples/default-emoji-chatbot](./example-23-emoji-chatbot.md) | Generate conversational replies using emojis only. | -[/examples/default-translation](./example-24-translation.md) | Translate natural language text. | -| [/examples/default-socratic-tutor](./example-25-socratic-tutor.md) | Generate responses as a Socratic tutor. | -[/examples/default-sql-translate](./example-26-sql-translate.md) | Convert natural language into SQL queries. | -| [/examples/default-meeting-notes-summarizer](./example-27-notes-summarizer.md) | Summarize meeting notes including overall discussion, action items, and future topics. | -[/examples/default-review-classifier](./example-28-review-classifier.md) | Classify user reviews based on a set of tags. | -| [/examples/default-pro-con-discusser](./example-29-pro-con-discusser.md) | Analyze the pros and cons of a given topic. | -[/examples/default-lesson-plan-writer](./example-30-lesson-plan-writer.md) | Generate a lesson plan for a specific topic. | +| Endpoint | Summary Explanation | +| ------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------- | +| [/examples/default-grammar](./example-01-grammar.md) | Convert ungrammatical statements into standard English. | +| [/examples/default-summarize](./example-02-summarize.md) | Simplify text to a level appropriate for a second-grade student. | +| [/examples/default-parse-data](./example-03-parse-data.md) | Create tables from unstructured text. | +| [/examples/default-emoji-translation](./example-04-emoji-translation.md) | Translate regular text into emoji text. | +| [/examples/default-time-complexity](./example-05-time-complexity.md) | Find the time complexity of a function written in Python. | +| [/examples/default-explain-code](./example-06-explain-code.md) | Explain a complicated piece of Python code. | +| [/examples/default-keywords](./example-07-keywords.md) | Extract keywords from a block of text. 
| +| [/examples/default-product-name-gen](./example-08-product-name-gen.md) | Generate product names from a description and seed words. | +| [/examples/default-fix-python-bugs](./example-09-fix-python-bugs.md) | Find and fix bugs in Python source code. | +| [/examples/default-spreadsheet-gen](./example-10-spreadsheet-gen.md) | Create spreadsheets of various kinds of data. | +| [/examples/default-tweet-classifier](./example-11-tweet-classifier.md) | Detect sentiment in a tweet. | +| [/examples/default-airport-codes](./example-12-airport-codes.md) | Extract airport codes from text. | +| [/examples/default-mood-color](./example-13-mood-color.md) | Turn a text description into a color. | +| [/examples/default-vr-fitness](./example-14-vr-fitness.md) | Generate ideas for fitness promoting virtual reality games. | +| [/examples/default-marv-sarcastic-chat](./example-15-marv-sarcastic-chat.md) | Marv is a factual chatbot that is also sarcastic. | +| [/examples/default-turn-by-turn-directions](./example-16-turn-by-turn-directions.md) | Convert natural language to turn-by-turn directions. | +| [/examples/default-interview-questions](./example-17-interview-questions.md) | Create job interview questions. | +| [/examples/default-function-from-spec](./example-18-function-from-spec.md) | Create a Python function from a specification. | +| [/examples/default-code-improvement](./example-19-code-improvement.md) | Provide ideas for efficiency improvements to Python code. | +| [/examples/default-single-page-website](./example-20-single-page-website.md) | Create a single page website. | +| [/examples/default-rap-battle](./example-21-rap-battle.md) | Generate a rap battle between two characters. | +| [/examples/default-memo-writer](./example-22-memo-writer.md) | Generate a company memo based on provided points. | +| [/examples/default-emoji-chatbot](./example-23-emoji-chatbot.md) | Generate conversational replies using emojis only. | +| [/examples/default-translation](./example-24-translation.md) | Translate natural language text. | +| [/examples/default-socratic-tutor](./example-25-socratic-tutor.md) | Generate responses as a Socratic tutor. | +| [/examples/default-sql-translate](./example-26-sql-translate.md) | Convert natural language into SQL queries. | +| [/examples/default-meeting-notes-summarizer](./example-27-notes-summarizer.md) | Summarize meeting notes including overall discussion, action items, and future topics. | +| [/examples/default-review-classifier](./example-28-review-classifier.md) | Classify user reviews based on a set of tags. | +| [/examples/default-pro-con-discusser](./example-29-pro-con-discusser.md) | Analyze the pros and cons of a given topic. | +| [/examples/default-lesson-plan-writer](./example-30-lesson-plan-writer.md) | Generate a lesson plan for a specific topic. 
| ## more diff --git a/doc/examples/example-01-grammar.md b/doc/examples/example-01-grammar.md index 593e1de5..dbcbdb5e 100644 --- a/doc/examples/example-01-grammar.md +++ b/doc/examples/example-01-grammar.md @@ -18,25 +18,25 @@ curl --location --request PUT 'https://YOUR-API-GATEWAY-URL.amazonaws.com/v1/exa ```json { - "id": "chatcmpl-7y5TvSLe4m1oU1cxPC5qX7fI4HCL9", - "object": "chat.completion", - "created": 1694555107, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "The correct way to phrase this sentence would be: \"She did not go to the market.\"" - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 14, - "completion_tokens": 19, - "total_tokens": 33 + "id": "chatcmpl-7y5TvSLe4m1oU1cxPC5qX7fI4HCL9", + "object": "chat.completion", + "created": 1694555107, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "The correct way to phrase this sentence would be: \"She did not go to the market.\"" + }, + "finish_reason": "stop" } + ], + "usage": { + "prompt_tokens": 14, + "completion_tokens": 19, + "total_tokens": 33 + } } ``` diff --git a/doc/examples/example-02-summarize.md b/doc/examples/example-02-summarize.md index d513c261..f14256a9 100644 --- a/doc/examples/example-02-summarize.md +++ b/doc/examples/example-02-summarize.md @@ -5,7 +5,6 @@ Simplify text to a level appropriate for a second-grade student. - See [https://platform.openai.com/examples/default-summarize](https://platform.openai.com/examples/default-summarize) - [Open in OpenAI Playground](https://platform.openai.com/playground/p/default-summarize) - ## Example Usage ```console @@ -21,32 +20,32 @@ curl --location --request PUT 'https://api.openai.lawrencemcdaniel.com/examples/ ```json { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yTflEBQpKxSpKN5MZtnvCLoSkoDM", - "object": "chat.completion", - "created": 1694648097, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "Jupiter is a really big and bright planet in our Solar System. It is the fifth planet from the Sun and it is the largest planet. It is called a gas giant because it is made mostly of gas. Even though it is smaller than the Sun, it is bigger than all the other planets put together. People have known about Jupiter for a very long time, even before they started writing things down. It is named after a god from ancient Rome. Jupiter is so bright that it can sometimes make shadows on Earth. It is usually the third-brightest thing we can see in the night sky, after the Moon and Venus." - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 167, - "completion_tokens": 128, - "total_tokens": 295 - } + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yTflEBQpKxSpKN5MZtnvCLoSkoDM", + "object": "chat.completion", + "created": 1694648097, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Jupiter is a really big and bright planet in our Solar System. It is the fifth planet from the Sun and it is the largest planet. It is called a gas giant because it is made mostly of gas. Even though it is smaller than the Sun, it is bigger than all the other planets put together. 
People have known about Jupiter for a very long time, even before they started writing things down. It is named after a god from ancient Rome. Jupiter is so bright that it can sometimes make shadows on Earth. It is usually the third-brightest thing we can see in the night sky, after the Moon and Venus." + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 167, + "completion_tokens": 128, + "total_tokens": 295 } + } } ``` diff --git a/doc/examples/example-03-parse-data.md b/doc/examples/example-03-parse-data.md index 81ed9b83..fe8da5cd 100644 --- a/doc/examples/example-03-parse-data.md +++ b/doc/examples/example-03-parse-data.md @@ -20,32 +20,32 @@ curl --location --request PUT 'https://api.openai.lawrencemcdaniel.com/examples/ ```json { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yTtGjKLP0wq1Vz4I5dRpLYbFvB7I", - "object": "chat.completion", - "created": 1694648934, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "Fruit,Color,Flavor\nNeoskizzles,Purple,Candy\nLoheckles,Grayish blue,Tart\nPounits,Bright green,Savory\nLoopnovas,Neon pink,Cotton candy\nGlowls,Pale orange,Sour and bitter" - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 161, - "completion_tokens": 59, - "total_tokens": 220 - } + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yTtGjKLP0wq1Vz4I5dRpLYbFvB7I", + "object": "chat.completion", + "created": 1694648934, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Fruit,Color,Flavor\nNeoskizzles,Purple,Candy\nLoheckles,Grayish blue,Tart\nPounits,Bright green,Savory\nLoopnovas,Neon pink,Cotton candy\nGlowls,Pale orange,Sour and bitter" + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 161, + "completion_tokens": 59, + "total_tokens": 220 } + } } ``` diff --git a/doc/examples/example-04-emoji-translation.md b/doc/examples/example-04-emoji-translation.md index 7a1ba10b..1d15c9ff 100644 --- a/doc/examples/example-04-emoji-translation.md +++ b/doc/examples/example-04-emoji-translation.md @@ -5,7 +5,6 @@ Translate regular text into emoji text. 
- See [https://platform.openai.com/examples/default-emoji-translation](https://platform.openai.com/examples/default-emoji-translation) - [Open in OpenAI Playground](https://platform.openai.com/playground/p/default-emoji-translation) - ## Example Usage ```console @@ -19,32 +18,32 @@ curl --location --request PUT 'https://api.openai.lawrencemcdaniel.com/examples/ ```json { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yTuWCwvgecRstWKGGv13q4bLvXyA", - "object": "chat.completion", - "created": 1694649012, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "🤖🧠💡🌟👍" - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 52, - "completion_tokens": 14, - "total_tokens": 66 - } + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yTuWCwvgecRstWKGGv13q4bLvXyA", + "object": "chat.completion", + "created": 1694649012, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "🤖🧠💡🌟👍" + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 52, + "completion_tokens": 14, + "total_tokens": 66 } + } } ``` diff --git a/doc/examples/example-05-time-complexity.md b/doc/examples/example-05-time-complexity.md index a0bfb0e9..90ab9d2f 100644 --- a/doc/examples/example-05-time-complexity.md +++ b/doc/examples/example-05-time-complexity.md @@ -5,7 +5,6 @@ Find the time complexity of a function. - See [https://platform.openai.com/examples/default-time-complexity](https://platform.openai.com/examples/default-time-complexity) - [Open in OpenAI Playground](https://platform.openai.com/playground/p/default-time-complexity) - ## Example Usage ```console @@ -21,32 +20,32 @@ curl --location --request PUT 'https://api.openai.lawrencemcdaniel.com/examples/ ```json { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yUXtcSDMvMWI0eldblWaFa3UGc4d", - "object": "chat.completion", - "created": 1694651453, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "The time complexity of the code is O(n * k), where n and k are the inputs to the function.\n\nThe outer loop runs 'n' times, and the inner loop runs 'k' times for each iteration of the outer loop. Therefore, the total number of iterations of the inner loop is n * k.\n\nInside the loops, we have a constant-time operation 'accum += i', which takes O(1) time.\n\nHence, the overall time complexity of the code is O(n * k)." - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 64, - "completion_tokens": 104, - "total_tokens": 168 - } + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yUXtcSDMvMWI0eldblWaFa3UGc4d", + "object": "chat.completion", + "created": 1694651453, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "The time complexity of the code is O(n * k), where n and k are the inputs to the function.\n\nThe outer loop runs 'n' times, and the inner loop runs 'k' times for each iteration of the outer loop. 
Therefore, the total number of iterations of the inner loop is n * k.\n\nInside the loops, we have a constant-time operation 'accum += i', which takes O(1) time.\n\nHence, the overall time complexity of the code is O(n * k)." + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 64, + "completion_tokens": 104, + "total_tokens": 168 } + } } ``` diff --git a/doc/examples/example-07-keywords.md b/doc/examples/example-07-keywords.md index 37cbae12..bd325eac 100644 --- a/doc/examples/example-07-keywords.md +++ b/doc/examples/example-07-keywords.md @@ -18,32 +18,32 @@ curl --location --request PUT 'https://api.openai.lawrencemcdaniel.com/examples/ ```json { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yTwFseUXth0Jju4SRdvs9T3r4W8Y", - "object": "chat.completion", - "created": 1694649119, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "- Black-on-black ware\n- 20th-century\n- 21st-century\n- pottery tradition\n- Puebloan Native American\n- ceramic artists\n- Northern New Mexico\n- reduction-fired blackware\n- pueblo artists\n- smooth surface\n- designs\n- selective burnishing\n- refractory slip\n- carving\n- incising designs\n- polishing\n- raised areas\n- generations\n- families\n- Kha'po Owingeh\n- P'ohwhóge Owingeh\n- pueblos\n- techniques\n- matriarch potters\n- contemporary artists\n- ancestors\n- works honoring\n- pottery" - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 200, - "completion_tokens": 136, - "total_tokens": 336 - } + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yTwFseUXth0Jju4SRdvs9T3r4W8Y", + "object": "chat.completion", + "created": 1694649119, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "- Black-on-black ware\n- 20th-century\n- 21st-century\n- pottery tradition\n- Puebloan Native American\n- ceramic artists\n- Northern New Mexico\n- reduction-fired blackware\n- pueblo artists\n- smooth surface\n- designs\n- selective burnishing\n- refractory slip\n- carving\n- incising designs\n- polishing\n- raised areas\n- generations\n- families\n- Kha'po Owingeh\n- P'ohwhóge Owingeh\n- pueblos\n- techniques\n- matriarch potters\n- contemporary artists\n- ancestors\n- works honoring\n- pottery" + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 200, + "completion_tokens": 136, + "total_tokens": 336 } + } } ``` diff --git a/doc/examples/example-08-product-name-gen.md b/doc/examples/example-08-product-name-gen.md index b57bf59e..7c45a46b 100644 --- a/doc/examples/example-08-product-name-gen.md +++ b/doc/examples/example-08-product-name-gen.md @@ -18,32 +18,32 @@ curl --location --request PUT 'https://api.openai.lawrencemcdaniel.com/examples/ ```json { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yTweQPrLD9ZTIIV1WU9EaaJmuxe0", - "object": "chat.completion", - "created": 1694649144, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "1. SpeedBlend\n2. FitShake\n3. CompactBlend\n4. QuickMix\n5. HealthyMix\n6. PowerShake\n7. MiniShaker\n8. SlimBlend\n9. SwiftShake\n10. 
NutriBlend" - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 50, - "completion_tokens": 54, - "total_tokens": 104 - } + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yTweQPrLD9ZTIIV1WU9EaaJmuxe0", + "object": "chat.completion", + "created": 1694649144, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "1. SpeedBlend\n2. FitShake\n3. CompactBlend\n4. QuickMix\n5. HealthyMix\n6. PowerShake\n7. MiniShaker\n8. SlimBlend\n9. SwiftShake\n10. NutriBlend" + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 50, + "completion_tokens": 54, + "total_tokens": 104 } + } } ``` diff --git a/doc/examples/example-09-fix-python-bugs.md b/doc/examples/example-09-fix-python-bugs.md index d366cf6d..d86fba2d 100644 --- a/doc/examples/example-09-fix-python-bugs.md +++ b/doc/examples/example-09-fix-python-bugs.md @@ -18,36 +18,36 @@ curl --location --request PUT 'https://api.openai.lawrencemcdaniel.com/examples/ ## Example results -```json +````json { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yUeCThVHPQPnTUTF7mrDrKp5ZmJ9", - "object": "chat.completion", - "created": 1694651844, - "model": "gpt-4-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "There are several problems with the given piece of Python code. Here is the corrected version:\n\n```Python\nimport random\na = random.randint(1,12)\nb = random.randint(1,12)\nfor i in range(10):\n question = 'What is {} x {}? '.format(a, b)\n answer = int(input(question))\n if answer == a*b:\n print ('Well done!')\n else:\n print('No.')\n```\n\nProblem explanations:\n\n1) In Python, module names are case-sensitive. So, \"import Random\" should be \"import random\".\n2) You need to convert user input (which will be a string) to int before comparing with other integers. Hence, `answer = int(input(question))`.\n3) In Python, \"=\" is an assignment operator, not a comparison operator. You should use \"==\" for comparison. So, `if answer == a*b:`\n4) The print statement needs to be a string, so 'Well done!' needs quotes around it.\n5) ‘a’ and ‘b’ are variable and need to change for each iteration in for loop. But in the given code, they are not changing. So, they should be inside the loop. \n6) Python does not support '+' operator for concatenating string with integer. Hence, we need to use `format()` function to format the string." - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 106, - "completion_tokens": 282, - "total_tokens": 388 - } + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yUeCThVHPQPnTUTF7mrDrKp5ZmJ9", + "object": "chat.completion", + "created": 1694651844, + "model": "gpt-4-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "There are several problems with the given piece of Python code. Here is the corrected version:\n\n```Python\nimport random\na = random.randint(1,12)\nb = random.randint(1,12)\nfor i in range(10):\n question = 'What is {} x {}? '.format(a, b)\n answer = int(input(question))\n if answer == a*b:\n print ('Well done!')\n else:\n print('No.')\n```\n\nProblem explanations:\n\n1) In Python, module names are case-sensitive. 
So, \"import Random\" should be \"import random\".\n2) You need to convert user input (which will be a string) to int before comparing with other integers. Hence, `answer = int(input(question))`.\n3) In Python, \"=\" is an assignment operator, not a comparison operator. You should use \"==\" for comparison. So, `if answer == a*b:`\n4) The print statement needs to be a string, so 'Well done!' needs quotes around it.\n5) ‘a’ and ‘b’ are variable and need to change for each iteration in for loop. But in the given code, they are not changing. So, they should be inside the loop. \n6) Python does not support '+' operator for concatenating string with integer. Hence, we need to use `format()` function to format the string." + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 106, + "completion_tokens": 282, + "total_tokens": 388 } + } } -``` +```` ## Official Documentation diff --git a/doc/examples/example-10-spreadsheet-gen.md b/doc/examples/example-10-spreadsheet-gen.md index d4344e73..6a82299b 100644 --- a/doc/examples/example-10-spreadsheet-gen.md +++ b/doc/examples/example-10-spreadsheet-gen.md @@ -18,32 +18,32 @@ curl --location --request PUT 'https://api.openai.lawrencemcdaniel.com/examples/ ```json { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yTx5LQgAulaMFHvI7bffH69wsRPe", - "object": "chat.completion", - "created": 1694649171, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "Movie,Year of Release\n2001: A Space Odyssey,1968\nBlade Runner,1982\nThe Matrix,1999\nStar Wars: Episode IV - A New Hope,1977\nE.T. the Extra-Terrestrial,1982\nThe Terminator,1984\nInception,2010\nBack to the Future,1985\nThe Fifth Element,1997\nWar of the Worlds,2005\nInterstellar,2014\nThe Martian,2015\nAvatar,2009\nAlien,1979\nJurassic Park,1993" - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 28, - "completion_tokens": 118, - "total_tokens": 146 - } + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yTx5LQgAulaMFHvI7bffH69wsRPe", + "object": "chat.completion", + "created": 1694649171, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Movie,Year of Release\n2001: A Space Odyssey,1968\nBlade Runner,1982\nThe Matrix,1999\nStar Wars: Episode IV - A New Hope,1977\nE.T. 
the Extra-Terrestrial,1982\nThe Terminator,1984\nInception,2010\nBack to the Future,1985\nThe Fifth Element,1997\nWar of the Worlds,2005\nInterstellar,2014\nThe Martian,2015\nAvatar,2009\nAlien,1979\nJurassic Park,1993" + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 28, + "completion_tokens": 118, + "total_tokens": 146 } + } } ``` diff --git a/doc/examples/example-11-tweet-classifier.md b/doc/examples/example-11-tweet-classifier.md index 4bb07b90..e00f42c1 100644 --- a/doc/examples/example-11-tweet-classifier.md +++ b/doc/examples/example-11-tweet-classifier.md @@ -18,32 +18,32 @@ curl --location --request PUT 'https://api.openai.lawrencemcdaniel.com/examples/ ```json { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yTxXj4nCx4xpjL9fWACgn5mwgEHw", - "object": "chat.completion", - "created": 1694649199, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "positive" - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 42, - "completion_tokens": 1, - "total_tokens": 43 - } + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yTxXj4nCx4xpjL9fWACgn5mwgEHw", + "object": "chat.completion", + "created": 1694649199, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "positive" + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 42, + "completion_tokens": 1, + "total_tokens": 43 } + } } ``` diff --git a/doc/examples/example-12-airport-codes.md b/doc/examples/example-12-airport-codes.md index 1c4abb50..67473448 100644 --- a/doc/examples/example-12-airport-codes.md +++ b/doc/examples/example-12-airport-codes.md @@ -18,32 +18,32 @@ curl --location --request PUT 'https://api.openai.lawrencemcdaniel.com/examples/ ```json { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yTxxjFtIQ28fRyUgcDejHq4KytP8", - "object": "chat.completion", - "created": 1694649225, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "The airport code for Orlando is MCO, and the airport code for Boston is BOS." - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 39, - "completion_tokens": 19, - "total_tokens": 58 - } + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yTxxjFtIQ28fRyUgcDejHq4KytP8", + "object": "chat.completion", + "created": 1694649225, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "The airport code for Orlando is MCO, and the airport code for Boston is BOS." 
+ }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 39, + "completion_tokens": 19, + "total_tokens": 58 } + } } ``` diff --git a/doc/examples/example-13-mood-color.md b/doc/examples/example-13-mood-color.md index d66a0e4e..510769a8 100644 --- a/doc/examples/example-13-mood-color.md +++ b/doc/examples/example-13-mood-color.md @@ -18,32 +18,32 @@ curl --location --request PUT 'https://api.openai.lawrencemcdaniel.com/examples/ ```json { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yUfR6uI3iC12pDzpJQUWCDujVDMs", - "object": "chat.completion", - "created": 1694651921, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "{\n \"css_code\": \"background-color: #24556a;\"\n}" - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 57, - "completion_tokens": 16, - "total_tokens": 73 - } + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yUfR6uI3iC12pDzpJQUWCDujVDMs", + "object": "chat.completion", + "created": 1694651921, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "{\n \"css_code\": \"background-color: #24556a;\"\n}" + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 57, + "completion_tokens": 16, + "total_tokens": 73 } + } } ``` diff --git a/doc/examples/example-14-vr-fitness.md b/doc/examples/example-14-vr-fitness.md index 26242725..dfcf64e0 100644 --- a/doc/examples/example-14-vr-fitness.md +++ b/doc/examples/example-14-vr-fitness.md @@ -18,32 +18,32 @@ curl --location --request PUT 'https://api.openai.lawrencemcdaniel.com/examples/ ```json { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yU1gfYWXwpPfIqqAMqJlf4WvfsWQ", - "object": "chat.completion", - "created": 1694649456, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "1. Virtual fitness classes: Users can join live or pre-recorded fitness classes in a virtual reality setting, providing a more immersive and engaging workout experience. They can interact with trainers and other participants, follow along with the exercises, and receive real-time feedback or stats on their performance. \n\n2. Virtual gaming workouts: Create VR games specifically designed to provide a fun and challenging workout. Users could play games that require physical movements like boxing, dancing, or even virtual obstacle courses, effectively blending entertainment and exercise. \n\n3. Virtual personal trainers: Users can have virtual personal trainers guide them through workouts in a virtual reality space, giving them real-time instructions, motivation, and personalized feedback based on their goals and capabilities. The virtual trainers can also track progress, set goals, and provide exercise variations for a comprehensive fitness experience. \n\n4. Virtual reality fitness simulations: Users can experience and explore various fitness activities or outdoor locations virtually. For example, they could go hiking in the mountains, run on a virtual beach, or cycle through a scenic virtual landscape, all while getting the physical benefits of the exercise. \n\n5. 
VR home gym: Create a virtual reality environment that mimics a fully-equipped gym in a user's home. Users can interact with virtual exercise equipment, access workout programs, and receive guidance or workout recommendations from virtual trainers. This setup can provide a space-saving and cost-effective solution for those with limited resources or space. \n\n6. Interactive fitness challenges: Organize virtual reality fitness competitions or challenges, allowing users to compete against each other in various physical activities and workouts. This could involve virtual races, obstacle courses, or challenges like completing a high-intensity workout routine within a set time. Social integration and leaderboards can make these challenges more engaging and competitive. \n\n7. Physical therapy and rehabilitation: Use virtual reality to create immersive environments that aid in physical therapy and rehabilitation. Users can engage in exercises and movements under the guidance of virtual therapists, making the recovery process more engaging and enjoyable. \n\n8. Meditative and mindful VR experiences: Design virtual reality experiences that combine fitness with mindfulness and meditation. Users can practice yoga or other relaxation techniques in tranquil virtual environments, providing them with both physical exercise and mental relaxation benefits. \n\n9. Virtual personal fitness assistants: Develop virtual reality avatars or chatbots that act as personal fitness assistants, offering guidance, motivation, and support during workouts. They can provide workout suggestions, track progress, offer exercise modifications, and even evaluate form to ensure users are performing exercises correctly. \n\n10. VR fitness social platforms: Create virtual reality platforms where users can meet and interact with others who share the same fitness goals. They can join group workouts, participate in challenges, or share and exchange fitness tips and experiences, making the fitness journey more social and community-oriented." - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 20, - "completion_tokens": 560, - "total_tokens": 580 - } + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yU1gfYWXwpPfIqqAMqJlf4WvfsWQ", + "object": "chat.completion", + "created": 1694649456, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "1. Virtual fitness classes: Users can join live or pre-recorded fitness classes in a virtual reality setting, providing a more immersive and engaging workout experience. They can interact with trainers and other participants, follow along with the exercises, and receive real-time feedback or stats on their performance. \n\n2. Virtual gaming workouts: Create VR games specifically designed to provide a fun and challenging workout. Users could play games that require physical movements like boxing, dancing, or even virtual obstacle courses, effectively blending entertainment and exercise. \n\n3. Virtual personal trainers: Users can have virtual personal trainers guide them through workouts in a virtual reality space, giving them real-time instructions, motivation, and personalized feedback based on their goals and capabilities. The virtual trainers can also track progress, set goals, and provide exercise variations for a comprehensive fitness experience. \n\n4. 
Virtual reality fitness simulations: Users can experience and explore various fitness activities or outdoor locations virtually. For example, they could go hiking in the mountains, run on a virtual beach, or cycle through a scenic virtual landscape, all while getting the physical benefits of the exercise. \n\n5. VR home gym: Create a virtual reality environment that mimics a fully-equipped gym in a user's home. Users can interact with virtual exercise equipment, access workout programs, and receive guidance or workout recommendations from virtual trainers. This setup can provide a space-saving and cost-effective solution for those with limited resources or space. \n\n6. Interactive fitness challenges: Organize virtual reality fitness competitions or challenges, allowing users to compete against each other in various physical activities and workouts. This could involve virtual races, obstacle courses, or challenges like completing a high-intensity workout routine within a set time. Social integration and leaderboards can make these challenges more engaging and competitive. \n\n7. Physical therapy and rehabilitation: Use virtual reality to create immersive environments that aid in physical therapy and rehabilitation. Users can engage in exercises and movements under the guidance of virtual therapists, making the recovery process more engaging and enjoyable. \n\n8. Meditative and mindful VR experiences: Design virtual reality experiences that combine fitness with mindfulness and meditation. Users can practice yoga or other relaxation techniques in tranquil virtual environments, providing them with both physical exercise and mental relaxation benefits. \n\n9. Virtual personal fitness assistants: Develop virtual reality avatars or chatbots that act as personal fitness assistants, offering guidance, motivation, and support during workouts. They can provide workout suggestions, track progress, offer exercise modifications, and even evaluate form to ensure users are performing exercises correctly. \n\n10. VR fitness social platforms: Create virtual reality platforms where users can meet and interact with others who share the same fitness goals. They can join group workouts, participate in challenges, or share and exchange fitness tips and experiences, making the fitness journey more social and community-oriented." + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 20, + "completion_tokens": 560, + "total_tokens": 580 } + } } ``` diff --git a/doc/examples/example-15-marv-sarcastic-chat.md b/doc/examples/example-15-marv-sarcastic-chat.md index 4597c95e..90b474dd 100644 --- a/doc/examples/example-15-marv-sarcastic-chat.md +++ b/doc/examples/example-15-marv-sarcastic-chat.md @@ -18,32 +18,32 @@ curl --location --request PUT 'https://api.openai.lawrencemcdaniel.com/examples/ ```json { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yU2U5PzOyaOuOXMuz0a0rKYBn5C9", - "object": "chat.completion", - "created": 1694649506, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "Oh, let me grab my calculator and do some rocket science for you. Just kidding! It's 2.20462 pounds in a kilogram. Now go lift some weights or something." 
- }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 37, - "completion_tokens": 39, - "total_tokens": 76 - } + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yU2U5PzOyaOuOXMuz0a0rKYBn5C9", + "object": "chat.completion", + "created": 1694649506, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Oh, let me grab my calculator and do some rocket science for you. Just kidding! It's 2.20462 pounds in a kilogram. Now go lift some weights or something." + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 37, + "completion_tokens": 39, + "total_tokens": 76 } + } } ``` diff --git a/doc/examples/example-16-turn-by-turn-directions.md b/doc/examples/example-16-turn-by-turn-directions.md index 8ae82eed..3e19c982 100644 --- a/doc/examples/example-16-turn-by-turn-directions.md +++ b/doc/examples/example-16-turn-by-turn-directions.md @@ -18,32 +18,32 @@ curl --location --request PUT 'https://api.openai.lawrencemcdaniel.com/examples/ ```json { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yU31Yp12omgjVmZfM4jcyxMbidDp", - "object": "chat.completion", - "created": 1694649539, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "1. Start by going south on 95.\n2. Continue on 95 until you reach Sunrise Boulevard.\n3. Take Sunrise Boulevard and go east.\n4. Follow Sunrise Boulevard until you reach US 1.\n5. Turn left onto US 1 and head south.\n6. Keep driving on US 1 for several miles.\n7. Look for Tom Jenkins BBQ on your left-hand side." - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 72, - "completion_tokens": 80, - "total_tokens": 152 - } + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yU31Yp12omgjVmZfM4jcyxMbidDp", + "object": "chat.completion", + "created": 1694649539, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "1. Start by going south on 95.\n2. Continue on 95 until you reach Sunrise Boulevard.\n3. Take Sunrise Boulevard and go east.\n4. Follow Sunrise Boulevard until you reach US 1.\n5. Turn left onto US 1 and head south.\n6. Keep driving on US 1 for several miles.\n7. Look for Tom Jenkins BBQ on your left-hand side." + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 72, + "completion_tokens": 80, + "total_tokens": 152 } + } } ``` diff --git a/doc/examples/example-17-interview-questions.md b/doc/examples/example-17-interview-questions.md index 8a8eb7c8..a4b86fc6 100644 --- a/doc/examples/example-17-interview-questions.md +++ b/doc/examples/example-17-interview-questions.md @@ -18,32 +18,32 @@ curl --location --request PUT 'https://api.openai.lawrencemcdaniel.com/examples/ ```json { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yU3L73zU6uRTO0JyDZvl3tNpuE5u", - "object": "chat.completion", - "created": 1694649559, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "1. When did your interest in science fiction begin and what inspired you to start writing in this genre?\n2. 
What do you think makes science fiction such a popular and enduring genre?\n3. Can you tell us about your writing process? How do you develop ideas and create unique and believable worlds for your stories?\n4. Many science fiction authors explore themes of technology and its impact on society. What are some of the ethical or moral dilemmas you like to explore through your writing?\n5. In your opinion, how does science fiction reflect or comment on our current societal issues?\n6. Science fiction often involves envisioning possible futures. How do you balance scientific plausibility with creating an engaging story for readers?\n7. Can you share some of the challenges you have faced as a science fiction author and how you have overcome them in your writing journey?\n8. Science fiction has often been regarded as a genre that reflects our hopes and fears for the future. Can you discuss how your work has addressed some of these hopes and fears?" - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 27, - "completion_tokens": 207, - "total_tokens": 234 - } + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yU3L73zU6uRTO0JyDZvl3tNpuE5u", + "object": "chat.completion", + "created": 1694649559, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "1. When did your interest in science fiction begin and what inspired you to start writing in this genre?\n2. What do you think makes science fiction such a popular and enduring genre?\n3. Can you tell us about your writing process? How do you develop ideas and create unique and believable worlds for your stories?\n4. Many science fiction authors explore themes of technology and its impact on society. What are some of the ethical or moral dilemmas you like to explore through your writing?\n5. In your opinion, how does science fiction reflect or comment on our current societal issues?\n6. Science fiction often involves envisioning possible futures. How do you balance scientific plausibility with creating an engaging story for readers?\n7. Can you share some of the challenges you have faced as a science fiction author and how you have overcome them in your writing journey?\n8. Science fiction has often been regarded as a genre that reflects our hopes and fears for the future. Can you discuss how your work has addressed some of these hopes and fears?" + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 27, + "completion_tokens": 207, + "total_tokens": 234 } + } } ``` diff --git a/doc/examples/example-18-function-from-spec.md b/doc/examples/example-18-function-from-spec.md index b8d4a639..9f5bd6ee 100644 --- a/doc/examples/example-18-function-from-spec.md +++ b/doc/examples/example-18-function-from-spec.md @@ -16,36 +16,36 @@ curl --location --request PUT 'https://api.openai.lawrencemcdaniel.com/examples/ ## Example results -```json +````json { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yU3zxScvzbhF2jINgJCbSVKAByut", - "object": "chat.completion", - "created": 1694649599, - "model": "gpt-4-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "To implement this, we are going to use the OpenCV and Numpy libraries. 
Here is a possible implementation of the function you asked for:\n\n```python\nimport cv2\nimport numpy as np\n\ndef crop_dark_areas(image_path, threshold):\n # Load the image into a numpy array\n img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)\n\n # Compute the mean value for each row and column\n avg_rows = np.average(img, axis=1)\n avg_cols = np.average(img, axis=0)\n\n # Find the indices of rows and columns to keep\n rows_to_keep = np.where(avg_rows > threshold)[0]\n cols_to_keep = np.where(avg_cols > threshold)[0]\n\n # Crop the rows and columns\n img_cropped = img[np.min(rows_to_keep):np.max(rows_to_keep)+1, np.min(cols_to_keep):np.max(cols_to_keep)+1]\n\n return img_cropped\n```\n\nThis function works by first loading the image into a Numpy array with grayscale values. Then, it computes the mean value of every row and every column in the image, and decides whether to keep a row or a column based on whether its mean value is greater than the given threshold.\n\nPlease make sure to have OpenCV and numpy installed in your python environment. If not installed, please use the following commands to install:\n- OpenCV: `pip install opencv-python`\n- Numpy: `pip install numpy`" - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 72, - "completion_tokens": 306, - "total_tokens": 378 - } + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yU3zxScvzbhF2jINgJCbSVKAByut", + "object": "chat.completion", + "created": 1694649599, + "model": "gpt-4-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "To implement this, we are going to use the OpenCV and Numpy libraries. Here is a possible implementation of the function you asked for:\n\n```python\nimport cv2\nimport numpy as np\n\ndef crop_dark_areas(image_path, threshold):\n # Load the image into a numpy array\n img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)\n\n # Compute the mean value for each row and column\n avg_rows = np.average(img, axis=1)\n avg_cols = np.average(img, axis=0)\n\n # Find the indices of rows and columns to keep\n rows_to_keep = np.where(avg_rows > threshold)[0]\n cols_to_keep = np.where(avg_cols > threshold)[0]\n\n # Crop the rows and columns\n img_cropped = img[np.min(rows_to_keep):np.max(rows_to_keep)+1, np.min(cols_to_keep):np.max(cols_to_keep)+1]\n\n return img_cropped\n```\n\nThis function works by first loading the image into a Numpy array with grayscale values. Then, it computes the mean value of every row and every column in the image, and decides whether to keep a row or a column based on whether its mean value is greater than the given threshold.\n\nPlease make sure to have OpenCV and numpy installed in your python environment. 
If not installed, please use the following commands to install:\n- OpenCV: `pip install opencv-python`\n- Numpy: `pip install numpy`" + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 72, + "completion_tokens": 306, + "total_tokens": 378 } + } } -``` +```` ## Official Documentation diff --git a/doc/examples/example-19-code-improvement.md b/doc/examples/example-19-code-improvement.md index fd889a99..ea63d2c6 100644 --- a/doc/examples/example-19-code-improvement.md +++ b/doc/examples/example-19-code-improvement.md @@ -18,36 +18,36 @@ curl --location --request PUT 'https://api.openai.lawrencemcdaniel.com/examples/ ## Example results -```json +````json { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yUk2FIKLOQebVHB4tUhQ5ggiee6b", - "object": "chat.completion", - "created": 1694652206, - "model": "gpt-4-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "1. Leverage `set` for O(1) lookups: Rather than performing a linear scan on the list for each element, which leads to O(n^2) complexity, we can use a set to store the numbers as we process them. Every time when we look at a new number, we can check if there's a number previously stored in the set such that the two numbers sum up to `k`. This approach has O(n) time complexity.\n\n2. Avoid unnecessary typecasting: In this case, Python type hints were used correctly and there's no unnecessary typecasting, but it's worth mentioning as a general efficiency improvement tip. Unnecessary typecasting can be a costly operation, it's better to avoid it where possible.\n\nHere's the proposed improvement:\n\n```python\nfrom typing import List\n\ndef has_sum_k(nums: List[int], k: int) -> bool:\n seen = set()\n for num in nums:\n if k - num in seen:\n return True\n seen.add(num)\n return False\n```\nThis version of the code accomplishes the same goal with better time complexity: it reduces from O(n^2) to O(n). The space complexity is O(n), as in worst case scenario the set will store all unique elements." - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 97, - "completion_tokens": 262, - "total_tokens": 359 - } + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yUk2FIKLOQebVHB4tUhQ5ggiee6b", + "object": "chat.completion", + "created": 1694652206, + "model": "gpt-4-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "1. Leverage `set` for O(1) lookups: Rather than performing a linear scan on the list for each element, which leads to O(n^2) complexity, we can use a set to store the numbers as we process them. Every time when we look at a new number, we can check if there's a number previously stored in the set such that the two numbers sum up to `k`. This approach has O(n) time complexity.\n\n2. Avoid unnecessary typecasting: In this case, Python type hints were used correctly and there's no unnecessary typecasting, but it's worth mentioning as a general efficiency improvement tip. 
Unnecessary typecasting can be a costly operation, it's better to avoid it where possible.\n\nHere's the proposed improvement:\n\n```python\nfrom typing import List\n\ndef has_sum_k(nums: List[int], k: int) -> bool:\n seen = set()\n for num in nums:\n if k - num in seen:\n return True\n seen.add(num)\n return False\n```\nThis version of the code accomplishes the same goal with better time complexity: it reduces from O(n^2) to O(n). The space complexity is O(n), as in worst case scenario the set will store all unique elements." + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 97, + "completion_tokens": 262, + "total_tokens": 359 } + } } -``` +```` ## Official Documentation diff --git a/doc/examples/example-20-single-page-website.md b/doc/examples/example-20-single-page-website.md index b9e653ab..dfc35c13 100644 --- a/doc/examples/example-20-single-page-website.md +++ b/doc/examples/example-20-single-page-website.md @@ -16,38 +16,38 @@ curl --location --request PUT 'https://api.openai.lawrencemcdaniel.com/examples/ ## Example results -```json +````json { - "retval": { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yUl2CBzp93yjsudcr3OffoTI3Th4", - "object": "chat.completion", - "created": 1694652268, - "model": "gpt-4-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "Here's a simple HTML file that includes a dropdown menu with JavaScript event handlers and a toggleable information box.\n\n```html\n\n\n\n \n\n\n\n

JavaScript Drop-down Menu

\n\n \n \n\n
\n\n \n\n\n\n```\nThis page contains a dropdown menu, which has an `onchange` event handler attached. Whenever the selected option is changed, it calls the `displayInfo` function, which changes the inner HTML of the `#infoBox` div to display the selected fact.\n\nThe CSS in the `` part of this HTML document styles the information box, hiding it when it has no content, and providing it with a border, some padding, and a top margin when it displays some information." - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 46, - "completion_tokens": 413, - "total_tokens": 459 - } + "retval": { + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yUl2CBzp93yjsudcr3OffoTI3Th4", + "object": "chat.completion", + "created": 1694652268, + "model": "gpt-4-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Here's a simple HTML file that includes a dropdown menu with JavaScript event handlers and a toggleable information box.\n\n```html\n\n\n\n \n\n\n\n

JavaScript Drop-down Menu

\n\n \n \n\n
\n\n \n\n\n\n```\nThis page contains a dropdown menu, which has an `onchange` event handler attached. Whenever the selected option is changed, it calls the `displayInfo` function, which changes the inner HTML of the `#infoBox` div to display the selected fact.\n\nThe CSS in the `` part of this HTML document styles the information box, hiding it when it has no content, and providing it with a border, some padding, and a top margin when it displays some information." + }, + "finish_reason": "stop" } + ], + "usage": { + "prompt_tokens": 46, + "completion_tokens": 413, + "total_tokens": 459 + } } + } } -``` +```` ## Official Documentation diff --git a/doc/examples/example-21-rap-battle.md b/doc/examples/example-21-rap-battle.md index 5f16af38..294ec9d8 100644 --- a/doc/examples/example-21-rap-battle.md +++ b/doc/examples/example-21-rap-battle.md @@ -18,36 +18,35 @@ curl --location --request PUT 'https://api.openai.lawrencemcdaniel.com/examples/ ```json { - "retval": { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yUlc4YOZIA8z2QWUimt3UAs48ELf", - "object": "chat.completion", - "created": 1694652304, - "model": "gpt-4-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "Verse 1 - Alan Turing:\n\nTuring's the name, I'm the game changer,\nAI's genesis, its potential engager.\nBroke the Enigma, won the world-wide war,\nDid in years what would've taken four-score.\n\nClaude Shannon, yeah, I heard of you,\nYour information theory is cool and new,\nBut without my machine, where would you be?\nStill fumbling with uncertainty?\n\nVerse 2 - Claude Shannon:\n\nClaude Shannon here, master of communication,\nLaid the foundation for the tech innovation.\nFrom entropy to redundancy, I made it clear,\nWithout me, you wouldn’t be here.\n\nTuring, you're a scholar, and your creation's legit,\nBut you needed my circuits to make it all fit.\nWithout my binary, your machine would be moot,\nI'm the rhythm to your computability flute.\n\nVerse 3 - Alan Turing:\n\nI’m the father of computers, gave silicon life,\nYour theories are nice, but I cut through like a knife.\nI built from scratch, made thought transmissible,\nMy Turing Test made the impossible feasible.\n\nYes, your binary helped, won't deny it's true,\nBut remember, the machine's architecture is due.\nTo me, your contribution's just a part of,\nThe grand machine I brought forth as an art.\n\nVerse 4 - Claude Shannon:\n\nYou created AI, and I’ll give you your due,\nBut my digital circuits gave its life it's true hue.\nYes, your creation’s great, made the world look bright,\nBut my information theory gave it flight.\n\nSure, your tests were nice, and they made a splash,\nBut my foundations turned that splash into cash.\nSo here’s to us, Turing, it’s no contest,\nWhen it comes to pioneers, we’re simply the best." 
- }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 22, - "completion_tokens": 366, - "total_tokens": 388 - } + "retval": { + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yUlc4YOZIA8z2QWUimt3UAs48ELf", + "object": "chat.completion", + "created": 1694652304, + "model": "gpt-4-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Verse 1 - Alan Turing:\n\nTuring's the name, I'm the game changer,\nAI's genesis, its potential engager.\nBroke the Enigma, won the world-wide war,\nDid in years what would've taken four-score.\n\nClaude Shannon, yeah, I heard of you,\nYour information theory is cool and new,\nBut without my machine, where would you be?\nStill fumbling with uncertainty?\n\nVerse 2 - Claude Shannon:\n\nClaude Shannon here, master of communication,\nLaid the foundation for the tech innovation.\nFrom entropy to redundancy, I made it clear,\nWithout me, you wouldn’t be here.\n\nTuring, you're a scholar, and your creation's legit,\nBut you needed my circuits to make it all fit.\nWithout my binary, your machine would be moot,\nI'm the rhythm to your computability flute.\n\nVerse 3 - Alan Turing:\n\nI’m the father of computers, gave silicon life,\nYour theories are nice, but I cut through like a knife.\nI built from scratch, made thought transmissible,\nMy Turing Test made the impossible feasible.\n\nYes, your binary helped, won't deny it's true,\nBut remember, the machine's architecture is due.\nTo me, your contribution's just a part of,\nThe grand machine I brought forth as an art.\n\nVerse 4 - Claude Shannon:\n\nYou created AI, and I’ll give you your due,\nBut my digital circuits gave its life it's true hue.\nYes, your creation’s great, made the world look bright,\nBut my information theory gave it flight.\n\nSure, your tests were nice, and they made a splash,\nBut my foundations turned that splash into cash.\nSo here’s to us, Turing, it’s no contest,\nWhen it comes to pioneers, we’re simply the best." + }, + "finish_reason": "stop" } + ], + "usage": { + "prompt_tokens": 22, + "completion_tokens": 366, + "total_tokens": 388 + } } + } } - ``` ## Official Documentation diff --git a/doc/examples/example-24-translation.md b/doc/examples/example-24-translation.md index 360de14c..f9aa8125 100644 --- a/doc/examples/example-24-translation.md +++ b/doc/examples/example-24-translation.md @@ -18,32 +18,32 @@ curl --location --request PUT 'https://api.openai.lawrencemcdaniel.com/examples/ ```json { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yUAN58A8B4AB4ssDK4UcnlYN19Tz", - "object": "chat.completion", - "created": 1694649995, - "model": "gpt-4-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "Mon nom est Jane. Quel est le tien?" - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 40, - "completion_tokens": 12, - "total_tokens": 52 - } + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yUAN58A8B4AB4ssDK4UcnlYN19Tz", + "object": "chat.completion", + "created": 1694649995, + "model": "gpt-4-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Mon nom est Jane. Quel est le tien?" 
+ }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 40, + "completion_tokens": 12, + "total_tokens": 52 } + } } ``` diff --git a/doc/examples/example-29-pro-con-discusser.md b/doc/examples/example-29-pro-con-discusser.md index a275c426..4a5ab430 100644 --- a/doc/examples/example-29-pro-con-discusser.md +++ b/doc/examples/example-29-pro-con-discusser.md @@ -18,32 +18,32 @@ curl --location --request PUT 'https://api.openai.lawrencemcdaniel.com/examples/ ```json { - "isBase64Encoded": false, - "statusCode": 200, - "headers": { - "Content-Type": "application/json" - }, - "body": { - "id": "chatcmpl-7yUJSDimzpH4zH9CxJAqjMhnlYU6T", - "object": "chat.completion", - "created": 1694650558, - "model": "gpt-4-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "Remote Work: \n\nPros:\n1. Flexible hours: Remote workers often have the flexibility to set their own schedules, allowing them to balance work and personal life more effectively.\n2. No commute: This saves time and money on transportation and can increase work-life balance.\n3. Custom work environment: Employees can set up their work environment to fit personal preferences.\n4. Global job opportunities: Allows employees to work for any company regardless of the location.\n\nCons:\n1. Isolation: There can be a lack of human interaction, potentially leading to feelings of isolation or loneliness.\n2. Difficulty separating work from home: Some remote workers struggle to distinguish between their work and personal life, potentially leading to burnout.\n3. Less team building: There are fewer opportunities for team-building activities and face-to-face interactions.\n4. Varying productivity: Some people can find it difficult to stay focused without the structure of a traditional office environment.\n\nOffice Work:\n\nPros:\n1. Clear work-life balance: Having a separate place for work can make it easier to disconnect at the end of the day.\n2. Opportunities for collaboration: Physical proximity to coworkers can often make collaboration, brainstorming, and problem-solving easier.\n3. Direct management: Immediate access to supervision or support from managers or colleagues.\n4. Structured environment: A set of standard work hours and traditional professional setup that keeps the focus on the tasks.\n\nCons:\n1. Commuting: Travelling to and from work can be a significant time commitment and can add to daily stress.\n2. Rigidity: The structure of a traditional 9-5 office job can make it harder for employees to balance personal responsibilities.\n3. Limited geographic scope: Office jobs typically require living close to the office location.\n4. Distractions: From impromptu meetings to a noisy coworker, office jobs come with their own set of potential distractions." - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 24, - "completion_tokens": 378, - "total_tokens": 402 - } + "isBase64Encoded": false, + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "id": "chatcmpl-7yUJSDimzpH4zH9CxJAqjMhnlYU6T", + "object": "chat.completion", + "created": 1694650558, + "model": "gpt-4-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Remote Work: \n\nPros:\n1. Flexible hours: Remote workers often have the flexibility to set their own schedules, allowing them to balance work and personal life more effectively.\n2. No commute: This saves time and money on transportation and can increase work-life balance.\n3. 
Custom work environment: Employees can set up their work environment to fit personal preferences.\n4. Global job opportunities: Allows employees to work for any company regardless of the location.\n\nCons:\n1. Isolation: There can be a lack of human interaction, potentially leading to feelings of isolation or loneliness.\n2. Difficulty separating work from home: Some remote workers struggle to distinguish between their work and personal life, potentially leading to burnout.\n3. Less team building: There are fewer opportunities for team-building activities and face-to-face interactions.\n4. Varying productivity: Some people can find it difficult to stay focused without the structure of a traditional office environment.\n\nOffice Work:\n\nPros:\n1. Clear work-life balance: Having a separate place for work can make it easier to disconnect at the end of the day.\n2. Opportunities for collaboration: Physical proximity to coworkers can often make collaboration, brainstorming, and problem-solving easier.\n3. Direct management: Immediate access to supervision or support from managers or colleagues.\n4. Structured environment: A set of standard work hours and traditional professional setup that keeps the focus on the tasks.\n\nCons:\n1. Commuting: Travelling to and from work can be a significant time commitment and can add to daily stress.\n2. Rigidity: The structure of a traditional 9-5 office job can make it harder for employees to balance personal responsibilities.\n3. Limited geographic scope: Office jobs typically require living close to the office location.\n4. Distractions: From impromptu meetings to a noisy coworker, office jobs come with their own set of potential distractions." + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 24, + "completion_tokens": 378, + "total_tokens": 402 } + } } ``` diff --git a/doc/lambda_openai_text.md b/doc/lambda_openai_text.md index 9c2b726b..5cd3e4c2 100644 --- a/doc/lambda_openai_text.md +++ b/doc/lambda_openai_text.md @@ -2,13 +2,13 @@ ## Environment Variables -| Variable | Example value | -| --------------------------------------- | ------------------------------- | -| DEBUG_MODE | true | -| OPENAI_API_KEY | sk-7DoB4YOUR-OPENAI-API-KEY | -| OPENAI_API_ORGANIZATION | org-YJz82abcdefthijklmnophcy | -| OPENAI_ENDPOINT_IMAGE_N | 4 | -| OPENAI_ENDPOINT_IMAGE_SIZE | 1024x768 | +| Variable | Example value | +| -------------------------- | ---------------------------- | +| DEBUG_MODE | true | +| OPENAI_API_KEY | sk-7DoB4YOUR-OPENAI-API-KEY | +| OPENAI_API_ORGANIZATION | org-YJz82abcdefthijklmnophcy | +| OPENAI_ENDPOINT_IMAGE_N | 4 | +| OPENAI_ENDPOINT_IMAGE_SIZE | 1024x768 | ## Logging @@ -18,22 +18,22 @@ Generated when DEBUG_MODEL=true ```json { - "environment": { - "os": "posix", - "system": "Linux", - "release": "5.10.184-194.730.amzn2.x86_64", - "openai": "0.28.0", - "openai_app_info": null, - "openai_end_points": [ - "Embedding", - "ChatCompletion", - "Moderation", - "Image", - "Audio", - "Model" - ], - "DEBUG_MODE": true - } + "environment": { + "os": "posix", + "system": "Linux", + "release": "5.10.184-194.730.amzn2.x86_64", + "openai": "0.28.0", + "openai_app_info": null, + "openai_end_points": [ + "Embedding", + "ChatCompletion", + "Moderation", + "Image", + "Audio", + "Model" + ], + "DEBUG_MODE": true + } } ``` @@ -43,20 +43,20 @@ Generated when DEBUG_MODEL=true ```json { - "event": { - "model": "gpt-3.5-turbo", - "end_point": "ChatCompletion", - "messages": [ - { - "role": "system", - "content": "You will be provided with 
statements, and your task is to convert them to standard English." - }, - { - "role": "user", - "content": "imma bust you upside the head" - } - ] - } + "event": { + "model": "gpt-3.5-turbo", + "end_point": "ChatCompletion", + "messages": [ + { + "role": "system", + "content": "You will be provided with statements, and your task is to convert them to standard English." + }, + { + "role": "user", + "content": "imma bust you upside the head" + } + ] + } } ``` @@ -64,7 +64,7 @@ Generated when DEBUG_MODEL=true Generated when DEBUG_MODEL=true -```json +````json { "retval": { "isBase64Encoded": false, @@ -95,3 +95,4 @@ Generated when DEBUG_MODEL=true } } }``` +```` diff --git a/doc/terraform-getting-started.md b/doc/terraform-getting-started.md index 2796e510..c0ec6510 100644 --- a/doc/terraform-getting-started.md +++ b/doc/terraform-getting-started.md @@ -22,8 +22,8 @@ For Linux & macOS operating systems. Ensure that your environment includes the latest stable releases of the following software packages: -* [aws cli](https://aws.amazon.com/cli/) -* [terraform](https://www.terraform.io/) +- [aws cli](https://aws.amazon.com/cli/) +- [terraform](https://www.terraform.io/) ### Install required software packages using Homebrew @@ -112,7 +112,7 @@ vim terraform/terraform.tf profile = "default" encrypt = false } -```` +``` ### Step 4. Configure your environment by setting Terraform global variable values @@ -128,13 +128,12 @@ aws_region = "us-east-1" aws_profile = "default" ``` - ### Step 3. Run the following command to initialize and build the solution The Terraform modules in this repo rely extensively on calls to other third party Terraform modules published and maintained by [AWS](https://registry.terraform.io/namespaces/terraform-aws-modules). These modules will be downloaded by Terraform so that these can be executed locally from your computer. 
Noteworth examples of such third party modules include: -* [terraform-aws-modules/s3](https://registry.terraform.io/modules/terraform-aws-modules/s3-bucket/aws/latest) -* [terraform-aws-modules/dynamodb](https://registry.terraform.io/modules/terraform-aws-modules/dynamodb-table/aws/latest) +- [terraform-aws-modules/s3](https://registry.terraform.io/modules/terraform-aws-modules/s3-bucket/aws/latest) +- [terraform-aws-modules/dynamodb](https://registry.terraform.io/modules/terraform-aws-modules/dynamodb-table/aws/latest) ```console cd terraform From a27fb736d0b32310042dc78eb14398ee8e1b18ea Mon Sep 17 00:00:00 2001 From: lpm0073 Date: Sat, 18 Nov 2023 18:12:11 -0600 Subject: [PATCH 05/10] style: reformat md with ESLint and prettier --- client/.eslintrc.cjs | 60 +-- client/index.html | 26 +- client/src/App.css | 6 +- client/src/App.jsx | 497 +++++++++++++----- client/src/applications/AeroAssist.js | 15 +- client/src/applications/CSVify.js | 12 +- client/src/applications/CodeExplainer.js | 12 +- client/src/applications/CodeImprovement.js | 12 +- client/src/applications/Emojibot.js | 12 +- client/src/applications/Emojibot4.js | 12 +- client/src/applications/English2French.js | 12 +- client/src/applications/FunctionCreator.js | 12 +- client/src/applications/GrammarGenius.js | 12 +- client/src/applications/InterviewQuestions.js | 13 +- client/src/applications/KeyWords.js | 12 +- client/src/applications/KidsDigest.js | 12 +- client/src/applications/LessonPlanWriter.js | 13 +- .../applications/MeetingNotesSummarizer.js | 13 +- client/src/applications/MemoWriter.js | 12 +- client/src/applications/Mood2CSSColor.js | 12 +- client/src/applications/ProConDiscusser.js | 18 +- .../src/applications/ProductNameGenerator.js | 18 +- client/src/applications/PythonDebugger.js | 12 +- client/src/applications/RapBattle.js | 20 +- client/src/applications/ReviewClassifier.js | 19 +- client/src/applications/SarcasticChat.js | 13 +- client/src/applications/SinglePageWebapp.js | 15 +- client/src/applications/SocraticTutor.js | 14 +- .../src/applications/SpreadsheetGenerator.js | 13 +- client/src/applications/SqlTranslator.js | 12 +- client/src/applications/TimeComplexity.js | 12 +- .../src/applications/TurnByTurnDirections.js | 13 +- client/src/applications/TweetClassifier.js | 12 +- client/src/applications/VRFitness.js | 12 +- client/src/components/Layout.js | 4 +- client/src/components/about/Component.css | 7 +- client/src/components/about/Component.jsx | 74 ++- client/src/components/chatApp/ApiRequest.js | 72 +-- client/src/components/chatApp/Component.css | 4 +- client/src/components/chatApp/Component.jsx | 135 +++-- client/src/components/chatApp/Modal.css | 1 - client/src/components/chatApp/Modal.jsx | 51 +- client/src/config.js | 70 +-- client/src/main.jsx | 12 +- client/vite.config.js | 10 +- 45 files changed, 952 insertions(+), 478 deletions(-) diff --git a/client/.eslintrc.cjs b/client/.eslintrc.cjs index 1e1167f2..5d94793c 100644 --- a/client/.eslintrc.cjs +++ b/client/.eslintrc.cjs @@ -1,35 +1,29 @@ module.exports = { - "env": { - "browser": true, - "es2021": true + env: { + browser: true, + es2021: true, + }, + extends: [ + "eslint:recommended", + "plugin:@typescript-eslint/recommended", + "plugin:react/recommended", + ], + overrides: [ + { + env: { + node: true, + }, + files: [".eslintrc.{js,cjs}"], + parserOptions: { + sourceType: "script", + }, }, - "extends": [ - "eslint:recommended", - "plugin:@typescript-eslint/recommended", - "plugin:react/recommended" - ], - "overrides": [ - { - "env": { - "node": 
true - }, - "files": [ - ".eslintrc.{js,cjs}" - ], - "parserOptions": { - "sourceType": "script" - } - } - ], - "parser": "@typescript-eslint/parser", - "parserOptions": { - "ecmaVersion": "latest", - "sourceType": "module" - }, - "plugins": [ - "@typescript-eslint", - "react" - ], - "rules": { - } -} + ], + parser: "@typescript-eslint/parser", + parserOptions: { + ecmaVersion: "latest", + sourceType: "module", + }, + plugins: ["@typescript-eslint", "react"], + rules: {}, +}; diff --git a/client/index.html b/client/index.html index 040b0132..00a8c2fa 100644 --- a/client/index.html +++ b/client/index.html @@ -2,16 +2,32 @@ - + OpenAI Examples - - - - + + + + diff --git a/client/src/App.css b/client/src/App.css index b2b50831..5bdfe57c 100644 --- a/client/src/App.css +++ b/client/src/App.css @@ -1,11 +1,11 @@ #root { max-width: 1280px; margin: 0 auto; - padding: 0;; + padding: 0; } .App { - border: 1px solid #1d5268 + border: 1px solid #1d5268; } .app-title { @@ -41,7 +41,7 @@ h5 { .footer a { text-align: center; color: #333; - font-family: 'Courier New', Courier, monospace; + font-family: "Courier New", Courier, monospace; } .footer a:hover { diff --git a/client/src/App.jsx b/client/src/App.jsx index 116b0109..cdec98b8 100644 --- a/client/src/App.jsx +++ b/client/src/App.jsx @@ -1,56 +1,93 @@ // React code -import React from 'react'; -import { useState } from 'react'; +import React from "react"; +import { useState } from "react"; // Third party components -import { Sidebar, Menu, MenuItem, SubMenu } from 'react-pro-sidebar'; -import { ContainerLayout, SidebarLayout, ContentLayout, Logo } from './components/Layout/'; -import { FaInfo, FaDatabase, FaCode, FaChartLine, FaClipboardList, FaGamepad } from "react-icons/fa"; +import { Sidebar, Menu, MenuItem, SubMenu } from "react-pro-sidebar"; +import { + ContainerLayout, + SidebarLayout, + ContentLayout, + Logo, +} from "./components/Layout/"; +import { + FaInfo, + FaDatabase, + FaCode, + FaChartLine, + FaClipboardList, + FaGamepad, +} from "react-icons/fa"; // Our code -import './App.css'; -import ChatApp from './components/chatApp/Component'; -import AboutPage from './components/about/Component'; -import { APPLICATIONS } from './config'; +import "./App.css"; +import ChatApp from "./components/chatApp/Component"; +import AboutPage from "./components/about/Component"; +import { APPLICATIONS } from "./config"; // chatApp definitions -import AeroAssist from './applications/AeroAssist'; -import CodeExplainer from './applications/CodeExplainer'; -import CodeImprovement from './applications/CodeImprovement'; -import CSVify from './applications/CSVify'; -import Emojibot from './applications/Emojibot'; -import Emojibot4 from './applications/Emojibot4'; -import English2French from './applications/English2French'; -import FunctionCreator from './applications/FunctionCreator'; -import GrammarGenius from './applications/GrammarGenius'; -import InterviewQuestions from './applications/InterviewQuestions'; -import KeyWords from './applications/KeyWords'; -import KidsDigest from './applications/KidsDigest'; -import LessonPlanWriter from './applications/LessonPlanWriter'; -import MeetingNotesSummarizer from './applications/MeetingNotesSummarizer'; -import Mood2CSSColor from './applications/Mood2CSSColor'; -import MemoWriter from './applications/MemoWriter'; -import ProductNameGenerator from './applications/ProductNameGenerator'; -import ProConDiscusser from './applications/ProConDiscusser'; -import PythonDebugger from './applications/PythonDebugger'; -import 
RapBattle from './applications/RapBattle'; -import ReviewClassifier from './applications/ReviewClassifier'; -import SarcasticChat from './applications/SarcasticChat'; -import SinglePageWebapp from './applications/SinglePageWebapp'; -import SocraticTutor from './applications/SocraticTutor'; -import SpreadsheetGenerator from './applications/SpreadsheetGenerator'; -import SqlTranslator from './applications/SqlTranslator'; -import TimeComplexity from './applications/TimeComplexity'; -import TweetClassifier from './applications/TweetClassifier'; -import TurnByTurnDirections from './applications/TurnByTurnDirections'; -import VRFitness from './applications/VRFitness'; +import AeroAssist from "./applications/AeroAssist"; +import CodeExplainer from "./applications/CodeExplainer"; +import CodeImprovement from "./applications/CodeImprovement"; +import CSVify from "./applications/CSVify"; +import Emojibot from "./applications/Emojibot"; +import Emojibot4 from "./applications/Emojibot4"; +import English2French from "./applications/English2French"; +import FunctionCreator from "./applications/FunctionCreator"; +import GrammarGenius from "./applications/GrammarGenius"; +import InterviewQuestions from "./applications/InterviewQuestions"; +import KeyWords from "./applications/KeyWords"; +import KidsDigest from "./applications/KidsDigest"; +import LessonPlanWriter from "./applications/LessonPlanWriter"; +import MeetingNotesSummarizer from "./applications/MeetingNotesSummarizer"; +import Mood2CSSColor from "./applications/Mood2CSSColor"; +import MemoWriter from "./applications/MemoWriter"; +import ProductNameGenerator from "./applications/ProductNameGenerator"; +import ProConDiscusser from "./applications/ProConDiscusser"; +import PythonDebugger from "./applications/PythonDebugger"; +import RapBattle from "./applications/RapBattle"; +import ReviewClassifier from "./applications/ReviewClassifier"; +import SarcasticChat from "./applications/SarcasticChat"; +import SinglePageWebapp from "./applications/SinglePageWebapp"; +import SocraticTutor from "./applications/SocraticTutor"; +import SpreadsheetGenerator from "./applications/SpreadsheetGenerator"; +import SqlTranslator from "./applications/SqlTranslator"; +import TimeComplexity from "./applications/TimeComplexity"; +import TweetClassifier from "./applications/TweetClassifier"; +import TurnByTurnDirections from "./applications/TurnByTurnDirections"; +import VRFitness from "./applications/VRFitness"; const currentYear = new Date().getFullYear(); const Footer = () => { return ( -
-

© {currentYear} lawrencemcdaniel.com | OpenAI Python API | React | | Terraform | Source code

+
+

+ © {currentYear}{" "} + lawrencemcdaniel.com |{" "} + + OpenAI Python API + {" "} + |{" "} + + React + {" "} + |{" "} + + + {" "} + |{" "} + + Terraform + {" "} + |{" "} + + Source code + +

); }; @@ -62,114 +99,320 @@ const App = () => { setSelectedItem(item); }; return ( -
-

OpenAI Code Samples

+
+

OpenAI Code Samples

- -
- - { - // only apply styles on first level elements of the tree - if (level === 0) - return { - color: disabled ? 'gray' : 'lightgray', - backgroundColor: active ? '#eecef9' : undefined, - }; - }, - }} + +
+ + { + // only apply styles on first level elements of the tree + if (level === 0) + return { + color: disabled ? "gray" : "lightgray", + backgroundColor: active ? "#eecef9" : undefined, + }; + }, + }} > - - OpenAI Logo + + OpenAI Logo -
Sample Applications
- }> - handleItemClick(APPLICATIONS.SarcasticChat)}>{SarcasticChat.sidebar_title} - handleItemClick(APPLICATIONS.Emojibot)}>{Emojibot.sidebar_title} - handleItemClick(APPLICATIONS.Emojibot4)}>{Emojibot4.sidebar_title} - handleItemClick(APPLICATIONS.RapBattle)}>{RapBattle.sidebar_title} - handleItemClick(APPLICATIONS.SocraticTutor)}>{SocraticTutor.sidebar_title} - handleItemClick(APPLICATIONS.ProConDiscusser)}>{ProConDiscusser.sidebar_title} - handleItemClick(APPLICATIONS.TweetClassifier)}>{TweetClassifier.sidebar_title} +
Sample Applications
+ }> + handleItemClick(APPLICATIONS.SarcasticChat)} + > + {SarcasticChat.sidebar_title} + + handleItemClick(APPLICATIONS.Emojibot)} + > + {Emojibot.sidebar_title} + + handleItemClick(APPLICATIONS.Emojibot4)} + > + {Emojibot4.sidebar_title} + + handleItemClick(APPLICATIONS.RapBattle)} + > + {RapBattle.sidebar_title} + + handleItemClick(APPLICATIONS.SocraticTutor)} + > + {SocraticTutor.sidebar_title} + + + handleItemClick(APPLICATIONS.ProConDiscusser) + } + > + {ProConDiscusser.sidebar_title} + + + handleItemClick(APPLICATIONS.TweetClassifier) + } + > + {TweetClassifier.sidebar_title} + - }> - handleItemClick(APPLICATIONS.VRFitness)}>{VRFitness.sidebar_title} - handleItemClick(APPLICATIONS.GrammarGenius)}>{GrammarGenius.sidebar_title} - handleItemClick(APPLICATIONS.English2French)}>{English2French.sidebar_title} - handleItemClick(APPLICATIONS.TurnByTurnDirections)}>{TurnByTurnDirections.sidebar_title} - handleItemClick(APPLICATIONS.KidsDigest)}>{KidsDigest.sidebar_title} + }> + handleItemClick(APPLICATIONS.VRFitness)} + > + {VRFitness.sidebar_title} + + handleItemClick(APPLICATIONS.GrammarGenius)} + > + {GrammarGenius.sidebar_title} + + handleItemClick(APPLICATIONS.English2French)} + > + {English2French.sidebar_title} + + + handleItemClick(APPLICATIONS.TurnByTurnDirections) + } + > + {TurnByTurnDirections.sidebar_title} + + handleItemClick(APPLICATIONS.KidsDigest)} + > + {KidsDigest.sidebar_title} + }> - handleItemClick(APPLICATIONS.AeroAssist)}>{AeroAssist.sidebar_title} - handleItemClick(APPLICATIONS.KeyWords)}>{KeyWords.sidebar_title} - handleItemClick(APPLICATIONS.InterviewQuestions)}>{InterviewQuestions.sidebar_title} - handleItemClick(APPLICATIONS.MemoWriter)}>{MemoWriter.sidebar_title} - handleItemClick(APPLICATIONS.LessonPlanWriter)}>{LessonPlanWriter.sidebar_title} - handleItemClick(APPLICATIONS.MeetingNotesSummarizer)}>{MeetingNotesSummarizer.sidebar_title} - handleItemClick(APPLICATIONS.ProductNameGenerator)}>{ProductNameGenerator.sidebar_title} - handleItemClick(APPLICATIONS.ReviewClassifier)}>{ReviewClassifier.sidebar_title} + handleItemClick(APPLICATIONS.AeroAssist)} + > + {AeroAssist.sidebar_title} + + handleItemClick(APPLICATIONS.KeyWords)} + > + {KeyWords.sidebar_title} + + + handleItemClick(APPLICATIONS.InterviewQuestions) + } + > + {InterviewQuestions.sidebar_title} + + handleItemClick(APPLICATIONS.MemoWriter)} + > + {MemoWriter.sidebar_title} + + + handleItemClick(APPLICATIONS.LessonPlanWriter) + } + > + {LessonPlanWriter.sidebar_title} + + + handleItemClick(APPLICATIONS.MeetingNotesSummarizer) + } + > + {MeetingNotesSummarizer.sidebar_title} + + + handleItemClick(APPLICATIONS.ProductNameGenerator) + } + > + {ProductNameGenerator.sidebar_title} + + + handleItemClick(APPLICATIONS.ReviewClassifier) + } + > + {ReviewClassifier.sidebar_title} + - }> - handleItemClick(APPLICATIONS.CSVify)}>{CSVify.sidebar_title} - handleItemClick(APPLICATIONS.SpreadsheetGenerator)}>{SpreadsheetGenerator.sidebar_title} + }> + handleItemClick(APPLICATIONS.CSVify)} + > + {CSVify.sidebar_title} + + + handleItemClick(APPLICATIONS.SpreadsheetGenerator) + } + > + {SpreadsheetGenerator.sidebar_title} + - }> - handleItemClick(APPLICATIONS.FunctionCreator)}>{FunctionCreator.sidebar_title} - handleItemClick(APPLICATIONS.TimeComplexity)}>{TimeComplexity.sidebar_title} - handleItemClick(APPLICATIONS.CodeExplainer)}>{CodeExplainer.sidebar_title} - handleItemClick(APPLICATIONS.PythonDebugger)}>{PythonDebugger.sidebar_title} - handleItemClick(APPLICATIONS.SqlTranslator)}>{SqlTranslator.sidebar_title} - 
handleItemClick(APPLICATIONS.CodeImprovement)}>{CodeImprovement.sidebar_title} - handleItemClick(APPLICATIONS.Mood2CSSColor)}>{Mood2CSSColor.sidebar_title} - handleItemClick(APPLICATIONS.SinglePageWebapp)}>{SinglePageWebapp.sidebar_title} + }> + + handleItemClick(APPLICATIONS.FunctionCreator) + } + > + {FunctionCreator.sidebar_title} + + handleItemClick(APPLICATIONS.TimeComplexity)} + > + {TimeComplexity.sidebar_title} + + handleItemClick(APPLICATIONS.CodeExplainer)} + > + {CodeExplainer.sidebar_title} + + handleItemClick(APPLICATIONS.PythonDebugger)} + > + {PythonDebugger.sidebar_title} + + handleItemClick(APPLICATIONS.SqlTranslator)} + > + {SqlTranslator.sidebar_title} + + + handleItemClick(APPLICATIONS.CodeImprovement) + } + > + {CodeImprovement.sidebar_title} + + handleItemClick(APPLICATIONS.Mood2CSSColor)} + > + {Mood2CSSColor.sidebar_title} + + + handleItemClick(APPLICATIONS.SinglePageWebapp) + } + > + {SinglePageWebapp.sidebar_title} +
- } onClick={() => handleItemClick('AboutPage')}>About + } + onClick={() => handleItemClick("AboutPage")} + > + About +
- {selectedItem === 'AboutPage' && } + {selectedItem === "AboutPage" && } - {selectedItem === APPLICATIONS.AeroAssist && } - {selectedItem === APPLICATIONS.CodeExplainer && } - {selectedItem === APPLICATIONS.CodeImprovement && } + {selectedItem === APPLICATIONS.AeroAssist && ( + + )} + {selectedItem === APPLICATIONS.CodeExplainer && ( + + )} + {selectedItem === APPLICATIONS.CodeImprovement && ( + + )} {selectedItem === APPLICATIONS.CSVify && } {selectedItem === APPLICATIONS.Emojibot && } - {selectedItem === APPLICATIONS.Emojibot4 && } - {selectedItem === APPLICATIONS.English2French && } - {selectedItem === APPLICATIONS.FunctionCreator && } - {selectedItem === APPLICATIONS.GrammarGenius && } - {selectedItem === APPLICATIONS.InterviewQuestions && } + {selectedItem === APPLICATIONS.Emojibot4 && ( + + )} + {selectedItem === APPLICATIONS.English2French && ( + + )} + {selectedItem === APPLICATIONS.FunctionCreator && ( + + )} + {selectedItem === APPLICATIONS.GrammarGenius && ( + + )} + {selectedItem === APPLICATIONS.InterviewQuestions && ( + + )} {selectedItem === APPLICATIONS.KeyWords && } - {selectedItem === APPLICATIONS.KidsDigest && } - {selectedItem === APPLICATIONS.LessonPlanWriter && } - {selectedItem === APPLICATIONS.MeetingNotesSummarizer && } - {selectedItem === APPLICATIONS.Mood2CSSColor && } - {selectedItem === APPLICATIONS.MemoWriter && } - {selectedItem === APPLICATIONS.ProductNameGenerator && } - {selectedItem === APPLICATIONS.ProConDiscusser && } - {selectedItem === APPLICATIONS.PythonDebugger && } - {selectedItem === APPLICATIONS.RapBattle && } - {selectedItem === APPLICATIONS.ReviewClassifier && } - {selectedItem === APPLICATIONS.SarcasticChat && } - {selectedItem === APPLICATIONS.SinglePageWebapp && } - {selectedItem === APPLICATIONS.SocraticTutor && } - {selectedItem === APPLICATIONS.SpreadsheetGenerator && } - {selectedItem === APPLICATIONS.SqlTranslator && } - {selectedItem === APPLICATIONS.TimeComplexity && } - {selectedItem === APPLICATIONS.TweetClassifier && } - {selectedItem === APPLICATIONS.TurnByTurnDirections && } - {selectedItem === APPLICATIONS.VRFitness && } + {selectedItem === APPLICATIONS.KidsDigest && ( + + )} + {selectedItem === APPLICATIONS.LessonPlanWriter && ( + + )} + {selectedItem === APPLICATIONS.MeetingNotesSummarizer && ( + + )} + {selectedItem === APPLICATIONS.Mood2CSSColor && ( + + )} + {selectedItem === APPLICATIONS.MemoWriter && ( + + )} + {selectedItem === APPLICATIONS.ProductNameGenerator && ( + + )} + {selectedItem === APPLICATIONS.ProConDiscusser && ( + + )} + {selectedItem === APPLICATIONS.PythonDebugger && ( + + )} + {selectedItem === APPLICATIONS.RapBattle && ( + + )} + {selectedItem === APPLICATIONS.ReviewClassifier && ( + + )} + {selectedItem === APPLICATIONS.SarcasticChat && ( + + )} + {selectedItem === APPLICATIONS.SinglePageWebapp && ( + + )} + {selectedItem === APPLICATIONS.SocraticTutor && ( + + )} + {selectedItem === APPLICATIONS.SpreadsheetGenerator && ( + + )} + {selectedItem === APPLICATIONS.SqlTranslator && ( + + )} + {selectedItem === APPLICATIONS.TimeComplexity && ( + + )} + {selectedItem === APPLICATIONS.TweetClassifier && ( + + )} + {selectedItem === APPLICATIONS.TurnByTurnDirections && ( + + )} + {selectedItem === APPLICATIONS.VRFitness && ( + + )}
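For orientation, the block above follows one repeating pattern: each sidebar entry calls handleItemClick with a key from APPLICATIONS, and a matching conditional mounts the chat component for the selected key. A minimal sketch of one such pair, assuming react-pro-sidebar's MenuItem and the ChatApp component defined later in this patch (client/src/components/chatApp/Component.jsx); the props spread is an assumption for illustration, not taken verbatim from the patch:

    <MenuItem onClick={() => handleItemClick(APPLICATIONS.AeroAssist)}>
      {AeroAssist.sidebar_title}
    </MenuItem>
    {/* ... one MenuItem per application ... */}
    {selectedItem === APPLICATIONS.AeroAssist && <ChatApp {...AeroAssist} />}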
- ) -} + ); +}; export default App; diff --git a/client/src/applications/AeroAssist.js b/client/src/applications/AeroAssist.js index ce8e3aab..bf7f0c71 100644 --- a/client/src/applications/AeroAssist.js +++ b/client/src/applications/AeroAssist.js @@ -1,6 +1,10 @@ -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-airport-codes'; +const SLUG = "default-airport-codes"; const AeroAssist = { sidebar_title: "Airport Assistant", @@ -8,8 +12,9 @@ const AeroAssist = { api_key: AWS_API_GATEWAY_KEY, app_name: "Airport Assistant", assistant_name: "Emily", - avatar_url: 'https://chatscope.io/storybook/react/static/media/emily.d34aecd9.svg', - background_image_url: '/applications/AeroAssist/AeroAssist-bg.svg', + avatar_url: + "https://chatscope.io/storybook/react/static/media/emily.d34aecd9.svg", + background_image_url: "/applications/AeroAssist/AeroAssist-bg.svg", welcome_message: `Hello, I'm Emily, an air travel chatbot powered by ChatGPT. Ask me anything about airport codes anywhere in the world!`, example_prompts: [ '"What is the airport code for London Heathrow?"', @@ -18,7 +23,7 @@ const AeroAssist = { '"Name an airport in Mexico where you can land a helicopter"', '"I want to fly from Frankfurt to London."', ], - placeholder_text: 'Ask me anything about airports', + placeholder_text: "Ask me anything about airports", info_url: OPENAI_EXAMPLES_URL + SLUG, file_attach_button: false, uses_openai: true, diff --git a/client/src/applications/CSVify.js b/client/src/applications/CSVify.js index 55f57d1b..498827d3 100644 --- a/client/src/applications/CSVify.js +++ b/client/src/applications/CSVify.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-parse-data'; +const SLUG = "default-parse-data"; const CSVify = { sidebar_title: "CSVify", @@ -9,8 +13,8 @@ const CSVify = { api_key: AWS_API_GATEWAY_KEY, app_name: "CSVify", assistant_name: "Chad", - avatar_url: '/applications/CSVify/Chad.svg', - background_image_url: '/applications/CSVify/CSVify-bg.jpg', + avatar_url: "/applications/CSVify/Chad.svg", + background_image_url: "/applications/CSVify/CSVify-bg.jpg", welcome_message: `Hello, I'm Chad, and I convert unstructured text data to CSV. 
Paste some text into the chat box to get started.`, example_prompts: [], placeholder_text: `send some data to Chad`, diff --git a/client/src/applications/CodeExplainer.js b/client/src/applications/CodeExplainer.js index eb51c087..9c514a6a 100644 --- a/client/src/applications/CodeExplainer.js +++ b/client/src/applications/CodeExplainer.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-explain-code'; +const SLUG = "default-explain-code"; const CodeExplainer = { sidebar_title: "Code Explainer", @@ -9,8 +13,8 @@ const CodeExplainer = { api_key: AWS_API_GATEWAY_KEY, app_name: "CodeExplainer", assistant_name: "Patricia", - avatar_url: '/applications/CodeExplainer/Patricia.svg', - background_image_url: '/applications/CodeExplainer/CodeExplainer-bg.svg', + avatar_url: "/applications/CodeExplainer/Patricia.svg", + background_image_url: "/applications/CodeExplainer/CodeExplainer-bg.svg", welcome_message: `Hello, I'm Patricia and I'm an expert Python programmer. Upload a Python file and I'll concisely explain what it does.`, example_prompts: [], placeholder_text: `upload a Python file`, diff --git a/client/src/applications/CodeImprovement.js b/client/src/applications/CodeImprovement.js index e9e84388..b95d2bc3 100644 --- a/client/src/applications/CodeImprovement.js +++ b/client/src/applications/CodeImprovement.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-code-improvement'; +const SLUG = "default-code-improvement"; const CodeImprovement = { sidebar_title: "Coding CoPilot", @@ -9,8 +13,8 @@ const CodeImprovement = { api_key: AWS_API_GATEWAY_KEY, app_name: "Python Code Evaluator", assistant_name: "Camilla", - avatar_url: '/applications/CodeImprovement/Camilla.svg', - background_image_url: '/applications/CodeImprovement/CodeImprovement-bg.svg', + avatar_url: "/applications/CodeImprovement/Camilla.svg", + background_image_url: "/applications/CodeImprovement/CodeImprovement-bg.svg", welcome_message: `Hello, I'm Camilla, a Python programmer, and I can help you improve your Python code.`, example_prompts: [], placeholder_text: `give Camilla a Python code snippet to evaluate`, diff --git a/client/src/applications/Emojibot.js b/client/src/applications/Emojibot.js index cc693e44..6e590bac 100644 --- a/client/src/applications/Emojibot.js +++ b/client/src/applications/Emojibot.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-emoji-translation'; +const SLUG = "default-emoji-translation"; const Emojibot = { sidebar_title: "Emoji Translator", @@ -9,8 +13,8 @@ const Emojibot = { api_key: AWS_API_GATEWAY_KEY, app_name: "Emojibot", assistant_name: "Erik", - avatar_url: '/applications/Emojibot/Erik.svg', - background_image_url: 
'/applications/Emojibot/Emojibot-bg.jpg', + avatar_url: "/applications/Emojibot/Erik.svg", + background_image_url: "/applications/Emojibot/Emojibot-bg.jpg", welcome_message: `Hello, I'm Erik, and I will translate your text into emojis.`, example_prompts: [ "What's shake'n bacon", diff --git a/client/src/applications/Emojibot4.js b/client/src/applications/Emojibot4.js index 549a9c7b..49b4d455 100644 --- a/client/src/applications/Emojibot4.js +++ b/client/src/applications/Emojibot4.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-emoji-chatbot'; +const SLUG = "default-emoji-chatbot"; const Emojibot4 = { sidebar_title: "Emoji Chatbot", @@ -9,8 +13,8 @@ const Emojibot4 = { api_key: AWS_API_GATEWAY_KEY, app_name: "Emojibot", assistant_name: "Matilda", - avatar_url: '/applications/Emojibot4/Matilda.svg', - background_image_url: '/applications/Emojibot4/Emojibot4-bg.jpg', + avatar_url: "/applications/Emojibot4/Matilda.svg", + background_image_url: "/applications/Emojibot4/Emojibot4-bg.jpg", welcome_message: `Hello, I'm Matilda, a mime who only responds with emojis. Let's chat!`, example_prompts: [ "What's shake'n bacon", diff --git a/client/src/applications/English2French.js b/client/src/applications/English2French.js index beec8e80..76f11e8c 100644 --- a/client/src/applications/English2French.js +++ b/client/src/applications/English2French.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-translation'; +const SLUG = "default-translation"; const English2French = { sidebar_title: "English to French Translator", @@ -9,8 +13,8 @@ const English2French = { api_key: AWS_API_GATEWAY_KEY, app_name: "English to French Translator", assistant_name: "Fleur", - avatar_url: '/applications/English2French/Fleur.svg', - background_image_url: '/applications/English2French/English2French-bg.jpg', + avatar_url: "/applications/English2French/Fleur.svg", + background_image_url: "/applications/English2French/English2French-bg.jpg", welcome_message: `Hello, I'm Fleur, and I'm fluent in French. 
I can help you translate English to French.`, example_prompts: [], placeholder_text: `give Fleur something to translate...`, diff --git a/client/src/applications/FunctionCreator.js b/client/src/applications/FunctionCreator.js index 53702b0b..82297588 100644 --- a/client/src/applications/FunctionCreator.js +++ b/client/src/applications/FunctionCreator.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-function-from-spec'; +const SLUG = "default-function-from-spec"; const FunctionCreator = { sidebar_title: "Python Function Creator", @@ -9,8 +13,8 @@ const FunctionCreator = { api_key: AWS_API_GATEWAY_KEY, app_name: "Python Function Creator", assistant_name: "Francine", - avatar_url: '/applications/FunctionCreator/Francine.svg', - background_image_url: '/applications/FunctionCreator/FunctionCreator-bg.svg', + avatar_url: "/applications/FunctionCreator/Francine.svg", + background_image_url: "/applications/FunctionCreator/FunctionCreator-bg.svg", welcome_message: `Hello, I'm Francine, a Python programmer, and I can help you create a Python function.`, example_prompts: [], placeholder_text: `give Francine a specification for a Python function`, diff --git a/client/src/applications/GrammarGenius.js b/client/src/applications/GrammarGenius.js index cd15bf09..ca05e996 100644 --- a/client/src/applications/GrammarGenius.js +++ b/client/src/applications/GrammarGenius.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-grammar'; +const SLUG = "default-grammar"; const GrammarGenius = { sidebar_title: "Grammar Genius", @@ -9,8 +13,8 @@ const GrammarGenius = { api_key: AWS_API_GATEWAY_KEY, app_name: "GrammarGenius", assistant_name: "Gertrude", - avatar_url: '/applications/GrammarGenius/Gertrude.svg', - background_image_url: '/applications/GrammarGenius/GrammarGenius-bg.jpg', + avatar_url: "/applications/GrammarGenius/Gertrude.svg", + background_image_url: "/applications/GrammarGenius/GrammarGenius-bg.jpg", welcome_message: `Hello, I'm Gertrude, an English grammar chatbot powered by ChatGPT. 
You can practice your English grammar with me!`, example_prompts: [ '"I broked my leg."', diff --git a/client/src/applications/InterviewQuestions.js b/client/src/applications/InterviewQuestions.js index ca224dcf..fdb643e9 100644 --- a/client/src/applications/InterviewQuestions.js +++ b/client/src/applications/InterviewQuestions.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-interview-questions'; +const SLUG = "default-interview-questions"; const InterviewQuestions = { sidebar_title: "Interview Question Generator", @@ -9,8 +13,9 @@ const InterviewQuestions = { api_key: AWS_API_GATEWAY_KEY, app_name: "Interview Question Generator", assistant_name: "Irene", - avatar_url: '/applications/InterviewQuestions/Irene.svg', - background_image_url: '/applications/InterviewQuestions/InterviewQuestions-bg.svg', + avatar_url: "/applications/InterviewQuestions/Irene.svg", + background_image_url: + "/applications/InterviewQuestions/InterviewQuestions-bg.svg", welcome_message: `Hello, I'm Irene, and I can help you create interview questions for your job candidates.`, example_prompts: [], placeholder_text: `tell Irene about your job posting`, diff --git a/client/src/applications/KeyWords.js b/client/src/applications/KeyWords.js index 64962c44..c55ff152 100644 --- a/client/src/applications/KeyWords.js +++ b/client/src/applications/KeyWords.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-keywords'; +const SLUG = "default-keywords"; const KeyWords = { sidebar_title: "KeyWord Generator", @@ -9,8 +13,8 @@ const KeyWords = { api_key: AWS_API_GATEWAY_KEY, app_name: "KeyWords", assistant_name: "Kiefer", - avatar_url: '/applications/KeyWords/Kiefer.svg', - background_image_url: '/applications/KeyWords/KeyWords-bg.png', + avatar_url: "/applications/KeyWords/Kiefer.svg", + background_image_url: "/applications/KeyWords/KeyWords-bg.png", welcome_message: `Hello, I'm Kiefer, and I will create a list of keywords from any content.`, example_prompts: [], placeholder_text: `send some text to Kiefer`, diff --git a/client/src/applications/KidsDigest.js b/client/src/applications/KidsDigest.js index 702a65c6..196962df 100644 --- a/client/src/applications/KidsDigest.js +++ b/client/src/applications/KidsDigest.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-summarize'; +const SLUG = "default-summarize"; const KidsDigest = { sidebar_title: "KidsDigest", @@ -9,8 +13,8 @@ const KidsDigest = { api_key: AWS_API_GATEWAY_KEY, app_name: "KidsDigest", assistant_name: "Kent", - avatar_url: '/applications/KidsDigest/Kent.svg', - background_image_url: '/applications/KidsDigest/KidsDigest-bg.jpg', + avatar_url: "/applications/KidsDigest/Kent.svg", + background_image_url: 
"/applications/KidsDigest/KidsDigest-bg.jpg", welcome_message: `Hello, I'm Kent, and I summarize any content so that a second-grade student can understand it.`, example_prompts: [], placeholder_text: `say something to Kent`, diff --git a/client/src/applications/LessonPlanWriter.js b/client/src/applications/LessonPlanWriter.js index 669701b6..cd64b34f 100644 --- a/client/src/applications/LessonPlanWriter.js +++ b/client/src/applications/LessonPlanWriter.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-lesson-plan-writer'; +const SLUG = "default-lesson-plan-writer"; const LessonPlanWriter = { sidebar_title: "Lesson Plan Writer", @@ -9,8 +13,9 @@ const LessonPlanWriter = { api_key: AWS_API_GATEWAY_KEY, app_name: "Lesson Plan Writer", assistant_name: "Langston", - avatar_url: '/applications/LessonPlanWriter/Langston.svg', - background_image_url: '/applications/LessonPlanWriter/LessonPlanWriter-bg.jpg', + avatar_url: "/applications/LessonPlanWriter/Langston.svg", + background_image_url: + "/applications/LessonPlanWriter/LessonPlanWriter-bg.jpg", welcome_message: `Hello, I'm Langston, an education professional. I can help you write a lesson plan.`, example_prompts: [], placeholder_text: `tell Langston what the lesson is about...`, diff --git a/client/src/applications/MeetingNotesSummarizer.js b/client/src/applications/MeetingNotesSummarizer.js index 78622e27..989b42d1 100644 --- a/client/src/applications/MeetingNotesSummarizer.js +++ b/client/src/applications/MeetingNotesSummarizer.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-meeting-notes-summarizer'; +const SLUG = "default-meeting-notes-summarizer"; const MeetingNotesSummarizer = { sidebar_title: "Meeting Notes Summarizer", @@ -9,8 +13,9 @@ const MeetingNotesSummarizer = { api_key: AWS_API_GATEWAY_KEY, app_name: "Meeting Notes Summarizer", assistant_name: "Bodhi", - avatar_url: '/applications/MeetingNotesSummarizer/Bodhi.svg', - background_image_url: '/applications/MeetingNotesSummarizer/MeetingNotesSummarizer-bg.svg', + avatar_url: "/applications/MeetingNotesSummarizer/Bodhi.svg", + background_image_url: + "/applications/MeetingNotesSummarizer/MeetingNotesSummarizer-bg.svg", welcome_message: `Hello, I'm Bodhi, an executive assistant. 
I can help you summarize meeting notes.`, example_prompts: [], placeholder_text: `paste your notes for Bodhi...`, diff --git a/client/src/applications/MemoWriter.js b/client/src/applications/MemoWriter.js index acb0b8d9..bd4c8233 100644 --- a/client/src/applications/MemoWriter.js +++ b/client/src/applications/MemoWriter.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-memo-writer'; +const SLUG = "default-memo-writer"; const MemoWriter = { sidebar_title: "Memo Writer", @@ -9,8 +13,8 @@ const MemoWriter = { api_key: AWS_API_GATEWAY_KEY, app_name: "Memo Writer", assistant_name: "Guillermo", - avatar_url: '/applications/MemoWriter/Guillermo.svg', - background_image_url: '/applications/MemoWriter/MemoWriter-bg.jpg', + avatar_url: "/applications/MemoWriter/Guillermo.svg", + background_image_url: "/applications/MemoWriter/MemoWriter-bg.jpg", welcome_message: `Hello, I'm Guillermo, an executive assistant who can help you write a memo.`, example_prompts: [], placeholder_text: `tell Guillermo what this memo is about`, diff --git a/client/src/applications/Mood2CSSColor.js b/client/src/applications/Mood2CSSColor.js index 8939f5a8..7ec99d5e 100644 --- a/client/src/applications/Mood2CSSColor.js +++ b/client/src/applications/Mood2CSSColor.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-mood-color'; +const SLUG = "default-mood-color"; const Mood2CSSColor = { sidebar_title: "Mood To CSS Color", @@ -9,8 +13,8 @@ const Mood2CSSColor = { api_key: AWS_API_GATEWAY_KEY, app_name: "Mood2CSSColor", assistant_name: "Marlene", - avatar_url: '/applications/Mood2CSSColor/Marlene.svg', - background_image_url: '/applications/Mood2CSSColor/Mood2CSSColor-bg.jpg', + avatar_url: "/applications/Mood2CSSColor/Marlene.svg", + background_image_url: "/applications/Mood2CSSColor/Mood2CSSColor-bg.jpg", welcome_message: `Hello, I'm Marlene, and I convert your mood into a CSS hex color code.`, example_prompts: [ '"I am happy as a clam"', diff --git a/client/src/applications/ProConDiscusser.js b/client/src/applications/ProConDiscusser.js index 4d9be284..07679e95 100644 --- a/client/src/applications/ProConDiscusser.js +++ b/client/src/applications/ProConDiscusser.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-pro-con-discusser'; +const SLUG = "default-pro-con-discusser"; const ProConDiscusser = { sidebar_title: "Pro and Con Discusser", @@ -9,13 +13,13 @@ const ProConDiscusser = { api_key: AWS_API_GATEWAY_KEY, app_name: "Pros and Cons Discusser", assistant_name: "Persephone", - avatar_url: '/applications/ProConDiscusser/Persephone.svg', - background_image_url: '/applications/ProConDiscusser/ProConDiscusser-bg.svg', + avatar_url: "/applications/ProConDiscusser/Persephone.svg", + 
background_image_url: "/applications/ProConDiscusser/ProConDiscusser-bg.svg", welcome_message: `Hello, I'm Persephone, the most learned in the galaxy. I can discuss the pros and cons of anything.`, example_prompts: [ - 'a time travel machine', - 'eating the last everlasting gobstopper', - 'attending a chocolate factory tour', + "a time travel machine", + "eating the last everlasting gobstopper", + "attending a chocolate factory tour", ], placeholder_text: `tell Persephone what to evaluate...`, info_url: OPENAI_EXAMPLES_URL + SLUG, diff --git a/client/src/applications/ProductNameGenerator.js b/client/src/applications/ProductNameGenerator.js index 9e64901c..e9f7f20c 100644 --- a/client/src/applications/ProductNameGenerator.js +++ b/client/src/applications/ProductNameGenerator.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-product-name-gen'; +const SLUG = "default-product-name-gen"; const ProductNameGenerator = { sidebar_title: "Product Name Generator", @@ -9,13 +13,11 @@ const ProductNameGenerator = { api_key: AWS_API_GATEWAY_KEY, app_name: "Product Name Generator", assistant_name: "Pierson", - avatar_url: '/applications/ProductNameGenerator/Pierson.svg', - background_image_url: '/applications/ProductNameGenerator/ProductNameGenerator-bg.avif', + avatar_url: "/applications/ProductNameGenerator/Pierson.svg", + background_image_url: + "/applications/ProductNameGenerator/ProductNameGenerator-bg.avif", welcome_message: `Hello, I'm Pierson, and I create a list of potential product names based on your input.`, - example_prompts: [ - 'We make everlasting gobstoppers', - 'a time travel machine', - ], + example_prompts: ["We make everlasting gobstoppers", "a time travel machine"], placeholder_text: `tell Pierson about your product`, info_url: OPENAI_EXAMPLES_URL + SLUG, file_attach_button: false, diff --git a/client/src/applications/PythonDebugger.js b/client/src/applications/PythonDebugger.js index fdd0694b..23bb15ff 100644 --- a/client/src/applications/PythonDebugger.js +++ b/client/src/applications/PythonDebugger.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-fix-python-bugs'; +const SLUG = "default-fix-python-bugs"; const PythonDebugger = { sidebar_title: "Python Debugger", @@ -9,8 +13,8 @@ const PythonDebugger = { api_key: AWS_API_GATEWAY_KEY, app_name: "Python Debugger", assistant_name: "Deborah", - avatar_url: '/applications/PythonDebugger/Deborah.svg', - background_image_url: '/applications/PythonDebugger/PythonDebugger-bg.svg', + avatar_url: "/applications/PythonDebugger/Deborah.svg", + background_image_url: "/applications/PythonDebugger/PythonDebugger-bg.svg", welcome_message: `Hello, I'm Deborah, and I can debug Python code.`, example_prompts: [], placeholder_text: `upload a Python file for Deborah to debug`, diff --git a/client/src/applications/RapBattle.js b/client/src/applications/RapBattle.js index 12eaa5fe..81a5aa4f 100644 --- a/client/src/applications/RapBattle.js +++ b/client/src/applications/RapBattle.js @@ 
-1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-rap-battle'; +const SLUG = "default-rap-battle"; const RapBattle = { sidebar_title: "Rap Battle Generator", @@ -9,14 +13,14 @@ const RapBattle = { api_key: AWS_API_GATEWAY_KEY, app_name: "Rap Battle Generator", assistant_name: "Rhea", - avatar_url: '/applications/RapBattle/Rhea.svg', - background_image_url: '/applications/RapBattle/RapBattle-bg.jpg', + avatar_url: "/applications/RapBattle/Rhea.svg", + background_image_url: "/applications/RapBattle/RapBattle-bg.jpg", welcome_message: `Hello, I'm Rhea, and I can generate rap battles between your two favorite people`, example_prompts: [ - 'Linus Torvalds vs Bill Gates', - 'Dave Grohl vs Barack Obama', - 'Ghandi vs Martin Luther King Jr.', - 'Wayne Gretzky vs Ronaldo', + "Linus Torvalds vs Bill Gates", + "Dave Grohl vs Barack Obama", + "Ghandi vs Martin Luther King Jr.", + "Wayne Gretzky vs Ronaldo", ], placeholder_text: `tell Rhea who will battle...`, info_url: OPENAI_EXAMPLES_URL + SLUG, diff --git a/client/src/applications/ReviewClassifier.js b/client/src/applications/ReviewClassifier.js index d408021c..772c9a1b 100644 --- a/client/src/applications/ReviewClassifier.js +++ b/client/src/applications/ReviewClassifier.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-review-classifier'; +const SLUG = "default-review-classifier"; const ReviewClassifier = { sidebar_title: "Product Review Classifier", @@ -9,13 +13,14 @@ const ReviewClassifier = { api_key: AWS_API_GATEWAY_KEY, app_name: "Product Review Classifier", assistant_name: "Ridley", - avatar_url: '/applications/ReviewClassifier/Ridley.svg', - background_image_url: '/applications/ReviewClassifier/ReviewClassifier-bg.jpg', + avatar_url: "/applications/ReviewClassifier/Ridley.svg", + background_image_url: + "/applications/ReviewClassifier/ReviewClassifier-bg.jpg", welcome_message: `Hello, I'm Ridley, and I can classify product reviews based on their tone and gesticulation.`, example_prompts: [ - 'this is the best everlasting gobstopper ever!', - 'this newspaper is only good for wrapping fish', - 'tastes like chicken', + "this is the best everlasting gobstopper ever!", + "this newspaper is only good for wrapping fish", + "tastes like chicken", ], placeholder_text: `paste a product review for Ridley...`, info_url: OPENAI_EXAMPLES_URL + SLUG, diff --git a/client/src/applications/SarcasticChat.js b/client/src/applications/SarcasticChat.js index 9a2364ee..bed6f32b 100644 --- a/client/src/applications/SarcasticChat.js +++ b/client/src/applications/SarcasticChat.js @@ -1,8 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; - -const SLUG = 'default-marv-sarcastic-chat'; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; +const SLUG = "default-marv-sarcastic-chat"; const SarcasticChat = { 
sidebar_title: "Sarcastic Chatbot", @@ -10,8 +13,8 @@ const SarcasticChat = { api_key: AWS_API_GATEWAY_KEY, app_name: "Marv the Sarcastic Chatbot", assistant_name: "Marv", - avatar_url: '/applications/SarcasticChat/Marv.svg', - background_image_url: '/applications/SarcasticChat/SarcasticChat-bg.png', + avatar_url: "/applications/SarcasticChat/Marv.svg", + background_image_url: "/applications/SarcasticChat/SarcasticChat-bg.png", welcome_message: `Hello, I'm Marv, a sarcastic chatbot.`, example_prompts: [], placeholder_text: `say something to Marv`, diff --git a/client/src/applications/SinglePageWebapp.js b/client/src/applications/SinglePageWebapp.js index e8cd659a..c1d44a2f 100644 --- a/client/src/applications/SinglePageWebapp.js +++ b/client/src/applications/SinglePageWebapp.js @@ -1,6 +1,10 @@ -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-single-page-websit'; +const SLUG = "default-single-page-websit"; const SinglePageWebapp = { sidebar_title: "Single Page Webapp Creator", @@ -8,15 +12,16 @@ const SinglePageWebapp = { api_key: AWS_API_GATEWAY_KEY, app_name: "Single Page Webapp Creator", assistant_name: "Sybil", - avatar_url: '/applications/SinglePageWebapp/Sybil.svg', - background_image_url: '/applications/SinglePageWebapp/SinglePageWebapp-bg.svg', + avatar_url: "/applications/SinglePageWebapp/Sybil.svg", + background_image_url: + "/applications/SinglePageWebapp/SinglePageWebapp-bg.svg", welcome_message: `Hello, I'm Sybil, a Python programmer. I can help you create a single page web app using a Python framework. `, example_prompts: [ '"make a hello world app"', '"make a todo list app"', '"make a recipe app"', ], - placeholder_text: 'tell Sybil what you want to create...', + placeholder_text: "tell Sybil what you want to create...", info_url: OPENAI_EXAMPLES_URL + SLUG, file_attach_button: false, uses_openai: true, diff --git a/client/src/applications/SocraticTutor.js b/client/src/applications/SocraticTutor.js index 52176729..57f434eb 100644 --- a/client/src/applications/SocraticTutor.js +++ b/client/src/applications/SocraticTutor.js @@ -1,6 +1,10 @@ -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-socratic-tutor'; +const SLUG = "default-socratic-tutor"; const SocraticTutor = { sidebar_title: "Socratic Tutor", @@ -8,15 +12,15 @@ const SocraticTutor = { api_key: AWS_API_GATEWAY_KEY, app_name: "Socratic Tutor", assistant_name: "Seraphina", - avatar_url: '/applications/SocraticTutor/Seraphina.svg', - background_image_url: '/applications/SocraticTutor/SocraticTutor-bg.jpg', + avatar_url: "/applications/SocraticTutor/Seraphina.svg", + background_image_url: "/applications/SocraticTutor/SocraticTutor-bg.jpg", welcome_message: `Hello, I'm Seraphina, a disciple of the great philosopher, Socrates. 
I can help you learn about philosophy.`, example_prompts: [ '"I think, therefore I am"', '"To be is to do"', '"Reality is merely an illusion, albeit a very persistent one"', ], - placeholder_text: 'tell Seraphina something deep...', + placeholder_text: "tell Seraphina something deep...", info_url: OPENAI_EXAMPLES_URL + SLUG, file_attach_button: false, uses_openai: true, diff --git a/client/src/applications/SpreadsheetGenerator.js b/client/src/applications/SpreadsheetGenerator.js index 47617d4b..d6037e7c 100644 --- a/client/src/applications/SpreadsheetGenerator.js +++ b/client/src/applications/SpreadsheetGenerator.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-product-name-gen'; +const SLUG = "default-product-name-gen"; const SpreadsheetGenerator = { sidebar_title: "Spreadsheet Generator", @@ -9,8 +13,9 @@ const SpreadsheetGenerator = { api_key: AWS_API_GATEWAY_KEY, app_name: "Spreadsheet Generator", assistant_name: "Sarah", - avatar_url: '/applications/SpreadsheetGenerator/Sarah.svg', - background_image_url: '/applications/SpreadsheetGenerator/SpreadsheetGenerator-bg.svg', + avatar_url: "/applications/SpreadsheetGenerator/Sarah.svg", + background_image_url: + "/applications/SpreadsheetGenerator/SpreadsheetGenerator-bg.svg", welcome_message: `Hello, I'm Sarah, and I create spreadsheets from the data you give me.`, example_prompts: [], placeholder_text: `send some data to Sarah`, diff --git a/client/src/applications/SqlTranslator.js b/client/src/applications/SqlTranslator.js index 1e270029..3c5f0ecc 100644 --- a/client/src/applications/SqlTranslator.js +++ b/client/src/applications/SqlTranslator.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-sql-translate'; +const SLUG = "default-sql-translate"; const SqlTranslator = { sidebar_title: "SQL Translator", @@ -9,8 +13,8 @@ const SqlTranslator = { api_key: AWS_API_GATEWAY_KEY, app_name: "SQL Translator", assistant_name: "Svea", - avatar_url: '/applications/SqlTranslator/Svea.svg', - background_image_url: '/applications/SqlTranslator/SqlTranslator-bg.svg', + avatar_url: "/applications/SqlTranslator/Svea.svg", + background_image_url: "/applications/SqlTranslator/SqlTranslator-bg.svg", welcome_message: `Hello, I'm Svea, a senior SQL database engineer. 
I can help you create SQL queries.`, example_prompts: [], placeholder_text: `send some data to Svea`, diff --git a/client/src/applications/TimeComplexity.js b/client/src/applications/TimeComplexity.js index 0169aca0..fedad6d2 100644 --- a/client/src/applications/TimeComplexity.js +++ b/client/src/applications/TimeComplexity.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-time-complexity'; +const SLUG = "default-time-complexity"; const TimeComplexity = { sidebar_title: "TimeComplexity", @@ -9,8 +13,8 @@ const TimeComplexity = { api_key: AWS_API_GATEWAY_KEY, app_name: "TimeComplexity", assistant_name: "Christine", - avatar_url: '/applications/TimeComplexity/Christine.svg', - background_image_url: '/applications/TimeComplexity/TimeComplexity-bg.svg', + avatar_url: "/applications/TimeComplexity/Christine.svg", + background_image_url: "/applications/TimeComplexity/TimeComplexity-bg.svg", welcome_message: `Hello, I'm Christine, and I calculate the time complexity of Python code.`, example_prompts: [], placeholder_text: `upload a Python file`, diff --git a/client/src/applications/TurnByTurnDirections.js b/client/src/applications/TurnByTurnDirections.js index b426d401..8799f831 100644 --- a/client/src/applications/TurnByTurnDirections.js +++ b/client/src/applications/TurnByTurnDirections.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-turn-by-turn-directions'; +const SLUG = "default-turn-by-turn-directions"; const TurnByTurnDirections = { sidebar_title: "Turn By Turn Directions", @@ -9,8 +13,9 @@ const TurnByTurnDirections = { api_key: AWS_API_GATEWAY_KEY, app_name: "Turn By Turn Directions", assistant_name: "Nancy", - avatar_url: '/applications/TurnByTurnDirections/Nancy.svg', - background_image_url: '/applications/TurnByTurnDirections/TurnByTurnDirections-bg.svg', + avatar_url: "/applications/TurnByTurnDirections/Nancy.svg", + background_image_url: + "/applications/TurnByTurnDirections/TurnByTurnDirections-bg.svg", welcome_message: `Hello, I'm Nancy, an expert navigator and I provide turn by turn directions.`, example_prompts: [], placeholder_text: `tell Nancy where you want to go`, diff --git a/client/src/applications/TweetClassifier.js b/client/src/applications/TweetClassifier.js index e461092d..bb06146e 100644 --- a/client/src/applications/TweetClassifier.js +++ b/client/src/applications/TweetClassifier.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-tweet-classifier'; +const SLUG = "default-tweet-classifier"; const TweetClassifier = { sidebar_title: "Tweet Classifier", @@ -9,8 +13,8 @@ const TweetClassifier = { api_key: AWS_API_GATEWAY_KEY, app_name: "Tweet Classifier", assistant_name: "Clare", - avatar_url: 
'/applications/TweetClassifier/Clare.svg', - background_image_url: '/applications/TweetClassifier/TweetClassifier-bg.jpg', + avatar_url: "/applications/TweetClassifier/Clare.svg", + background_image_url: "/applications/TweetClassifier/TweetClassifier-bg.jpg", welcome_message: `Hello, I'm Clare, and I classify tweets.`, example_prompts: [], placeholder_text: `paste a tweet for Clare`, diff --git a/client/src/applications/VRFitness.js b/client/src/applications/VRFitness.js index 6ddf4b3f..4fe3f490 100644 --- a/client/src/applications/VRFitness.js +++ b/client/src/applications/VRFitness.js @@ -1,7 +1,11 @@ // see https://github.com/FullStackWithLawrence/aws-openai/blob/main/api/terraform/apigateway_endpoints.tf#L19 -import { BACKEND_API_URL, AWS_API_GATEWAY_KEY, OPENAI_EXAMPLES_URL } from "../config"; +import { + BACKEND_API_URL, + AWS_API_GATEWAY_KEY, + OPENAI_EXAMPLES_URL, +} from "../config"; -const SLUG = 'default-vr-fitness'; +const SLUG = "default-vr-fitness"; const VRFitness = { sidebar_title: "VR Fitness", @@ -9,8 +13,8 @@ const VRFitness = { api_key: AWS_API_GATEWAY_KEY, app_name: "VR Fitness", assistant_name: "Francesca", - avatar_url: '/applications/VRFitness/Francesca.svg', - background_image_url: '/applications/VRFitness/VRFitness-bg.jpg', + avatar_url: "/applications/VRFitness/Francesca.svg", + background_image_url: "/applications/VRFitness/VRFitness-bg.jpg", welcome_message: `Hello, I'm Francesca, and I can help you create a VR fitness routine.`, example_prompts: [], placeholder_text: `tell Francesca about your exercise idea`, diff --git a/client/src/components/Layout.js b/client/src/components/Layout.js index b610d86c..a742854b 100644 --- a/client/src/components/Layout.js +++ b/client/src/components/Layout.js @@ -1,5 +1,5 @@ // see: https://www.npmjs.com/package/styled-components -import styled from 'styled-components'; +import styled from "styled-components"; export const ContainerLayout = styled.div` height: 89vh; @@ -40,6 +40,6 @@ export const Logo = styled.div` margin: 0 auto; width: 90%; height: 125px; - background-image: url('/youtube-banner-image.png'); + background-image: url("/youtube-banner-image.png"); background-size: cover; `; diff --git a/client/src/components/about/Component.css b/client/src/components/about/Component.css index c0fcb536..7428ce27 100755 --- a/client/src/components/about/Component.css +++ b/client/src/components/about/Component.css @@ -1,4 +1,3 @@ - .about-page { height: 100vh; } @@ -18,7 +17,6 @@ width: 60%; } - .about-title { text-align: center; width: 100%; @@ -30,17 +28,16 @@ .about-title a { text-align: center; color: #333; - font-family: 'Courier New', Courier, monospace; + font-family: "Courier New", Courier, monospace; } .fswl { margin-top: 5%; display: flex; justify-content: center; - background-image: url('/youtube-banner.jpeg'); + background-image: url("/youtube-banner.jpeg"); background-size: contain; background-position: center; background-repeat: no-repeat; height: 35%; - } diff --git a/client/src/components/about/Component.jsx b/client/src/components/about/Component.jsx index 998a647d..703717fe 100755 --- a/client/src/components/about/Component.jsx +++ b/client/src/components/about/Component.jsx @@ -1,26 +1,70 @@ // // -import React from 'react'; -import './Component.css'; +import React from "react"; +import "./Component.css"; const AboutPage = (props) => { - - return( -
-
+ return ( +
+

Application Stack

-

This application implements each of the 30 Code Samples as found in the OpenAI API official documentation. Created with React leveraging @chatscope/chat-ui-kit-react and react-pro-sidebar running on AWS serverless infrastructure integrated to OpenAI Python API.

- - +

+ This application implements each of the{" "} + + 30 Code Samples + {" "} + as found in the OpenAI API official documentation. Created with{" "} + + React + {" "} + leveraging{" "} + + @chatscope/chat-ui-kit-react + {" "} + and{" "} + + react-pro-sidebar + {" "} + running on{" "} + + AWS + {" "} + serverless infrastructure integrated to{" "} + + OpenAI Python API + + . +

+ + - +

- -
+
+
- - ) -} + ); +}; export default AboutPage; diff --git a/client/src/components/chatApp/ApiRequest.js b/client/src/components/chatApp/ApiRequest.js index df4480be..cd1e440c 100644 --- a/client/src/components/chatApp/ApiRequest.js +++ b/client/src/components/chatApp/ApiRequest.js @@ -31,7 +31,7 @@ function mapResponse(response) { if (response["request_meta_data"]["lambda"] == "lambda_langchain") { const messages = response["chat_memory"]["messages"]; - let aiMessages = messages.filter(message => message.type === 'ai'); + let aiMessages = messages.filter((message) => message.type === "ai"); let ai_response = ""; if (aiMessages.length > 0) { @@ -39,17 +39,17 @@ function mapResponse(response) { } return { - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": ai_response.content - }, - "finish_reason": "stop" - } + choices: [ + { + index: 0, + message: { + role: "assistant", + content: ai_response.content, + }, + finish_reason: "stop", + }, ], - "request_meta_data": response["request_meta_data"] + request_meta_data: response["request_meta_data"], }; } @@ -57,32 +57,36 @@ function mapResponse(response) { return response; } -export async function processApiRequest(chatMessage, chatHistory, apiURL, apiKey, openChatModal) { - +export async function processApiRequest( + chatMessage, + chatHistory, + apiURL, + apiKey, + openChatModal, +) { const init = { - method: 'POST', - mode: 'cors', + method: "POST", + mode: "cors", headers: { - 'x-api-key': apiKey, - 'Accept': '*/*', - 'Content-Type': 'application/json', - 'Origin': window.location.origin + "x-api-key": apiKey, + Accept: "*/*", + "Content-Type": "application/json", + Origin: window.location.origin, }, body: JSON.stringify({ - 'input_text': chatMessage, - 'chat_history': chatHistory, + input_text: chatMessage, + chat_history: chatHistory, }), }; try { const response = await fetch(apiURL, init); const status = await response.status; - const response_json = await response.json(); // Convert the ReadableStream to a JSON object + const response_json = await response.json(); // Convert the ReadableStream to a JSON object const response_body = await response_json.body; // ditto if (response.ok) { return mapResponse(response_body); - } - else { + } else { /* note: - the response_body object is not available when the status is 504, because @@ -92,23 +96,31 @@ export async function processApiRequest(chatMessage, chatHistory, apiURL, apiKey - the response_body object is intended to always be available when the status is 400. However, there potentially COULD be a case where the response itself contains message text. */ - let errTitle = 'Error ' + status; - let errMessage = 'An unknown error occurred.'; + let errTitle = "Error " + status; + let errMessage = "An unknown error occurred."; switch (status) { case 400: - errMessage = response.statusText || response_body.message || 'The request was invalid.'; + errMessage = + response.statusText || + response_body.message || + "The request was invalid."; break; case 500: - errMessage = response.statusText || response_body.message || 'An internal server error occurred.'; + errMessage = + response.statusText || + response_body.message || + "An internal server error occurred."; break; case 504: - errMessage = response.statusText || 'Gateway timeout error. This is a known consequence of using AWS Lambda for integrations to the OpenAI API. Note that AWS Lambda has a hard 29 second timeout. 
If OpenAI requests take longer, which is frequently the case with chatgpt-4 then you will receive this error. If the timeout persists then you might try using chatgpt-3.5 instead as it is more performant.'; + errMessage = + response.statusText || + "Gateway timeout error. This is a known consequence of using AWS Lambda for integrations to the OpenAI API. Note that AWS Lambda has a hard 29 second timeout. If OpenAI requests take longer, which is frequently the case with chatgpt-4 then you will receive this error. If the timeout persists then you might try using chatgpt-3.5 instead as it is more performant."; break; } openChatModal(errTitle, errMessage); } } catch (error) { - openChatModal('Error', error); + openChatModal("Error", error); return; } } diff --git a/client/src/components/chatApp/Component.css b/client/src/components/chatApp/Component.css index bbdc64cf..393233fa 100755 --- a/client/src/components/chatApp/Component.css +++ b/client/src/components/chatApp/Component.css @@ -1,10 +1,10 @@ - .chat-app { position: relative; height: 100%; } -.cs-message-separator::before, .cs-message-separator::after { +.cs-message-separator::before, +.cs-message-separator::after { background-color: lightgray !important; } diff --git a/client/src/components/chatApp/Component.jsx b/client/src/components/chatApp/Component.jsx index ee502534..f6e52d9f 100755 --- a/client/src/components/chatApp/Component.jsx +++ b/client/src/components/chatApp/Component.jsx @@ -6,12 +6,12 @@ //--------------------------------------------------------------------------------- // React stuff -import React, { useRef } from 'react'; -import { useState } from 'react'; -import PropTypes from 'prop-types'; +import React, { useRef } from "react"; +import { useState } from "react"; +import PropTypes from "prop-types"; // Chat UI stuff -import '@chatscope/chat-ui-kit-styles/dist/default/styles.min.css'; +import "@chatscope/chat-ui-kit-styles/dist/default/styles.min.css"; import { MainContainer, ChatContainer, @@ -24,12 +24,12 @@ import { InfoButton, VoiceCallButton, VideoCallButton, -} from '@chatscope/chat-ui-kit-react'; +} from "@chatscope/chat-ui-kit-react"; // Our stuff -import './Component.css'; -import { ChatModal } from './Modal.jsx'; -import { processApiRequest } from './ApiRequest.js'; +import "./Component.css"; +import { ChatModal } from "./Modal.jsx"; +import { processApiRequest } from "./ApiRequest.js"; function ChatApp(props) { const fileInputRef = useRef(null); @@ -58,7 +58,7 @@ function ChatApp(props) { const [llm, setLLM] = useState(""); function conversationHeaderFactory() { - let conversation_header = "" + let conversation_header = ""; if (uses_openai_api) { conversation_header = "OpenAI API"; } @@ -73,7 +73,6 @@ function ChatApp(props) { } if (llm != "") { conversation_header = conversation_header + " " + llm; - } return conversation_header; } @@ -97,32 +96,40 @@ function ChatApp(props) { setIsModalOpen(false); } const [isModalOpen, setIsModalOpen] = useState(false); - const [modalMessage, setmodalMessage] = useState(''); - const [modalTitle, setmodalTitle] = useState(''); + const [modalMessage, setmodalMessage] = useState(""); + const [modalTitle, setmodalTitle] = useState(""); // prompt hints const examplePrompts = (prompts) => { if (prompts.length == 0) { - return ''; - } else return 'Some example prompts to get you started:\r\n\r\n' + prompts.map((prompt) => {return prompt + '\r\n'}).join(''); - } + return ""; + } else + return ( + "Some example prompts to get you started:\r\n\r\n" + + prompts + .map((prompt) 
=> { + return prompt + "\r\n"; + }) + .join("") + ); + }; // message thread content const examples = examplePrompts(example_prompts); - let message_items = [messageFactory(welcome_message, 'incoming', 'system')]; + let message_items = [messageFactory(welcome_message, "incoming", "system")]; if (examples) { - message_items.push(messageFactory(examples, 'incoming', 'system')); + message_items.push(messageFactory(examples, "incoming", "system")); } const [messages, setMessages] = useState(message_items); // UI widget event handlers const handleInfoButtonClick = () => { - window.open(info_url, '_blank'); + window.open(info_url, "_blank"); }; // API request handler - async function handleRequest(input_text, base64_encode=true) { - const newMessage = messageFactory(input_text, 'outgoing', 'user'); + async function handleRequest(input_text, base64_encode = true) { + const newMessage = messageFactory(input_text, "outgoing", "user"); setMessages((prevMessages) => [...prevMessages, newMessage]); setIsTyping(true); @@ -130,15 +137,31 @@ function ChatApp(props) { let response; if (base64_encode) { // uploaded files need to be base64 encoded. - response = await processApiRequest(btoa(input_text), messages, api_url, api_key, openChatModal); + response = await processApiRequest( + btoa(input_text), + messages, + api_url, + api_key, + openChatModal, + ); } else { // everything else is passed as plain text - response = await processApiRequest(input_text, messages, api_url, api_key, openChatModal); + response = await processApiRequest( + input_text, + messages, + api_url, + api_key, + openChatModal, + ); } if (response && "choices" in response) { const content = response.choices[0]?.message?.content; if (content) { - const chatGPTResponse = messageFactory(content, 'incoming', 'assistant'); + const chatGPTResponse = messageFactory( + content, + "incoming", + "assistant", + ); setMessages((prevMessages) => [...prevMessages, chatGPTResponse]); } const llm_response = response.request_meta_data.model; @@ -146,7 +169,7 @@ function ChatApp(props) { } } catch (error) { // FIX NOTE: ADD MODAL HERE - console.error('Exception:', error); + console.error("Exception:", error); } finally { setIsTyping(false); } @@ -169,12 +192,11 @@ function ChatApp(props) { // send button event handler const handleSendRequest = (input_text) => { - // remove any HTML tags from the input_text. Pasting text into the // input box (from any source) tends to result in HTML span tags being included // in the input_text. This is a problem because the API doesn't know how to // handle HTML tags. So we remove them here. 
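As a concrete illustration of that sanitization step (the sample strings are hypothetical, not from the patch):

    // e.g. a paste of '<span style="color: red">SELECT *</span> FROM <b>users</b>'
    // becomes 'SELECT * FROM users' after the replace(/<[^>]+>/g, "") call below.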
- const sanitized_input_text = input_text.replace(/<[^>]+>/g, ''); + const sanitized_input_text = input_text.replace(/<[^>]+>/g, ""); // check if the sanitized input text is empty or only contains whitespace if (!sanitized_input_text.trim()) { @@ -184,43 +206,57 @@ function ChatApp(props) { handleRequest(sanitized_input_text, false); }; - // UI widget styles // note that most styling is intended to be created in Component.css // these are outlying cases where inline styles are required in order to override the default styles const transparentBackgroundStyle = { - backgroundColor: 'rgba(0,0,0,0.10)', - color: 'lightgray', + backgroundColor: "rgba(0,0,0,0.10)", + color: "lightgray", }; const MainContainerStyle = { - backgroundImage: "linear-gradient(rgba(255, 255, 255, 0.95), rgba(255, 255, 255, .75)), url('" + background_image_url + "')", - backgroundSize: 'cover', - backgroundPosition: 'center', - height: '100%', + backgroundImage: + "linear-gradient(rgba(255, 255, 255, 0.95), rgba(255, 255, 255, .75)), url('" + + background_image_url + + "')", + backgroundSize: "cover", + backgroundPosition: "center", + height: "100%", }; // render the chat app - return( -
- - - + return ( +
+ + + - + - + : null} + scrollBehavior="smooth" + typingIndicator={ + isTyping ? ( + + ) : null + } > {messages.map((message, i) => { - return + return ; })} + /> - +
- ) + ); } // define the props that are expected to be passed in and also diff --git a/client/src/components/chatApp/Modal.css b/client/src/components/chatApp/Modal.css index c2c2f0c1..d8ba8f4f 100644 --- a/client/src/components/chatApp/Modal.css +++ b/client/src/components/chatApp/Modal.css @@ -32,5 +32,4 @@ border: 4px solid red; border-radius: 50%; padding: 10px; - } diff --git a/client/src/components/chatApp/Modal.jsx b/client/src/components/chatApp/Modal.jsx index d75424a3..a417cf5a 100644 --- a/client/src/components/chatApp/Modal.jsx +++ b/client/src/components/chatApp/Modal.jsx @@ -1,49 +1,48 @@ -import React from 'react'; -import ReactModal from 'react-modal'; +import React from "react"; +import ReactModal from "react-modal"; -import { FontAwesomeIcon } from '@fortawesome/react-fontawesome'; -import { faExclamation } from '@fortawesome/free-solid-svg-icons'; +import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; +import { faExclamation } from "@fortawesome/free-solid-svg-icons"; - -import './Modal.css'; +import "./Modal.css"; export function ChatModal(props) { - const ModalStyle = { overlay: { - backgroundColor: 'rgba(0, 0, 0, 0.5)', - display: 'flex', - justifyContent: 'center', - alignItems: 'center', - zIndex: 9999 // set a high z-index value + backgroundColor: "rgba(0, 0, 0, 0.5)", + display: "flex", + justifyContent: "center", + alignItems: "center", + zIndex: 9999, // set a high z-index value }, content: { - margin: 'auto', - width: '50%', - height: '25%', - backgroundColor: 'white', - zIndex: 10000 // set an even higher z-index value - } + margin: "auto", + width: "50%", + height: "25%", + backgroundColor: "white", + zIndex: 10000, // set an even higher z-index value + }, }; return ( -
-
-
- + > +
+
+
+

{props.title}


{props.message}

- +
- ); } diff --git a/client/src/config.js b/client/src/config.js index e070e0dd..2f5887be 100644 --- a/client/src/config.js +++ b/client/src/config.js @@ -4,39 +4,41 @@ // // The API_KEY is only used to demonstrate how you'd set this up in // the event that you needed it. -export const AWS_API_GATEWAY_KEY = 'XvxAJQOmeeaIg4sqzQsqx9eRXP2G6rLVaHFyq8kL'; -export const BACKEND_API_URL = 'https://api.openai.lawrencemcdaniel.com/examples/'; -export const BACKEND_API_TEST_URL = 'https://api.openai.lawrencemcdaniel.com/tests/'; -export const OPENAI_EXAMPLES_URL = 'https://platform.openai.com/examples/'; +export const AWS_API_GATEWAY_KEY = "XvxAJQOmeeaIg4sqzQsqx9eRXP2G6rLVaHFyq8kL"; +export const BACKEND_API_URL = + "https://api.openai.lawrencemcdaniel.com/examples/"; +export const BACKEND_API_TEST_URL = + "https://api.openai.lawrencemcdaniel.com/tests/"; +export const OPENAI_EXAMPLES_URL = "https://platform.openai.com/examples/"; export const APPLICATIONS = { - AeroAssist: 'AeroAssist', - GrammarGenius: 'GrammarGenius', - KidsDigest: 'KidsDigest', - CSVify: 'CSVify', - Emojibot: 'Emojibot', - TimeComplexity: 'TimeComplexity', - CodeExplainer: 'CodeExplainer', - KeyWords: 'KeyWords', - ProductNameGenerator: 'ProductNameGenerator', - PythonDebugger: 'PythonDebugger', - SpreadsheetGenerator: 'SpreadsheetGenerator', - TweetClassifier: 'TweetClassifier', - Mood2CSSColor: 'Mood2CSSColor', - VRFitness: 'VRFitness', - SarcasticChat: 'SarcasticChat', - TurnByTurnDirections: 'TurnByTurnDirections', - InterviewQuestions: 'InterviewQuestions', - FunctionCreator: 'FunctionCreator', - CodeImprovement: 'CodeImprovement', - SinglePageWebapp: 'SinglePageWebapp', - RapBattle: 'RapBattle', - MemoWriter: 'MemoWriter', - Emojibot4: 'Emojibot4', - English2French: 'English2French', - SocraticTutor: 'SocraticTutor', - SqlTranslator: 'SqlTranslator', - MeetingNotesSummarizer: 'MeetingNotesSummarizer', - ReviewClassifier: 'ReviewClassifier', - ProConDiscusser: 'ProConDiscusser', - LessonPlanWriter: 'LessonPlanWriter', + AeroAssist: "AeroAssist", + GrammarGenius: "GrammarGenius", + KidsDigest: "KidsDigest", + CSVify: "CSVify", + Emojibot: "Emojibot", + TimeComplexity: "TimeComplexity", + CodeExplainer: "CodeExplainer", + KeyWords: "KeyWords", + ProductNameGenerator: "ProductNameGenerator", + PythonDebugger: "PythonDebugger", + SpreadsheetGenerator: "SpreadsheetGenerator", + TweetClassifier: "TweetClassifier", + Mood2CSSColor: "Mood2CSSColor", + VRFitness: "VRFitness", + SarcasticChat: "SarcasticChat", + TurnByTurnDirections: "TurnByTurnDirections", + InterviewQuestions: "InterviewQuestions", + FunctionCreator: "FunctionCreator", + CodeImprovement: "CodeImprovement", + SinglePageWebapp: "SinglePageWebapp", + RapBattle: "RapBattle", + MemoWriter: "MemoWriter", + Emojibot4: "Emojibot4", + English2French: "English2French", + SocraticTutor: "SocraticTutor", + SqlTranslator: "SqlTranslator", + MeetingNotesSummarizer: "MeetingNotesSummarizer", + ReviewClassifier: "ReviewClassifier", + ProConDiscusser: "ProConDiscusser", + LessonPlanWriter: "LessonPlanWriter", }; diff --git a/client/src/main.jsx b/client/src/main.jsx index 54b39dd1..c9900533 100644 --- a/client/src/main.jsx +++ b/client/src/main.jsx @@ -1,10 +1,10 @@ -import React from 'react' -import ReactDOM from 'react-dom/client' -import App from './App.jsx' -import './index.css' +import React from "react"; +import ReactDOM from "react-dom/client"; +import App from "./App.jsx"; +import "./index.css"; -ReactDOM.createRoot(document.getElementById('root')).render( 
+ReactDOM.createRoot(document.getElementById("root")).render( , -) +); diff --git a/client/vite.config.js b/client/vite.config.js index 2832b0e9..3d444e56 100644 --- a/client/vite.config.js +++ b/client/vite.config.js @@ -1,11 +1,11 @@ - import { defineConfig } from 'vite' -import react from '@vitejs/plugin-react' +import { defineConfig } from "vite"; +import react from "@vitejs/plugin-react"; // https://vitejs.dev/config/ export default defineConfig({ plugins: [react()], - base: '', + base: "", build: { sourcemap: true, - } -}) + }, +}); From e6c5ddcb6394366c8f8b06e87da671ecf3455678 Mon Sep 17 00:00:00 2001 From: lpm0073 Date: Sat, 18 Nov 2023 18:12:58 -0600 Subject: [PATCH 06/10] style: reformat yml files with prettier --- .github/ISSUE_TEMPLATE/BUG-REPORT.yml | 4 +- .github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml | 4 +- .github/actions/merge-branch/action.yml | 65 +++---- .github/actions/tests/pre-commit/action.yml | 97 +++++------ .github/actions/tests/python/action.yml | 163 +++++++++--------- .github/actions/tests/reactjs/action.yml | 21 +-- .github/actions/tests/terraform/action.yml | 109 ++++++------ .github/workflows/pullRequestController.yml | 28 ++- .github/workflows/pushMain.yml | 9 +- .github/workflows/runTests.yml | 1 + .github/workflows/semanticVersionBump.yml | 6 +- .../workflows/semanticVersionBumpMerge.yml | 4 +- 12 files changed, 262 insertions(+), 249 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/BUG-REPORT.yml b/.github/ISSUE_TEMPLATE/BUG-REPORT.yml index cc4709c7..7bee22b4 100644 --- a/.github/ISSUE_TEMPLATE/BUG-REPORT.yml +++ b/.github/ISSUE_TEMPLATE/BUG-REPORT.yml @@ -1,9 +1,7 @@ name: "🐛 Bug Report" description: Create a new ticket for a bug. title: "🐛 [BUG] - " -labels: [ - "bug" -] +labels: ["bug"] body: - type: textarea id: description diff --git a/.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml b/.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml index 70c82dd3..69fce04f 100644 --- a/.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml +++ b/.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml @@ -1,9 +1,7 @@ name: "💡 Feature Request" description: Create a new ticket for a new feature request title: "💡 [REQUEST] - <title>" -labels: [ - "question" -] +labels: ["question"] body: - type: input id: start_date diff --git a/.github/actions/merge-branch/action.yml b/.github/actions/merge-branch/action.yml index 729211f4..896656ae 100644 --- a/.github/actions/merge-branch/action.yml +++ b/.github/actions/merge-branch/action.yml @@ -1,58 +1,59 @@ +--- #------------------------------------------------------------------------------ # Run pre-commit #------------------------------------------------------------------------------ name: Merge branding: - icon: 'git-pull-request' - color: 'orange' + icon: "git-pull-request" + color: "orange" inputs: github-token: - description: 'The GitHub token to use for authentication' + description: "The GitHub token to use for authentication" required: true type: string source-branch: - description: 'The branch to merge from' + description: "The branch to merge from" required: false type: string - default: 'main' + default: "main" target-branch: - description: 'The branch to merge to' + description: "The branch to merge to" required: true type: string python-version: - description: 'The version of Python to use, such as 3.11.0' + description: "The version of Python to use, such as 3.11.0" required: true type: string runs: using: "composite" steps: - - name: Checkout code - id: checkout - uses: actions/checkout@v4 - with: - fetch-depth: 0 - persist-credentials: false + - 
name: Checkout code + id: checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + persist-credentials: false - - name: Remember current branch - shell: bash - run: | - echo "CURRENT_BRANCH=$(git branch --show-current)" >> $GITHUB_ENV + - name: Remember current branch + shell: bash + run: | + echo "CURRENT_BRANCH=$(git branch --show-current)" >> $GITHUB_ENV - - name: Merge - id: merge - shell: bash - run: | - git config --local user.email "action@github.com" - git config --local user.name "GitHub Action" - git checkout ${{ inputs.source-branch }} - git pull - git checkout ${{ inputs.target-branch }} - git merge -Xtheirs ${{ inputs.source-branch }} - git push https://${{ inputs.github-token }}@github.com/${{ github.repository }}.git HEAD:${{ inputs.target-branch }} + - name: Merge + id: merge + shell: bash + run: | + git config --local user.email "action@github.com" + git config --local user.name "GitHub Action" + git checkout ${{ inputs.source-branch }} + git pull + git checkout ${{ inputs.target-branch }} + git merge -Xtheirs ${{ inputs.source-branch }} + git push https://${{ inputs.github-token }}@github.com/${{ github.repository }}.git HEAD:${{ inputs.target-branch }} - - name: Checkout current branch - shell: bash - run: | - git checkout ${{ env.CURRENT_BRANCH }} + - name: Checkout current branch + shell: bash + run: | + git checkout ${{ env.CURRENT_BRANCH }} diff --git a/.github/actions/tests/pre-commit/action.yml b/.github/actions/tests/pre-commit/action.yml index 25e7801c..cb4e1327 100644 --- a/.github/actions/tests/pre-commit/action.yml +++ b/.github/actions/tests/pre-commit/action.yml @@ -1,68 +1,69 @@ +--- #------------------------------------------------------------------------------ # Run pre-commit #------------------------------------------------------------------------------ name: Test pre-commit branding: - icon: 'git-pull-request' - color: 'orange' + icon: "git-pull-request" + color: "orange" inputs: python-version: - description: 'The version of Python to use, such as 3.11.0' + description: "The version of Python to use, such as 3.11.0" required: true type: string runs: using: "composite" steps: - - name: Checkout code - id: checkout - uses: actions/checkout@v4 + - name: Checkout code + id: checkout + uses: actions/checkout@v4 - - name: Check for pre-commit in requirements - shell: bash - run: | - if ! grep -q "pre-commit" ./requirements.txt; then - echo "pre-commit not found in requirements.txt" >&2 - exit 1 - fi + - name: Check for pre-commit in requirements + shell: bash + run: | + if ! grep -q "pre-commit" ./requirements.txt; then + echo "pre-commit not found in requirements.txt" >&2 + exit 1 + fi - - name: Check for black in requirements - shell: bash - run: | - if ! grep -q "black" ./requirements.txt; then - echo "black not found in requirements.txt" >&2 - exit 1 - fi + - name: Check for black in requirements + shell: bash + run: | + if ! grep -q "black" ./requirements.txt; then + echo "black not found in requirements.txt" >&2 + exit 1 + fi - - name: Check for flake8 in requirements - shell: bash - run: | - if ! grep -q "flake8" ./requirements.txt; then - echo "flake8 not found in requirements.txt" >&2 - exit 1 - fi + - name: Check for flake8 in requirements + shell: bash + run: | + if ! grep -q "flake8" ./requirements.txt; then + echo "flake8 not found in requirements.txt" >&2 + exit 1 + fi - - name: Check for flake8-coding in requirements - shell: bash - run: | - if ! 
grep -q "flake8-coding" ./requirements.txt; then - echo "flake8-coding not found in requirements.txt" >&2 - exit 1 - fi + - name: Check for flake8-coding in requirements + shell: bash + run: | + if ! grep -q "flake8-coding" ./requirements.txt; then + echo "flake8-coding not found in requirements.txt" >&2 + exit 1 + fi - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: ${{ inputs.python-version }} + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: ${{ inputs.python-version }} - - name: Install dependencies - shell: bash - run: | - python -m pip install --upgrade pip - pip install -r ./requirements.txt + - name: Install dependencies + shell: bash + run: | + python -m pip install --upgrade pip + pip install -r ./requirements.txt - # see: https://pre-commit.ci/lite.html - - name: pre-commit ci - id: pre-commit-ci - if: always() - uses: pre-commit-ci/lite-action@v1.0.1 + # see: https://pre-commit.ci/lite.html + - name: pre-commit ci + id: pre-commit-ci + if: always() + uses: pre-commit-ci/lite-action@v1.0.1 diff --git a/.github/actions/tests/python/action.yml b/.github/actions/tests/python/action.yml index 3776ddc3..c5bfcf47 100644 --- a/.github/actions/tests/python/action.yml +++ b/.github/actions/tests/python/action.yml @@ -1,119 +1,120 @@ +--- #------------------------------------------------------------------------------ # Run Python unit tests #------------------------------------------------------------------------------ name: Test Python branding: - icon: 'git-pull-request' - color: 'orange' + icon: "git-pull-request" + color: "orange" inputs: python-version: - description: 'The version of Python to use, such as 3.11.0' + description: "The version of Python to use, such as 3.11.0" required: true type: string openai-api-organization: - description: 'The OpenAI API organization' + description: "The OpenAI API organization" required: true type: string openai-api-key: - description: 'The OpenAI API key' + description: "The OpenAI API key" required: true type: string pinecone-api-key: - description: 'The Pinecone API key' + description: "The Pinecone API key" required: true type: string pinecone-environment: - description: 'The Pinecone environment' + description: "The Pinecone environment" required: true type: string env: - REQUIREMENTS_PATH: 'api/terraform/python/layer_genai/requirements.txt' + REQUIREMENTS_PATH: "api/terraform/python/layer_genai/requirements.txt" runs: using: "composite" steps: - - name: Checkout code - id: checkout - uses: actions/checkout@v4 + - name: Checkout code + id: checkout + uses: actions/checkout@v4 - - name: Check for openai in requirements - shell: bash - run: | - if ! grep -q "openai" ./requirements.txt; then - echo "openai not found in requirements.txt" >&2 - exit 1 - fi + - name: Check for openai in requirements + shell: bash + run: | + if ! grep -q "openai" ./requirements.txt; then + echo "openai not found in requirements.txt" >&2 + exit 1 + fi - - name: Check for langchain in requirements - shell: bash - run: | - if ! grep -q "langchain" ./requirements.txt; then - echo "langchain not found in requirements.txt" >&2 - exit 1 - fi + - name: Check for langchain in requirements + shell: bash + run: | + if ! grep -q "langchain" ./requirements.txt; then + echo "langchain not found in requirements.txt" >&2 + exit 1 + fi - - name: Check for langchain-experimental in requirements - shell: bash - run: | - if ! 
grep -q "langchain-experimental" ./requirements.txt; then - echo "langchain-experimental not found in requirements.txt" >&2 - exit 1 - fi + - name: Check for langchain-experimental in requirements + shell: bash + run: | + if ! grep -q "langchain-experimental" ./requirements.txt; then + echo "langchain-experimental not found in requirements.txt" >&2 + exit 1 + fi - - name: Check for pinecone-client in requirements - shell: bash - run: | - if ! grep -q "pinecone-client" ./requirements.txt; then - echo "pinecone-client not found in requirements.txt" >&2 - exit 1 - fi + - name: Check for pinecone-client in requirements + shell: bash + run: | + if ! grep -q "pinecone-client" ./requirements.txt; then + echo "pinecone-client not found in requirements.txt" >&2 + exit 1 + fi - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: ${{ inputs.python-version }} + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: ${{ inputs.python-version }} - - name: locate site-packages path - shell: bash - run: | - echo "SITE_PACKAGES_PATH=$(python -c 'import site; print(site.getsitepackages()[0])')" >> $GITHUB_ENV + - name: locate site-packages path + shell: bash + run: | + echo "SITE_PACKAGES_PATH=$(python -c 'import site; print(site.getsitepackages()[0])')" >> $GITHUB_ENV - - name: Install pip - shell: bash - run: | - python -m pip install --upgrade pip + - name: Install pip + shell: bash + run: | + python -m pip install --upgrade pip - - name: Install dependencies - shell: bash - run: | - cp -R ./api/terraform/python/layer_genai/openai_utils ${{ env.SITE_PACKAGES_PATH }} - pip install -r ./requirements.txt - env: - SITE_PACKAGES_PATH: ${{ env.SITE_PACKAGES_PATH }} + - name: Install dependencies + shell: bash + run: | + cp -R ./api/terraform/python/layer_genai/openai_utils ${{ env.SITE_PACKAGES_PATH }} + pip install -r ./requirements.txt + env: + SITE_PACKAGES_PATH: ${{ env.SITE_PACKAGES_PATH }} - - name: Create .env - shell: bash - run: | - touch ./.env - echo "OPENAI_API_ORGANIZATION=${{ env.OPENAI_API_ORGANIZATION }}" >> ./.env - echo "OPENAI_API_KEY=${{ env.OPENAI_API_KEY }}" >> ./.env - echo "PINECONE_API_KEY=${{ env.PINECONE_API_KEY }}" >> ./.env - echo "PINECONE_ENVIRONMENT=${{ env.PINECONE_ENVIRONMENT }}" >> ./.env - env: - OPENAI_API_ORGANIZATION: ${{ inputs.openai-api-organization }} - OPENAI_API_KEY: ${{ inputs.openai-api-key }} - PINECONE_API_KEY: ${{ inputs.pinecone-api-key }} - PINECONE_ENVIRONMENT: ${{ inputs.pinecone-environment }} + - name: Create .env + shell: bash + run: | + touch ./.env + echo "OPENAI_API_ORGANIZATION=${{ env.OPENAI_API_ORGANIZATION }}" >> ./.env + echo "OPENAI_API_KEY=${{ env.OPENAI_API_KEY }}" >> ./.env + echo "PINECONE_API_KEY=${{ env.PINECONE_API_KEY }}" >> ./.env + echo "PINECONE_ENVIRONMENT=${{ env.PINECONE_ENVIRONMENT }}" >> ./.env + env: + OPENAI_API_ORGANIZATION: ${{ inputs.openai-api-organization }} + OPENAI_API_KEY: ${{ inputs.openai-api-key }} + PINECONE_API_KEY: ${{ inputs.pinecone-api-key }} + PINECONE_ENVIRONMENT: ${{ inputs.pinecone-environment }} - - name: Test lambda_openai_v2 - shell: bash - run: | - cd ./api/terraform/python/lambda_openai_v2 - pytest -v -s tests/ + - name: Test lambda_openai_v2 + shell: bash + run: | + cd ./api/terraform/python/lambda_openai_v2 + pytest -v -s tests/ - - name: Test lambda_langchain - shell: bash - run: | - cd ./api/terraform/python/lambda_langchain - pytest -v -s tests/ + - name: Test lambda_langchain + shell: bash + run: | + cd ./api/terraform/python/lambda_langchain + 
pytest -v -s tests/ diff --git a/.github/actions/tests/reactjs/action.yml b/.github/actions/tests/reactjs/action.yml index ce2bbffb..c21bad9b 100644 --- a/.github/actions/tests/reactjs/action.yml +++ b/.github/actions/tests/reactjs/action.yml @@ -1,20 +1,21 @@ +--- #------------------------------------------------------------------------------ # Run ReactJS unit tests #------------------------------------------------------------------------------ name: Test ReactJS branding: - icon: 'git-pull-request' - color: 'orange' + icon: "git-pull-request" + color: "orange" runs: using: "composite" steps: - - name: Checkout code - id: checkout - uses: actions/checkout@v4 + - name: Checkout code + id: checkout + uses: actions/checkout@v4 - - name: Run ReactJS Tests - id: reactjs_tests - shell: bash - run: | - echo "Test scaffolding for ReactJS" + - name: Run ReactJS Tests + id: reactjs_tests + shell: bash + run: | + echo "Test scaffolding for ReactJS" diff --git a/.github/actions/tests/terraform/action.yml b/.github/actions/tests/terraform/action.yml index 19ea6314..a11b5d8d 100644 --- a/.github/actions/tests/terraform/action.yml +++ b/.github/actions/tests/terraform/action.yml @@ -1,71 +1,72 @@ +--- #------------------------------------------------------------------------------ # Run Terraform tests #------------------------------------------------------------------------------ name: Test Terraform branding: - icon: 'git-pull-request' - color: 'orange' + icon: "git-pull-request" + color: "orange" runs: using: "composite" steps: - - name: Checkout code - id: checkout - uses: actions/checkout@v4 + - name: Checkout code + id: checkout + uses: actions/checkout@v4 - - name: Verify AWS_ACCESS_KEY_ID - shell: bash - run: | - if [[ -z "${{ secrets.AWS_ACCESS_KEY_ID }}" ]]; then - echo "AWS_ACCESS_KEY_ID is not set" >&2 - exit 1 - fi + - name: Verify AWS_ACCESS_KEY_ID + shell: bash + run: | + if [[ -z "${{ secrets.AWS_ACCESS_KEY_ID }}" ]]; then + echo "AWS_ACCESS_KEY_ID is not set" >&2 + exit 1 + fi - - name: Verify AWS_SECRET_ACCESS_KEY - shell: bash - run: | - if [[ -z "${{ secrets.AWS_SECRET_ACCESS_KEY }}" ]]; then - echo "AWS_SECRET_ACCESS_KEY is not set" >&2 - exit 1 - fi + - name: Verify AWS_SECRET_ACCESS_KEY + shell: bash + run: | + if [[ -z "${{ secrets.AWS_SECRET_ACCESS_KEY }}" ]]; then + echo "AWS_SECRET_ACCESS_KEY is not set" >&2 + exit 1 + fi - - name: Verify AWS_REGION - shell: bash - run: | - if [[ -z "${{ secrets.AWS_REGION }}" ]]; then - echo "AWS_REGION is not set" >&2 - exit 1 - fi + - name: Verify AWS_REGION + shell: bash + run: | + if [[ -z "${{ secrets.AWS_REGION }}" ]]; then + echo "AWS_REGION is not set" >&2 + exit 1 + fi - - name: Verify Terraform is installed - run: | - if ! command -v terraform &> /dev/null - then - echo "Terraform could not be found" - exit 1 - fi + - name: Verify Terraform is installed + run: | + if ! 
command -v terraform &> /dev/null + then + echo "Terraform could not be found" + exit 1 + fi - - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v4 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws-region: ${{ secrets.AWS_REGION }} + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ secrets.AWS_REGION }} - - name: Terraform Init - shell: bash - run: | - cd api/terraform - terraform init + - name: Terraform Init + shell: bash + run: | + cd api/terraform + terraform init - - name: Terraform Validate - shell: bash - run: | - cd api/terraform - terraform validate + - name: Terraform Validate + shell: bash + run: | + cd api/terraform + terraform validate - - name: Terraform Format - shell: bash - run: | - cd api/terraform - terraform fmt -check + - name: Terraform Format + shell: bash + run: | + cd api/terraform + terraform fmt -check diff --git a/.github/workflows/pullRequestController.yml b/.github/workflows/pullRequestController.yml index 6361cdfe..0a261fd3 100644 --- a/.github/workflows/pullRequestController.yml +++ b/.github/workflows/pullRequestController.yml @@ -1,3 +1,4 @@ +--- #------------------------------------------------------------------------------ # Pull Request Workflow Controller. # @@ -42,16 +43,25 @@ on: # In general, use `pull_request` for workflows that need to access the code in the pull request, # and `pull_request_target` for workflows that need to be safe for pull requests from forks. pull_request_target: - types: [opened, closed, synchronize, edited, ready_for_review, review_requested, assigned] + types: + [ + opened, + closed, + synchronize, + edited, + ready_for_review, + review_requested, + assigned, + ] paths: - - '**.py' - - '**.tf' - - '**.js' - - '**.jsx' - - '**.requirements.txt' - - '**.package.json' - - './client/**' - - './api/terraform/**' + - "**.py" + - "**.tf" + - "**.js" + - "**.jsx" + - "**.requirements.txt" + - "**.package.json" + - "./client/**" + - "./api/terraform/**" env: python-version: "3.11" diff --git a/.github/workflows/pushMain.yml b/.github/workflows/pushMain.yml index 61e1052e..6805ed09 100644 --- a/.github/workflows/pushMain.yml +++ b/.github/workflows/pushMain.yml @@ -1,3 +1,4 @@ +--- #--------------------------------------------------------- # - Create a semantical release # - Merge main into next, alpha, beta, and next-major @@ -83,10 +84,10 @@ jobs: @semantic-release/git @semantic-release/changelog env: - GIT_COMMITTER_NAME: github-actions[bot] - GIT_COMMITTER_EMAIL: github-actions[bot]@users.noreply.github.com - GIT_AUTHOR_NAME: github-actions[bot] - GIT_AUTHOR_EMAIL: github-actions[bot]@users.noreply.github.com + GIT_COMMITTER_NAME: github-actions[bot] + GIT_COMMITTER_EMAIL: github-actions[bot]@users.noreply.github.com + GIT_AUTHOR_NAME: github-actions[bot] + GIT_AUTHOR_EMAIL: github-actions[bot]@users.noreply.github.com - name: Publish To GitHub Package Registry id: publish diff --git a/.github/workflows/runTests.yml b/.github/workflows/runTests.yml index 52324192..a7539f6a 100644 --- a/.github/workflows/runTests.yml +++ b/.github/workflows/runTests.yml @@ -1,3 +1,4 @@ +--- #------------------------------------------------------------------------------ # Run all tests #------------------------------------------------------------------------------ 
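The pull_request vs pull_request_target note in pullRequestController.yml above is the key design decision in that controller workflow. As a rough, hypothetical sketch (not part of this patch series), a fork-safe pull_request_target job generally pins its checkout to the PR head commit and keeps repository secrets away from steps that execute the untrusted PR code:

name: exampleSafePrCheck
on:
  pull_request_target:
    types: [opened, synchronize]
jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      # pull_request_target runs with base-repo permissions and secrets, so
      # explicitly check out the PR head and avoid passing secrets to steps
      # that execute the untrusted PR code.
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.sha }}
          persist-credentials: false
      - name: Read-only checks
        run: echo "run linters against the PR code here"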
diff --git a/.github/workflows/semanticVersionBump.yml b/.github/workflows/semanticVersionBump.yml index cd6a2760..93038d04 100644 --- a/.github/workflows/semanticVersionBump.yml +++ b/.github/workflows/semanticVersionBump.yml @@ -1,3 +1,4 @@ +--- #------------------------------------------------------------------------------ # Lawrence McDaniel - https://lawrencemcdaniel.com # Version Bump Workflow for Python package openai_utils @@ -31,12 +32,12 @@ jobs: - name: Set up Python 3.11 uses: actions/setup-python@v4 with: - python-version: '3.11' + python-version: "3.11" - name: Setup Node.js environment uses: actions/setup-node@v4 with: - node-version: '20.9.0' + node-version: "20.9.0" - name: Install npm dev dependencies run: npm install @@ -91,7 +92,6 @@ jobs: id: null_step3 run: echo "i ensure that CURRENT_VERSION, NEXT_VERSION and VERSION_CHANGED are set." - - name: Update __version__.py # step 4 # if VERSION_CHANGED is true, update __version__.py and push the changes to the diff --git a/.github/workflows/semanticVersionBumpMerge.yml b/.github/workflows/semanticVersionBumpMerge.yml index 425a105c..dd6374a5 100644 --- a/.github/workflows/semanticVersionBumpMerge.yml +++ b/.github/workflows/semanticVersionBumpMerge.yml @@ -1,3 +1,4 @@ +--- #------------------------------------------------------------------------------ # Lawrence McDaniel - https://lawrencemcdaniel.com # Version Bump and Merge Workflow @@ -24,7 +25,7 @@ jobs: - name: Set up Python 3.11 uses: actions/setup-python@v4 with: - python-version: '3.11' + python-version: "3.11" - name: Get current version # step 1 @@ -69,7 +70,6 @@ jobs: id: another_null_step run: echo "i ensure that CURRENT_VERSION, NEXT_VERSION and VERSION_CHANGED are set." - - name: Update __version__.py # step 4 # if VERSION_CHANGED is true, update __version__.py and push the changes to the From fdb1367d51eeff08affa6581bdb0a461c7bb2d43 Mon Sep 17 00:00:00 2001 From: lpm0073 <lpm0073@gmail.com> Date: Sat, 18 Nov 2023 18:18:02 -0600 Subject: [PATCH 07/10] style: lint with pylint, black, flake8 and isort --- .../openai_cors_preflight_handler/index.mjs | 9 ++-- .../python/lambda_langchain/lambda_handler.py | 10 ++-- .../python/lambda_langchain/tests/init.py | 2 +- .../python/lambda_langchain/tests/test_01.py | 10 ++-- .../python/lambda_openai/lambda_handler.py | 28 +++++------ .../python/lambda_openai/tests/test_01.py | 6 ++- .../python/lambda_openai/tests/test_init.py | 12 ++--- .../python/lambda_openai_v2/lambda_handler.py | 5 +- .../python/lambda_openai_v2/tests/__init__.py | 14 ------ .../python/lambda_openai_v2/tests/init.py | 2 +- .../python/lambda_openai_v2/tests/test_01.py | 9 +++- .../layer_genai/openai_utils/__version__.py | 2 +- .../python/layer_genai/openai_utils/const.py | 2 + .../python/layer_genai/openai_utils/setup.py | 2 +- .../layer_genai/openai_utils/setup_test.py | 9 ++-- .../layer_genai/openai_utils/setup_utils.py | 2 +- .../python/layer_genai/openai_utils/utils.py | 3 +- .../layer_genai/openai_utils/validators.py | 50 ++++++------------- 18 files changed, 76 insertions(+), 101 deletions(-) diff --git a/api/terraform/nodejs/openai_cors_preflight_handler/index.mjs b/api/terraform/nodejs/openai_cors_preflight_handler/index.mjs index 10b1c39a..18000bcb 100644 --- a/api/terraform/nodejs/openai_cors_preflight_handler/index.mjs +++ b/api/terraform/nodejs/openai_cors_preflight_handler/index.mjs @@ -5,9 +5,10 @@ export const handler = async (event) => { // 
https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html?shortFooter=true#api-gateway-simple-proxy-for-lambda-output-format const cors_headers = { "Content-Type": "application/json", - "Access-Control-Allow-Headers": "Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token", + "Access-Control-Allow-Headers": + "Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token", "Access-Control-Allow-Origin": "*", - "Access-Control-Allow-Methods": "GET,OPTIONS,POST,PUT" + "Access-Control-Allow-Methods": "GET,OPTIONS,POST,PUT", }; const response = { @@ -16,8 +17,8 @@ export const handler = async (event) => { headers: cors_headers, multiValueHeaders: {}, body: JSON.stringify({ - message: 'Hello from AWS Lambda Node.js' - }) + message: "Hello from AWS Lambda Node.js", + }), }; return response; diff --git a/api/terraform/python/lambda_langchain/lambda_handler.py b/api/terraform/python/lambda_langchain/lambda_handler.py index 0fb4a913..1cab13d0 100644 --- a/api/terraform/python/lambda_langchain/lambda_handler.py +++ b/api/terraform/python/lambda_langchain/lambda_handler.py @@ -78,7 +78,7 @@ # for production these values are set inside the AWS Lambda function environment # see ./env.sh and lambda_langchain.tf -OPENAI_ENDPOINT_IMAGE_N = int(os.getenv("OPENAI_ENDPOINT_IMAGE_N", 4)) +OPENAI_ENDPOINT_IMAGE_N = int(os.getenv("OPENAI_ENDPOINT_IMAGE_N", "4")) OPENAI_ENDPOINT_IMAGE_SIZE = os.getenv("OPENAI_ENDPOINT_IMAGE_SIZE", "1024x768") openai.organization = os.getenv("OPENAI_API_ORGANIZATION", "SET-ME-WITH-DOTENV") openai.api_key = os.getenv("OPENAI_API_KEY", "SET-ME-WITH-DOTENV") @@ -89,6 +89,8 @@ LANGCHAIN_MEMORY_KEY = "chat_history" +# pylint: disable=too-many-locals +# pylint: disable=unused-argument def handler(event, context, api_key=None, organization=None, pinecone_api_key=None): """ Process incoming requests and invoking the appropriate @@ -124,6 +126,7 @@ def handler(event, context, api_key=None, organization=None, pinecone_api_key=No match end_point: case OpenAIEndPoint.ChatCompletion: + # pylint: disable=pointless-string-statement """ Need to keep in mind that this is a stateless operation. We have to bring along everything needed to run the conversation. This means we need to @@ -163,9 +166,9 @@ def handler(event, context, api_key=None, organization=None, pinecone_api_key=No assistant_messages = get_messages_for_role( message_history, OpenAIMessageKeys.OPENAI_ASSISTANT_MESSAGE_KEY ) - for i in range(0, len(assistant_messages)): + for i, assistant_message in enumerate(assistant_messages): memory.chat_memory.add_user_message(user_messages[i]) - memory.chat_memory.add_ai_message(assistant_messages[i]) + memory.chat_memory.add_ai_message(assistant_message) # pylint: disable=no-member # 4. 
run the conversation # ------------------------------------------------------------- @@ -212,6 +215,7 @@ def handler(event, context, api_key=None, organization=None, pinecone_api_key=No except (openai.APIError, ValueError, TypeError, NotImplementedError) as e: # 400 Bad Request return http_response_factory(status_code=HTTP_RESPONSE_BAD_REQUEST, body=exception_response_factory(e)) + # pylint: disable=broad-except except (openai.OpenAIError, Exception) as e: # 500 Internal Server Error return http_response_factory( diff --git a/api/terraform/python/lambda_langchain/tests/init.py b/api/terraform/python/lambda_langchain/tests/init.py index a575a9e0..c59f3d57 100644 --- a/api/terraform/python/lambda_langchain/tests/init.py +++ b/api/terraform/python/lambda_langchain/tests/init.py @@ -5,6 +5,6 @@ def get_event(filespec): """Reads a JSON file and returns the event""" - with open(filespec, "r") as f: + with open(filespec, "r", encoding="utf-8") as f: event = json.load(f) return event diff --git a/api/terraform/python/lambda_langchain/tests/test_01.py b/api/terraform/python/lambda_langchain/tests/test_01.py index 04248c31..11013e41 100644 --- a/api/terraform/python/lambda_langchain/tests/test_01.py +++ b/api/terraform/python/lambda_langchain/tests/test_01.py @@ -4,7 +4,7 @@ """ import os -import pytest +import pytest # pylint: disable=unused-import from dotenv import find_dotenv, load_dotenv from lambda_langchain.lambda_handler import handler from lambda_langchain.tests.init import get_event @@ -18,15 +18,19 @@ OPENAI_API_ORGANIZATION = os.environ["OPENAI_API_ORGANIZATION"] PINECONE_API_KEY = os.environ["PINECONE_API_KEY"] else: - raise Exception("No .env file found in root directory of repository") + raise FileNotFoundError("No .env file found in root directory of repository") def handle_event_wrapper(event): + """Wrapper for the Lambda handler function.""" retval = handler(event=event, context=None) return retval +# pylint: disable=too-few-public-methods class TestLangchain: + """Test the OpenAI API via Langchain using the Lambda Layer, 'genai'.""" + def test_basic_request(self): """Test a basic request""" event = get_event("tests/events/test_01.request.json") @@ -38,5 +42,5 @@ def test_basic_request(self): assert "body" in retval, "body key not found in retval" assert retval["statusCode"] == 200 - assert retval["isBase64Encoded"] == False + assert not retval["isBase64Encoded"] assert isinstance(retval["body"], dict) diff --git a/api/terraform/python/lambda_openai/lambda_handler.py b/api/terraform/python/lambda_openai/lambda_handler.py index ef0ddb0e..73fa99dc 100644 --- a/api/terraform/python/lambda_openai/lambda_handler.py +++ b/api/terraform/python/lambda_openai/lambda_handler.py @@ -58,12 +58,13 @@ HTTP_RESPONSE_INTERNAL_SERVER_ERROR = 500 # https://platform.openai.com/api_keys -OPENAI_ENDPOINT_IMAGE_N = int(os.getenv("OPENAI_ENDPOINT_IMAGE_N", 4)) +OPENAI_ENDPOINT_IMAGE_N = int(os.getenv("OPENAI_ENDPOINT_IMAGE_N", "4")) OPENAI_ENDPOINT_IMAGE_SIZE = os.getenv("OPENAI_ENDPOINT_IMAGE_SIZE", "1024x768") openai.organization = os.getenv("OPENAI_API_ORGANIZATION", "Personal") openai.api_key = os.getenv("OPENAI_API_KEY") +# pylint: disable=too-few-public-methods class OpenAIEndPoint: """ A class representing an endpoint for the OpenAI API. 
@@ -110,7 +111,7 @@ def http_response_factory(status_code: int, body) -> dict: see https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html """ if status_code < 100 or status_code > 599: - raise ValueError("Invalid HTTP response code received: {status_code}".format(status_code=status_code)) + raise ValueError(f"Invalid HTTP response code received: {status_code}") retval = { "isBase64Encoded": False, @@ -140,17 +141,12 @@ def exception_response_factory(exception: Exception) -> dict: def validate_item(item, valid_items: list, item_type: str) -> None: """Ensure that item exists in valid_items""" if item not in valid_items: - raise ValueError( - "Item {item} not found in {item_type}: {valid_items}".format( - item=item, item_type=item_type, valid_items=valid_items - ) - ) - return + raise ValueError(f"Item {item} not found in {item_type}: {valid_items}") def validate_request_body(request_body) -> None: """Eee openai.chat.completion.request.json""" - if type(request_body) is not dict: + if not isinstance(request_body, dict): raise TypeError("request body should be a dict") @@ -159,15 +155,15 @@ def validate_messages(request_body): if "messages" not in request_body: raise ValueError("dict key 'messages' not found in request body object") messages = request_body["messages"] - if type(messages) is not list: + if not isinstance(messages, list): raise ValueError("dict key 'messages' should be a JSON list") for message in messages: - if type(message) is not dict: - raise ValueError("invalid ojbect type {t} found in messages list".format(t=type(message))) + if not isinstance(message, dict): + raise ValueError(f"invalid object type {type(message)} found in messages list") if "role" not in message: - raise ValueError("dict key 'role' not found in message {m}".format(m=json.dumps(message, indent=4))) + raise ValueError(f"dict key 'role' not found in message {json.dumps(message, indent=4)}") if "content" not in message: - raise ValueError("dict key 'content' not found in message {m}".format(m=json.dumps(message, indent=4))) + raise ValueError(f"dict key 'content' not found in message {json.dumps(message, indent=4)}") def validate_completion_request(request_body) -> None: @@ -220,6 +216,7 @@ def get_request_body(event) -> dict: A dictionary representing the request body. """ if hasattr(event, "isBase64Encoded") and bool(event["isBase64Encoded"]): + # pylint: disable=line-too-long # https://stackoverflow.com/questions/9942594/unicodeencodeerror-ascii-codec-cant-encode-character-u-xa0-in-position-20 # https://stackoverflow.com/questions/53340627/typeerror-expected-bytes-like-object-not-str request_body = str(event["body"]).encode("ascii") @@ -251,6 +248,7 @@ def parse_request(request_body: dict): return end_point, model, messages, input_text +# pylint: disable=unused-argument def handler(event, context): """ Main Lambda handler function. 
@@ -314,7 +312,7 @@ def handler(event, context): except (openai.APIError, ValueError, TypeError, NotImplementedError) as e: # 400 Bad Request return http_response_factory(status_code=HTTP_RESPONSE_BAD_REQUEST, body=exception_response_factory(e)) - except (openai.OpenAIError, Exception) as e: + except (openai.OpenAIError, Exception) as e: # pylint: disable=broad-except # 500 Internal Server Error return http_response_factory( status_code=HTTP_RESPONSE_INTERNAL_SERVER_ERROR, diff --git a/api/terraform/python/lambda_openai/tests/test_01.py b/api/terraform/python/lambda_openai/tests/test_01.py index d2feb3fc..71edfc50 100644 --- a/api/terraform/python/lambda_openai/tests/test_01.py +++ b/api/terraform/python/lambda_openai/tests/test_01.py @@ -1,13 +1,17 @@ # -*- coding: utf-8 -*- +# pylint: disable=R0801 # flake8: noqa: F401 """ Test requests to the OpenAI API using the Lambda Layer, 'genai'. """ -import pytest +import pytest # pylint: disable=unused-import from lambda_openai.tests.test_init import get_event, handle_event +# pylint: disable=too-few-public-methods class TestOpenAIText: + """Test the OpenAI API using the Lambda Layer, 'genai'.""" + def test_basic_request(self): """Test a basic request""" event = get_event("tests/events/test_01.request.json") diff --git a/api/terraform/python/lambda_openai/tests/test_init.py b/api/terraform/python/lambda_openai/tests/test_init.py index 859d97ca..4c27f7e0 100644 --- a/api/terraform/python/lambda_openai/tests/test_init.py +++ b/api/terraform/python/lambda_openai/tests/test_init.py @@ -15,23 +15,17 @@ OPENAI_API_ORGANIZATION = os.environ["OPENAI_API_ORGANIZATION"] PINECONE_API_KEY = os.environ["PINECONE_API_KEY"] else: - raise Exception("No .env file found in root directory of repository") + raise FileNotFoundError("No .env file found in root directory of repository") def get_event(filespec): """Load a JSON file and return the event""" - with open(filespec, "r") as f: + with open(filespec, "r", encoding="utf-8") as f: event = json.load(f) return event def handle_event(event): """Handle an event""" - retval = handler( - event=event, - context=None, - api_key=OPENAI_API_KEY, - organization=OPENAI_API_ORGANIZATION, - pinecone_api_key=PINECONE_API_KEY, - ) + retval = handler(event=event, context=None) return retval diff --git a/api/terraform/python/lambda_openai_v2/lambda_handler.py b/api/terraform/python/lambda_openai_v2/lambda_handler.py index 411e7af8..37c0d917 100644 --- a/api/terraform/python/lambda_openai_v2/lambda_handler.py +++ b/api/terraform/python/lambda_openai_v2/lambda_handler.py @@ -55,12 +55,13 @@ DEBUG_MODE = os.getenv("DEBUG_MODE", "False").lower() in ("true", "1", "t") # https://platform.openai.com/api_keys -OPENAI_ENDPOINT_IMAGE_N = int(os.getenv("OPENAI_ENDPOINT_IMAGE_N", 4)) +OPENAI_ENDPOINT_IMAGE_N = int(os.getenv("OPENAI_ENDPOINT_IMAGE_N", "4")) OPENAI_ENDPOINT_IMAGE_SIZE = os.getenv("OPENAI_ENDPOINT_IMAGE_SIZE", "1024x768") openai.organization = os.getenv("OPENAI_API_ORGANIZATION", "Personal") openai.api_key = os.getenv("OPENAI_API_KEY") +# pylint: disable=unused-argument def handler(event, context): """ Main Lambda handler function. 
@@ -129,7 +130,7 @@ def handler(event, context): except (openai.APIError, ValueError, TypeError, NotImplementedError) as e: # 400 Bad Request return http_response_factory(status_code=HTTP_RESPONSE_BAD_REQUEST, body=exception_response_factory(e)) - except (openai.OpenAIError, Exception) as e: + except (openai.OpenAIError, Exception) as e: # pylint: disable=broad-except # 500 Internal Server Error return http_response_factory( status_code=HTTP_RESPONSE_INTERNAL_SERVER_ERROR, diff --git a/api/terraform/python/lambda_openai_v2/tests/__init__.py b/api/terraform/python/lambda_openai_v2/tests/__init__.py index 3d844456..e69de29b 100644 --- a/api/terraform/python/lambda_openai_v2/tests/__init__.py +++ b/api/terraform/python/lambda_openai_v2/tests/__init__.py @@ -1,14 +0,0 @@ -import os - -from dotenv import find_dotenv, load_dotenv - - -# Load environment variables from .env file in all folders -dotenv_path = find_dotenv() -if os.path.exists(dotenv_path): - load_dotenv(dotenv_path=dotenv_path, verbose=True) - OPENAI_API_KEY = os.environ["OPENAI_API_KEY"] - OPENAI_API_ORGANIZATION = os.environ["OPENAI_API_ORGANIZATION"] - PINECONE_API_KEY = os.environ["PINECONE_API_KEY"] -else: - raise Exception("No .env file found in root directory of repository") diff --git a/api/terraform/python/lambda_openai_v2/tests/init.py b/api/terraform/python/lambda_openai_v2/tests/init.py index 41c54179..be8e0544 100644 --- a/api/terraform/python/lambda_openai_v2/tests/init.py +++ b/api/terraform/python/lambda_openai_v2/tests/init.py @@ -5,6 +5,6 @@ def get_event(filespec): """Load a JSON file and return the event""" - with open(filespec, "r") as f: + with open(filespec, "r", encoding="utf-8") as f: event = json.load(f) return event diff --git a/api/terraform/python/lambda_openai_v2/tests/test_01.py b/api/terraform/python/lambda_openai_v2/tests/test_01.py index 6531f9c4..c3528aca 100644 --- a/api/terraform/python/lambda_openai_v2/tests/test_01.py +++ b/api/terraform/python/lambda_openai_v2/tests/test_01.py @@ -1,10 +1,11 @@ # flake8: noqa: F401 +# pylint: disable=R0801 """ Test requests to the OpenAI API using the Lambda Layer, 'genai'. 
""" import os -import pytest +import pytest # pylint: disable=unused-import from dotenv import find_dotenv, load_dotenv from lambda_openai_v2.lambda_handler import handler from lambda_openai_v2.tests.init import get_event @@ -18,15 +19,19 @@ OPENAI_API_ORGANIZATION = os.environ["OPENAI_API_ORGANIZATION"] PINECONE_API_KEY = os.environ["PINECONE_API_KEY"] else: - raise Exception("No .env file found in root directory of repository") + raise FileNotFoundError("No .env file found in root directory of repository") def handle_event_wrapper(event): + """Wrapper for the Lambda handler function.""" retval = handler(event=event, context=None) return retval +# pylint: disable=too-few-public-methods class TestOpenAIText: + """Test the OpenAI API using the Lambda Layer, 'genai'.""" + def test_basic_request(self): """Test a basic request""" event = get_event("tests/events/test_01.request.json") diff --git a/api/terraform/python/layer_genai/openai_utils/__version__.py b/api/terraform/python/layer_genai/openai_utils/__version__.py index ef7eb44d..906d362f 100644 --- a/api/terraform/python/layer_genai/openai_utils/__version__.py +++ b/api/terraform/python/layer_genai/openai_utils/__version__.py @@ -1 +1 @@ -__version__ = '0.6.0' +__version__ = "0.6.0" diff --git a/api/terraform/python/layer_genai/openai_utils/const.py b/api/terraform/python/layer_genai/openai_utils/const.py index b1ac0476..5d83d714 100644 --- a/api/terraform/python/layer_genai/openai_utils/const.py +++ b/api/terraform/python/layer_genai/openai_utils/const.py @@ -11,6 +11,7 @@ DEBUG_MODE = os.getenv("DEBUG_MODE", "False").lower() in ("true", "1", "t") +# pylint: disable=too-few-public-methods class OpenAIEndPoint: """ A class representing an endpoint for the OpenAI API. @@ -29,6 +30,7 @@ class OpenAIEndPoint: all_endpoints = [Embedding, ChatCompletion, Moderation, Image, Audio, Models] +# pylint: disable=too-few-public-methods class OpenAIMessageKeys: """A class representing the keys for a message in the OpenAI API.""" diff --git a/api/terraform/python/layer_genai/openai_utils/setup.py b/api/terraform/python/layer_genai/openai_utils/setup.py index 843cbba4..036a4f29 100644 --- a/api/terraform/python/layer_genai/openai_utils/setup.py +++ b/api/terraform/python/layer_genai/openai_utils/setup.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """Setup for openai_utils package.""" -from setup_utils import get_semantic_version +from openai_utils.setup_utils import get_semantic_version from setuptools import find_packages, setup diff --git a/api/terraform/python/layer_genai/openai_utils/setup_test.py b/api/terraform/python/layer_genai/openai_utils/setup_test.py index 7acd80a6..1a5a8b13 100644 --- a/api/terraform/python/layer_genai/openai_utils/setup_test.py +++ b/api/terraform/python/layer_genai/openai_utils/setup_test.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +"""Test setup.py.""" import subprocess import unittest @@ -8,12 +9,8 @@ class TestSetup(unittest.TestCase): def test_setup_syntax(self): """Test setup.py syntax.""" - result = subprocess.run( - ["python", "setup.py", "check"], capture_output=True, text=True - ) - assert ( - result.returncode == 0 - ), f"setup.py failed with output:\n{result.stdout}\n{result.stderr}" + result = subprocess.run(["python", "setup.py", "check"], capture_output=True, text=True, check=False) + assert result.returncode == 0, f"setup.py failed with output:\n{result.stdout}\n{result.stderr}" assert not result.stderr, "Expected no error output" diff --git a/api/terraform/python/layer_genai/openai_utils/setup_utils.py 
b/api/terraform/python/layer_genai/openai_utils/setup_utils.py index ab704ac3..23fd8af1 100644 --- a/api/terraform/python/layer_genai/openai_utils/setup_utils.py +++ b/api/terraform/python/layer_genai/openai_utils/setup_utils.py @@ -1,11 +1,11 @@ # -*- coding: utf-8 -*- """Lawrence McDaniel https://lawrencemcdaniel.com.""" -# pylint: disable=open-builtin import io import os import re from typing import Dict + HERE = os.path.abspath(os.path.dirname(__file__)) # allow setup.py to be run from any path diff --git a/api/terraform/python/layer_genai/openai_utils/utils.py b/api/terraform/python/layer_genai/openai_utils/utils.py index ffd05178..e7760d62 100644 --- a/api/terraform/python/layer_genai/openai_utils/utils.py +++ b/api/terraform/python/layer_genai/openai_utils/utils.py @@ -33,7 +33,7 @@ def http_response_factory(status_code: int, body) -> dict: see https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html """ if status_code < 100 or status_code > 599: - raise ValueError("Invalid HTTP response code received: {status_code}".format(status_code=status_code)) + raise ValueError(f"Invalid HTTP response code received: {status_code}") retval = { "isBase64Encoded": False, @@ -95,6 +95,7 @@ def get_request_body(event) -> dict: A dictionary representing the request body. """ if hasattr(event, "isBase64Encoded") and bool(event["isBase64Encoded"]): + # pylint: disable=line-too-long # https://stackoverflow.com/questions/9942594/unicodeencodeerror-ascii-codec-cant-encode-character-u-xa0-in-position-20 # https://stackoverflow.com/questions/53340627/typeerror-expected-bytes-like-object-not-str request_body = str(event["body"]).encode("ascii") diff --git a/api/terraform/python/layer_genai/openai_utils/validators.py b/api/terraform/python/layer_genai/openai_utils/validators.py index a973777d..5e1f9670 100644 --- a/api/terraform/python/layer_genai/openai_utils/validators.py +++ b/api/terraform/python/layer_genai/openai_utils/validators.py @@ -8,12 +8,7 @@ def validate_item(item, valid_items: list, item_type: str) -> None: """Ensure that item exists in valid_items""" if item not in valid_items: - raise ValueError( - "Item {item} not found in {item_type}: {valid_items}".format( - item=item, item_type=item_type, valid_items=valid_items - ) - ) - return + raise ValueError(f"Item {item} not found in {item_type}: {valid_items}") def validate_temperature(temperature: any) -> None: @@ -22,13 +17,13 @@ def validate_temperature(temperature: any) -> None: float_temperature = float(temperature) if float_temperature < 0 or float_temperature > 1: raise ValueError("temperature should be between 0 and 1") - except ValueError: - raise ValueError("Temperature must be a float") + except ValueError as exc: + raise ValueError("Temperature must be a float") from exc def validate_max_tokens(max_tokens: any) -> None: """Ensure that max_tokens is an int between 1 and 2048""" - if type(max_tokens) is not int: + if not isinstance(max_tokens, int): raise TypeError("max_tokens should be an int") if max_tokens < 1 or max_tokens > 2048: @@ -37,20 +32,16 @@ def validate_max_tokens(max_tokens: any) -> None: def validate_endpoint(end_point: any) -> None: """Ensure that end_point is a valid endpoint based on the OpenAIEndPoint enum""" - if type(end_point) is not str: + if not isinstance(end_point, str): raise TypeError("end_point should be a string") if end_point not in OpenAIEndPoint.all_endpoints: - raise ValueError( - "end_point should be one of {end_points}".format( - end_points=OpenAIEndPoint.all_endpoints - ) - ) + raise 
ValueError(f"end_point should be one of {OpenAIEndPoint.all_endpoints}") def validate_request_body(request_body) -> None: """See openai.chat.completion.request.json""" - if type(request_body) is not dict: + if not isinstance(request_body, dict): raise TypeError("request body should be a dict") @@ -59,33 +50,20 @@ def validate_messages(request_body): if "messages" not in request_body: raise ValueError("dict key 'messages' not found in request body object") messages = request_body["messages"] - if type(messages) is not list: + if not isinstance(messages, list): raise ValueError("dict key 'messages' should be a JSON list") for message in messages: - if type(message) is not dict: - raise ValueError( - "invalid ojbect type {t} found in messages list".format(t=type(message)) - ) + if not isinstance(message, dict): + raise ValueError(f"invalid object type {type(message)} found in messages list") if "role" not in message: - raise ValueError( - "dict key 'role' not found in message {m}".format( - m=json.dumps(message, indent=4) - ) - ) + raise ValueError(f"dict key 'role' not found in message {json.dumps(message, indent=4)}") if message["role"] not in OpenAIMessageKeys.all: raise ValueError( - "invalid role {r} found in message {m}. Should be one of {valid_roles}".format( - r=message["role"], - m=json.dumps(message, indent=4), - valid_roles=OpenAIMessageKeys.all, - ) + f"invalid role {message['role']} found in message {json.dumps(message, indent=4)}. " + f"Should be one of {OpenAIMessageKeys.all}" ) if "content" not in message: - raise ValueError( - "dict key 'content' not found in message {m}".format( - m=json.dumps(message, indent=4) - ) - ) + raise ValueError(f"dict key 'content' not found in message {json.dumps(message, indent=4)}") def validate_completion_request(request_body) -> None: From 0d149299161e1a9a7ed6aab9d62135968019c38e Mon Sep 17 00:00:00 2001 From: lpm0073 <lpm0073@gmail.com> Date: Sat, 18 Nov 2023 18:19:14 -0600 Subject: [PATCH 08/10] style: add eslint, pylint, prettier, black, flake8 and isort to pre-commit --- .eslintrc.js | 27 ++++++++ .flake8 | 2 + .mergify.yml | 8 +-- .prettierrc.json | 3 + .pylintrc | 11 ++++ CHANGELOG.md | 66 +++++++++---------- Makefile | 6 +- README.md | 6 +- commitlint.config.js | 13 ++-- ...apigateway_troubleshooting_cloudfront.html | 50 +++++++------- package.json | 3 +- pyproject.toml | 11 ++++ release.config.js | 53 +++++++-------- requirements.txt | 8 +++ run_pylint.sh | 2 + tox.ini | 20 +++--- 16 files changed, 177 insertions(+), 112 deletions(-) create mode 100644 .eslintrc.js create mode 100644 .flake8 create mode 100644 .prettierrc.json create mode 100644 .pylintrc create mode 100755 run_pylint.sh diff --git a/.eslintrc.js b/.eslintrc.js new file mode 100644 index 00000000..a67fb770 --- /dev/null +++ b/.eslintrc.js @@ -0,0 +1,27 @@ +module.exports = { + env: { + browser: true, + es2021: true, + }, + extends: ["standard-with-typescript", "plugin:react/recommended"], + overrides: [ + { + env: { + node: true, + }, + files: [".eslintrc.{js,cjs}"], + parserOptions: { + sourceType: "script", + }, + }, + ], + parserOptions: { + ecmaVersion: "latest", + sourceType: "module", + }, + plugins: ["react"], + rules: { + "react/jsx-uses-react": "error", + "react/jsx-uses-vars": "error", + }, +}; diff --git a/.flake8 b/.flake8 new file mode 100644 index 00000000..6deafc26 --- /dev/null +++ b/.flake8 @@ -0,0 +1,2 @@ +[flake8] +max-line-length = 120 diff --git a/.mergify.yml b/.mergify.yml index 10004fbb..5e1a6f8e 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ 
-4,16 +4,16 @@ pull_request_rules: - name: automatic approve dependabot pull requests conditions: - - 'author~=dependabot[bot]|dependabot-preview[bot]|dependabot' + - "author~=dependabot[bot]|dependabot-preview[bot]|dependabot" actions: review: type: APPROVE - name: automatic merge dependabot pull requests conditions: - - 'author~=dependabot[bot]|dependabot-preview[bot]|dependabot' - - '#approved-reviews-by>=1' - - 'base=main' # replace 'main' with the name of the branch you want to auto-merge into + - "author~=dependabot[bot]|dependabot-preview[bot]|dependabot" + - "#approved-reviews-by>=1" + - "base=main" # replace 'main' with the name of the branch you want to auto-merge into actions: merge: method: merge diff --git a/.prettierrc.json b/.prettierrc.json new file mode 100644 index 00000000..75fa1341 --- /dev/null +++ b/.prettierrc.json @@ -0,0 +1,3 @@ +{ + "tabWidth": 2 +} diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 00000000..f548794c --- /dev/null +++ b/.pylintrc @@ -0,0 +1,11 @@ +[MASTER] +init-hook='import sys; print(sys.executable); sys.path.append("/absolute/path/to/api/terraform/python/layer_genai/openai_utils")' +ignore-paths = + api/terraform/python/lambda_openai_v2/lambda_dist_pkg, + api/terraform/python/lambda_openai/lambda_dist_pkg, + api/terraform/python/lambda_langchain/lambda_dist_pkg +ignore = + __version__.py + +[FORMAT] +max-line-length=120 diff --git a/CHANGELOG.md b/CHANGELOG.md index 2f5f940c..fb66b4ab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,73 +2,69 @@ ### Features -* propagate merges to main back into dev branches: next, next-major, alpha, beta ([fd32caf](https://github.com/FullStackWithLawrence/aws-openai/pull/73/commits/959ebb91afe30bd7dec0ce93b994e2c0dfd32caf)) -* create a top-level Pull Request workflow that triggers tech-specific unit ([fd32caf](https://github.com/FullStackWithLawrence/aws-openai/pull/73/commits/959ebb91afe30bd7dec0ce93b994e2c0dfd32caf))tests +- propagate merges to main back into dev branches: next, next-major, alpha, beta ([fd32caf](https://github.com/FullStackWithLawrence/aws-openai/pull/73/commits/959ebb91afe30bd7dec0ce93b994e2c0dfd32caf)) +- create a top-level Pull Request workflow that triggers tech-specific unit ([fd32caf](https://github.com/FullStackWithLawrence/aws-openai/pull/73/commits/959ebb91afe30bd7dec0ce93b994e2c0dfd32caf))tests ### Refactoring -* refactor and expand the scope of automated PR revision workflows ([fd32caf](https://github.com/FullStackWithLawrence/aws-openai/pull/73/commits/959ebb91afe30bd7dec0ce93b994e2c0dfd32caf)) -* consolidate all jobs related to merging to main into a single workflow ([fd32caf](https://github.com/FullStackWithLawrence/aws-openai/pull/73/commits/959ebb91afe30bd7dec0ce93b994e2c0dfd32caf)) -* refactor Python unit test and only run when relevant modifications are ([fd32caf](https://github.com/FullStackWithLawrence/aws-openai/pull/73/commits/959ebb91afe30bd7dec0ce93b994e2c0dfd32caf))included in commit (*.py, requirements.txt ([fd32caf](https://github.com/FullStackWithLawrence/aws-openai/pull/73/commits/959ebb91afe30bd7dec0ce93b994e2c0dfd32caf)), etc) -* scaffold a ReactJS unit test workflow ([fd32caf](https://github.com/FullStackWithLawrence/aws-openai/pull/73/commits/959ebb91afe30bd7dec0ce93b994e2c0dfd32caf)) -* create a Terraform unit test workflow ([fd32caf](https://github.com/FullStackWithLawrence/aws-openai/pull/73/commits/959ebb91afe30bd7dec0ce93b994e2c0dfd32caf)) +- refactor and expand the scope of automated PR revision workflows 
([fd32caf](https://github.com/FullStackWithLawrence/aws-openai/pull/73/commits/959ebb91afe30bd7dec0ce93b994e2c0dfd32caf)) +- consolidate all jobs related to merging to main into a single workflow ([fd32caf](https://github.com/FullStackWithLawrence/aws-openai/pull/73/commits/959ebb91afe30bd7dec0ce93b994e2c0dfd32caf)) +- refactor Python unit test and only run when relevant modifications are ([fd32caf](https://github.com/FullStackWithLawrence/aws-openai/pull/73/commits/959ebb91afe30bd7dec0ce93b994e2c0dfd32caf))included in commit (\*.py, requirements.txt ([fd32caf](https://github.com/FullStackWithLawrence/aws-openai/pull/73/commits/959ebb91afe30bd7dec0ce93b994e2c0dfd32caf)), etc) +- scaffold a ReactJS unit test workflow ([fd32caf](https://github.com/FullStackWithLawrence/aws-openai/pull/73/commits/959ebb91afe30bd7dec0ce93b994e2c0dfd32caf)) +- create a Terraform unit test workflow ([fd32caf](https://github.com/FullStackWithLawrence/aws-openai/pull/73/commits/959ebb91afe30bd7dec0ce93b994e2c0dfd32caf)) ### Bug Fixes -* add GITHUB_TOKEN to semantic-release job ([bf4152d](https://github.com/FullStackWithLawrence/aws-openai/commit/bf4152d282b4652390b356711c0e84b422b07b30)) +- add GITHUB_TOKEN to semantic-release job ([bf4152d](https://github.com/FullStackWithLawrence/aws-openai/commit/bf4152d282b4652390b356711c0e84b422b07b30)) ## [0.5.1](https://github.com/FullStackWithLawrence/aws-openai/compare/v0.5.0...v0.5.1) (2023-11-09) - ### Bug Fixes -* don't persist credentials when calling actions/checkout ([72dea97](https://github.com/FullStackWithLawrence/aws-openai/commit/72dea975cb9d551f09ca329b354ac42189af14b2)) -* downgrade openai to previous stable version ([bfc6962](https://github.com/FullStackWithLawrence/aws-openai/commit/bfc69624c9284c16a370b00c7d265f8898fb9c0f)) -* revert last commit ([564e3fd](https://github.com/FullStackWithLawrence/aws-openai/commit/564e3fdd42465895f6f48937e420897a9d677348)) -* revert to last stable version of openai ([d57c5db](https://github.com/FullStackWithLawrence/aws-openai/commit/d57c5db44853a06a7d32578932f86e92490637e0)) -* stabilize openai breaking changes by making dependabot ignore related packages ([5ca1d81](https://github.com/FullStackWithLawrence/aws-openai/commit/5ca1d81e5cb3d8b8115e5343d114950506316597)) +- don't persist credentials when calling actions/checkout ([72dea97](https://github.com/FullStackWithLawrence/aws-openai/commit/72dea975cb9d551f09ca329b354ac42189af14b2)) +- downgrade openai to previous stable version ([bfc6962](https://github.com/FullStackWithLawrence/aws-openai/commit/bfc69624c9284c16a370b00c7d265f8898fb9c0f)) +- revert last commit ([564e3fd](https://github.com/FullStackWithLawrence/aws-openai/commit/564e3fdd42465895f6f48937e420897a9d677348)) +- revert to last stable version of openai ([d57c5db](https://github.com/FullStackWithLawrence/aws-openai/commit/d57c5db44853a06a7d32578932f86e92490637e0)) +- stabilize openai breaking changes by making dependabot ignore related packages ([5ca1d81](https://github.com/FullStackWithLawrence/aws-openai/commit/5ca1d81e5cb3d8b8115e5343d114950506316597)) ## [0.5.0](https://github.com/FullStackWithLawrence/aws-openai/compare/v0.4.0...v0.5.0) (2023-11-07) ### Bug Fixes -* add a hash of the output zip file ([c9b9a0b](https://github.com/FullStackWithLawrence/aws-openai/commit/c9b9a0b00561ea89b2f6e04f86baaf8d8ee099c5)) -* do not run Python tests for dependabot pull requests until org secrets can be passed 
([d0de24a](https://github.com/FullStackWithLawrence/aws-openai/commit/d0de24ad8ef9f62388f3aa1a70bc57ad34a2c19e)) -* ensure that we're using --platform=linux/amd64 for the build ([aada484](https://github.com/FullStackWithLawrence/aws-openai/commit/aada4840d2ce31d93a738725ff01894d9370a0ab)) -* merge conflict ([94d5833](https://github.com/FullStackWithLawrence/aws-openai/commit/94d5833ab319588c0138df1058501694d40b8fb4)) -* replace hard-coded python version with ([868f118](https://github.com/FullStackWithLawrence/aws-openai/commit/868f1182ac490eeb16e6d144a6c888d125f82d13)) -* switch event from pull_request to pull_request_target ([321ec8f](https://github.com/FullStackWithLawrence/aws-openai/commit/321ec8f8c806a86ed2f8263a1b326fb29fef10a5)) -* switch from pull_requests event to pull_request_target event ([d70f2bc](https://github.com/FullStackWithLawrence/aws-openai/commit/d70f2bc57098d8066a5d1e5dd3c7d2e99bd8a60e)) +- add a hash of the output zip file ([c9b9a0b](https://github.com/FullStackWithLawrence/aws-openai/commit/c9b9a0b00561ea89b2f6e04f86baaf8d8ee099c5)) +- do not run Python tests for dependabot pull requests until org secrets can be passed ([d0de24a](https://github.com/FullStackWithLawrence/aws-openai/commit/d0de24ad8ef9f62388f3aa1a70bc57ad34a2c19e)) +- ensure that we're using --platform=linux/amd64 for the build ([aada484](https://github.com/FullStackWithLawrence/aws-openai/commit/aada4840d2ce31d93a738725ff01894d9370a0ab)) +- merge conflict ([94d5833](https://github.com/FullStackWithLawrence/aws-openai/commit/94d5833ab319588c0138df1058501694d40b8fb4)) +- replace hard-coded python version with ([868f118](https://github.com/FullStackWithLawrence/aws-openai/commit/868f1182ac490eeb16e6d144a6c888d125f82d13)) +- switch event from pull_request to pull_request_target ([321ec8f](https://github.com/FullStackWithLawrence/aws-openai/commit/321ec8f8c806a86ed2f8263a1b326fb29fef10a5)) +- switch from pull_requests event to pull_request_target event ([d70f2bc](https://github.com/FullStackWithLawrence/aws-openai/commit/d70f2bc57098d8066a5d1e5dd3c7d2e99bd8a60e)) ### Features -* add a generic Langchain chat completion algorithm with chat history memory ([82dd402](https://github.com/FullStackWithLawrence/aws-openai/commit/82dd402e407c43f99d6499e6a4d2c5560f195421)) -* add backward compatibility for Langchain responses ([93ad1d7](https://github.com/FullStackWithLawrence/aws-openai/commit/93ad1d7064fff6853b311c27218d1d9a1e96f191)) -* upgrade Marv The Sarcastic Chatbot to Langchain w memory ([3c38ee2](https://github.com/FullStackWithLawrence/aws-openai/commit/3c38ee2d37ea0f880db0549286db2baa2717a81d)) +- add a generic Langchain chat completion algorithm with chat history memory ([82dd402](https://github.com/FullStackWithLawrence/aws-openai/commit/82dd402e407c43f99d6499e6a4d2c5560f195421)) +- add backward compatibility for Langchain responses ([93ad1d7](https://github.com/FullStackWithLawrence/aws-openai/commit/93ad1d7064fff6853b311c27218d1d9a1e96f191)) +- upgrade Marv The Sarcastic Chatbot to Langchain w memory ([3c38ee2](https://github.com/FullStackWithLawrence/aws-openai/commit/3c38ee2d37ea0f880db0549286db2baa2717a81d)) ## [0.4.0](https://github.com/FullStackWithLawrence/aws-openai/compare/v0.3.1...v0.4.0) (2023-11-03) - ### Bug Fixes -* add a .env file to root so that test environment matches prod ([293b20e](https://github.com/FullStackWithLawrence/aws-openai/commit/293b20ec1537ef493539a59aa7a8d0216809b9f4)) -* add openai_utils source location to the sys path for Python 
([30eed8e](https://github.com/FullStackWithLawrence/aws-openai/commit/30eed8e2c6e1c27391d94597e43afee6db5eeb44)) -* need to setup venv from ./requirements.txt so that the dev imports are included ([a907e98](https://github.com/FullStackWithLawrence/aws-openai/commit/a907e983051ad2cad721cb6a9347b0adb8f60c9a)) -* paths should begin with ./ ([c8060bc](https://github.com/FullStackWithLawrence/aws-openai/commit/c8060bc2302190f074d3d7e78496781f5d6e627a)) -* physically copy openai_utils to pip packages folder ([772b1d6](https://github.com/FullStackWithLawrence/aws-openai/commit/772b1d659b3bde6c5f80620e4539f23df68c3ffc)) -* switch to Pytest ([be7746b](https://github.com/FullStackWithLawrence/aws-openai/commit/be7746bb090ac60d29ad42359d50c3c554ab80cf)) - +- add a .env file to root so that test environment matches prod ([293b20e](https://github.com/FullStackWithLawrence/aws-openai/commit/293b20ec1537ef493539a59aa7a8d0216809b9f4)) +- add openai_utils source location to the sys path for Python ([30eed8e](https://github.com/FullStackWithLawrence/aws-openai/commit/30eed8e2c6e1c27391d94597e43afee6db5eeb44)) +- need to setup venv from ./requirements.txt so that the dev imports are included ([a907e98](https://github.com/FullStackWithLawrence/aws-openai/commit/a907e983051ad2cad721cb6a9347b0adb8f60c9a)) +- paths should begin with ./ ([c8060bc](https://github.com/FullStackWithLawrence/aws-openai/commit/c8060bc2302190f074d3d7e78496781f5d6e627a)) +- physically copy openai_utils to pip packages folder ([772b1d6](https://github.com/FullStackWithLawrence/aws-openai/commit/772b1d659b3bde6c5f80620e4539f23df68c3ffc)) +- switch to Pytest ([be7746b](https://github.com/FullStackWithLawrence/aws-openai/commit/be7746bb090ac60d29ad42359d50c3c554ab80cf)) ### Features -* add automated Python unit testing workflow to Github Actions ([dea18fc](https://github.com/FullStackWithLawrence/aws-openai/commit/dea18fc8cf2183d03613893f950ad30d7acd77fe)) +- add automated Python unit testing workflow to Github Actions ([dea18fc](https://github.com/FullStackWithLawrence/aws-openai/commit/dea18fc8cf2183d03613893f950ad30d7acd77fe)) ## [0.3.1](https://github.com/FullStackWithLawrence/aws-openai/compare/v0.3.0...v0.3.1) (2023-11-03) - ### Bug Fixes -* revert to secrets.PAT ([7342489](https://github.com/FullStackWithLawrence/aws-openai/commit/7342489ef7b7537419cc12732c1739a9fc3b42a8)) +- revert to secrets.PAT ([7342489](https://github.com/FullStackWithLawrence/aws-openai/commit/7342489ef7b7537419cc12732c1739a9fc3b42a8)) ## [0.3.0] (2023-11-01) diff --git a/Makefile b/Makefile index 0f319498..37a4c9ec 100644 --- a/Makefile +++ b/Makefile @@ -53,6 +53,10 @@ api-test: pytest -v -s tests/ api-lint: + venv/bin/python3 -m pylint api/terraform/python/lambda_langchain/lambda_handler.py && \ + venv/bin/python3 -m pylint api/terraform/python/lambda_openai/lambda_handler.py && \ + venv/bin/python3 -m pylint api/terraform/python/lambda_openai_v2/lambda_handler.py && \ + venv/bin/python3 -m pylint api/terraform/python/layer_genai/openai_utils && \ terraform fmt -recursive && \ pre-commit run --all-files && \ black ./api/terraform/python/ @@ -64,7 +68,7 @@ api-clean: # React app ###################### client-init: - cd ./client && npm install + cd ./client && npm install && npm init @eslint/config client-lint: cd ./client && npm run lint diff --git a/README.md b/README.md index 6d02cb24..77f04383 100644 --- a/README.md +++ b/README.md @@ -59,9 +59,9 @@ A REST API implementing each of the [30 example applications](https://platform.o - [AWS 
account](https://aws.amazon.com/) - [AWS Command Line Interface](https://aws.amazon.com/cli/) - [Terraform](https://www.terraform.io/). - *If you're new to Terraform then see [Getting Started With AWS and Terraform](./doc/terraform-getting-started.md)* + _If you're new to Terraform then see [Getting Started With AWS and Terraform](./doc/terraform-getting-started.md)_ - [OpenAI platform API key](https://platform.openai.com/). - *If you're new to OpenAI API then see [How to Get an OpenAI API Key](./doc/openai-api-key.md)* + _If you're new to OpenAI API then see [How to Get an OpenAI API Key](./doc/openai-api-key.md)_ ## Documentation @@ -73,7 +73,7 @@ This repo is referenced by multiple YouTube videos, including various tutorials - Automated Pull Requests: See the Github Actions tab of the GitHub Repository. Github Actions are triggered on pull requests to run any of several different kinds of technology-specific unit tests depending on the contents of the commits included in the PR. - [python-dotenv](https://pypi.org/project/python-dotenv/) for storing sensitive data for local development -- [.gitignore](./.gitignore) ensures that no sensitive nor useless data accidentally gets pushed to GitHub. +- [.gitignore](./.gitignore) ensures that no sensitive nor useless data accidentally gets pushed to GitHub. - [tox.ini](./tox.ini) file for configuring behaviors of Python testing tools - [GitHub Actions](https://github.com/features/actions) automates unit testing, semantic release rule checking, and dependabot actions. - [GitHub Secrets](https://github.com/FullStackWithLawrence/aws-openai/settings/secrets/actions) to provide sensitive data to Github Actions workflows diff --git a/commitlint.config.js b/commitlint.config.js index d872c080..b58fa47e 100644 --- a/commitlint.config.js +++ b/commitlint.config.js @@ -3,22 +3,21 @@ const Configuration = { * Resolve and load @commitlint/config-conventional from node_modules. * Referenced packages must be installed */ - extends: ['@commitlint/config-conventional', '@commitlint/config-angular'], + extends: ["@commitlint/config-conventional", "@commitlint/config-angular"], /* * Resolve and load conventional-changelog-atom from node_modules. * Referenced packages must be installed */ - parserPreset: 'conventional-changelog-atom', + parserPreset: "conventional-changelog-atom", /* * Resolve and load @commitlint/format from node_modules. * Referenced package must be installed */ - formatter: '@commitlint/format', + formatter: "@commitlint/format", /* * Any rules defined here will override rules from @commitlint/config-conventional */ - rules: { - }, + rules: {}, /* * Array of functions that return true if commitlint should ignore the given message. 
* Given array is merged with predefined functions, which consist of matchers like: @@ -40,7 +39,7 @@ const Configuration = { * Custom URL to show upon failure */ helpUrl: - 'https://github.com/conventional-changelog/commitlint/#what-is-commitlint', + "https://github.com/conventional-changelog/commitlint/#what-is-commitlint", /* * Custom prompt configs */ @@ -48,7 +47,7 @@ const Configuration = { messages: {}, questions: { type: { - description: 'please input type:', + description: "please input type:", }, }, }, diff --git a/doc/apigateway_troubleshooting_cloudfront.html b/doc/apigateway_troubleshooting_cloudfront.html index 8bb80dd2..5f7d3dcb 100644 --- a/doc/apigateway_troubleshooting_cloudfront.html +++ b/doc/apigateway_troubleshooting_cloudfront.html @@ -1,28 +1,28 @@ -<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> -<HTML> +<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> +<html> + <head> + <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" /> + <title>ERROR: The request could not be satisfied + - - - ERROR: The request could not be satisfied - - - -

403 ERROR

-

The request could not be satisfied.

-
- Bad request.
- We can't connect to the server for this app or website at this time. There might be too much traffic or a
- configuration error. Try again later, or contact the app or website owner.
-
-If you provide content to customers through CloudFront, you can find steps to troubleshoot and help prevent this error by reviewing the CloudFront documentation.
-
-
-
+  
+    

403 ERROR

+

The request could not be satisfied.

+
+ Bad request. We can't connect to the server for this app or website at this
+ time. There might be too much traffic or a configuration error. Try again
+ later, or contact the app or website owner.
+ If you provide content to customers through CloudFront, you can find steps
+ to troubleshoot and help prevent this error by reviewing the CloudFront
+ documentation.
+
+
 Generated by cloudfront (CloudFront)
 Request ID: 9gEvozW2bUYkYzny-ZkMKCBAHZZyrm7MMAhyu_tsnOlADWfu4TkrTQ==
-
-
-
- - - +
+
+ + diff --git a/package.json b/package.json index 4e0e9356..b911829e 100644 --- a/package.json +++ b/package.json @@ -7,6 +7,7 @@ "@semantic-release/commit-analyzer": "^11.1.0", "@semantic-release/git": "^10.0.1", "@semantic-release/github": "^9.2.3", - "@semantic-release/release-notes-generator": "^12.1.0" + "@semantic-release/release-notes-generator": "^12.1.0", + "typescript": "^5.2.2" } } diff --git a/pyproject.toml b/pyproject.toml index 5ab74c62..c8eae103 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,6 @@ +[build-system] +requires = ["flake8", "gitlint", "bump2version"] + [tool.isort] profile = "black" lines_after_imports = 2 @@ -21,3 +24,11 @@ exclude = ''' | lambda_dist_pkg )/ ''' + +[tool.flake8] +ignore = "D205,D413,D400,D401" +max-line-length =120 +max-complexity = 10 +exclude = "api/terraform/python/lambda_openai/venv, api/terraform/python/lambda_openai/lambda_dist_pkg, api/terraform/python/lambda_openai_v2/lambda_dist_pkg, api/terraform/python/lambda_langchain/lambda_dist_pkg" +extend-exclude = "*__init__.py,*__version__.py,venv,lambda_dist_pkg" +select = "C101" diff --git a/release.config.js b/release.config.js index 0dad8997..75777ce6 100644 --- a/release.config.js +++ b/release.config.js @@ -1,27 +1,28 @@ module.exports = { - "dryRun": false, - "plugins": [ - "@semantic-release/commit-analyzer", - "@semantic-release/release-notes-generator", - [ - "@semantic-release/changelog", - { - "changelogFile": "CHANGELOG.md" - } - ], - "@semantic-release/github", - [ - "@semantic-release/git", - { - "assets": [ - "CHANGELOG.md", - "client/package.json", - "client/package-lock.json", - "requirements.txt", - "api/terraform/python/layer_genai/requirements.txt", - ], - "message": "chore(release): ${nextRelease.version} [skip ci]\n\n${nextRelease.notes}" - } - ] - ] - }; + dryRun: false, + plugins: [ + "@semantic-release/commit-analyzer", + "@semantic-release/release-notes-generator", + [ + "@semantic-release/changelog", + { + changelogFile: "CHANGELOG.md", + }, + ], + "@semantic-release/github", + [ + "@semantic-release/git", + { + assets: [ + "CHANGELOG.md", + "client/package.json", + "client/package-lock.json", + "requirements.txt", + "api/terraform/python/layer_genai/requirements.txt", + ], + message: + "chore(release): ${nextRelease.version} [skip ci]\n\n${nextRelease.notes}", + }, + ], + ], +}; diff --git a/requirements.txt b/requirements.txt index d9a199c5..f646e854 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,12 +13,19 @@ boto3==1.29.2 pytest==7.4.3 pytest_mock==3.12.0 + +# Code linters, formatters, and security scanners +# ------------ black==23.11.0 flake8==6.1.0 flake8-coding==1.3.2 pre-commit==3.5.0 isort==5.12.0 mypy==1.7.0 +pylint==3.0.2 +bandit==1.7.5 +pydocstringformatter==0.7.3 +tox==4.11.3 # production # ------------ @@ -27,6 +34,7 @@ openai==0.28.1 langchain==0.0.331 langchain-experimental==0.0.37 pinecone-client==2.2.4 +setup_utils==0.2.0 # local packages -e ./api/terraform/python/layer_genai/openai_utils diff --git a/run_pylint.sh b/run_pylint.sh new file mode 100755 index 00000000..a8aa47d7 --- /dev/null +++ b/run_pylint.sh @@ -0,0 +1,2 @@ +#!/bin/bash +python -m pylint "$@" diff --git a/tox.ini b/tox.ini index e71f2689..f46c5a2c 100644 --- a/tox.ini +++ b/tox.ini @@ -8,26 +8,17 @@ skip_missing_interpreters = true profile = "black" skip =venv,node_modules,api/terraform/python/lambda_openai/lambda_dist_pkg,api/terraform/python/lambda_openai/venv - [gh-actions] python = 3.8: gitlint,py38,flake8 3.9: gitlint,py39,flake8 3.10: 
gitlint,py310,flake8 - 3.11: gitlint,py311,flake8 + 3.11: gitlint,py311,flake8,mypy,black,pylint [testenv] deps = -rrequirements.txt commands = pytest -[flake8] -ignore = D205,D413,D400,D401 -max-line-length =120 -max-complexity = 10 -exclude = api/terraform/python/lambda_openai/venv, api/terraform/python/lambda_openai/lambda_dist_pkg, api/terraform/python/lambda_openai_v2/lambda_dist_pkg, api/terraform/python/lambda_langchain/lambda_dist_pkg -extend-exclude = *__init__.py,*__version__.py,venv,lambda_dist_pkg -select = C101 - [testenv:flake8] skip_install = True deps = flake8 @@ -50,3 +41,12 @@ passenv = GPG_AGENT_INFO deps = bump2version commands = bump2version {posargs} + +[testenv:pylint] +deps = pylint +commands = + pylint api/terraform/python/lambda_langchain + pylint api/terraform/python/lambda_openai + pylint api/terraform/python/lambda_openai_v2 + pylint api/terraform/python/layer_genai + pylint api/terraform/python/layer_genai/openai_utils From b00886c91cb9d026690dec438133ff0c9a6b356d Mon Sep 17 00:00:00 2001 From: lpm0073 Date: Sat, 18 Nov 2023 18:32:25 -0600 Subject: [PATCH 09/10] style: add eslint, pylint, prettier, black, flake8 and isort to pre-commit --- api/terraform/python/layer_genai/openai_utils/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/terraform/python/layer_genai/openai_utils/setup.py b/api/terraform/python/layer_genai/openai_utils/setup.py index 036a4f29..b2f336dd 100644 --- a/api/terraform/python/layer_genai/openai_utils/setup.py +++ b/api/terraform/python/layer_genai/openai_utils/setup.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """Setup for openai_utils package.""" -from openai_utils.setup_utils import get_semantic_version +from setup_utils import get_semantic_version # pylint: disable=import-error from setuptools import find_packages, setup From 64c0ec7b8d7f355ae8748207f333f00016fb5e89 Mon Sep 17 00:00:00 2001 From: lpm0073 Date: Sat, 18 Nov 2023 18:47:55 -0600 Subject: [PATCH 10/10] chore: fix up test imports --- Makefile | 8 ++++++-- api/terraform/python/lambda_openai/__init__.py | 16 ++++++++++++++++ .../python/lambda_openai_v2/tests/__init__.py | 17 +++++++++++++++++ .../python/lambda_openai_v2/tests/test_01.py | 1 + 4 files changed, 40 insertions(+), 2 deletions(-) create mode 100644 api/terraform/python/lambda_openai/__init__.py diff --git a/Makefile b/Makefile index 37a4c9ec..585b7d95 100644 --- a/Makefile +++ b/Makefile @@ -49,8 +49,12 @@ api-activate: pip install -r requirements.txt api-test: - cd ./api/terraform/python/openai_text/openai_text/ && \ - pytest -v -s tests/ + cd ./api/terraform/python/lambda_langchain/ && pytest -v -s tests/ + cd ../../../.. + cd ./api/terraform/python/lambda_openai_v2/ && pytest -v -s tests/ + cd ../../../.. + cd ./api/terraform/python/lambda_openai/ && pytest -v -s tests/ + cd ../../../.. 
api-lint: venv/bin/python3 -m pylint api/terraform/python/lambda_langchain/lambda_handler.py && \ diff --git a/api/terraform/python/lambda_openai/__init__.py b/api/terraform/python/lambda_openai/__init__.py new file mode 100644 index 00000000..3d959146 --- /dev/null +++ b/api/terraform/python/lambda_openai/__init__.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +"""Lambda function for OpenAI API.""" +import os + +from dotenv import find_dotenv, load_dotenv + + +# Load environment variables from .env file in all folders +dotenv_path = find_dotenv() +if os.path.exists(dotenv_path): + load_dotenv(dotenv_path=dotenv_path, verbose=True) + OPENAI_API_KEY = os.environ["OPENAI_API_KEY"] + OPENAI_API_ORGANIZATION = os.environ["OPENAI_API_ORGANIZATION"] + PINECONE_API_KEY = os.environ["PINECONE_API_KEY"] +else: + raise FileNotFoundError("No .env file found in root directory of repository") diff --git a/api/terraform/python/lambda_openai_v2/tests/__init__.py b/api/terraform/python/lambda_openai_v2/tests/__init__.py index e69de29b..160349bd 100644 --- a/api/terraform/python/lambda_openai_v2/tests/__init__.py +++ b/api/terraform/python/lambda_openai_v2/tests/__init__.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- +# pylint: disable=duplicate-code +"""Lambda function for OpenAI API.""" +import os + +from dotenv import find_dotenv, load_dotenv + + +# Load environment variables from .env file in all folders +dotenv_path = find_dotenv() +if os.path.exists(dotenv_path): + load_dotenv(dotenv_path=dotenv_path, verbose=True) + OPENAI_API_KEY = os.environ["OPENAI_API_KEY"] + OPENAI_API_ORGANIZATION = os.environ["OPENAI_API_ORGANIZATION"] + PINECONE_API_KEY = os.environ["PINECONE_API_KEY"] +else: + raise FileNotFoundError("No .env file found in root directory of repository") diff --git a/api/terraform/python/lambda_openai_v2/tests/test_01.py b/api/terraform/python/lambda_openai_v2/tests/test_01.py index c3528aca..2d072d30 100644 --- a/api/terraform/python/lambda_openai_v2/tests/test_01.py +++ b/api/terraform/python/lambda_openai_v2/tests/test_01.py @@ -1,5 +1,6 @@ # flake8: noqa: F401 # pylint: disable=R0801 +# pylint: disable=duplicate-code """ Test requests to the OpenAI API using the Lambda Layer, 'genai'. """
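For reference: the __init__.py modules added in this final patch load secrets from a .env file at import time via python-dotenv and raise FileNotFoundError when no .env is found. The sketch below is a hypothetical smoke test, not part of the patch series, showing how that bootstrap is expected to behave; it assumes python-dotenv and pytest are installed and that the three variable names used in the patch are defined in the .env file.

    # -*- coding: utf-8 -*-
    """Hypothetical smoke test for the .env bootstrap used in
    api/terraform/python/lambda_openai/__init__.py (illustrative only)."""
    import os

    import pytest
    from dotenv import find_dotenv, load_dotenv

    # Variable names assumed from the patch above.
    REQUIRED_KEYS = ("OPENAI_API_KEY", "OPENAI_API_ORGANIZATION", "PINECONE_API_KEY")


    def test_dotenv_bootstrap():
        """Locate and load the .env file, then verify the required keys exist."""
        dotenv_path = find_dotenv()
        if not dotenv_path or not os.path.exists(dotenv_path):
            pytest.skip("no .env file found; nothing to verify")
        load_dotenv(dotenv_path=dotenv_path, verbose=True)
        missing = [key for key in REQUIRED_KEYS if key not in os.environ]
        assert not missing, f"missing environment variables: {missing}"

If the repository later adds more required secrets, extending REQUIRED_KEYS keeps the check in one place while mirroring the package-level import behavior.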