From b380d3228ec3bca37648bcb8c63e48297e773e15 Mon Sep 17 00:00:00 2001
From: lpm0073
Date: Sat, 18 Nov 2023 11:26:46 -0600
Subject: [PATCH] chore: lint with mypy and isort

---
 .pre-commit-config.yaml                       |  5 +-
 .../python/lambda_langchain/lambda_handler.py | 70 ++++++++-----------
 .../python/lambda_langchain/tests/test_01.py  |  8 ++-
 .../python/lambda_openai/lambda_handler.py    | 38 +++-------
 .../python/lambda_openai/tests/test_init.py   |  6 +-
 .../python/lambda_openai_v2/lambda_handler.py | 23 +++---
 .../python/lambda_openai_v2/tests/__init__.py |  4 +-
 .../python/lambda_openai_v2/tests/test_01.py  |  8 ++-
 .../python/layer_genai/openai_utils/const.py  |  2 +
 .../python/layer_genai/openai_utils/setup.py  |  4 +-
 .../python/layer_genai/openai_utils/utils.py  | 16 ++---
 pyproject.toml                                | 23 ++++++
 tox.ini                                       |  5 +-
 13 files changed, 100 insertions(+), 112 deletions(-)
 create mode 100644 pyproject.toml

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 13257020..31e73f6b 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -22,10 +22,7 @@ repos:
     rev: 5.12.0
     hooks:
       - id: isort
-  - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v1.7.0
-    hooks:
-      - id: mypy
+        args: ["--settings-path=pyproject.toml"]
   - repo: https://github.com/pre-commit/pre-commit-hooks
     rev: v4.5.0
     hooks:
diff --git a/api/terraform/python/lambda_langchain/lambda_handler.py b/api/terraform/python/lambda_langchain/lambda_handler.py
index 6537405f..0fb4a913 100644
--- a/api/terraform/python/lambda_langchain/lambda_handler.py
+++ b/api/terraform/python/lambda_langchain/lambda_handler.py
@@ -15,56 +15,58 @@
 https://python.langchain.com/docs/integrations/memory/aws_dynamodb
 https://bobbyhadz.com/blog/react-generate-unique-id
 """
-import os
 import json
-from dotenv import load_dotenv, find_dotenv
+import os
 
 # OpenAI imports
 import openai
+from dotenv import find_dotenv, load_dotenv
+from langchain.chains import LLMChain
 
 # Langchain imports
 from langchain.chat_models import ChatOpenAI
-from langchain.chains import LLMChain
 from langchain.memory import ConversationBufferMemory
 from langchain.prompts import (
     ChatPromptTemplate,
+    HumanMessagePromptTemplate,
     MessagesPlaceholder,
     SystemMessagePromptTemplate,
-    HumanMessagePromptTemplate,
 )
 
-# from langchain.memory.chat_message_histories.in_memory import ChatMessageHistory
-# from langchain.schema.messages import HumanMessage, SystemMessage, AIMessage
-# from langchain.schema.messages import BaseMessage
-
 # local imports from 'layer_genai' virtual environment or AWS Lambda layer.
 from openai_utils.const import (
-    OpenAIEndPoint,
-    OpenAIMessageKeys,
-    HTTP_RESPONSE_OK,
     HTTP_RESPONSE_BAD_REQUEST,
     HTTP_RESPONSE_INTERNAL_SERVER_ERROR,
+    HTTP_RESPONSE_OK,
     VALID_CHAT_COMPLETION_MODELS,
     VALID_EMBEDDING_MODELS,
+    OpenAIEndPoint,
+    OpenAIMessageKeys,
 )
 from openai_utils.utils import (
-    http_response_factory,
-    exception_response_factory,
     dump_environment,
-    get_request_body,
-    parse_request,
+    exception_response_factory,
     get_content_for_role,
     get_message_history,
     get_messages_for_role,
+    get_request_body,
+    http_response_factory,
+    parse_request,
 )
 from openai_utils.validators import (
-    validate_item,
-    validate_request_body,
-    validate_messages,
     validate_completion_request,
     validate_embedding_request,
+    validate_item,
+    validate_messages,
+    validate_request_body,
 )
+
+# from langchain.memory.chat_message_histories.in_memory import ChatMessageHistory
+# from langchain.schema.messages import HumanMessage, SystemMessage, AIMessage
+# from langchain.schema.messages import BaseMessage
+
+
 ###############################################################################
 # ENVIRONMENT CREDENTIALS
 ###############################################################################
@@ -108,9 +110,7 @@ def handler(event, context, api_key=None, organization=None, pinecone_api_key=No
         # ----------------------------------------------------------------------
         request_body = get_request_body(event=event)
         validate_request_body(request_body=request_body)
-        end_point, model, messages, input_text, temperature, max_tokens = parse_request(
-            request_body
-        )
+        end_point, model, messages, input_text, temperature, max_tokens = parse_request(request_body)
         validate_messages(request_body=request_body)
         request_meta_data = {
             "request_meta_data": {
@@ -138,18 +138,12 @@ def handler(event, context, api_key=None, organization=None, pinecone_api_key=No
                     item_type="ChatCompletion models",
                 )
                 validate_completion_request(request_body)
-                system_message = get_content_for_role(
-                    messages, OpenAIMessageKeys.OPENAI_SYSTEM_MESSAGE_KEY
-                )
-                user_message = get_content_for_role(
-                    messages, OpenAIMessageKeys.OPENAI_USER_MESSAGE_KEY
-                )
+                system_message = get_content_for_role(messages, OpenAIMessageKeys.OPENAI_SYSTEM_MESSAGE_KEY)
+                user_message = get_content_for_role(messages, OpenAIMessageKeys.OPENAI_USER_MESSAGE_KEY)
 
                 # 2. initialize the LangChain ChatOpenAI model
                 # -------------------------------------------------------------
-                llm = ChatOpenAI(
-                    model=model, temperature=temperature, max_tokens=max_tokens
-                )
+                llm = ChatOpenAI(model=model, temperature=temperature, max_tokens=max_tokens)
                 prompt = ChatPromptTemplate(
                     messages=[
                         SystemMessagePromptTemplate.from_template(system_message),
@@ -165,9 +159,7 @@ def handler(event, context, api_key=None, organization=None, pinecone_api_key=No
                     return_messages=True,
                 )
                 message_history = get_message_history(messages)
-                user_messages = get_messages_for_role(
-                    message_history, OpenAIMessageKeys.OPENAI_USER_MESSAGE_KEY
-                )
+                user_messages = get_messages_for_role(message_history, OpenAIMessageKeys.OPENAI_USER_MESSAGE_KEY)
                 assistant_messages = get_messages_for_role(
                     message_history, OpenAIMessageKeys.OPENAI_ASSISTANT_MESSAGE_KEY
                 )
@@ -211,9 +203,7 @@ def handler(event, context, api_key=None, organization=None, pinecone_api_key=No
                 openai_results = openai.Moderation.create(input=input_text)
 
             case OpenAIEndPoint.Models:
-                openai_results = (
-                    openai.Model.retrieve(model) if model else openai.Model.list()
-                )
+                openai_results = openai.Model.retrieve(model) if model else openai.Model.list()
 
             case OpenAIEndPoint.Audio:
                 raise NotImplementedError("Audio support is coming soon")
@@ -221,9 +211,7 @@ def handler(event, context, api_key=None, organization=None, pinecone_api_key=No
     # handle anything that went wrong
     except (openai.APIError, ValueError, TypeError, NotImplementedError) as e:
         # 400 Bad Request
-        return http_response_factory(
-            status_code=HTTP_RESPONSE_BAD_REQUEST, body=exception_response_factory(e)
-        )
+        return http_response_factory(status_code=HTTP_RESPONSE_BAD_REQUEST, body=exception_response_factory(e))
     except (openai.OpenAIError, Exception) as e:
         # 500 Internal Server Error
         return http_response_factory(
@@ -232,6 +220,4 @@ def handler(event, context, api_key=None, organization=None, pinecone_api_key=No
         )
 
     # success!! return the results
-    return http_response_factory(
-        status_code=HTTP_RESPONSE_OK, body={**openai_results, **request_meta_data}
-    )
+    return http_response_factory(status_code=HTTP_RESPONSE_OK, body={**openai_results, **request_meta_data})
diff --git a/api/terraform/python/lambda_langchain/tests/test_01.py b/api/terraform/python/lambda_langchain/tests/test_01.py
index 5529446b..04248c31 100644
--- a/api/terraform/python/lambda_langchain/tests/test_01.py
+++ b/api/terraform/python/lambda_langchain/tests/test_01.py
@@ -2,11 +2,13 @@
 """
 Test requests to the OpenAI API via Langchain using the Lambda Layer, 'genai'.
""" -import pytest import os -from dotenv import load_dotenv, find_dotenv -from lambda_langchain.tests.init import get_event + +import pytest +from dotenv import find_dotenv, load_dotenv from lambda_langchain.lambda_handler import handler +from lambda_langchain.tests.init import get_event + # Load environment variables from .env file in all folders dotenv_path = find_dotenv() diff --git a/api/terraform/python/lambda_openai/lambda_handler.py b/api/terraform/python/lambda_openai/lambda_handler.py index 263cc96f..ef0ddb0e 100644 --- a/api/terraform/python/lambda_openai/lambda_handler.py +++ b/api/terraform/python/lambda_openai/lambda_handler.py @@ -44,12 +44,14 @@ import base64 import json # library for interacting with JSON data https://www.json.org/json-en.html -import openai import os # library for interacting with the operating system import platform # library to view informatoin about the server host this Lambda runs on import sys # libraries for error management import traceback # libraries for error management +import openai + + DEBUG_MODE = os.getenv("DEBUG_MODE", "False").lower() in ("true", "1", "t") HTTP_RESPONSE_OK = 200 HTTP_RESPONSE_BAD_REQUEST = 400 @@ -108,11 +110,7 @@ def http_response_factory(status_code: int, body) -> dict: see https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html """ if status_code < 100 or status_code > 599: - raise ValueError( - "Invalid HTTP response code received: {status_code}".format( - status_code=status_code - ) - ) + raise ValueError("Invalid HTTP response code received: {status_code}".format(status_code=status_code)) retval = { "isBase64Encoded": False, @@ -165,21 +163,11 @@ def validate_messages(request_body): raise ValueError("dict key 'messages' should be a JSON list") for message in messages: if type(message) is not dict: - raise ValueError( - "invalid ojbect type {t} found in messages list".format(t=type(message)) - ) + raise ValueError("invalid ojbect type {t} found in messages list".format(t=type(message))) if "role" not in message: - raise ValueError( - "dict key 'role' not found in message {m}".format( - m=json.dumps(message, indent=4) - ) - ) + raise ValueError("dict key 'role' not found in message {m}".format(m=json.dumps(message, indent=4))) if "content" not in message: - raise ValueError( - "dict key 'content' not found in message {m}".format( - m=json.dumps(message, indent=4) - ) - ) + raise ValueError("dict key 'content' not found in message {m}".format(m=json.dumps(message, indent=4))) def validate_completion_request(request_body) -> None: @@ -294,9 +282,7 @@ def handler(event, context): item_type="ChatCompletion models", ) validate_completion_request(request_body) - openai_results = openai.ChatCompletion.create( - model=model, messages=messages - ) + openai_results = openai.ChatCompletion.create(model=model, messages=messages) case OpenAIEndPoint.Embedding: # https://platform.openai.com/docs/guides/embeddings/embeddings @@ -319,9 +305,7 @@ def handler(event, context): openai_results = openai.Moderation.create(input=input_text) case OpenAIEndPoint.Models: - openai_results = ( - openai.Model.retrieve(model) if model else openai.Model.list() - ) + openai_results = openai.Model.retrieve(model) if model else openai.Model.list() case OpenAIEndPoint.Audio: raise NotImplementedError("Audio support is coming soon") @@ -329,9 +313,7 @@ def handler(event, context): # handle anything that went wrong except (openai.APIError, ValueError, TypeError, NotImplementedError) as e: # 400 Bad Request - return http_response_factory( - 
-            status_code=HTTP_RESPONSE_BAD_REQUEST, body=exception_response_factory(e)
-        )
+        return http_response_factory(status_code=HTTP_RESPONSE_BAD_REQUEST, body=exception_response_factory(e))
     except (openai.OpenAIError, Exception) as e:
         # 500 Internal Server Error
         return http_response_factory(
diff --git a/api/terraform/python/lambda_openai/tests/test_init.py b/api/terraform/python/lambda_openai/tests/test_init.py
index a7899582..859d97ca 100644
--- a/api/terraform/python/lambda_openai/tests/test_init.py
+++ b/api/terraform/python/lambda_openai/tests/test_init.py
@@ -1,10 +1,12 @@
 # -*- coding: utf-8 -*-
 """Shared code for testing the lambda function"""
-from dotenv import load_dotenv, find_dotenv
-import os
 import json
+import os
+
+from dotenv import find_dotenv, load_dotenv
 from lambda_openai.lambda_handler import handler
 
+
 # Load environment variables from .env file in all folders
 dotenv_path = find_dotenv()
 if os.path.exists(dotenv_path):
diff --git a/api/terraform/python/lambda_openai_v2/lambda_handler.py b/api/terraform/python/lambda_openai_v2/lambda_handler.py
index a53f1c8f..411e7af8 100644
--- a/api/terraform/python/lambda_openai_v2/lambda_handler.py
+++ b/api/terraform/python/lambda_openai_v2/lambda_handler.py
@@ -31,26 +31,27 @@
 # -----------------------
 import openai
 from openai_utils.const import (
-    OpenAIEndPoint,
-    HTTP_RESPONSE_OK,
     HTTP_RESPONSE_BAD_REQUEST,
     HTTP_RESPONSE_INTERNAL_SERVER_ERROR,
+    HTTP_RESPONSE_OK,
     VALID_CHAT_COMPLETION_MODELS,
     VALID_EMBEDDING_MODELS,
+    OpenAIEndPoint,
 )
 from openai_utils.utils import (
-    http_response_factory,
-    exception_response_factory,
     dump_environment,
+    exception_response_factory,
     get_request_body,
+    http_response_factory,
     parse_request,
 )
 from openai_utils.validators import (
-    validate_item,
     validate_completion_request,
     validate_embedding_request,
+    validate_item,
 )
 
+
 DEBUG_MODE = os.getenv("DEBUG_MODE", "False").lower() in ("true", "1", "t")
 
 # https://platform.openai.com/api_keys
@@ -71,9 +72,7 @@ def handler(event, context):
     try:
         openai_results = {}
         request_body = get_request_body(event=event)
-        end_point, model, messages, input_text, temperature, max_tokens = parse_request(
-            request_body
-        )
+        end_point, model, messages, input_text, temperature, max_tokens = parse_request(request_body)
         request_meta_data = {
             "request_meta_data": {
                 "lambda": "lambda_openai_v2",
@@ -121,9 +120,7 @@ def handler(event, context):
                 openai_results = openai.Moderation.create(input=input_text)
 
             case OpenAIEndPoint.Models:
-                openai_results = (
-                    openai.Model.retrieve(model) if model else openai.Model.list()
-                )
+                openai_results = openai.Model.retrieve(model) if model else openai.Model.list()
 
             case OpenAIEndPoint.Audio:
                 raise NotImplementedError("Audio support is coming soon")
@@ -131,9 +128,7 @@ def handler(event, context):
     # handle anything that went wrong
     except (openai.APIError, ValueError, TypeError, NotImplementedError) as e:
         # 400 Bad Request
-        return http_response_factory(
-            status_code=HTTP_RESPONSE_BAD_REQUEST, body=exception_response_factory(e)
-        )
+        return http_response_factory(status_code=HTTP_RESPONSE_BAD_REQUEST, body=exception_response_factory(e))
     except (openai.OpenAIError, Exception) as e:
         # 500 Internal Server Error
         return http_response_factory(
diff --git a/api/terraform/python/lambda_openai_v2/tests/__init__.py b/api/terraform/python/lambda_openai_v2/tests/__init__.py
index 33b22ea4..3d844456 100644
--- a/api/terraform/python/lambda_openai_v2/tests/__init__.py
+++ b/api/terraform/python/lambda_openai_v2/tests/__init__.py
@@ -1,6 +1,8 @@
-from dotenv import load_dotenv, find_dotenv
 import os
+
+from dotenv import find_dotenv, load_dotenv
+
 
 # Load environment variables from .env file in all folders
 dotenv_path = find_dotenv()
 if os.path.exists(dotenv_path):
diff --git a/api/terraform/python/lambda_openai_v2/tests/test_01.py b/api/terraform/python/lambda_openai_v2/tests/test_01.py
index 6375bd6d..6531f9c4 100644
--- a/api/terraform/python/lambda_openai_v2/tests/test_01.py
+++ b/api/terraform/python/lambda_openai_v2/tests/test_01.py
@@ -2,11 +2,13 @@
 """
 Test requests to the OpenAI API using the Lambda Layer, 'genai'.
 """
-import pytest
 import os
-from dotenv import load_dotenv, find_dotenv
-from lambda_openai_v2.tests.init import get_event
+
+import pytest
+from dotenv import find_dotenv, load_dotenv
 from lambda_openai_v2.lambda_handler import handler
+from lambda_openai_v2.tests.init import get_event
+
 
 # Load environment variables from .env file in all folders
 dotenv_path = find_dotenv()
diff --git a/api/terraform/python/layer_genai/openai_utils/const.py b/api/terraform/python/layer_genai/openai_utils/const.py
index 4fdfed19..b1ac0476 100644
--- a/api/terraform/python/layer_genai/openai_utils/const.py
+++ b/api/terraform/python/layer_genai/openai_utils/const.py
@@ -1,8 +1,10 @@
 # -*- coding: utf-8 -*-
 """A module containing constants for the OpenAI API."""
 import os
+
 import openai
 
+
 HTTP_RESPONSE_OK = 200
 HTTP_RESPONSE_BAD_REQUEST = 400
 HTTP_RESPONSE_INTERNAL_SERVER_ERROR = 500
diff --git a/api/terraform/python/layer_genai/openai_utils/setup.py b/api/terraform/python/layer_genai/openai_utils/setup.py
index 6612cffd..843cbba4 100644
--- a/api/terraform/python/layer_genai/openai_utils/setup.py
+++ b/api/terraform/python/layer_genai/openai_utils/setup.py
@@ -1,8 +1,8 @@
 # -*- coding: utf-8 -*-
 """Setup for openai_utils package."""
-from setuptools import setup, find_packages
-
 from setup_utils import get_semantic_version
+from setuptools import find_packages, setup
+
 
 setup(
     name="openai_utils",
diff --git a/api/terraform/python/layer_genai/openai_utils/utils.py b/api/terraform/python/layer_genai/openai_utils/utils.py
index 03365845..ffd05178 100644
--- a/api/terraform/python/layer_genai/openai_utils/utils.py
+++ b/api/terraform/python/layer_genai/openai_utils/utils.py
@@ -2,24 +2,24 @@
 """Utility functions for the OpenAI Lambda functions"""
 import base64
 import json  # library for interacting with JSON data https://www.json.org/json-en.html
-import openai
 import os  # library for interacting with the operating system
 import platform  # library to view information about the server host this Lambda runs on
 import sys  # libraries for error management
 import traceback  # libraries for error management
 
+import openai
 from openai_utils.const import (
-    OpenAIEndPoint,
     DEBUG_MODE,
     LANGCHAIN_MESSAGE_HISTORY_ROLES,
+    OpenAIEndPoint,
 )
 from openai_utils.validators import (
+    validate_endpoint,
     validate_item,
-    validate_request_body,
+    validate_max_tokens,
     validate_messages,
+    validate_request_body,
     validate_temperature,
-    validate_max_tokens,
-    validate_endpoint,
 )
 
 
@@ -33,11 +33,7 @@ def http_response_factory(status_code: int, body) -> dict:
     see https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html
     """
     if status_code < 100 or status_code > 599:
-        raise ValueError(
-            "Invalid HTTP response code received: {status_code}".format(
-                status_code=status_code
-            )
-        )
+        raise ValueError("Invalid HTTP response code received: {status_code}".format(status_code=status_code))
 
     retval = {
         "isBase64Encoded": False,
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..5ab74c62
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,23 @@
+[tool.isort]
+profile = "black"
+lines_after_imports = 2
+
+[tool.black]
+line-length = 120
+target-version = ['py311']
+include = '\.pyi?$'
+exclude = '''
+/(
+    \.git
+  | \.hg
+  | \.mypy_cache
+  | \.tox
+  | \.venv
+  | venv
+  | build
+  | buck-out
+  | build
+  | dist
+  | lambda_dist_pkg
+)/
+'''
diff --git a/tox.ini b/tox.ini
index 24a61b09..e71f2689 100644
--- a/tox.ini
+++ b/tox.ini
@@ -4,11 +4,10 @@
 envlist = py3.11,flake8
 skip_missing_interpreters = true
 
-[isort]
+[tool:isort]
+profile = black
 skip =venv,node_modules,api/terraform/python/lambda_openai/lambda_dist_pkg,api/terraform/python/lambda_openai/venv
 
-[mypy]
-files = api/terraform/python/
 
 [gh-actions]
 python =