Commit b380d32

chore: lint with mypy and isort

lpm0073 committed Nov 18, 2023
1 parent f5c72fc commit b380d32
Showing 13 changed files with 100 additions and 112 deletions.
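
Most of the hunks below are mechanical import re-ordering. For orientation, isort's Python API reproduces the kind of reshuffle this commit applies; a minimal sketch, assuming isort >= 5 is installed and using default settings rather than the repository's pyproject.toml configuration:

import isort

# Imports in the style the diff removes: stdlib modules out of order, and
# names inside the "from" import unalphabetized.
messy = "import os\nimport json\nfrom dotenv import load_dotenv, find_dotenv\n"
print(isort.code(messy))
# With default settings this should print the stdlib block first, then the
# third-party block, with names inside the "from" import alphabetized:
#   import json
#   import os
#
#   from dotenv import find_dotenv, load_dotenv
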
5 changes: 1 addition & 4 deletions .pre-commit-config.yaml
@@ -22,10 +22,7 @@ repos:
rev: 5.12.0
hooks:
- id: isort
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.7.0
hooks:
- id: mypy
args: ["--settings-path=pyproject.toml"]
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
hooks:
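
One note on the hunk above: "--settings-path" is an isort option, not a mypy one (mypy takes "--config-file"), which may be why this mirrors-mypy hook was dropped. If mypy runs outside pre-commit instead, its Python API is one route; a hedged sketch, assuming mypy is installed, the settings live in pyproject.toml, and the target path is picked purely for illustration:

from mypy import api

# api.run returns a (stdout, stderr, exit_status) tuple.
stdout, stderr, exit_status = api.run(["--config-file", "pyproject.toml", "api/terraform/python"])
print(stdout)
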
70 changes: 28 additions & 42 deletions api/terraform/python/lambda_langchain/lambda_handler.py
@@ -15,56 +15,58 @@
https://python.langchain.com/docs/integrations/memory/aws_dynamodb
https://bobbyhadz.com/blog/react-generate-unique-id
"""
import os
import json
from dotenv import load_dotenv, find_dotenv
import os

# OpenAI imports
import openai
from dotenv import find_dotenv, load_dotenv
from langchain.chains import LLMChain

# Langchain imports
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)

# from langchain.memory.chat_message_histories.in_memory import ChatMessageHistory
# from langchain.schema.messages import HumanMessage, SystemMessage, AIMessage
# from langchain.schema.messages import BaseMessage

# local imports from 'layer_genai' virtual environment or AWS Lambda layer.
from openai_utils.const import (
OpenAIEndPoint,
OpenAIMessageKeys,
HTTP_RESPONSE_OK,
HTTP_RESPONSE_BAD_REQUEST,
HTTP_RESPONSE_INTERNAL_SERVER_ERROR,
HTTP_RESPONSE_OK,
VALID_CHAT_COMPLETION_MODELS,
VALID_EMBEDDING_MODELS,
OpenAIEndPoint,
OpenAIMessageKeys,
)
from openai_utils.utils import (
http_response_factory,
exception_response_factory,
dump_environment,
get_request_body,
parse_request,
exception_response_factory,
get_content_for_role,
get_message_history,
get_messages_for_role,
get_request_body,
http_response_factory,
parse_request,
)
from openai_utils.validators import (
validate_item,
validate_request_body,
validate_messages,
validate_completion_request,
validate_embedding_request,
validate_item,
validate_messages,
validate_request_body,
)


# from langchain.memory.chat_message_histories.in_memory import ChatMessageHistory
# from langchain.schema.messages import HumanMessage, SystemMessage, AIMessage
# from langchain.schema.messages import BaseMessage


###############################################################################
# ENVIRONMENT CREDENTIALS
###############################################################################
@@ -108,9 +110,7 @@ def handler(event, context, api_key=None, organization=None, pinecone_api_key=No
# ----------------------------------------------------------------------
request_body = get_request_body(event=event)
validate_request_body(request_body=request_body)
end_point, model, messages, input_text, temperature, max_tokens = parse_request(
request_body
)
end_point, model, messages, input_text, temperature, max_tokens = parse_request(request_body)
validate_messages(request_body=request_body)
request_meta_data = {
"request_meta_data": {
@@ -138,18 +138,12 @@ def handler(event, context, api_key=None, organization=None, pinecone_api_key=No
item_type="ChatCompletion models",
)
validate_completion_request(request_body)
system_message = get_content_for_role(
messages, OpenAIMessageKeys.OPENAI_SYSTEM_MESSAGE_KEY
)
user_message = get_content_for_role(
messages, OpenAIMessageKeys.OPENAI_USER_MESSAGE_KEY
)
system_message = get_content_for_role(messages, OpenAIMessageKeys.OPENAI_SYSTEM_MESSAGE_KEY)
user_message = get_content_for_role(messages, OpenAIMessageKeys.OPENAI_USER_MESSAGE_KEY)

# 2. initialize the LangChain ChatOpenAI model
# -------------------------------------------------------------
llm = ChatOpenAI(
model=model, temperature=temperature, max_tokens=max_tokens
)
llm = ChatOpenAI(model=model, temperature=temperature, max_tokens=max_tokens)
prompt = ChatPromptTemplate(
messages=[
SystemMessagePromptTemplate.from_template(system_message),
@@ -165,9 +159,7 @@ def handler(event, context, api_key=None, organization=None, pinecone_api_key=No
return_messages=True,
)
message_history = get_message_history(messages)
user_messages = get_messages_for_role(
message_history, OpenAIMessageKeys.OPENAI_USER_MESSAGE_KEY
)
user_messages = get_messages_for_role(message_history, OpenAIMessageKeys.OPENAI_USER_MESSAGE_KEY)
assistant_messages = get_messages_for_role(
message_history, OpenAIMessageKeys.OPENAI_ASSISTANT_MESSAGE_KEY
)
@@ -211,19 +203,15 @@ def handler(event, context, api_key=None, organization=None, pinecone_api_key=No
openai_results = openai.Moderation.create(input=input_text)

case OpenAIEndPoint.Models:
openai_results = (
openai.Model.retrieve(model) if model else openai.Model.list()
)
openai_results = openai.Model.retrieve(model) if model else openai.Model.list()

case OpenAIEndPoint.Audio:
raise NotImplementedError("Audio support is coming soon")

# handle anything that went wrong
except (openai.APIError, ValueError, TypeError, NotImplementedError) as e:
# 400 Bad Request
return http_response_factory(
status_code=HTTP_RESPONSE_BAD_REQUEST, body=exception_response_factory(e)
)
return http_response_factory(status_code=HTTP_RESPONSE_BAD_REQUEST, body=exception_response_factory(e))
except (openai.OpenAIError, Exception) as e:
# 500 Internal Server Error
return http_response_factory(
@@ -232,6 +220,4 @@ def handler(event, context, api_key=None, organization=None, pinecone_api_key=No
)

# success!! return the results
return http_response_factory(
status_code=HTTP_RESPONSE_OK, body={**openai_results, **request_meta_data}
)
return http_response_factory(status_code=HTTP_RESPONSE_OK, body={**openai_results, **request_meta_data})
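
For orientation, a hypothetical invocation of the handler above. The top-level body keys (end_point, model, temperature, max_tokens) are inferred from parse_request and are assumptions, not a documented contract; only the messages schema (dicts with "role" and "content" keys) is pinned down by validate_messages:

import json

from lambda_langchain.lambda_handler import handler
from openai_utils.const import HTTP_RESPONSE_OK

event = {
    "body": json.dumps(
        {
            "end_point": "ChatCompletion",  # assumed key and value naming
            "model": "gpt-3.5-turbo",
            "temperature": 0.5,
            "max_tokens": 256,
            "messages": [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Hello!"},
            ],
        }
    )
}
response = handler(event, context=None)
assert response["statusCode"] == HTTP_RESPONSE_OK
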
8 changes: 5 additions & 3 deletions api/terraform/python/lambda_langchain/tests/test_01.py
@@ -2,11 +2,13 @@
"""
Test requests to the OpenAI API via Langchain using the Lambda Layer, 'genai'.
"""
import pytest
import os
from dotenv import load_dotenv, find_dotenv
from lambda_langchain.tests.init import get_event

import pytest
from dotenv import find_dotenv, load_dotenv
from lambda_langchain.lambda_handler import handler
from lambda_langchain.tests.init import get_event


# Load environment variables from .env file in all folders
dotenv_path = find_dotenv()
38 changes: 10 additions & 28 deletions api/terraform/python/lambda_openai/lambda_handler.py
@@ -44,12 +44,14 @@

import base64
import json # library for interacting with JSON data https://www.json.org/json-en.html
import openai
import os # library for interacting with the operating system
import platform # library to view information about the server host this Lambda runs on
import sys # libraries for error management
import traceback # libraries for error management

import openai


DEBUG_MODE = os.getenv("DEBUG_MODE", "False").lower() in ("true", "1", "t")
HTTP_RESPONSE_OK = 200
HTTP_RESPONSE_BAD_REQUEST = 400
@@ -108,11 +110,7 @@ def http_response_factory(status_code: int, body) -> dict:
see https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html
"""
if status_code < 100 or status_code > 599:
raise ValueError(
"Invalid HTTP response code received: {status_code}".format(
status_code=status_code
)
)
raise ValueError("Invalid HTTP response code received: {status_code}".format(status_code=status_code))

retval = {
"isBase64Encoded": False,
@@ -165,21 +163,11 @@ def validate_messages(request_body):
raise ValueError("dict key 'messages' should be a JSON list")
for message in messages:
if type(message) is not dict:
raise ValueError(
"invalid object type {t} found in messages list".format(t=type(message))
)
raise ValueError("invalid object type {t} found in messages list".format(t=type(message)))
if "role" not in message:
raise ValueError(
"dict key 'role' not found in message {m}".format(
m=json.dumps(message, indent=4)
)
)
raise ValueError("dict key 'role' not found in message {m}".format(m=json.dumps(message, indent=4)))
if "content" not in message:
raise ValueError(
"dict key 'content' not found in message {m}".format(
m=json.dumps(message, indent=4)
)
)
raise ValueError("dict key 'content' not found in message {m}".format(m=json.dumps(message, indent=4)))


def validate_completion_request(request_body) -> None:
@@ -294,9 +282,7 @@ def handler(event, context):
item_type="ChatCompletion models",
)
validate_completion_request(request_body)
openai_results = openai.ChatCompletion.create(
model=model, messages=messages
)
openai_results = openai.ChatCompletion.create(model=model, messages=messages)

case OpenAIEndPoint.Embedding:
# https://platform.openai.com/docs/guides/embeddings/embeddings
@@ -319,19 +305,15 @@ def handler(event, context):
openai_results = openai.Moderation.create(input=input_text)

case OpenAIEndPoint.Models:
openai_results = (
openai.Model.retrieve(model) if model else openai.Model.list()
)
openai_results = openai.Model.retrieve(model) if model else openai.Model.list()

case OpenAIEndPoint.Audio:
raise NotImplementedError("Audio support is coming soon")

# handle anything that went wrong
except (openai.APIError, ValueError, TypeError, NotImplementedError) as e:
# 400 Bad Request
return http_response_factory(
status_code=HTTP_RESPONSE_BAD_REQUEST, body=exception_response_factory(e)
)
return http_response_factory(status_code=HTTP_RESPONSE_BAD_REQUEST, body=exception_response_factory(e))
except (openai.OpenAIError, Exception) as e:
# 500 Internal Server Error
return http_response_factory(
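
The response plumbing in this hunk is easy to exercise on its own. A small usage sketch of http_response_factory as defined above; the "body" key in the returned envelope is assumed from the standard Lambda proxy format, since only isBase64Encoded and statusCode are visible in the diff:

from lambda_openai.lambda_handler import http_response_factory

resp = http_response_factory(status_code=200, body={"ok": True})
assert resp["statusCode"] == 200
assert resp["isBase64Encoded"] is False

# Status codes outside 100-599 are rejected up front.
try:
    http_response_factory(status_code=999, body={})
except ValueError as e:
    print(e)  # Invalid HTTP response code received: 999
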
6 changes: 4 additions & 2 deletions api/terraform/python/lambda_openai/tests/test_init.py
@@ -1,10 +1,12 @@
# -*- coding: utf-8 -*-
"""Shared code for testing the lambda function"""
from dotenv import load_dotenv, find_dotenv
import os
import json
import os

from dotenv import find_dotenv, load_dotenv
from lambda_openai.lambda_handler import handler


# Load environment variables from .env file in all folders
dotenv_path = find_dotenv()
if os.path.exists(dotenv_path):
23 changes: 9 additions & 14 deletions api/terraform/python/lambda_openai_v2/lambda_handler.py
@@ -31,26 +31,27 @@
# -----------------------
import openai
from openai_utils.const import (
OpenAIEndPoint,
HTTP_RESPONSE_OK,
HTTP_RESPONSE_BAD_REQUEST,
HTTP_RESPONSE_INTERNAL_SERVER_ERROR,
HTTP_RESPONSE_OK,
VALID_CHAT_COMPLETION_MODELS,
VALID_EMBEDDING_MODELS,
OpenAIEndPoint,
)
from openai_utils.utils import (
http_response_factory,
exception_response_factory,
dump_environment,
exception_response_factory,
get_request_body,
http_response_factory,
parse_request,
)
from openai_utils.validators import (
validate_item,
validate_completion_request,
validate_embedding_request,
validate_item,
)


DEBUG_MODE = os.getenv("DEBUG_MODE", "False").lower() in ("true", "1", "t")

# https://platform.openai.com/api_keys
@@ -71,9 +72,7 @@ def handler(event, context):
try:
openai_results = {}
request_body = get_request_body(event=event)
end_point, model, messages, input_text, temperature, max_tokens = parse_request(
request_body
)
end_point, model, messages, input_text, temperature, max_tokens = parse_request(request_body)
request_meta_data = {
"request_meta_data": {
"lambda": "lambda_openai_v2",
@@ -121,19 +120,15 @@ def handler(event, context):
openai_results = openai.Moderation.create(input=input_text)

case OpenAIEndPoint.Models:
openai_results = (
openai.Model.retrieve(model) if model else openai.Model.list()
)
openai_results = openai.Model.retrieve(model) if model else openai.Model.list()

case OpenAIEndPoint.Audio:
raise NotImplementedError("Audio support is coming soon")

# handle anything that went wrong
except (openai.APIError, ValueError, TypeError, NotImplementedError) as e:
# 400 Bad Request
return http_response_factory(
status_code=HTTP_RESPONSE_BAD_REQUEST, body=exception_response_factory(e)
)
return http_response_factory(status_code=HTTP_RESPONSE_BAD_REQUEST, body=exception_response_factory(e))
except (openai.OpenAIError, Exception) as e:
# 500 Internal Server Error
return http_response_factory(
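
A side note on the dispatch above: these handlers use structural pattern matching (match/case with dotted value patterns), which requires Python 3.10 or newer, so the Lambda runtime presumably targets python3.10+. A reduced sketch of the pattern, with a hypothetical stand-in for openai_utils.const.OpenAIEndPoint:

class OpenAIEndPoint:  # hypothetical stand-in for openai_utils.const.OpenAIEndPoint
    ChatCompletion = "ChatCompletion"
    Models = "Models"
    Audio = "Audio"


def dispatch(end_point: str) -> str:
    # Dotted names in case clauses are value patterns: each case compares
    # end_point against the attribute's value instead of binding a new name.
    match end_point:
        case OpenAIEndPoint.ChatCompletion:
            return "chat completion request"
        case OpenAIEndPoint.Models:
            return "model lookup"
        case OpenAIEndPoint.Audio:
            raise NotImplementedError("Audio support is coming soon")
        case _:
            raise ValueError("unsupported end_point: {e}".format(e=end_point))
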
4 changes: 3 additions & 1 deletion api/terraform/python/lambda_openai_v2/tests/__init__.py
@@ -1,6 +1,8 @@
from dotenv import load_dotenv, find_dotenv
import os

from dotenv import find_dotenv, load_dotenv


# Load environment variables from .env file in all folders
dotenv_path = find_dotenv()
if os.path.exists(dotenv_path):
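
The tests/__init__.py hunk above shows the dotenv bootstrap these test packages share. Spelled out in full, with the trailing load_dotenv call (cut off by the diff) assumed:

import os

from dotenv import find_dotenv, load_dotenv

# Load environment variables from the nearest .env file, if one exists.
dotenv_path = find_dotenv()
if os.path.exists(dotenv_path):
    load_dotenv(dotenv_path)
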
8 changes: 5 additions & 3 deletions api/terraform/python/lambda_openai_v2/tests/test_01.py
@@ -2,11 +2,13 @@
"""
Test requests to the OpenAI API using the Lambda Layer, 'genai'.
"""
import pytest
import os
from dotenv import load_dotenv, find_dotenv
from lambda_openai_v2.tests.init import get_event

import pytest
from dotenv import find_dotenv, load_dotenv
from lambda_openai_v2.lambda_handler import handler
from lambda_openai_v2.tests.init import get_event


# Load environment variables from .env file in all folders
dotenv_path = find_dotenv()
2 changes: 2 additions & 0 deletions api/terraform/python/layer_genai/openai_utils/const.py
@@ -1,8 +1,10 @@
# -*- coding: utf-8 -*-
"""A module containing constants for the OpenAI API."""
import os

import openai


HTTP_RESPONSE_OK = 200
HTTP_RESPONSE_BAD_REQUEST = 400
HTTP_RESPONSE_INTERNAL_SERVER_ERROR = 500
4 changes: 2 additions & 2 deletions api/terraform/python/layer_genai/openai_utils/setup.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
"""Setup for openai_utils package."""
from setuptools import setup, find_packages

from setup_utils import get_semantic_version
from setuptools import find_packages, setup


setup(
name="openai_utils",