refactor: optimize imports and reformat with black
Baptiste O'Jeanson authored and bojeanson committed Jul 6, 2023
1 parent 23bfa8d commit f2d8075
Showing 107 changed files with 2,540 additions and 1,608 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/ci_edge_model_serving.yml
@@ -30,7 +30,7 @@ jobs:
run: make install_tflite_dependencies_linux
working-directory: ./edge_model_serving

- name: Lint with flake8
- name: Lint with flake8 and black
run: make lint
working-directory: ./edge_model_serving

2 changes: 1 addition & 1 deletion .github/workflows/ci_edge_orchestrator.yml
@@ -58,7 +58,7 @@ jobs:
run: make install
working-directory: ./edge_orchestrator

- name: Lint with flake8
- name: Lint with flake8 and black
run: make lint
working-directory: ./edge_orchestrator

3 changes: 1 addition & 2 deletions .gitignore
@@ -6,7 +6,6 @@
__pycache__/
*.py[cod]
*$py.class
/venv-torch
.vscode
*.DS_Store

@@ -131,7 +130,7 @@ celerybeat.pid
.env
.venv
env/
venv/
venv*
ENV/
*.bak/

1 change: 1 addition & 0 deletions edge_model_serving/Makefile
@@ -37,6 +37,7 @@ torch_env:
.PHONY: lint ## 🐍 Lint Python files to conform to the PEP 8 style guide
lint:
flake8 --count --show-source --statistics
black . --check

.PHONY: autopep8 ## 🐍 Automatically formats Python code to conform to the PEP 8 style guide
autopep8:
17 changes: 10 additions & 7 deletions edge_model_serving/tflite_serving/convert_pb_to_tflite.py
@@ -1,19 +1,22 @@
import tensorflow as tf
import os

model_name = 'mask_classification_model'
saved_model_dir = f'model_serving/{model_name}/1'
import tensorflow as tf

model_name = "mask_classification_model"
saved_model_dir = f"model_serving/{model_name}/1"

# Converting a SavedModel to a TensorFlow Lite model.
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir) # path to the SavedModel directory
converter = tf.lite.TFLiteConverter.from_saved_model(
saved_model_dir
) # path to the SavedModel directory
tflite_model = converter.convert()

tflite_model_dir = f'tflite_serving/{model_name}'
tflite_model_dir = f"tflite_serving/{model_name}"
if not os.path.exists(tflite_model_dir):
os.mkdir(tflite_model_dir)

# Save the model.
with open(f'{tflite_model_dir}/model.tflite', 'wb') as f:
with open(f"{tflite_model_dir}/model.tflite", "wb") as f:
f.write(tflite_model)

# reduce the size of a floating point model by quantizing the weights to float16
@@ -23,5 +26,5 @@
converter.target_spec.supported_types = [tf.float16]
tflite_quant_model = converter.convert()

with open(f'{tflite_model_dir}/model_quant.tflite', 'wb') as f:
with open(f"{tflite_model_dir}/model_quant.tflite", "wb") as f:
f.write(tflite_quant_model)
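
Note: once the conversion script above has run, the exported model can be sanity-checked by loading it back with the TFLite interpreter. A minimal sketch, assuming the tflite_serving/mask_classification_model/model.tflite path produced above:

import tensorflow as tf

# Load the converted model and allocate its tensors before any inspection.
interpreter = tf.lite.Interpreter(
    model_path="tflite_serving/mask_classification_model/model.tflite"
)
interpreter.allocate_tensors()

# The reported input shape should match the original SavedModel signature.
print(interpreter.get_input_details()[0]["shape"])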
3 changes: 2 additions & 1 deletion edge_model_serving/tflite_serving/pyproject.toml
@@ -10,7 +10,7 @@ dependencies = [
"fastapi==0.80.0",
"numpy==1.21.6",
"Pillow==8.4.0",
"uvicorn==0.11.7",
"uvicorn==0.22.0",
]
requires-python = ">=3.7"

@@ -23,6 +23,7 @@ where = ["src/"]

[project.optional-dependencies]
dev = [
"black==23.3.0",
"flake8==5.0.4",
"pytest==7.2.2",
"pytest-cov==4.0.0",
43 changes: 22 additions & 21 deletions edge_model_serving/tflite_serving/src/tflite_serving/api_routes.py
@@ -25,23 +25,23 @@ async def get_models(request: Request):
async def get_model_metadata(model_name: str, model_version: str, request: Request):
interpreter = request.app.state.model_interpreters[model_name]
input_details = interpreter.get_input_details()
return {
'inputs_shape': input_details[0]['shape'].tolist()
}
return {"inputs_shape": input_details[0]["shape"].tolist()}


@api_router.post('/models/{model_name}/versions/{model_version}:predict')
async def predict(model_name: str, model_version: str, payload: JSONStructure, request: Request):
@api_router.post("/models/{model_name}/versions/{model_version}:predict")
async def predict(
model_name: str, model_version: str, payload: JSONStructure, request: Request
):
interpreter = request.app.state.model_interpreters[model_name]
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

input_dtype = input_details[0]["dtype"]
logging.info(f'interpreting with {model_name} for input type {input_dtype}')
logging.warning(f'output details: {output_details}')
logging.info(f"interpreting with {model_name} for input type {input_dtype}")
logging.warning(f"output details: {output_details}")

try:
input_data = payload[b'inputs']
input_data = payload[b"inputs"]
input_array = np.array(input_data, dtype=input_dtype)

interpreter.set_tensor(input_details[0]["index"], input_array)
@@ -52,29 +52,30 @@ async def predict(model_name: str, model_version: str, payload: JSONStructure, request: Request):

if len(output_details) >= 3:
boxes = interpreter.get_tensor(output_details[0]["index"])
classes = interpreter.get_tensor(
output_details[1]["index"]).astype(int) + 1
classes = interpreter.get_tensor(output_details[1]["index"]).astype(int) + 1
scores = interpreter.get_tensor(output_details[2]["index"])

logging.warning(
f'interpreting with {model_name} for input type {input_dtype}')
logging.warning(f'Boxes of object detected: {boxes[0]}')
logging.warning(f'Classes of object detected: {classes[0]}')
logging.warning(f'Scores of object detected: {scores[0]}')
f"interpreting with {model_name} for input type {input_dtype}"
)
logging.warning(f"Boxes of object detected: {boxes[0]}")
logging.warning(f"Classes of object detected: {classes[0]}")
logging.warning(f"Scores of object detected: {scores[0]}")

prediction = {
'outputs': {
'detection_boxes': boxes.tolist(),
'detection_classes': classes.tolist(),
'detection_scores': scores.tolist()
"outputs": {
"detection_boxes": boxes.tolist(),
"detection_classes": classes.tolist(),
"detection_scores": scores.tolist(),
}
}
elif len(output_details) == 1:
scores = interpreter.get_tensor(output_details[0]["index"])
logging.warning(
f'interpreting with {model_name} for input type {input_dtype}')
logging.warning(f'Scores of classification: {scores[0]}')
prediction = {'outputs': scores.tolist()}
f"interpreting with {model_name} for input type {input_dtype}"
)
logging.warning(f"Scores of classification: {scores[0]}")
prediction = {"outputs": scores.tolist()}

return prediction

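For reference, the predict route above distinguishes detection models (three or more output tensors) from classification models (a single output tensor). A minimal client sketch, assuming the server listens on localhost:8501 as in the tests below and that mobilenet_ssd_v2_coco is among the loaded models:

import numpy as np
import requests

# A dummy uint8 batch shaped like the detection model's expected input.
fake_img = np.zeros((1, 300, 300, 3), dtype=np.uint8)
payload = {"inputs": fake_img.tolist()}

url = "http://localhost:8501/v1/models/mobilenet_ssd_v2_coco/versions/1:predict"
response = requests.post(url, json=payload)

# Detection responses nest boxes, classes and scores under "outputs".
print(response.json()["outputs"].keys())
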
@@ -13,8 +13,14 @@ def create_interpreter(model_path: str) -> Interpreter:

def create_model_interpreters() -> Dict[str, Interpreter]:
model_interpreters = {}
models_path = Path(os.getenv("MODELS_PATH")) if os.getenv("MODELS_PATH") else Path.cwd().parent
tflite_model_path = models_path / 'tflite'
for model_path in tflite_model_path.glob('**/*.tflite'):
model_interpreters[model_path.parent.name] = create_interpreter(model_path.as_posix())
models_path = (
Path(os.getenv("MODELS_PATH"))
if os.getenv("MODELS_PATH")
else Path.cwd().parent
)
tflite_model_path = models_path / "tflite"
for model_path in tflite_model_path.glob("**/*.tflite"):
model_interpreters[model_path.parent.name] = create_interpreter(
model_path.as_posix()
)
return model_interpreters
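
The factory above keys each interpreter by its parent directory name, so the serving layout is one directory per model. A minimal sketch of the discovery contract, assuming a models/tflite/<model_name>/model.tflite tree; the import path of the factory module is an assumption, not shown in this diff:

import os

# Point discovery at an explicit directory instead of Path.cwd().parent.
os.environ["MODELS_PATH"] = "/opt/models"

# Hypothetical import path for the factory shown above.
from tflite_serving.interpreter_factory import create_model_interpreters

# /opt/models/tflite/mobilenet_ssd_v2_coco/model.tflite -> "mobilenet_ssd_v2_coco"
interpreters = create_model_interpreters()
print(sorted(interpreters))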
33 changes: 19 additions & 14 deletions edge_model_serving/tflite_serving/tests/test_tflite_serving.py
@@ -6,11 +6,12 @@


class TestTfliteServing:
base_url = 'http://localhost:8501/v1'
image_test_path = 'tests/data/mask_people_dataset/person_without_mask.jpg'
binary_test = open(image_test_path, 'rb')
base_url = "http://localhost:8501/v1"
image_test_path = "tests/data/mask_people_dataset/person_without_mask.jpg"
binary_test = open(image_test_path, "rb")
os.environ["MODELS_PATH"] = (Path.cwd().parent / "models").as_posix()
from tflite_serving.tflite_server import app

test_client = TestClient(app)

def test_get_home_should_return_link_to_docs(self):
@@ -26,11 +27,13 @@ def test_get_home_should_return_link_to_docs(self):

def test_get_models_should_return_4_models(self):
# Given
model_url = f'{self.base_url}/models'
expected_models = ["cellphone_connection_control",
"marker_quality_control",
"mobilenet_ssd_v2_coco",
"mobilenet_ssd_v2_face"]
model_url = f"{self.base_url}/models"
expected_models = [
"cellphone_connection_control",
"marker_quality_control",
"mobilenet_ssd_v2_coco",
"mobilenet_ssd_v2_face",
]

# When
actual_response = self.test_client.get(model_url)
@@ -41,7 +44,9 @@ def test_get_models_should_return_4_models(self):

def test_get_model_resolution_should_return_inputs_shape(self):
# Given
model_url = f'{self.base_url}/models/cellphone_connection_control/versions/1/resolution'
model_url = (
f"{self.base_url}/models/cellphone_connection_control/versions/1/resolution"
)
expected_resolution = {"inputs_shape": [1, 224, 224, 3]}

# When
@@ -53,12 +58,12 @@ def test_get_model_resolution_should_return_inputs_shape(self):

def test_serving_return_object_detection_prediction(self):
# Given
model_url = f'{self.base_url}/models/mobilenet_ssd_v2_coco/versions/1:predict'
model_url = f"{self.base_url}/models/mobilenet_ssd_v2_coco/versions/1:predict"

image_resolution = (300, 300, 3)
fake_img_array = np.zeros(image_resolution)
fake_img_preprocessed = np.expand_dims(fake_img_array, axis=0).astype(np.uint8)
payload = {'inputs': fake_img_preprocessed.tolist()}
payload = {"inputs": fake_img_preprocessed.tolist()}

# When
actual_response = self.test_client.post(model_url, json=payload)
@@ -75,14 +80,14 @@ def test_serving_return_object_detection_prediction(self):

def test_serving_return_classification_prediction(self):
# Given
model_url = f'{self.base_url}/models/marker_quality_control/versions/1:predict'
model_url = f"{self.base_url}/models/marker_quality_control/versions/1:predict"

image_resolution = (224, 224, 3)
fake_img_array = np.zeros(image_resolution)
fake_img_preprocessed = np.expand_dims(fake_img_array, axis=0).astype(np.uint8)
payload = {'inputs': fake_img_preprocessed.tolist()}
payload = {"inputs": fake_img_preprocessed.tolist()}

expected_prediction = {'outputs': [[0.021249305456876755, 0.9787507057189941]]}
expected_prediction = {"outputs": [[0.021249305456876755, 0.9787507057189941]]}

# When
actual_response = self.test_client.post(model_url, json=payload)
4 changes: 2 additions & 2 deletions edge_model_serving/torch_serving/model.py
@@ -1,8 +1,8 @@
from torchvision.models.detection.faster_rcnn import FasterRCNN
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
from torchvision.models.detection.faster_rcnn import FasterRCNN


class FRCNNObjectDetector(FasterRCNN):
def __init__(self, num_classes=91, **kwargs):
backbone = resnet_fpn_backbone('resnet50', True)
backbone = resnet_fpn_backbone("resnet50", True)
super(FRCNNObjectDetector, self).__init__(backbone, num_classes, **kwargs)
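
As a quick smoke test, the wrapper above can be instantiated and run on a dummy image. A minimal sketch, assuming a torchvision version that still accepts the positional pretrained flag used in resnet_fpn_backbone (newer releases expect a weights= argument instead):

import torch

model = FRCNNObjectDetector()
model.eval()

# Faster R-CNN consumes a list of 3xHxW float tensors and returns one dict per image.
with torch.no_grad():
    predictions = model([torch.rand(3, 300, 300)])

print(predictions[0].keys())  # expected keys: boxes, labels, scores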
3 changes: 2 additions & 1 deletion edge_orchestrator/Makefile
@@ -17,7 +17,8 @@ help:

.PHONY: lint ## 🐍 Lint Python files to conform to the PEP 8 style guide
lint:
flake8 --count --show-source --statistics --exclude venv*
flake8 --count --show-source --statistics
black . --check

.PHONY: autopep8 ## 🐍 Automatically formats Python code to conform to the PEP 8 style guide
autopep8:
4 changes: 2 additions & 2 deletions edge_orchestrator/edge_orchestrator/__init__.py
@@ -8,8 +8,8 @@
disable_existing_loggers=False,
defaults={
"edge_orchestrator_level": "INFO",
"edge_orchestrator_formatter": "classic"
}
"edge_orchestrator_formatter": "classic",
},
)

logger = logging.getLogger("edge_orchestrator")
2 changes: 1 addition & 1 deletion edge_orchestrator/edge_orchestrator/__main__.py
@@ -2,6 +2,6 @@

from edge_orchestrator.application.server import server

if __name__ == '__main__':
if __name__ == "__main__":
orchestrator_app = server()
uvicorn.run(orchestrator_app, host="0.0.0.0", port=8000, log_level="info")
51 changes: 36 additions & 15 deletions edge_orchestrator/edge_orchestrator/api_config.py
@@ -4,30 +4,51 @@


def load_config():
configuration = os.environ.get('API_CONFIG', 'default')
logger.info(f'App running with configuration: {configuration}')

available_configurations = ['test', 'docker', 'default', 'edge', 'edge-lite', 'upload-gcp']
configuration = os.environ.get("API_CONFIG", "default")
logger.info(f"App running with configuration: {configuration}")

available_configurations = [
"test",
"docker",
"default",
"edge",
"edge-lite",
"upload-gcp",
]
if configuration not in available_configurations:
raise ValueError(f"Unknown configuration '{configuration}'. "
f'Valid configurations are {available_configurations}.')
elif configuration == 'test':
raise ValueError(
f"Unknown configuration '{configuration}'. "
f"Valid configurations are {available_configurations}."
)
elif configuration == "test":
from edge_orchestrator.environment.test import Test

configuration_class = Test
elif configuration == 'docker':
elif configuration == "docker":
from edge_orchestrator.environment.docker import Docker

configuration_class = Docker
elif configuration == 'default':
elif configuration == "default":
from edge_orchestrator.environment.default import Default

configuration_class = Default
elif configuration == 'edge':
from edge_orchestrator.environment.edge_with_mongo_db_metadata_storage import EdgeWithMongoDbMetadataStorage
elif configuration == "edge":
from edge_orchestrator.environment.edge_with_mongo_db_metadata_storage import (
EdgeWithMongoDbMetadataStorage,
)

configuration_class = EdgeWithMongoDbMetadataStorage
elif configuration == 'edge-lite':
from edge_orchestrator.environment.edge_with_azure_container_storage import EdgeWithAzureContainerStorage
elif configuration == "edge-lite":
from edge_orchestrator.environment.edge_with_azure_container_storage import (
EdgeWithAzureContainerStorage,
)

configuration_class = EdgeWithAzureContainerStorage
elif configuration == 'upload-gcp':
from edge_orchestrator.environment.upload_with_gcp_bucket import UploadWithGCPBucket
elif configuration == "upload-gcp":
from edge_orchestrator.environment.upload_with_gcp_bucket import (
UploadWithGCPBucket,
)

configuration_class = UploadWithGCPBucket

return configuration_class()
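
Usage is unchanged by the reformatting: the selector still reads API_CONFIG from the environment and returns an instance of the matching environment class. A minimal sketch:

import os

os.environ["API_CONFIG"] = "edge-lite"  # any of the configurations listed above

from edge_orchestrator.api_config import load_config

config = load_config()  # here an EdgeWithAzureContainerStorage instance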