From f2d8075a9d54248e6d237c2b0df6ae7e5bcc7579 Mon Sep 17 00:00:00 2001 From: Baptiste O'Jeanson <bojeanson@octo.com> Date: Fri, 30 Jun 2023 19:27:49 +0200 Subject: [PATCH] refactor: optimize imports and reformat with black --- .github/workflows/ci_edge_model_serving.yml | 2 +- .github/workflows/ci_edge_orchestrator.yml | 2 +- .gitignore | 3 +- edge_model_serving/Makefile | 1 + .../tflite_serving/convert_pb_to_tflite.py | 17 +- .../tflite_serving/pyproject.toml | 3 +- .../src/tflite_serving/api_routes.py | 43 ++-- .../src/tflite_serving/tflite_interpreter.py | 14 +- .../tests/test_tflite_serving.py | 33 +-- edge_model_serving/torch_serving/model.py | 4 +- edge_orchestrator/Makefile | 3 +- .../edge_orchestrator/__init__.py | 4 +- .../edge_orchestrator/__main__.py | 2 +- .../edge_orchestrator/api_config.py | 51 ++-- .../application/api_routes.py | 59 +++-- .../application/trigger_routes.py | 28 ++- .../expected_label_rule.py | 3 +- .../max_nb_objects_rule.py | 3 +- .../min_nb_objects_rule.py | 3 +- .../models/business_rules/camera_rule.py | 3 +- .../item_threshold_ratio_rule.py | 9 +- .../item_business_rule/item_threshold_rule.py | 9 +- .../domain/models/business_rules/item_rule.py | 3 +- .../edge_orchestrator/domain/models/camera.py | 23 +- .../domain/models/decision.py | 6 +- .../domain/models/edge_station.py | 6 +- .../edge_orchestrator/domain/models/item.py | 57 +++-- .../domain/models/model_infos.py | 129 +++++----- .../domain/ports/binary_storage.py | 1 - .../domain/ports/inventory.py | 2 +- .../domain/ports/metadata_storage.py | 3 +- .../domain/ports/model_forward.py | 11 +- .../domain/ports/station_config.py | 3 +- .../domain/ports/telemetry_sink.py | 1 - .../domain/use_cases/supervisor.py | 206 +++++++++------ .../domain/use_cases/uploader.py | 27 +- .../edge_orchestrator/environment/default.py | 35 ++- .../edge_orchestrator/environment/docker.py | 46 ++-- .../edge_with_azure_container_storage.py | 41 ++- .../edge_with_filesystem_metadata_storage.py | 47 +++- .../edge_with_mongo_db_metadata_storage.py | 45 +++- .../edge_orchestrator/environment/test.py | 49 ++-- .../environment/upload_with_gcp_bucket.py | 39 ++- .../azure_container_binary_storage.py | 38 ++- .../filesystem_binary_storage.py | 9 +- .../binary_storage/gcp_binary_storage.py | 7 +- .../binary_storage/memory_binary_storage.py | 1 - .../infrastructure/camera/fake_camera.py | 9 +- .../camera/raspberry_pi_camera.py | 7 +- .../infrastructure/camera/usb_camera.py | 13 +- .../inventory/json_inventory.py | 21 +- .../azure_container_metadata_storage.py | 39 ++- .../filesystem_metadata_storage.py | 10 +- .../metadata_storage/gcp_metadata_storage.py | 11 +- .../mongodb_metadata_storage.py | 20 +- .../model_forward/fake_model_forward.py | 52 ++-- .../tf_serving_classification_wrapper.py | 44 ++-- ...ng_detection_and_classification_wrapper.py | 47 ++-- .../tf_serving_detection_wrapper.py | 53 ++-- .../model_forward/tf_serving_wrapper.py | 28 ++- .../station_config/json_station_config.py | 62 +++-- .../azure_iot_hub_telemetry_sink.py | 6 +- .../telemetry_sink/fake_telemetry_sink.py | 1 - .../postgresql_telemetry_sink.py | 61 +++-- edge_orchestrator/setup.cfg | 3 +- edge_orchestrator/setup.py | 87 +++---- edge_orchestrator/tests/conftest.py | 39 +-- .../tests/data/fake_item/inputs.json | 6 +- .../tests/data/item_1/inputs.json | 6 +- .../tests/data/item_2/inputs.json | 4 +- edge_orchestrator/tests/fixtures/binaries.py | 57 +++-- .../tests/fixtures/cameras_metadata.py | 29 ++- .../tests/fixtures/containers.py | 129 ++++++---- 
edge_orchestrator/tests/fixtures/items.py | 70 ++++-- .../tests/fixtures/items_config.py | 238 +++++++++--------- edge_orchestrator/tests/fixtures/metadata.py | 17 +- .../tests/functional_tests/environment.py | 57 +++-- .../functional_tests/steps/common_steps.py | 75 ++++-- .../steps/supervisor_configs_routes.py | 9 +- .../steps/supervisor_inventory_route.py | 2 +- .../steps/supervisor_items_routes.py | 26 +- .../steps/supervisor_trigger_route.py | 121 +++++---- .../supervisor_trigger_route.feature | 4 +- .../integration_tests/application/conftest.py | 2 +- .../application/test_server.py | 45 ++-- .../test_mongodb_metadata_storage.py | 159 +++++++----- .../test_tf_serving_detection_wrapper.py | 46 +++- .../test_tflite_serving_detection_wrapper.py | 41 ++- .../test_json_station_config.py | 83 +++--- .../test_postgresql_telemetry_sink.py | 37 +-- edge_orchestrator/tests/tests_pyramid.py | 125 +++++---- .../tests/tf_serving_container.py | 17 +- .../test_camera_business_rules.py | 192 +++++++++----- .../test_item_business_rules.py | 16 +- .../domain/models/test_edge_station.py | 27 +- .../unit_tests/domain/models/test_item.py | 8 +- .../unit_tests/domain/test_supervisor.py | 165 +++++++----- .../test_filesystem_binary_storage.py | 48 ++-- .../binary_storage/test_gcp_binary_storage.py | 29 +-- .../test_memory_binary_storage.py | 42 +++- .../infrastructure/camera/test_fake_camera.py | 6 +- .../test_filesystem_metadata_storage.py | 106 ++++---- .../test_memory_item_storage.py | 120 +++++---- .../test_fake_model_forwarder.py | 98 +++++--- .../test_tf_serving_classification_wrapper.py | 3 +- ...ng_detection_and_classification_wrapper.py | 71 +++--- .../test_tf_serving_detection_wrapper.py | 65 +++-- 107 files changed, 2540 insertions(+), 1608 deletions(-) diff --git a/.github/workflows/ci_edge_model_serving.yml b/.github/workflows/ci_edge_model_serving.yml index 3dc657da..4aad66e3 100644 --- a/.github/workflows/ci_edge_model_serving.yml +++ b/.github/workflows/ci_edge_model_serving.yml @@ -30,7 +30,7 @@ jobs: run: make install_tflite_dependencies_linux working-directory: ./edge_model_serving - - name: Lint with flake8 + - name: Lint with flake8 and black run: make lint working-directory: ./edge_model_serving diff --git a/.github/workflows/ci_edge_orchestrator.yml b/.github/workflows/ci_edge_orchestrator.yml index fe00ae27..fb582a65 100644 --- a/.github/workflows/ci_edge_orchestrator.yml +++ b/.github/workflows/ci_edge_orchestrator.yml @@ -58,7 +58,7 @@ jobs: run: make install working-directory: ./edge_orchestrator - - name: Lint with flake8 + - name: Lint with flake8 and black run: make lint working-directory: ./edge_orchestrator diff --git a/.gitignore b/.gitignore index 859d4ce0..fcc253ad 100644 --- a/.gitignore +++ b/.gitignore @@ -6,7 +6,6 @@ __pycache__/ *.py[cod] *$py.class -/venv-torch .vscode *.DS_Store @@ -131,7 +130,7 @@ celerybeat.pid .env .venv env/ -venv/ +venv* ENV/ *.bak/ diff --git a/edge_model_serving/Makefile b/edge_model_serving/Makefile index ca9068c4..73650420 100644 --- a/edge_model_serving/Makefile +++ b/edge_model_serving/Makefile @@ -37,6 +37,7 @@ torch_env: .PHONY: lint ## 🐍 Lint Python files to conform to the PEP 8 style guide lint: flake8 --count --show-source --statistics + black . 
--check .PHONY: autopep8 ## 🐍 Automatically formats Python code to conform to the PEP 8 style guide autopep8: diff --git a/edge_model_serving/tflite_serving/convert_pb_to_tflite.py b/edge_model_serving/tflite_serving/convert_pb_to_tflite.py index a60cc834..304736f8 100644 --- a/edge_model_serving/tflite_serving/convert_pb_to_tflite.py +++ b/edge_model_serving/tflite_serving/convert_pb_to_tflite.py @@ -1,19 +1,22 @@ -import tensorflow as tf import os -model_name = 'mask_classification_model' -saved_model_dir = f'model_serving/{model_name}/1' +import tensorflow as tf + +model_name = "mask_classification_model" +saved_model_dir = f"model_serving/{model_name}/1" # Converting a SavedModel to a TensorFlow Lite model. -converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir) # path to the SavedModel directory +converter = tf.lite.TFLiteConverter.from_saved_model( + saved_model_dir +) # path to the SavedModel directory tflite_model = converter.convert() -tflite_model_dir = f'tflite_serving/{model_name}' +tflite_model_dir = f"tflite_serving/{model_name}" if not os.path.exists(tflite_model_dir): os.mkdir(tflite_model_dir) # Save the model. -with open(f'{tflite_model_dir}/model.tflite', 'wb') as f: +with open(f"{tflite_model_dir}/model.tflite", "wb") as f: f.write(tflite_model) # reduce the size of a floating point model by quantizing the weights to float16 @@ -23,5 +26,5 @@ converter.target_spec.supported_types = [tf.float16] tflite_quant_model = converter.convert() - with open(f'{tflite_model_dir}/model_quant.tflite', 'wb') as f: + with open(f"{tflite_model_dir}/model_quant.tflite", "wb") as f: f.write(tflite_quant_model) diff --git a/edge_model_serving/tflite_serving/pyproject.toml b/edge_model_serving/tflite_serving/pyproject.toml index 781107aa..271300ff 100644 --- a/edge_model_serving/tflite_serving/pyproject.toml +++ b/edge_model_serving/tflite_serving/pyproject.toml @@ -10,7 +10,7 @@ dependencies = [ "fastapi==0.80.0", "numpy==1.21.6", "Pillow==8.4.0", - "uvicorn==0.11.7", + "uvicorn==0.22.0", ] requires-python = ">=3.7" @@ -23,6 +23,7 @@ where = ["src/"] [project.optional-dependencies] dev = [ + "black==23.3.0", "flake8==5.0.4", "pytest==7.2.2", "pytest-cov==4.0.0", diff --git a/edge_model_serving/tflite_serving/src/tflite_serving/api_routes.py b/edge_model_serving/tflite_serving/src/tflite_serving/api_routes.py index efc706f6..c757a831 100644 --- a/edge_model_serving/tflite_serving/src/tflite_serving/api_routes.py +++ b/edge_model_serving/tflite_serving/src/tflite_serving/api_routes.py @@ -25,23 +25,23 @@ async def get_models(request: Request): async def get_model_metadata(model_name: str, model_version: str, request: Request): interpreter = request.app.state.model_interpreters[model_name] input_details = interpreter.get_input_details() - return { - 'inputs_shape': input_details[0]['shape'].tolist() - } + return {"inputs_shape": input_details[0]["shape"].tolist()} -@api_router.post('/models/{model_name}/versions/{model_version}:predict') -async def predict(model_name: str, model_version: str, payload: JSONStructure, request: Request): +@api_router.post("/models/{model_name}/versions/{model_version}:predict") +async def predict( + model_name: str, model_version: str, payload: JSONStructure, request: Request +): interpreter = request.app.state.model_interpreters[model_name] input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() input_dtype = input_details[0]["dtype"] - logging.info(f'interpreting with {model_name} for input type 
{input_dtype}') - logging.warning(f'output details: {output_details}') + logging.info(f"interpreting with {model_name} for input type {input_dtype}") + logging.warning(f"output details: {output_details}") try: - input_data = payload[b'inputs'] + input_data = payload[b"inputs"] input_array = np.array(input_data, dtype=input_dtype) interpreter.set_tensor(input_details[0]["index"], input_array) @@ -52,29 +52,30 @@ async def predict(model_name: str, model_version: str, payload: JSONStructure, r if len(output_details) >= 3: boxes = interpreter.get_tensor(output_details[0]["index"]) - classes = interpreter.get_tensor( - output_details[1]["index"]).astype(int) + 1 + classes = interpreter.get_tensor(output_details[1]["index"]).astype(int) + 1 scores = interpreter.get_tensor(output_details[2]["index"]) logging.warning( - f'interpreting with {model_name} for input type {input_dtype}') - logging.warning(f'Boxes of object detected: {boxes[0]}') - logging.warning(f'Classes of object detected: {classes[0]}') - logging.warning(f'Scores of object detected: {scores[0]}') + f"interpreting with {model_name} for input type {input_dtype}" + ) + logging.warning(f"Boxes of object detected: {boxes[0]}") + logging.warning(f"Classes of object detected: {classes[0]}") + logging.warning(f"Scores of object detected: {scores[0]}") prediction = { - 'outputs': { - 'detection_boxes': boxes.tolist(), - 'detection_classes': classes.tolist(), - 'detection_scores': scores.tolist() + "outputs": { + "detection_boxes": boxes.tolist(), + "detection_classes": classes.tolist(), + "detection_scores": scores.tolist(), } } elif len(output_details) == 1: scores = interpreter.get_tensor(output_details[0]["index"]) logging.warning( - f'interpreting with {model_name} for input type {input_dtype}') - logging.warning(f'Scores of classification: {scores[0]}') - prediction = {'outputs': scores.tolist()} + f"interpreting with {model_name} for input type {input_dtype}" + ) + logging.warning(f"Scores of classification: {scores[0]}") + prediction = {"outputs": scores.tolist()} return prediction diff --git a/edge_model_serving/tflite_serving/src/tflite_serving/tflite_interpreter.py b/edge_model_serving/tflite_serving/src/tflite_serving/tflite_interpreter.py index 8d415456..ecefea84 100644 --- a/edge_model_serving/tflite_serving/src/tflite_serving/tflite_interpreter.py +++ b/edge_model_serving/tflite_serving/src/tflite_serving/tflite_interpreter.py @@ -13,8 +13,14 @@ def create_interpreter(model_path: str) -> Interpreter: def create_model_interpreters() -> Dict[str, Interpreter]: model_interpreters = {} - models_path = Path(os.getenv("MODELS_PATH")) if os.getenv("MODELS_PATH") else Path.cwd().parent - tflite_model_path = models_path / 'tflite' - for model_path in tflite_model_path.glob('**/*.tflite'): - model_interpreters[model_path.parent.name] = create_interpreter(model_path.as_posix()) + models_path = ( + Path(os.getenv("MODELS_PATH")) + if os.getenv("MODELS_PATH") + else Path.cwd().parent + ) + tflite_model_path = models_path / "tflite" + for model_path in tflite_model_path.glob("**/*.tflite"): + model_interpreters[model_path.parent.name] = create_interpreter( + model_path.as_posix() + ) return model_interpreters diff --git a/edge_model_serving/tflite_serving/tests/test_tflite_serving.py b/edge_model_serving/tflite_serving/tests/test_tflite_serving.py index f0560932..2108f199 100644 --- a/edge_model_serving/tflite_serving/tests/test_tflite_serving.py +++ b/edge_model_serving/tflite_serving/tests/test_tflite_serving.py @@ -6,11 +6,12 @@ class 
TestTfliteServing: - base_url = 'http://localhost:8501/v1' - image_test_path = 'tests/data/mask_people_dataset/person_without_mask.jpg' - binary_test = open(image_test_path, 'rb') + base_url = "http://localhost:8501/v1" + image_test_path = "tests/data/mask_people_dataset/person_without_mask.jpg" + binary_test = open(image_test_path, "rb") os.environ["MODELS_PATH"] = (Path.cwd().parent / "models").as_posix() from tflite_serving.tflite_server import app + test_client = TestClient(app) def test_get_home_should_return_link_to_docs(self): @@ -26,11 +27,13 @@ def test_get_home_should_return_link_to_docs(self): def test_get_models_should_return_4_models(self): # Given - model_url = f'{self.base_url}/models' - expected_models = ["cellphone_connection_control", - "marker_quality_control", - "mobilenet_ssd_v2_coco", - "mobilenet_ssd_v2_face"] + model_url = f"{self.base_url}/models" + expected_models = [ + "cellphone_connection_control", + "marker_quality_control", + "mobilenet_ssd_v2_coco", + "mobilenet_ssd_v2_face", + ] # When actual_response = self.test_client.get(model_url) @@ -41,7 +44,9 @@ def test_get_models_should_return_4_models(self): def test_get_model_resolution_should_return_inputs_shape(self): # Given - model_url = f'{self.base_url}/models/cellphone_connection_control/versions/1/resolution' + model_url = ( + f"{self.base_url}/models/cellphone_connection_control/versions/1/resolution" + ) expected_resolution = {"inputs_shape": [1, 224, 224, 3]} # When @@ -53,12 +58,12 @@ def test_get_model_resolution_should_return_inputs_shape(self): def test_serving_return_object_detection_prediction(self): # Given - model_url = f'{self.base_url}/models/mobilenet_ssd_v2_coco/versions/1:predict' + model_url = f"{self.base_url}/models/mobilenet_ssd_v2_coco/versions/1:predict" image_resolution = (300, 300, 3) fake_img_array = np.zeros(image_resolution) fake_img_preprocessed = np.expand_dims(fake_img_array, axis=0).astype(np.uint8) - payload = {'inputs': fake_img_preprocessed.tolist()} + payload = {"inputs": fake_img_preprocessed.tolist()} # When actual_response = self.test_client.post(model_url, json=payload) @@ -75,14 +80,14 @@ def test_serving_return_object_detection_prediction(self): def test_serving_return_classification_prediction(self): # Given - model_url = f'{self.base_url}/models/marker_quality_control/versions/1:predict' + model_url = f"{self.base_url}/models/marker_quality_control/versions/1:predict" image_resolution = (224, 224, 3) fake_img_array = np.zeros(image_resolution) fake_img_preprocessed = np.expand_dims(fake_img_array, axis=0).astype(np.uint8) - payload = {'inputs': fake_img_preprocessed.tolist()} + payload = {"inputs": fake_img_preprocessed.tolist()} - expected_prediction = {'outputs': [[0.021249305456876755, 0.9787507057189941]]} + expected_prediction = {"outputs": [[0.021249305456876755, 0.9787507057189941]]} # When actual_response = self.test_client.post(model_url, json=payload) diff --git a/edge_model_serving/torch_serving/model.py b/edge_model_serving/torch_serving/model.py index 91c9742c..249e1d26 100644 --- a/edge_model_serving/torch_serving/model.py +++ b/edge_model_serving/torch_serving/model.py @@ -1,8 +1,8 @@ -from torchvision.models.detection.faster_rcnn import FasterRCNN from torchvision.models.detection.backbone_utils import resnet_fpn_backbone +from torchvision.models.detection.faster_rcnn import FasterRCNN class FRCNNObjectDetector(FasterRCNN): def __init__(self, num_classes=91, **kwargs): - backbone = resnet_fpn_backbone('resnet50', True) + backbone = 
resnet_fpn_backbone("resnet50", True) super(FRCNNObjectDetector, self).__init__(backbone, num_classes, **kwargs) diff --git a/edge_orchestrator/Makefile b/edge_orchestrator/Makefile index 5d67e677..9a29e5f6 100644 --- a/edge_orchestrator/Makefile +++ b/edge_orchestrator/Makefile @@ -17,7 +17,8 @@ help: .PHONY: lint ## 🐍 Lint Python files to conform to the PEP 8 style guide lint: - flake8 --count --show-source --statistics --exclude venv* + flake8 --count --show-source --statistics + black . --check .PHONY: autopep8 ## 🐍 Automatically formats Python code to conform to the PEP 8 style guide autopep8: diff --git a/edge_orchestrator/edge_orchestrator/__init__.py b/edge_orchestrator/edge_orchestrator/__init__.py index 70e97fc6..bd90e346 100644 --- a/edge_orchestrator/edge_orchestrator/__init__.py +++ b/edge_orchestrator/edge_orchestrator/__init__.py @@ -8,8 +8,8 @@ disable_existing_loggers=False, defaults={ "edge_orchestrator_level": "INFO", - "edge_orchestrator_formatter": "classic" - } + "edge_orchestrator_formatter": "classic", + }, ) logger = logging.getLogger("edge_orchestrator") diff --git a/edge_orchestrator/edge_orchestrator/__main__.py b/edge_orchestrator/edge_orchestrator/__main__.py index 1f38c1d5..eeb96acd 100644 --- a/edge_orchestrator/edge_orchestrator/__main__.py +++ b/edge_orchestrator/edge_orchestrator/__main__.py @@ -2,6 +2,6 @@ from edge_orchestrator.application.server import server -if __name__ == '__main__': +if __name__ == "__main__": orchestrator_app = server() uvicorn.run(orchestrator_app, host="0.0.0.0", port=8000, log_level="info") diff --git a/edge_orchestrator/edge_orchestrator/api_config.py b/edge_orchestrator/edge_orchestrator/api_config.py index ffc77b6d..7aa787ef 100644 --- a/edge_orchestrator/edge_orchestrator/api_config.py +++ b/edge_orchestrator/edge_orchestrator/api_config.py @@ -4,30 +4,51 @@ def load_config(): - configuration = os.environ.get('API_CONFIG', 'default') - logger.info(f'App running with configuration: {configuration}') - - available_configurations = ['test', 'docker', 'default', 'edge', 'edge-lite', 'upload-gcp'] + configuration = os.environ.get("API_CONFIG", "default") + logger.info(f"App running with configuration: {configuration}") + + available_configurations = [ + "test", + "docker", + "default", + "edge", + "edge-lite", + "upload-gcp", + ] if configuration not in available_configurations: - raise ValueError(f"Unknown configuration '{configuration}'. " - f'Valid configurations are {available_configurations}.') - elif configuration == 'test': + raise ValueError( + f"Unknown configuration '{configuration}'. " + f"Valid configurations are {available_configurations}." 
+ ) + elif configuration == "test": from edge_orchestrator.environment.test import Test + configuration_class = Test - elif configuration == 'docker': + elif configuration == "docker": from edge_orchestrator.environment.docker import Docker + configuration_class = Docker - elif configuration == 'default': + elif configuration == "default": from edge_orchestrator.environment.default import Default + configuration_class = Default - elif configuration == 'edge': - from edge_orchestrator.environment.edge_with_mongo_db_metadata_storage import EdgeWithMongoDbMetadataStorage + elif configuration == "edge": + from edge_orchestrator.environment.edge_with_mongo_db_metadata_storage import ( + EdgeWithMongoDbMetadataStorage, + ) + configuration_class = EdgeWithMongoDbMetadataStorage - elif configuration == 'edge-lite': - from edge_orchestrator.environment.edge_with_azure_container_storage import EdgeWithAzureContainerStorage + elif configuration == "edge-lite": + from edge_orchestrator.environment.edge_with_azure_container_storage import ( + EdgeWithAzureContainerStorage, + ) + configuration_class = EdgeWithAzureContainerStorage - elif configuration == 'upload-gcp': - from edge_orchestrator.environment.upload_with_gcp_bucket import UploadWithGCPBucket + elif configuration == "upload-gcp": + from edge_orchestrator.environment.upload_with_gcp_bucket import ( + UploadWithGCPBucket, + ) + configuration_class = UploadWithGCPBucket return configuration_class() diff --git a/edge_orchestrator/edge_orchestrator/application/api_routes.py b/edge_orchestrator/edge_orchestrator/application/api_routes.py index d7aca593..842aba4d 100644 --- a/edge_orchestrator/edge_orchestrator/application/api_routes.py +++ b/edge_orchestrator/edge_orchestrator/application/api_routes.py @@ -3,7 +3,12 @@ from fastapi import APIRouter, Depends, Response from fastapi.responses import JSONResponse -from edge_orchestrator.api_config import get_metadata_storage, get_binary_storage, get_station_config, get_inventory +from edge_orchestrator.api_config import ( + get_binary_storage, + get_inventory, + get_metadata_storage, + get_station_config, +) from edge_orchestrator.application.dto.station_config_dto import StationConfigDto from edge_orchestrator.domain.ports.binary_storage import BinaryStorage from edge_orchestrator.domain.ports.inventory import Inventory @@ -13,57 +18,71 @@ api_router = APIRouter() -@api_router.get('/') +@api_router.get("/") def home(): - return 'the edge orchestrator is up and running' + return "the edge orchestrator is up and running" -@api_router.get('/items') +@api_router.get("/items") def read_all(metadata_storage: MetadataStorage = Depends(get_metadata_storage)): return metadata_storage.get_all_items_metadata() -@api_router.get('/items/{item_id}') -def get_item(item_id: str, metadata_storage: MetadataStorage = Depends(get_metadata_storage)): +@api_router.get("/items/{item_id}") +def get_item( + item_id: str, metadata_storage: MetadataStorage = Depends(get_metadata_storage) +): return metadata_storage.get_item_metadata(item_id) -@api_router.get('/items/{item_id}/binaries/{camera_id}') -def get_item_binary(item_id: str, camera_id: str, binary_storage: BinaryStorage = Depends(get_binary_storage)): +@api_router.get("/items/{item_id}/binaries/{camera_id}") +def get_item_binary( + item_id: str, + camera_id: str, + binary_storage: BinaryStorage = Depends(get_binary_storage), +): content_binary = binary_storage.get_item_binary(item_id, camera_id) - return Response(content=content_binary, status_code=HTTPStatus.OK, 
media_type='image/jpeg') + return Response( + content=content_binary, status_code=HTTPStatus.OK, media_type="image/jpeg" + ) -@api_router.get('/items/{item_id}/binaries') -def get_item_binaries(item_id: str, binary_storage: BinaryStorage = Depends(get_binary_storage)): +@api_router.get("/items/{item_id}/binaries") +def get_item_binaries( + item_id: str, binary_storage: BinaryStorage = Depends(get_binary_storage) +): return binary_storage.get_item_binaries(item_id) -@api_router.get('/items/{item_id}/state') -def get_item_state(item_id: str, metadata_storage: MetadataStorage = Depends(get_metadata_storage)): +@api_router.get("/items/{item_id}/state") +def get_item_state( + item_id: str, metadata_storage: MetadataStorage = Depends(get_metadata_storage) +): return metadata_storage.get_item_state(item_id) -@api_router.get('/inventory') +@api_router.get("/inventory") def get_inventory(inventory: Inventory = Depends(get_inventory)): return inventory -@api_router.get('/configs') +@api_router.get("/configs") def get_all_configs(station_config: StationConfig = Depends(get_station_config)): station_config.load() return station_config.all_configs -@api_router.get('/configs/active') +@api_router.get("/configs/active") def get_active_config(station_config: StationConfig = Depends(get_station_config)): return station_config.active_config -@api_router.post('/configs/active') -def set_station_config(station_config_dto: StationConfigDto, - station_config: StationConfig = Depends(get_station_config)): - if station_config_dto.config_name == '': +@api_router.post("/configs/active") +def set_station_config( + station_config_dto: StationConfigDto, + station_config: StationConfig = Depends(get_station_config), +): + if station_config_dto.config_name == "": return JSONResponse( status_code=403, content={"message": "No configuration selected!"}, diff --git a/edge_orchestrator/edge_orchestrator/application/trigger_routes.py b/edge_orchestrator/edge_orchestrator/application/trigger_routes.py index 4c4f2341..e59fc68d 100644 --- a/edge_orchestrator/edge_orchestrator/application/trigger_routes.py +++ b/edge_orchestrator/edge_orchestrator/application/trigger_routes.py @@ -1,5 +1,6 @@ -from fastapi import APIRouter, BackgroundTasks, UploadFile, File +from fastapi import APIRouter, BackgroundTasks, File, UploadFile from fastapi.responses import JSONResponse + from edge_orchestrator.domain.models.item import Item from edge_orchestrator.domain.use_cases.supervisor import Supervisor from edge_orchestrator.domain.use_cases.uploader import Uploader @@ -10,14 +11,18 @@ uploader = Uploader() -@trigger_router.post('/trigger') -async def trigger_job(image: UploadFile = None, background_tasks: BackgroundTasks = None): +@trigger_router.post("/trigger") +async def trigger_job( + image: UploadFile = None, background_tasks: BackgroundTasks = None +): item = Item.from_nothing() if supervisor.station_config.active_config is None: return JSONResponse( status_code=403, - content={"message": "No active configuration selected! " - "Set the active station configuration before triggering the inspection."}, + content={ + "message": "No active configuration selected! " + "Set the active station configuration before triggering the inspection." 
+ }, ) else: if image: @@ -25,14 +30,15 @@ async def trigger_job(image: UploadFile = None, background_tasks: BackgroundTask camera_id = supervisor.station_config.get_cameras()[0] item.binaries = {camera_id: contents} background_tasks.add_task(supervisor.inspect, item) - return {'item_id': item.id} + return {"item_id": item.id} -@trigger_router.post('/upload') -async def upload_job(image: UploadFile = File(...), background_tasks: - BackgroundTasks = None): +@trigger_router.post("/upload") +async def upload_job( + image: UploadFile = File(...), background_tasks: BackgroundTasks = None +): item = Item.from_nothing() contents = image.file.read() - item.binaries = {'0': contents} + item.binaries = {"0": contents} background_tasks.add_task(uploader.upload, item) - return {'item_id': item.id} + return {"item_id": item.id} diff --git a/edge_orchestrator/edge_orchestrator/domain/models/business_rules/camera_business_rules/expected_label_rule.py b/edge_orchestrator/edge_orchestrator/domain/models/business_rules/camera_business_rules/expected_label_rule.py index df62602a..932770d5 100644 --- a/edge_orchestrator/edge_orchestrator/domain/models/business_rules/camera_business_rules/expected_label_rule.py +++ b/edge_orchestrator/edge_orchestrator/domain/models/business_rules/camera_business_rules/expected_label_rule.py @@ -1,7 +1,7 @@ from typing import Dict, Union -from edge_orchestrator.domain.models.decision import Decision from edge_orchestrator.domain.models.business_rules.camera_rule import CameraRule +from edge_orchestrator.domain.models.decision import Decision class ExpectedLabelRule(CameraRule): @@ -9,7 +9,6 @@ def __init__(self, expected_label: str): self.expected_label = expected_label def get_camera_decision(self, inference: Dict[str, Union[str, Dict]]) -> Decision: - for inf in inference: if inf in self.expected_label: camera_decision = Decision.OK diff --git a/edge_orchestrator/edge_orchestrator/domain/models/business_rules/camera_business_rules/max_nb_objects_rule.py b/edge_orchestrator/edge_orchestrator/domain/models/business_rules/camera_business_rules/max_nb_objects_rule.py index b9184ecc..10784188 100644 --- a/edge_orchestrator/edge_orchestrator/domain/models/business_rules/camera_business_rules/max_nb_objects_rule.py +++ b/edge_orchestrator/edge_orchestrator/domain/models/business_rules/camera_business_rules/max_nb_objects_rule.py @@ -1,7 +1,7 @@ from typing import Dict, Union -from edge_orchestrator.domain.models.decision import Decision from edge_orchestrator.domain.models.business_rules.camera_rule import CameraRule +from edge_orchestrator.domain.models.decision import Decision class MaxNbObjectsRule(CameraRule): @@ -10,7 +10,6 @@ def __init__(self, class_to_detect: str, max_threshold: int): self.max_threshold = max_threshold def get_camera_decision(self, inference: Dict[str, Union[str, Dict]]) -> Decision: - objects_of_interest = [obj for obj in inference if obj in self.class_to_detect] if len(objects_of_interest) < self.max_threshold: diff --git a/edge_orchestrator/edge_orchestrator/domain/models/business_rules/camera_business_rules/min_nb_objects_rule.py b/edge_orchestrator/edge_orchestrator/domain/models/business_rules/camera_business_rules/min_nb_objects_rule.py index 51dc6dd0..5c77b86c 100644 --- a/edge_orchestrator/edge_orchestrator/domain/models/business_rules/camera_business_rules/min_nb_objects_rule.py +++ b/edge_orchestrator/edge_orchestrator/domain/models/business_rules/camera_business_rules/min_nb_objects_rule.py @@ -1,7 +1,7 @@ from typing import Dict, Union -from 
edge_orchestrator.domain.models.decision import Decision from edge_orchestrator.domain.models.business_rules.camera_rule import CameraRule +from edge_orchestrator.domain.models.decision import Decision class MinNbObjectsRule(CameraRule): @@ -10,7 +10,6 @@ def __init__(self, class_to_detect: str, min_threshold: int): self.min_threshold = min_threshold def get_camera_decision(self, inference: Dict[str, Union[str, Dict]]) -> Decision: - objects_of_interest = [obj for obj in inference if obj in self.class_to_detect] if len(objects_of_interest) < self.min_threshold: diff --git a/edge_orchestrator/edge_orchestrator/domain/models/business_rules/camera_rule.py b/edge_orchestrator/edge_orchestrator/domain/models/business_rules/camera_rule.py index 49079113..98c4e5b5 100644 --- a/edge_orchestrator/edge_orchestrator/domain/models/business_rules/camera_rule.py +++ b/edge_orchestrator/edge_orchestrator/domain/models/business_rules/camera_rule.py @@ -1,11 +1,10 @@ +from abc import ABC, abstractmethod from typing import Dict, Union -from abc import ABC, abstractmethod from edge_orchestrator.domain.models.decision import Decision class CameraRule(ABC): - @abstractmethod def get_camera_decision(self, inference: Dict[str, Union[str, Dict]]) -> Decision: pass diff --git a/edge_orchestrator/edge_orchestrator/domain/models/business_rules/item_business_rule/item_threshold_ratio_rule.py b/edge_orchestrator/edge_orchestrator/domain/models/business_rules/item_business_rule/item_threshold_ratio_rule.py index e8c48724..dd90e11c 100644 --- a/edge_orchestrator/edge_orchestrator/domain/models/business_rules/item_business_rule/item_threshold_ratio_rule.py +++ b/edge_orchestrator/edge_orchestrator/domain/models/business_rules/item_business_rule/item_threshold_ratio_rule.py @@ -1,7 +1,7 @@ from typing import Dict -from edge_orchestrator.domain.models.decision import Decision from edge_orchestrator.domain.models.business_rules.item_rule import ItemRule +from edge_orchestrator.domain.models.decision import Decision class ThresholdRatioRule(ItemRule): @@ -9,8 +9,11 @@ def __init__(self, min_threshold: float): self.min_threshold = min_threshold def get_item_decision(self, cameras_decisions: Dict[str, str]) -> Decision: - - ok_decisions = [decision for decision in cameras_decisions.values() if decision == Decision.OK.value] + ok_decisions = [ + decision + for decision in cameras_decisions.values() + if decision == Decision.OK.value + ] ratio_ok = len(ok_decisions) / len(cameras_decisions) diff --git a/edge_orchestrator/edge_orchestrator/domain/models/business_rules/item_business_rule/item_threshold_rule.py b/edge_orchestrator/edge_orchestrator/domain/models/business_rules/item_business_rule/item_threshold_rule.py index 15f2aa9a..26715633 100644 --- a/edge_orchestrator/edge_orchestrator/domain/models/business_rules/item_business_rule/item_threshold_rule.py +++ b/edge_orchestrator/edge_orchestrator/domain/models/business_rules/item_business_rule/item_threshold_rule.py @@ -1,7 +1,7 @@ from typing import Dict -from edge_orchestrator.domain.models.decision import Decision from edge_orchestrator.domain.models.business_rules.item_rule import ItemRule +from edge_orchestrator.domain.models.decision import Decision class ThresholdRule(ItemRule): @@ -9,8 +9,11 @@ def __init__(self, threshold: int): self.threshold = threshold def get_item_decision(self, cameras_decisions: Dict[str, str]) -> Decision: - - ko_decisions = [decision for decision in cameras_decisions.values() if decision == Decision.KO.value] + ko_decisions = [ + decision + 
for decision in cameras_decisions.values() + if decision == Decision.KO.value + ] if len(ko_decisions) >= self.threshold: return Decision.KO diff --git a/edge_orchestrator/edge_orchestrator/domain/models/business_rules/item_rule.py b/edge_orchestrator/edge_orchestrator/domain/models/business_rules/item_rule.py index 693355ae..f8836439 100644 --- a/edge_orchestrator/edge_orchestrator/domain/models/business_rules/item_rule.py +++ b/edge_orchestrator/edge_orchestrator/domain/models/business_rules/item_rule.py @@ -1,11 +1,10 @@ +from abc import ABC, abstractmethod from typing import Dict -from abc import ABC, abstractmethod from edge_orchestrator.domain.models.decision import Decision class ItemRule(ABC): - @abstractmethod def get_item_decision(self, cameras_decisions: Dict[str, str]) -> Decision: pass diff --git a/edge_orchestrator/edge_orchestrator/domain/models/camera.py b/edge_orchestrator/edge_orchestrator/domain/models/camera.py index 69193dee..e3893f4f 100644 --- a/edge_orchestrator/edge_orchestrator/domain/models/camera.py +++ b/edge_orchestrator/edge_orchestrator/domain/models/camera.py @@ -1,18 +1,24 @@ from abc import abstractmethod -from typing import Dict, Union, Type - -from edge_orchestrator.domain.models.business_rules.camera_business_rules.expected_label_rule import ExpectedLabelRule -from edge_orchestrator.domain.models.business_rules.camera_business_rules.min_nb_objects_rule import MinNbObjectsRule -from edge_orchestrator.domain.models.business_rules.camera_business_rules.max_nb_objects_rule import MaxNbObjectsRule +from typing import Dict, Type, Union + +from edge_orchestrator.domain.models.business_rules.camera_business_rules.expected_label_rule import ( + ExpectedLabelRule, +) +from edge_orchestrator.domain.models.business_rules.camera_business_rules.max_nb_objects_rule import ( + MaxNbObjectsRule, +) +from edge_orchestrator.domain.models.business_rules.camera_business_rules.min_nb_objects_rule import ( + MinNbObjectsRule, +) from edge_orchestrator.domain.models.business_rules.camera_rule import CameraRule def get_camera_rule(rule_name) -> Type[CameraRule]: - if rule_name == 'expected_label_rule': + if rule_name == "expected_label_rule": return ExpectedLabelRule - elif rule_name == 'min_nb_objects_rule': + elif rule_name == "min_nb_objects_rule": return MinNbObjectsRule - elif rule_name == 'max_nb_objects_rule': + elif rule_name == "max_nb_objects_rule": return MaxNbObjectsRule else: raise NotImplementedError @@ -25,7 +31,6 @@ def get_last_inference_by_camera(inference: Dict) -> Dict: class Camera: - @abstractmethod def __init__(self, id: str, settings: Dict[str, Union[str, Dict]]): self.id = id diff --git a/edge_orchestrator/edge_orchestrator/domain/models/decision.py b/edge_orchestrator/edge_orchestrator/domain/models/decision.py index f799bd3a..e685cdd0 100644 --- a/edge_orchestrator/edge_orchestrator/domain/models/decision.py +++ b/edge_orchestrator/edge_orchestrator/domain/models/decision.py @@ -2,6 +2,6 @@ class Decision(Enum): - OK = 'OK' - KO = 'KO' - NO_DECISION = 'NO_DECISION' + OK = "OK" + KO = "KO" + NO_DECISION = "NO_DECISION" diff --git a/edge_orchestrator/edge_orchestrator/domain/models/edge_station.py b/edge_orchestrator/edge_orchestrator/domain/models/edge_station.py index 3b205846..9e5f0612 100644 --- a/edge_orchestrator/edge_orchestrator/domain/models/edge_station.py +++ b/edge_orchestrator/edge_orchestrator/domain/models/edge_station.py @@ -1,11 +1,10 @@ from pathlib import Path -from typing import Tuple, Dict +from typing import Dict, Tuple from 
edge_orchestrator.domain.ports.station_config import StationConfig class EdgeStation: - def __init__(self, station_config: StationConfig, storage: Path): self.station_config = station_config self.storage = storage @@ -23,6 +22,7 @@ def capture(self) -> Tuple[Dict, Dict]: for camera in self.cameras: binaries[camera.id] = camera.capture() cameras_metadata = { - camera.id: self.station_config.get_camera_settings(camera.id) for camera in self.cameras + camera.id: self.station_config.get_camera_settings(camera.id) + for camera in self.cameras } return cameras_metadata, binaries diff --git a/edge_orchestrator/edge_orchestrator/domain/models/item.py b/edge_orchestrator/edge_orchestrator/domain/models/item.py index 99e9269c..1f1c5e1e 100644 --- a/edge_orchestrator/edge_orchestrator/domain/models/item.py +++ b/edge_orchestrator/edge_orchestrator/domain/models/item.py @@ -2,9 +2,12 @@ import uuid from typing import Dict, Type -from edge_orchestrator.domain.models.business_rules.item_business_rule.item_threshold_ratio_rule import \ - ThresholdRatioRule -from edge_orchestrator.domain.models.business_rules.item_business_rule.item_threshold_rule import ThresholdRule +from edge_orchestrator.domain.models.business_rules.item_business_rule.item_threshold_ratio_rule import ( + ThresholdRatioRule, +) +from edge_orchestrator.domain.models.business_rules.item_business_rule.item_threshold_rule import ( + ThresholdRule, +) from edge_orchestrator.domain.models.business_rules.item_rule import ItemRule @@ -13,18 +16,23 @@ def generate_id(): def get_item_rule(item_rule_name) -> Type[ItemRule]: - if item_rule_name == 'threshold_ratio_rule': + if item_rule_name == "threshold_ratio_rule": return ThresholdRatioRule - elif item_rule_name == 'min_threshold_KO_rule': + elif item_rule_name == "min_threshold_KO_rule": return ThresholdRule else: raise NotImplementedError class Item: - def __init__(self, serial_number: str, category: str, cameras_metadata: Dict[str, Dict], - binaries: Dict[str, bytes]): + def __init__( + self, + serial_number: str, + category: str, + cameras_metadata: Dict[str, Dict], + binaries: Dict[str, bytes], + ): self.id = generate_id() self.received_time = dt.datetime.now() self.serial_number = serial_number @@ -41,26 +49,33 @@ def __init__(self, serial_number: str, category: str, cameras_metadata: Dict[str @classmethod def from_nothing(cls): - return Item('serial_number', 'category', {}, {}) + return Item("serial_number", "category", {}, {}) def get_metadata(self, with_id: bool = True) -> Dict: metadata = { - 'serial_number': self.serial_number, - 'category': self.category, - 'station_config': self.station_config, - 'cameras': self.cameras_metadata, - 'received_time': self.received_time.strftime('%Y-%m-%d %H:%M:%S'), - 'inferences': self.inferences, - 'decision': self.decision, - 'state': self.state, - 'error': self.error_message + "serial_number": self.serial_number, + "category": self.category, + "station_config": self.station_config, + "cameras": self.cameras_metadata, + "received_time": self.received_time.strftime("%Y-%m-%d %H:%M:%S"), + "inferences": self.inferences, + "decision": self.decision, + "state": self.state, + "error": self.error_message, } if with_id: metadata["id"] = self.id return metadata def __eq__(self, other) -> bool: - return (self.id == other.id and self.received_time == other.received_time and - self.serial_number == other.serial_number and self.category == other.category and - self.cameras_metadata == other.cameras_metadata and self.binaries == other.binaries and - 
self.inferences == other.inferences and self.decision == other.decision and self.state == other.state) + return ( + self.id == other.id + and self.received_time == other.received_time + and self.serial_number == other.serial_number + and self.category == other.category + and self.cameras_metadata == other.cameras_metadata + and self.binaries == other.binaries + and self.inferences == other.inferences + and self.decision == other.decision + and self.state == other.state + ) diff --git a/edge_orchestrator/edge_orchestrator/domain/models/model_infos.py b/edge_orchestrator/edge_orchestrator/domain/models/model_infos.py index 43097418..8ebff8ae 100644 --- a/edge_orchestrator/edge_orchestrator/domain/models/model_infos.py +++ b/edge_orchestrator/edge_orchestrator/domain/models/model_infos.py @@ -1,30 +1,30 @@ -from typing import List, Dict, Optional -from enum import Enum import os +from enum import Enum from pathlib import Path +from typing import Dict, List, Optional from edge_orchestrator.domain.ports.inventory import Inventory class ModelInfos: - - def __init__(self, - id: str, - name: str, - category: str, - version: str, - camera_id: str, - depends_on: List[str] = [], - image_resolution: Optional[List[int]] = None, - class_names: Optional[List[str]] = None, - boxes_coordinates: Optional[str] = None, - objectness_scores: Optional[str] = None, - number_of_boxes: Optional[str] = None, - detection_classes: Optional[str] = None, - class_to_detect: Optional[List[str]] = None, - class_names_path: Optional[str] = None, - objectness_threshold: Optional[float] = None - ): + def __init__( + self, + id: str, + name: str, + category: str, + version: str, + camera_id: str, + depends_on: List[str] = [], + image_resolution: Optional[List[int]] = None, + class_names: Optional[List[str]] = None, + boxes_coordinates: Optional[str] = None, + objectness_scores: Optional[str] = None, + number_of_boxes: Optional[str] = None, + detection_classes: Optional[str] = None, + class_to_detect: Optional[List[str]] = None, + class_names_path: Optional[str] = None, + objectness_threshold: Optional[float] = None, + ): self.id = id self.name = name self.category = category @@ -42,20 +42,35 @@ def __init__(self, self.objectness_threshold = objectness_threshold @classmethod - def from_model_graph_node(cls, camera_id: str, model_id: str, model: Dict, inventory: Inventory, data_folder: Path): - model_type = model['metadata'] - class_names = inventory.models[model_type].get('class_names') - class_to_detect = model.get('class_to_detect') - class_names_path = inventory.models[model_type].get('class_names_path') - objectness_threshold = inventory.models[model_type].get('objectness_threshold') + def from_model_graph_node( + cls, + camera_id: str, + model_id: str, + model: Dict, + inventory: Inventory, + data_folder: Path, + ): + model_type = model["metadata"] + class_names = inventory.models[model_type].get("class_names") + class_to_detect = model.get("class_to_detect") + class_names_path = inventory.models[model_type].get("class_names_path") + objectness_threshold = inventory.models[model_type].get("objectness_threshold") - if inventory.models[model_type].get('class_names_path') is not None: + if inventory.models[model_type].get("class_names_path") is not None: class_names_path = os.path.join(data_folder, class_names_path) try: - boxes_coordinates = inventory.models[model_type].get('output').get('boxes_coordinates') - objectness_scores = inventory.models[model_type].get('output').get('objectness_scores') - number_of_boxes = 
inventory.models[model_type].get('output').get('number_of_boxes') - detection_classes = inventory.models[model_type].get('output').get('detection_classes') + boxes_coordinates = ( + inventory.models[model_type].get("output").get("boxes_coordinates") + ) + objectness_scores = ( + inventory.models[model_type].get("output").get("objectness_scores") + ) + number_of_boxes = ( + inventory.models[model_type].get("output").get("number_of_boxes") + ) + detection_classes = ( + inventory.models[model_type].get("output").get("detection_classes") + ) except AttributeError: boxes_coordinates = None objectness_scores = None @@ -65,9 +80,9 @@ def from_model_graph_node(cls, camera_id: str, model_id: str, model: Dict, inven return ModelInfos( id=model_id, name=model_type, - category=inventory.models[model_type]['category'], - version=str(inventory.models[model_type]['version']), - depends_on=model['depends_on'], + category=inventory.models[model_type]["category"], + version=str(inventory.models[model_type]["version"]), + depends_on=model["depends_on"], camera_id=camera_id, class_names=class_names, class_names_path=class_names_path, @@ -75,28 +90,28 @@ def from_model_graph_node(cls, camera_id: str, model_id: str, model: Dict, inven objectness_scores=objectness_scores, number_of_boxes=number_of_boxes, detection_classes=detection_classes, - image_resolution=inventory.models[model_type].get('image_resolution'), + image_resolution=inventory.models[model_type].get("image_resolution"), class_to_detect=class_to_detect, - objectness_threshold=objectness_threshold - + objectness_threshold=objectness_threshold, ) def __eq__(self, other) -> bool: - return (other.name == self.name and - other.category == self.category and - other.version == self.version and - other.depends_on == self.depends_on and - other.camera_id == self.camera_id and - other.class_names == self.class_names and - other.class_names_path == self.class_names_path and - other.boxes_coordinates == self.boxes_coordinates and - other.objectness_scores == self.objectness_scores and - other.number_of_boxes == self.number_of_boxes and - other.detection_classes == self.detection_classes and - other.image_resolution == self.image_resolution and - other.class_to_detect == self.class_to_detect and - other.objectness_threshold == self.objectness_threshold - ) + return ( + other.name == self.name + and other.category == self.category + and other.version == self.version + and other.depends_on == self.depends_on + and other.camera_id == self.camera_id + and other.class_names == self.class_names + and other.class_names_path == self.class_names_path + and other.boxes_coordinates == self.boxes_coordinates + and other.objectness_scores == self.objectness_scores + and other.number_of_boxes == self.number_of_boxes + and other.detection_classes == self.detection_classes + and other.image_resolution == self.image_resolution + and other.class_to_detect == self.class_to_detect + and other.objectness_threshold == self.objectness_threshold + ) def __str__(self): return str(self.__dict__) @@ -106,7 +121,9 @@ def __repr__(self): class ModelTypes(Enum): - CLASSIFICATION = 'classification' - OBJECT_DETECTION = 'object_detection' - OBJECT_DETECTION_WITH_CLASSIFICATION = 'object_detection_with_classification' - OBJECT_DETECTION_WITH_CLASSIFICATION_TORCH = 'object_detection_with_classification_torch' + CLASSIFICATION = "classification" + OBJECT_DETECTION = "object_detection" + OBJECT_DETECTION_WITH_CLASSIFICATION = "object_detection_with_classification" + 
OBJECT_DETECTION_WITH_CLASSIFICATION_TORCH = ( + "object_detection_with_classification_torch" + ) diff --git a/edge_orchestrator/edge_orchestrator/domain/ports/binary_storage.py b/edge_orchestrator/edge_orchestrator/domain/ports/binary_storage.py index a115b1f3..af68c37a 100644 --- a/edge_orchestrator/edge_orchestrator/domain/ports/binary_storage.py +++ b/edge_orchestrator/edge_orchestrator/domain/ports/binary_storage.py @@ -5,7 +5,6 @@ class BinaryStorage: - @abstractmethod def save_item_binaries(self, item: Item): pass diff --git a/edge_orchestrator/edge_orchestrator/domain/ports/inventory.py b/edge_orchestrator/edge_orchestrator/domain/ports/inventory.py index 84e15e3f..c5d0959a 100644 --- a/edge_orchestrator/edge_orchestrator/domain/ports/inventory.py +++ b/edge_orchestrator/edge_orchestrator/domain/ports/inventory.py @@ -1,4 +1,4 @@ -from typing import List, Dict, Union +from typing import Dict, List, Union class Inventory: diff --git a/edge_orchestrator/edge_orchestrator/domain/ports/metadata_storage.py b/edge_orchestrator/edge_orchestrator/domain/ports/metadata_storage.py index 936afd22..f3ce0b4c 100644 --- a/edge_orchestrator/edge_orchestrator/domain/ports/metadata_storage.py +++ b/edge_orchestrator/edge_orchestrator/domain/ports/metadata_storage.py @@ -1,11 +1,10 @@ from abc import abstractmethod -from typing import List, Dict +from typing import Dict, List from edge_orchestrator.domain.models.item import Item class MetadataStorage: - @abstractmethod def save_item_metadata(self, item: Item): pass diff --git a/edge_orchestrator/edge_orchestrator/domain/ports/model_forward.py b/edge_orchestrator/edge_orchestrator/domain/ports/model_forward.py index a24b8579..ef351ff9 100644 --- a/edge_orchestrator/edge_orchestrator/domain/ports/model_forward.py +++ b/edge_orchestrator/edge_orchestrator/domain/ports/model_forward.py @@ -6,13 +6,14 @@ class Labels(Enum): - KO = 'KO' - OK = 'OK' - NO_DECISION = 'NO_DECISION' + KO = "KO" + OK = "OK" + NO_DECISION = "NO_DECISION" class ModelForward: - @abstractmethod - async def perform_inference(self, model: ModelInfos, binary_data: bytes, binary_name: str) -> Dict[str, Dict]: + async def perform_inference( + self, model: ModelInfos, binary_data: bytes, binary_name: str + ) -> Dict[str, Dict]: pass diff --git a/edge_orchestrator/edge_orchestrator/domain/ports/station_config.py b/edge_orchestrator/edge_orchestrator/domain/ports/station_config.py index fbbd2322..741c94d7 100644 --- a/edge_orchestrator/edge_orchestrator/domain/ports/station_config.py +++ b/edge_orchestrator/edge_orchestrator/domain/ports/station_config.py @@ -1,12 +1,11 @@ from abc import abstractmethod -from typing import Dict, List, Type, Union, Optional +from typing import Dict, List, Optional, Type, Union from edge_orchestrator.domain.models.camera import Camera from edge_orchestrator.domain.models.model_infos import ModelInfos class StationConfig: - all_configs: dict active_config_name: Optional[str] active_config: Optional[dict] diff --git a/edge_orchestrator/edge_orchestrator/domain/ports/telemetry_sink.py b/edge_orchestrator/edge_orchestrator/domain/ports/telemetry_sink.py index 3cb41a79..141eca50 100644 --- a/edge_orchestrator/edge_orchestrator/domain/ports/telemetry_sink.py +++ b/edge_orchestrator/edge_orchestrator/domain/ports/telemetry_sink.py @@ -2,7 +2,6 @@ class TelemetrySink: - @abstractmethod async def send(self, message: str): pass diff --git a/edge_orchestrator/edge_orchestrator/domain/use_cases/supervisor.py 
b/edge_orchestrator/edge_orchestrator/domain/use_cases/supervisor.py index 81dcfc14..f59baad3 100644 --- a/edge_orchestrator/edge_orchestrator/domain/use_cases/supervisor.py +++ b/edge_orchestrator/edge_orchestrator/domain/use_cases/supervisor.py @@ -3,25 +3,34 @@ from abc import abstractmethod from collections import OrderedDict from enum import Enum -from typing import Dict, Union, Any, List +from typing import Any, Dict, List, Union from PIL import Image -from edge_orchestrator.api_config import get_metadata_storage, get_binary_storage, get_model_forward, \ - get_edge_station, get_station_config, get_telemetry_sink, logger -from edge_orchestrator.domain.models.camera import get_last_inference_by_camera, get_camera_rule -from edge_orchestrator.domain.models.item import get_item_rule -from edge_orchestrator.domain.models.item import Item -from edge_orchestrator.domain.models.model_infos import ModelInfos +from edge_orchestrator.api_config import ( + get_binary_storage, + get_edge_station, + get_metadata_storage, + get_model_forward, + get_station_config, + get_telemetry_sink, + logger, +) +from edge_orchestrator.domain.models.camera import ( + get_camera_rule, + get_last_inference_by_camera, +) from edge_orchestrator.domain.models.decision import Decision +from edge_orchestrator.domain.models.item import Item, get_item_rule +from edge_orchestrator.domain.models.model_infos import ModelInfos class SupervisorState(Enum): - CAPTURE = 'Capture' - SAVE_BINARIES = 'Save Binaries' - INFERENCE = 'Inference' - DECISION = 'Decision' - DONE = 'Done' + CAPTURE = "Capture" + SAVE_BINARIES = "Save Binaries" + INFERENCE = "Inference" + DECISION = "Decision" + DONE = "Done" def check_capture_according_to_config(item: Item, cameras: List[Dict]): @@ -29,15 +38,22 @@ def check_capture_according_to_config(item: Item, cameras: List[Dict]): cameras = set(cameras) missing_camera_binary = cameras.difference(binaries) if len(missing_camera_binary) != 0: - logger.warning(f"Only {len(binaries)} were received and {len(cameras)} are expected!") + logger.warning( + f"Only {len(binaries)} were received and {len(cameras)} are expected!" 
+ ) logger.warning(f"Missing image for camera: {missing_camera_binary}") class Supervisor: - - def __init__(self, metadata_storage=get_metadata_storage(), binary_storage=get_binary_storage(), - model_forward=get_model_forward(), station_config=get_station_config(), - edge_station=get_edge_station(), telemetry_sink=get_telemetry_sink()): + def __init__( + self, + metadata_storage=get_metadata_storage(), + binary_storage=get_binary_storage(), + model_forward=get_model_forward(), + station_config=get_station_config(), + edge_station=get_edge_station(), + telemetry_sink=get_telemetry_sink(), + ): self.metadata_storage = metadata_storage self.binary_storage = binary_storage self.model_forward = model_forward @@ -55,7 +71,6 @@ async def wrapper(item: Item, *args): return wrapper async def inspect(self, item: Item): - item.station_config = self.station_config.active_config_name if self.edge_station is not None: self.edge_station.register_cameras(self.station_config) @@ -83,9 +98,9 @@ async def set_decision(item: Item): decision = self.apply_business_rules(item) item.decision = decision telemetry_msg = { - 'item_id': item.id, - 'config': item.station_config, - 'decision': decision + "item_id": item.id, + "config": item.station_config, + "decision": decision, } await self.telemetry_sink.send(telemetry_msg) @@ -100,15 +115,15 @@ async def set_error_state(item: Item, error_message: str): tasks[SupervisorState.DECISION] = set_decision for supervisor_state, task_fct in tasks.items(): - logger.info(f'Starting {supervisor_state.value}') + logger.info(f"Starting {supervisor_state.value}") try: - logger.info(f'Entering try {supervisor_state.value}') + logger.info(f"Entering try {supervisor_state.value}") await task_fct(item, supervisor_state) except Exception as e: - logger.error(f'Error during {supervisor_state.value}: {e}') + logger.error(f"Error during {supervisor_state.value}: {e}") await set_error_state(item, str(e)) - logger.info(f'End of {supervisor_state.value}') + logger.info(f"End of {supervisor_state.value}") item.state = SupervisorState.DONE.value self.metadata_storage.save_item_metadata(item) @@ -116,48 +131,78 @@ async def set_error_state(item: Item, error_message: str): async def get_predictions(self, item: Item) -> Dict[str, Dict]: predictions = {} for camera_id in self.station_config.get_cameras(): - predictions_per_camera = await self.get_prediction_for_camera(camera_id, item, 'full_image') + predictions_per_camera = await self.get_prediction_for_camera( + camera_id, item, "full_image" + ) predictions[camera_id] = predictions_per_camera return predictions - async def get_prediction_for_camera(self, camera_id: str, item: Item, - image_name: str) -> Dict[str, Union[Dict, Any]]: + async def get_prediction_for_camera( + self, camera_id: str, item: Item, image_name: str + ) -> Dict[str, Union[Dict, Any]]: inference_output = {} binary_data = item.binaries[camera_id] model_pipeline = self.station_config.get_model_pipeline_for_camera(camera_id) - prediction_for_camera = await self.get_inference(inference_output, model_pipeline, binary_data, image_name) + prediction_for_camera = await self.get_inference( + inference_output, model_pipeline, binary_data, image_name + ) return prediction_for_camera - async def get_inference(self, inference_output: Dict, model_pipeline: List[ModelInfos], - full_image: bytes, image_name: str) -> Dict[str, Dict]: + async def get_inference( + self, + inference_output: Dict, + model_pipeline: List[ModelInfos], + full_image: bytes, + image_name: str, + ) -> Dict[str, 
Dict]: for current_model in model_pipeline: if _model_did_run(current_model.id, inference_output): continue - logger.info(f'Getting inference for model {current_model.id}') + logger.info(f"Getting inference for model {current_model.id}") if _model_has_no_dependency(current_model.depends_on): - inference_output[current_model.id] = await self.model_forward.perform_inference(current_model, - full_image, - image_name) + inference_output[ + current_model.id + ] = await self.model_forward.perform_inference( + current_model, full_image, image_name + ) else: inference_output[current_model.id] = {} - model_dependencies = [model_infos for model_infos in model_pipeline - if model_infos.id in current_model.depends_on] - inference_output_dependencies = await self.get_inference(inference_output, model_dependencies, - full_image, image_name) + model_dependencies = [ + model_infos + for model_infos in model_pipeline + if model_infos.id in current_model.depends_on + ] + inference_output_dependencies = await self.get_inference( + inference_output, model_dependencies, full_image, image_name + ) for model_dependency in model_dependencies: - for object_id, inference_output_dependency in inference_output_dependencies[ - model_dependency.id].items(): # noqa - object_location = inference_output_dependency['location'] + for ( + object_id, + inference_output_dependency, + ) in inference_output_dependencies[ + model_dependency.id + ].items(): # noqa + object_location = inference_output_dependency["location"] cropped_image = crop_image(full_image, object_location) - inference_output_object = await self.model_forward.perform_inference(current_model, - cropped_image, - object_id) - for sub_object_id, sub_object_info in inference_output_object.items(): - if 'location' in sub_object_info.keys(): - sub_object_info['location'] = relocate_sub_object_location_within_full_image( - object_location, sub_object_info['location']) - inference_output[current_model.id][sub_object_id] = sub_object_info + inference_output_object = ( + await self.model_forward.perform_inference( + current_model, cropped_image, object_id + ) + ) + for ( + sub_object_id, + sub_object_info, + ) in inference_output_object.items(): + if "location" in sub_object_info.keys(): + sub_object_info[ + "location" + ] = relocate_sub_object_location_within_full_image( + object_location, sub_object_info["location"] + ) + inference_output[current_model.id][ + sub_object_id + ] = sub_object_info return inference_output @@ -170,22 +215,35 @@ def apply_business_rules(self, item: Item) -> str: else: for camera_id in item.inferences: - camera_rule_name = self.station_config.active_config['cameras'][camera_id]['camera_rule']['name'] - camera_rule_parameters = self.station_config.active_config['cameras'][camera_id]['camera_rule'][ - 'parameters'] # noqa - - last_model_inferences = get_last_inference_by_camera(item.inferences[camera_id]) + camera_rule_name = self.station_config.active_config["cameras"][ + camera_id + ]["camera_rule"]["name"] + camera_rule_parameters = self.station_config.active_config["cameras"][ + camera_id + ]["camera_rule"][ + "parameters" + ] # noqa + + last_model_inferences = get_last_inference_by_camera( + item.inferences[camera_id] + ) if last_model_inferences == Decision.NO_DECISION: return Decision.NO_DECISION labels_of_last_model_inferences = get_labels(last_model_inferences) - item_camera_rule = get_camera_rule(camera_rule_name)(**camera_rule_parameters) - camera_decision = item_camera_rule.get_camera_decision(labels_of_last_model_inferences) + 
+            item_camera_rule = get_camera_rule(camera_rule_name)(
+                **camera_rule_parameters
+            )
+            camera_decision = item_camera_rule.get_camera_decision(
+                labels_of_last_model_inferences
+            )

-            camera_decisions[f'{camera_id}'] = camera_decision.value
+            camera_decisions[f"{camera_id}"] = camera_decision.value

-        item_rule_name = self.station_config.active_config['item_rule']['name']
-        item_rule_parameters = self.station_config.active_config['item_rule']['parameters']
+        item_rule_name = self.station_config.active_config["item_rule"]["name"]
+        item_rule_parameters = self.station_config.active_config["item_rule"][
+            "parameters"
+        ]

         item_rule = get_item_rule(item_rule_name)(**item_rule_parameters)

         item_decision = item_rule.get_item_decision(camera_decisions)
@@ -197,7 +255,7 @@ def get_labels(inferences):
     inferences_labels = []
     objects_in_last_model = list(inferences.keys())
     for obj in objects_in_last_model:
-        inferences_labels.append(inferences[obj]['label'])
+        inferences_labels.append(inferences[obj]["label"])
     return inferences_labels


@@ -217,19 +275,27 @@ def crop_image(binary_data: bytes, detected_object: List[int]) -> bytes:
         area = image.crop((xmin, ymin, xmax, ymax))
         if area.mode in ["RGBA", "P"]:
             area = area.convert("RGB")
-        area.save(cropped_image, format='JPEG')
+        area.save(cropped_image, format="JPEG")
         return cropped_image.getvalue()
     else:
-        logger.error('Informations for cropping are incorrect, the initial picture is used')
-        if (xmin > xmax):
-            logger.error(f'xmin (={xmin}) is greater than xmax (={xmax})')
-        elif (ymin > ymax):
-            logger.error(f'ymin (={ymin}) is greater than xmax (={ymax})')
+        logger.error(
+            "Information for cropping is incorrect, the initial picture is used"
+        )
+        if xmin > xmax:
+            logger.error(f"xmin (={xmin}) is greater than xmax (={xmax})")
+        elif ymin > ymax:
+            logger.error(f"ymin (={ymin}) is greater than ymax (={ymax})")
         return binary_data


-def relocate_sub_object_location_within_full_image(object_location: List[int], sub_object_location: List[int]) -> \
-    List[int]:
+def relocate_sub_object_location_within_full_image(
+    object_location: List[int], sub_object_location: List[int]
+) -> List[int]:
     [x_min_o, y_min_o, x_max_o, y_max_o] = object_location
     [x_min_so, y_min_so, x_max_so, y_max_so] = sub_object_location
-    return [x_min_so + x_min_o, y_min_so + y_min_o, x_max_so + x_min_o, y_max_so + y_min_o]
+    return [
+        x_min_so + x_min_o,
+        y_min_so + y_min_o,
+        x_max_so + x_min_o,
+        y_max_so + y_min_o,
+    ]
diff --git a/edge_orchestrator/edge_orchestrator/domain/use_cases/uploader.py b/edge_orchestrator/edge_orchestrator/domain/use_cases/uploader.py
index 191c537e..648fb620 100644
--- a/edge_orchestrator/edge_orchestrator/domain/use_cases/uploader.py
+++ b/edge_orchestrator/edge_orchestrator/domain/use_cases/uploader.py
@@ -2,19 +2,25 @@
 from collections import OrderedDict
 from enum import Enum

-from edge_orchestrator.api_config import get_metadata_storage, get_binary_storage, logger
+from edge_orchestrator.api_config import (
+    get_binary_storage,
+    get_metadata_storage,
+    logger,
+)
 from edge_orchestrator.domain.models.item import Item


 class UploaderState(Enum):
-    SAVE_BINARIES = 'Save Binaries'
-    DONE = 'Done'
+    SAVE_BINARIES = "Save Binaries"
+    DONE = "Done"


 class Uploader:
-
-    def __init__(self, metadata_storage=get_metadata_storage(), binary_storage=get_binary_storage()):
+    def __init__(
+        self,
+        metadata_storage=get_metadata_storage(),
+        binary_storage=get_binary_storage(),
+    ):
         self.metadata_storage = metadata_storage
         self.binary_storage = binary_storage

@@ -28,7
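# --- Illustrative sketch (not part of the patch) -------------------------------
# Worked example of the coordinate arithmetic implemented by crop_image and
# relocate_sub_object_location_within_full_image above: a sub-object detected
# inside a cropped region is shifted back into full-image coordinates by adding
# the crop's top-left offsets. Plain lists are used; the boxes are made up.
object_location = [100, 50, 300, 250]   # [xmin, ymin, xmax, ymax] of the crop
sub_object_location = [10, 20, 60, 80]  # box detected inside that crop

x_min_o, y_min_o, _, _ = object_location
x_min_so, y_min_so, x_max_so, y_max_so = sub_object_location
relocated = [x_min_so + x_min_o, y_min_so + y_min_o, x_max_so + x_min_o, y_max_so + y_min_o]
assert relocated == [110, 70, 160, 130]
# --------------------------------------------------------------------------------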
+34,6 @@ async def wrapper(item: Item, *args): return wrapper async def upload(self, item: Item): - tasks = OrderedDict() @self.save_item_metadata @@ -42,15 +47,15 @@ async def set_error_state(item: Item, error_message: str): tasks[UploaderState.SAVE_BINARIES] = save_item_binaries for uploader_state, task_fct in tasks.items(): - logger.info(f'Starting {uploader_state.value}') + logger.info(f"Starting {uploader_state.value}") try: - logger.info(f'Entering try {uploader_state.value}') + logger.info(f"Entering try {uploader_state.value}") await task_fct(item, uploader_state) except Exception as e: - logger.error(f'Error during {uploader_state.value}: {e}') + logger.error(f"Error during {uploader_state.value}: {e}") await set_error_state(item, str(e)) - logger.info(f'End of {uploader_state.value}') + logger.info(f"End of {uploader_state.value}") item.state = UploaderState.DONE.value self.metadata_storage.save_item_metadata(item) diff --git a/edge_orchestrator/edge_orchestrator/environment/default.py b/edge_orchestrator/edge_orchestrator/environment/default.py index a3aaeef6..0665ea68 100644 --- a/edge_orchestrator/edge_orchestrator/environment/default.py +++ b/edge_orchestrator/edge_orchestrator/environment/default.py @@ -1,22 +1,37 @@ from edge_orchestrator.domain.models.edge_station import EdgeStation from edge_orchestrator.environment.config import Config -from edge_orchestrator.infrastructure.binary_storage.filesystem_binary_storage import FileSystemBinaryStorage +from edge_orchestrator.infrastructure.binary_storage.filesystem_binary_storage import ( + FileSystemBinaryStorage, +) from edge_orchestrator.infrastructure.inventory.json_inventory import JsonInventory -from edge_orchestrator.infrastructure.metadata_storage.memory_metadata_storage import MemoryMetadataStorage +from edge_orchestrator.infrastructure.metadata_storage.memory_metadata_storage import ( + MemoryMetadataStorage, +) -from edge_orchestrator.infrastructure.model_forward.fake_model_forward import FakeModelForward -from edge_orchestrator.infrastructure.station_config.json_station_config import JsonStationConfig -from edge_orchestrator.infrastructure.telemetry_sink.fake_telemetry_sink import FakeTelemetrySink +from edge_orchestrator.infrastructure.model_forward.fake_model_forward import ( + FakeModelForward, +) +from edge_orchestrator.infrastructure.station_config.json_station_config import ( + JsonStationConfig, +) +from edge_orchestrator.infrastructure.telemetry_sink.fake_telemetry_sink import ( + FakeTelemetrySink, +) class Default(Config): def __init__(self): self.metadata_storage = MemoryMetadataStorage() self.model_forward = FakeModelForward() - self.binary_storage = FileSystemBinaryStorage(self.ROOT_PATH / 'data' / 'storage') - self.inventory = JsonInventory(self.ROOT_PATH / 'config' / 'inventory.json') - self.station_config = JsonStationConfig(self.ROOT_PATH / 'config' / 'station_configs', - self.inventory, self.ROOT_PATH / 'data') - self.edge_station = EdgeStation(self.station_config, self.ROOT_PATH / 'data') + self.binary_storage = FileSystemBinaryStorage( + self.ROOT_PATH / "data" / "storage" + ) + self.inventory = JsonInventory(self.ROOT_PATH / "config" / "inventory.json") + self.station_config = JsonStationConfig( + self.ROOT_PATH / "config" / "station_configs", + self.inventory, + self.ROOT_PATH / "data", + ) + self.edge_station = EdgeStation(self.station_config, self.ROOT_PATH / "data") self.telemetry_sink = FakeTelemetrySink() diff --git a/edge_orchestrator/edge_orchestrator/environment/docker.py 
b/edge_orchestrator/edge_orchestrator/environment/docker.py index a87308b2..c2d05e95 100644 --- a/edge_orchestrator/edge_orchestrator/environment/docker.py +++ b/edge_orchestrator/edge_orchestrator/environment/docker.py @@ -2,28 +2,46 @@ from edge_orchestrator.domain.models.edge_station import EdgeStation from edge_orchestrator.environment.config import Config -from edge_orchestrator.infrastructure.binary_storage.filesystem_binary_storage import FileSystemBinaryStorage +from edge_orchestrator.infrastructure.binary_storage.filesystem_binary_storage import ( + FileSystemBinaryStorage, +) from edge_orchestrator.infrastructure.inventory.json_inventory import JsonInventory -from edge_orchestrator.infrastructure.metadata_storage.mongodb_metadata_storage import MongoDbMetadataStorage -from edge_orchestrator.infrastructure.model_forward.tf_serving_wrapper import TFServingWrapper -from edge_orchestrator.infrastructure.station_config.json_station_config import JsonStationConfig -from edge_orchestrator.infrastructure.telemetry_sink.postgresql_telemetry_sink import PostgresTelemetrySink +from edge_orchestrator.infrastructure.metadata_storage.mongodb_metadata_storage import ( + MongoDbMetadataStorage, +) +from edge_orchestrator.infrastructure.model_forward.tf_serving_wrapper import ( + TFServingWrapper, +) +from edge_orchestrator.infrastructure.station_config.json_station_config import ( + JsonStationConfig, +) +from edge_orchestrator.infrastructure.telemetry_sink.postgresql_telemetry_sink import ( + PostgresTelemetrySink, +) class Docker(Config): - MONGO_DB_URI = os.environ.get('MONGO_DB_URI', 'mongodb://edge_db:27017/') - POSTGRES_DB_URI = os.environ.get('POSTGRES_DB_URI', 'postgresql://vio:vio@hub_monitoring_db:5432/vio') - SERVING_MODEL_URL = os.environ.get('SERVING_MODEL_URL', 'http://edge_model_serving:8501') + MONGO_DB_URI = os.environ.get("MONGO_DB_URI", "mongodb://edge_db:27017/") + POSTGRES_DB_URI = os.environ.get( + "POSTGRES_DB_URI", "postgresql://vio:vio@hub_monitoring_db:5432/vio" + ) + SERVING_MODEL_URL = os.environ.get( + "SERVING_MODEL_URL", "http://edge_model_serving:8501" + ) def __init__(self): self.metadata_storage = MongoDbMetadataStorage(self.MONGO_DB_URI) - self.binary_storage = FileSystemBinaryStorage(self.ROOT_PATH / 'data' / 'storage') - self.inventory = JsonInventory(self.ROOT_PATH / 'config' / 'inventory.json') + self.binary_storage = FileSystemBinaryStorage( + self.ROOT_PATH / "data" / "storage" + ) + self.inventory = JsonInventory(self.ROOT_PATH / "config" / "inventory.json") self.station_config = JsonStationConfig( - station_configs_folder=self.ROOT_PATH / 'config' / 'station_configs', + station_configs_folder=self.ROOT_PATH / "config" / "station_configs", inventory=self.inventory, - data_folder=self.ROOT_PATH / 'data' + data_folder=self.ROOT_PATH / "data", + ) + self.edge_station = EdgeStation(self.station_config, self.ROOT_PATH / "data") + self.model_forward = TFServingWrapper( + self.SERVING_MODEL_URL, self.inventory, self.station_config ) - self.edge_station = EdgeStation(self.station_config, self.ROOT_PATH / 'data') - self.model_forward = TFServingWrapper(self.SERVING_MODEL_URL, self.inventory, self.station_config) self.telemetry_sink = PostgresTelemetrySink(self.POSTGRES_DB_URI) diff --git a/edge_orchestrator/edge_orchestrator/environment/edge_with_azure_container_storage.py b/edge_orchestrator/edge_orchestrator/environment/edge_with_azure_container_storage.py index b9169e55..4a33de8a 100644 --- 
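# --- Illustrative sketch (not part of the patch) -------------------------------
# The Docker environment above resolves its endpoints with os.environ.get plus a
# default, so docker-compose can override them without code changes. Standalone
# illustration; the variable names match the config, the values are examples only.
import os

os.environ["SERVING_MODEL_URL"] = "http://localhost:8501"  # e.g. set by compose
serving_model_url = os.environ.get("SERVING_MODEL_URL", "http://edge_model_serving:8501")
mongo_db_uri = os.environ.get("MONGO_DB_URI", "mongodb://edge_db:27017/")
print(serving_model_url, mongo_db_uri)  # overridden value, then the default
# --------------------------------------------------------------------------------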
a/edge_orchestrator/edge_orchestrator/environment/edge_with_azure_container_storage.py +++ b/edge_orchestrator/edge_orchestrator/environment/edge_with_azure_container_storage.py @@ -2,25 +2,40 @@ from edge_orchestrator.domain.models.edge_station import EdgeStation from edge_orchestrator.environment.config import Config -from edge_orchestrator.infrastructure.binary_storage.azure_container_binary_storage import \ - AzureContainerBinaryStorage +from edge_orchestrator.infrastructure.binary_storage.azure_container_binary_storage import ( + AzureContainerBinaryStorage, +) from edge_orchestrator.infrastructure.inventory.json_inventory import JsonInventory -from edge_orchestrator.infrastructure.metadata_storage.azure_container_metadata_storage import \ - AzureContainerMetadataStorage -from edge_orchestrator.infrastructure.model_forward.tf_serving_wrapper import TFServingWrapper -from edge_orchestrator.infrastructure.station_config.json_station_config import JsonStationConfig -from edge_orchestrator.infrastructure.telemetry_sink.azure_iot_hub_telemetry_sink import AzureIotHubTelemetrySink +from edge_orchestrator.infrastructure.metadata_storage.azure_container_metadata_storage import ( + AzureContainerMetadataStorage, +) +from edge_orchestrator.infrastructure.model_forward.tf_serving_wrapper import ( + TFServingWrapper, +) +from edge_orchestrator.infrastructure.station_config.json_station_config import ( + JsonStationConfig, +) +from edge_orchestrator.infrastructure.telemetry_sink.azure_iot_hub_telemetry_sink import ( + AzureIotHubTelemetrySink, +) class EdgeWithAzureContainerStorage(Config): - SERVING_MODEL_URL = os.environ.get('SERVING_MODEL_URL', 'http://edge_model_serving:8501') + SERVING_MODEL_URL = os.environ.get( + "SERVING_MODEL_URL", "http://edge_model_serving:8501" + ) def __init__(self): self.metadata_storage = AzureContainerMetadataStorage() self.binary_storage = AzureContainerBinaryStorage() - self.inventory = JsonInventory(self.ROOT_PATH / 'config' / 'inventory.json') - self.station_config = JsonStationConfig(self.ROOT_PATH / 'config' / 'station_configs', - self.inventory, self.ROOT_PATH / 'data') - self.edge_station = EdgeStation(self.station_config, self.ROOT_PATH / 'data') - self.model_forward = TFServingWrapper(self.SERVING_MODEL_URL, self.inventory, self.station_config) + self.inventory = JsonInventory(self.ROOT_PATH / "config" / "inventory.json") + self.station_config = JsonStationConfig( + self.ROOT_PATH / "config" / "station_configs", + self.inventory, + self.ROOT_PATH / "data", + ) + self.edge_station = EdgeStation(self.station_config, self.ROOT_PATH / "data") + self.model_forward = TFServingWrapper( + self.SERVING_MODEL_URL, self.inventory, self.station_config + ) self.telemetry_sink = AzureIotHubTelemetrySink() diff --git a/edge_orchestrator/edge_orchestrator/environment/edge_with_filesystem_metadata_storage.py b/edge_orchestrator/edge_orchestrator/environment/edge_with_filesystem_metadata_storage.py index 12d4663d..4a7ba70d 100644 --- a/edge_orchestrator/edge_orchestrator/environment/edge_with_filesystem_metadata_storage.py +++ b/edge_orchestrator/edge_orchestrator/environment/edge_with_filesystem_metadata_storage.py @@ -2,23 +2,44 @@ from edge_orchestrator.domain.models.edge_station import EdgeStation from edge_orchestrator.environment.config import Config -from edge_orchestrator.infrastructure.binary_storage.filesystem_binary_storage import FileSystemBinaryStorage +from edge_orchestrator.infrastructure.binary_storage.filesystem_binary_storage import ( + 
FileSystemBinaryStorage, +) from edge_orchestrator.infrastructure.inventory.json_inventory import JsonInventory -from edge_orchestrator.infrastructure.metadata_storage.filesystem_metadata_storage import FileSystemMetadataStorage -from edge_orchestrator.infrastructure.model_forward.tf_serving_wrapper import TFServingWrapper -from edge_orchestrator.infrastructure.station_config.json_station_config import JsonStationConfig -from edge_orchestrator.infrastructure.telemetry_sink.azure_iot_hub_telemetry_sink import AzureIotHubTelemetrySink +from edge_orchestrator.infrastructure.metadata_storage.filesystem_metadata_storage import ( + FileSystemMetadataStorage, +) +from edge_orchestrator.infrastructure.model_forward.tf_serving_wrapper import ( + TFServingWrapper, +) +from edge_orchestrator.infrastructure.station_config.json_station_config import ( + JsonStationConfig, +) +from edge_orchestrator.infrastructure.telemetry_sink.azure_iot_hub_telemetry_sink import ( + AzureIotHubTelemetrySink, +) class EdgeWithFileSystemMetadataStorage(Config): - SERVING_MODEL_URL = os.environ.get('SERVING_MODEL_URL', 'http://edge_model_serving:8501') + SERVING_MODEL_URL = os.environ.get( + "SERVING_MODEL_URL", "http://edge_model_serving:8501" + ) def __init__(self): - self.metadata_storage = FileSystemMetadataStorage(self.ROOT_PATH / 'data' / 'storage') - self.binary_storage = FileSystemBinaryStorage(self.ROOT_PATH / 'data' / 'storage') - self.inventory = JsonInventory(self.ROOT_PATH / 'config' / 'inventory.json') - self.station_config = JsonStationConfig(self.ROOT_PATH / 'config' / 'station_configs', - self.inventory, self.ROOT_PATH / 'data') - self.edge_station = EdgeStation(self.station_config, self.ROOT_PATH / 'data') - self.model_forward = TFServingWrapper(self.SERVING_MODEL_URL, self.inventory, self.station_config) + self.metadata_storage = FileSystemMetadataStorage( + self.ROOT_PATH / "data" / "storage" + ) + self.binary_storage = FileSystemBinaryStorage( + self.ROOT_PATH / "data" / "storage" + ) + self.inventory = JsonInventory(self.ROOT_PATH / "config" / "inventory.json") + self.station_config = JsonStationConfig( + self.ROOT_PATH / "config" / "station_configs", + self.inventory, + self.ROOT_PATH / "data", + ) + self.edge_station = EdgeStation(self.station_config, self.ROOT_PATH / "data") + self.model_forward = TFServingWrapper( + self.SERVING_MODEL_URL, self.inventory, self.station_config + ) self.telemetry_sink = AzureIotHubTelemetrySink() diff --git a/edge_orchestrator/edge_orchestrator/environment/edge_with_mongo_db_metadata_storage.py b/edge_orchestrator/edge_orchestrator/environment/edge_with_mongo_db_metadata_storage.py index 3c9f503e..eaf00f9d 100644 --- a/edge_orchestrator/edge_orchestrator/environment/edge_with_mongo_db_metadata_storage.py +++ b/edge_orchestrator/edge_orchestrator/environment/edge_with_mongo_db_metadata_storage.py @@ -2,24 +2,43 @@ from edge_orchestrator.domain.models.edge_station import EdgeStation from edge_orchestrator.environment.config import Config -from edge_orchestrator.infrastructure.binary_storage.filesystem_binary_storage import FileSystemBinaryStorage +from edge_orchestrator.infrastructure.binary_storage.filesystem_binary_storage import ( + FileSystemBinaryStorage, +) from edge_orchestrator.infrastructure.inventory.json_inventory import JsonInventory -from edge_orchestrator.infrastructure.metadata_storage.mongodb_metadata_storage import MongoDbMetadataStorage -from edge_orchestrator.infrastructure.model_forward.tf_serving_wrapper import TFServingWrapper -from 
edge_orchestrator.infrastructure.station_config.json_station_config import JsonStationConfig -from edge_orchestrator.infrastructure.telemetry_sink.azure_iot_hub_telemetry_sink import AzureIotHubTelemetrySink +from edge_orchestrator.infrastructure.metadata_storage.mongodb_metadata_storage import ( + MongoDbMetadataStorage, +) +from edge_orchestrator.infrastructure.model_forward.tf_serving_wrapper import ( + TFServingWrapper, +) +from edge_orchestrator.infrastructure.station_config.json_station_config import ( + JsonStationConfig, +) +from edge_orchestrator.infrastructure.telemetry_sink.azure_iot_hub_telemetry_sink import ( + AzureIotHubTelemetrySink, +) class EdgeWithMongoDbMetadataStorage(Config): - MONGO_DB_URI = os.environ.get('MONGO_DB_URI', 'mongodb://edge_db:27017/') - SERVING_MODEL_URL = os.environ.get('SERVING_MODEL_URL', 'http://edge_model_serving:8501') + MONGO_DB_URI = os.environ.get("MONGO_DB_URI", "mongodb://edge_db:27017/") + SERVING_MODEL_URL = os.environ.get( + "SERVING_MODEL_URL", "http://edge_model_serving:8501" + ) def __init__(self): self.metadata_storage = MongoDbMetadataStorage(self.MONGO_DB_URI) - self.binary_storage = FileSystemBinaryStorage(self.ROOT_PATH / 'data' / 'storage') - self.inventory = JsonInventory(self.ROOT_PATH / 'config' / 'inventory.json') - self.station_config = JsonStationConfig(self.ROOT_PATH / 'config' / 'station_configs', - self.inventory, self.ROOT_PATH / 'data') - self.edge_station = EdgeStation(self.station_config, self.ROOT_PATH / 'data') - self.model_forward = TFServingWrapper(self.SERVING_MODEL_URL, self.inventory, self.station_config) + self.binary_storage = FileSystemBinaryStorage( + self.ROOT_PATH / "data" / "storage" + ) + self.inventory = JsonInventory(self.ROOT_PATH / "config" / "inventory.json") + self.station_config = JsonStationConfig( + self.ROOT_PATH / "config" / "station_configs", + self.inventory, + self.ROOT_PATH / "data", + ) + self.edge_station = EdgeStation(self.station_config, self.ROOT_PATH / "data") + self.model_forward = TFServingWrapper( + self.SERVING_MODEL_URL, self.inventory, self.station_config + ) self.telemetry_sink = AzureIotHubTelemetrySink() diff --git a/edge_orchestrator/edge_orchestrator/environment/test.py b/edge_orchestrator/edge_orchestrator/environment/test.py index 2c2d4ea7..319a939a 100644 --- a/edge_orchestrator/edge_orchestrator/environment/test.py +++ b/edge_orchestrator/edge_orchestrator/environment/test.py @@ -3,27 +3,48 @@ from edge_orchestrator.domain.models.edge_station import EdgeStation from edge_orchestrator.environment.config import Config -from edge_orchestrator.infrastructure.binary_storage.filesystem_binary_storage import FileSystemBinaryStorage +from edge_orchestrator.infrastructure.binary_storage.filesystem_binary_storage import ( + FileSystemBinaryStorage, +) from edge_orchestrator.infrastructure.inventory.json_inventory import JsonInventory -from edge_orchestrator.infrastructure.metadata_storage.mongodb_metadata_storage import MongoDbMetadataStorage -from edge_orchestrator.infrastructure.model_forward.tf_serving_wrapper import TFServingWrapper -from edge_orchestrator.infrastructure.station_config.json_station_config import JsonStationConfig -from edge_orchestrator.infrastructure.telemetry_sink.postgresql_telemetry_sink import PostgresTelemetrySink -from tests.conftest import TEST_STATION_CONFIGS_FOLDER_PATH, TEST_INVENTORY_PATH, TEST_DATA_FOLDER_PATH +from edge_orchestrator.infrastructure.metadata_storage.mongodb_metadata_storage import ( + MongoDbMetadataStorage, +) +from 
edge_orchestrator.infrastructure.model_forward.tf_serving_wrapper import ( + TFServingWrapper, +) +from edge_orchestrator.infrastructure.station_config.json_station_config import ( + JsonStationConfig, +) +from edge_orchestrator.infrastructure.telemetry_sink.postgresql_telemetry_sink import ( + PostgresTelemetrySink, +) +from tests.conftest import ( + TEST_DATA_FOLDER_PATH, + TEST_INVENTORY_PATH, + TEST_STATION_CONFIGS_FOLDER_PATH, +) class Test(Config): - ROOT_PATH = Path('/tests') - MONGO_DB_URI = os.environ.get('MONGO_DB_URI', 'mongodb://edge_db:27017/') - POSTGRES_DB_URI = os.environ.get('POSTGRES_DB_URI', 'postgresql://vio:vio@hub_monitoring_db:5432/vio') - SERVING_MODEL_URL = os.environ.get('SERVING_MODEL_URL', 'http://edge_model_serving:8501') + ROOT_PATH = Path("/tests") + MONGO_DB_URI = os.environ.get("MONGO_DB_URI", "mongodb://edge_db:27017/") + POSTGRES_DB_URI = os.environ.get( + "POSTGRES_DB_URI", "postgresql://vio:vio@hub_monitoring_db:5432/vio" + ) + SERVING_MODEL_URL = os.environ.get( + "SERVING_MODEL_URL", "http://edge_model_serving:8501" + ) def __init__(self): self.metadata_storage = MongoDbMetadataStorage(self.MONGO_DB_URI) - self.binary_storage = FileSystemBinaryStorage(TEST_DATA_FOLDER_PATH / 'storage') + self.binary_storage = FileSystemBinaryStorage(TEST_DATA_FOLDER_PATH / "storage") self.inventory = JsonInventory(TEST_INVENTORY_PATH) - self.station_config = JsonStationConfig(TEST_STATION_CONFIGS_FOLDER_PATH, - self.inventory, TEST_DATA_FOLDER_PATH) + self.station_config = JsonStationConfig( + TEST_STATION_CONFIGS_FOLDER_PATH, self.inventory, TEST_DATA_FOLDER_PATH + ) self.edge_station = EdgeStation(self.station_config, TEST_DATA_FOLDER_PATH) - self.model_forward = TFServingWrapper(self.SERVING_MODEL_URL, self.inventory, self.station_config) + self.model_forward = TFServingWrapper( + self.SERVING_MODEL_URL, self.inventory, self.station_config + ) self.telemetry_sink = PostgresTelemetrySink(self.POSTGRES_DB_URI) diff --git a/edge_orchestrator/edge_orchestrator/environment/upload_with_gcp_bucket.py b/edge_orchestrator/edge_orchestrator/environment/upload_with_gcp_bucket.py index 941aacfb..a107277e 100644 --- a/edge_orchestrator/edge_orchestrator/environment/upload_with_gcp_bucket.py +++ b/edge_orchestrator/edge_orchestrator/environment/upload_with_gcp_bucket.py @@ -2,23 +2,40 @@ from edge_orchestrator.domain.models.edge_station import EdgeStation from edge_orchestrator.environment.config import Config -from edge_orchestrator.infrastructure.binary_storage.gcp_binary_storage import GCPBinaryStorage -from edge_orchestrator.infrastructure.metadata_storage.gcp_metadata_storage import GCPMetadataStorage +from edge_orchestrator.infrastructure.binary_storage.gcp_binary_storage import ( + GCPBinaryStorage, +) from edge_orchestrator.infrastructure.inventory.json_inventory import JsonInventory -from edge_orchestrator.infrastructure.station_config.json_station_config import JsonStationConfig -from edge_orchestrator.infrastructure.model_forward.tf_serving_wrapper import TFServingWrapper -from edge_orchestrator.infrastructure.telemetry_sink.fake_telemetry_sink import FakeTelemetrySink +from edge_orchestrator.infrastructure.metadata_storage.gcp_metadata_storage import ( + GCPMetadataStorage, +) +from edge_orchestrator.infrastructure.model_forward.tf_serving_wrapper import ( + TFServingWrapper, +) +from edge_orchestrator.infrastructure.station_config.json_station_config import ( + JsonStationConfig, +) +from edge_orchestrator.infrastructure.telemetry_sink.fake_telemetry_sink import ( + 
FakeTelemetrySink, +) class UploadWithGCPBucket(Config): - SERVING_MODEL_URL = os.environ.get('SERVING_MODEL_URL', 'http://edge_model_serving:8501') + SERVING_MODEL_URL = os.environ.get( + "SERVING_MODEL_URL", "http://edge_model_serving:8501" + ) def __init__(self): self.metadata_storage = GCPMetadataStorage() self.binary_storage = GCPBinaryStorage() - self.inventory = JsonInventory(self.ROOT_PATH / 'config' / 'inventory.json') - self.station_config = JsonStationConfig(self.ROOT_PATH / 'config' / 'station_configs', - self.inventory, self.ROOT_PATH / 'data') - self.edge_station = EdgeStation(self.station_config, self.ROOT_PATH / 'data') - self.model_forward = TFServingWrapper(self.SERVING_MODEL_URL, self.inventory, self.station_config) + self.inventory = JsonInventory(self.ROOT_PATH / "config" / "inventory.json") + self.station_config = JsonStationConfig( + self.ROOT_PATH / "config" / "station_configs", + self.inventory, + self.ROOT_PATH / "data", + ) + self.edge_station = EdgeStation(self.station_config, self.ROOT_PATH / "data") + self.model_forward = TFServingWrapper( + self.SERVING_MODEL_URL, self.inventory, self.station_config + ) self.telemetry_sink = FakeTelemetrySink() diff --git a/edge_orchestrator/edge_orchestrator/infrastructure/binary_storage/azure_container_binary_storage.py b/edge_orchestrator/edge_orchestrator/infrastructure/binary_storage/azure_container_binary_storage.py index 9df5755d..d50105e6 100644 --- a/edge_orchestrator/edge_orchestrator/infrastructure/binary_storage/azure_container_binary_storage.py +++ b/edge_orchestrator/edge_orchestrator/infrastructure/binary_storage/azure_container_binary_storage.py @@ -10,34 +10,46 @@ class AzureContainerBinaryStorage(BinaryStorage): - def __init__(self): - self.azure_container_name = os.getenv('AZURE_CONTAINER_NAME') - az_storage_connection_str = os.getenv('AZURE_STORAGE_CONNECTION_STRING') - self._blob_service_client = BlobServiceClient.from_connection_string(az_storage_connection_str) + self.azure_container_name = os.getenv("AZURE_CONTAINER_NAME") + az_storage_connection_str = os.getenv("AZURE_STORAGE_CONNECTION_STRING") + self._blob_service_client = BlobServiceClient.from_connection_string( + az_storage_connection_str + ) try: self._blob_service_client.create_container(self.azure_container_name) except ResourceExistsError: pass - self._container_client = self._blob_service_client.get_container_client(self.azure_container_name) - self._transport_params = {'client': self._blob_service_client} + self._container_client = self._blob_service_client.get_container_client( + self.azure_container_name + ) + self._transport_params = {"client": self._blob_service_client} def save_item_binaries(self, item: Item): for camera_id, binary in item.binaries.items(): - with open(f'azure://{self.azure_container_name}/{item.id}/{camera_id}.jpg', - 'wb', transport_params=self._transport_params) as f: + with open( + f"azure://{self.azure_container_name}/{item.id}/{camera_id}.jpg", + "wb", + transport_params=self._transport_params, + ) as f: f.write(binary) def get_item_binary(self, item_id: str, camera_id: str) -> bytes: - with open(f'azure://{self.azure_container_name}/{item_id}/{camera_id}.jpg', - 'rb', transport_params=self._transport_params) as f: + with open( + f"azure://{self.azure_container_name}/{item_id}/{camera_id}.jpg", + "rb", + transport_params=self._transport_params, + ) as f: return f.read() def get_item_binaries(self, item_id: str) -> List[str]: binaries = [] for blob in self._container_client.list_blobs(): - if item_id in 
blob['name']: - with open(f"azure://{self.azure_container_name}/{blob['name']}", - 'rb', transport_params=self._transport_params) as f: + if item_id in blob["name"]: + with open( + f"azure://{self.azure_container_name}/{blob['name']}", + "rb", + transport_params=self._transport_params, + ) as f: binaries.append(f.read()) return binaries diff --git a/edge_orchestrator/edge_orchestrator/infrastructure/binary_storage/filesystem_binary_storage.py b/edge_orchestrator/edge_orchestrator/infrastructure/binary_storage/filesystem_binary_storage.py index 57da23c8..47e335c1 100644 --- a/edge_orchestrator/edge_orchestrator/infrastructure/binary_storage/filesystem_binary_storage.py +++ b/edge_orchestrator/edge_orchestrator/infrastructure/binary_storage/filesystem_binary_storage.py @@ -6,7 +6,6 @@ class FileSystemBinaryStorage(BinaryStorage): - def __init__(self, src_directory_path: Path): self.folder = src_directory_path @@ -15,18 +14,18 @@ def save_item_binaries(self, item: Item): path.mkdir(parents=True, exist_ok=True) for camera_id, binary in item.binaries.items(): filepath = _get_filepath(self.folder, item.id, camera_id) - with filepath.open('wb') as f: + with filepath.open("wb") as f: f.write(binary) def get_item_binary(self, item_id: str, camera_id: str) -> bytes: filepath = _get_filepath(self.folder, item_id, camera_id) - with filepath.open('rb') as f: + with filepath.open("rb") as f: return f.read() def get_item_binaries(self, item_id: str) -> List[str]: filepath = self.folder / item_id - return [binary_path.name for binary_path in filepath.glob('*')] + return [binary_path.name for binary_path in filepath.glob("*")] def _get_filepath(folder: Path, item_id: str, camera_id: str) -> Path: - return folder / item_id / (camera_id + '.jpg') + return folder / item_id / (camera_id + ".jpg") diff --git a/edge_orchestrator/edge_orchestrator/infrastructure/binary_storage/gcp_binary_storage.py b/edge_orchestrator/edge_orchestrator/infrastructure/binary_storage/gcp_binary_storage.py index b0075ec8..23824f01 100644 --- a/edge_orchestrator/edge_orchestrator/infrastructure/binary_storage/gcp_binary_storage.py +++ b/edge_orchestrator/edge_orchestrator/infrastructure/binary_storage/gcp_binary_storage.py @@ -1,16 +1,17 @@ import os from typing import List +from google.cloud import storage + from edge_orchestrator.domain.models.item import Item from edge_orchestrator.domain.ports.binary_storage import BinaryStorage -from google.cloud import storage class GCPBinaryStorage(BinaryStorage): def __init__(self): self.storage_client = storage.Client() - self.prefix = os.environ.get('EDGE_NAME', '') - self.bucket = self.storage_client.get_bucket(os.getenv('GCP_BUCKET_NAME')) + self.prefix = os.environ.get("EDGE_NAME", "") + self.bucket = self.storage_client.get_bucket(os.getenv("GCP_BUCKET_NAME")) def save_item_binaries(self, item: Item) -> None: for camera_id, binary in item.binaries.items(): diff --git a/edge_orchestrator/edge_orchestrator/infrastructure/binary_storage/memory_binary_storage.py b/edge_orchestrator/edge_orchestrator/infrastructure/binary_storage/memory_binary_storage.py index 1bc09a4f..0d7d653d 100644 --- a/edge_orchestrator/edge_orchestrator/infrastructure/binary_storage/memory_binary_storage.py +++ b/edge_orchestrator/edge_orchestrator/infrastructure/binary_storage/memory_binary_storage.py @@ -5,7 +5,6 @@ class MemoryBinaryStorage(BinaryStorage): - def __init__(self): self.binaries = {} diff --git a/edge_orchestrator/edge_orchestrator/infrastructure/camera/fake_camera.py 
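# --- Illustrative sketch (not part of the patch) -------------------------------
# Standalone illustration of the <folder>/<item_id>/<camera_id>.jpg layout used by
# FileSystemBinaryStorage above, written against a temporary directory so it can
# run anywhere. The item and camera ids are made up.
import tempfile
from pathlib import Path

folder = Path(tempfile.mkdtemp())
item_id, camera_id, binary = "item_1", "camera_id1", b"\xff\xd8fake-jpeg-bytes"

(folder / item_id).mkdir(parents=True, exist_ok=True)
filepath = folder / item_id / (camera_id + ".jpg")
filepath.write_bytes(binary)

assert filepath.read_bytes() == binary
assert [p.name for p in (folder / item_id).glob("*")] == ["camera_id1.jpg"]
# --------------------------------------------------------------------------------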
b/edge_orchestrator/edge_orchestrator/infrastructure/camera/fake_camera.py index cddfef34..2d6237a0 100644 --- a/edge_orchestrator/edge_orchestrator/infrastructure/camera/fake_camera.py +++ b/edge_orchestrator/edge_orchestrator/infrastructure/camera/fake_camera.py @@ -8,20 +8,19 @@ class FakeCamera(Camera): - def __init__(self, id: str, settings: Dict[str, Union[str, Dict]]): super().__init__(id, settings) self.id = id self.settings = settings - self.data_folder_path = Config.ROOT_PATH / 'data' - self.image_extensions = ['*.jpg', '*.png'] + self.data_folder_path = Config.ROOT_PATH / "data" + self.image_extensions = ["*.jpg", "*.png"] def capture(self) -> bytes: random_image_path = self.select_random_image() - return random_image_path.open('rb').read() + return random_image_path.open("rb").read() def select_random_image(self) -> Path: - source = self.data_folder_path / self.settings['source'] + source = self.data_folder_path / self.settings["source"] selected_images = [] for extension in self.image_extensions: selected_images += list(source.glob(extension)) diff --git a/edge_orchestrator/edge_orchestrator/infrastructure/camera/raspberry_pi_camera.py b/edge_orchestrator/edge_orchestrator/infrastructure/camera/raspberry_pi_camera.py index 859db371..2929b6cd 100644 --- a/edge_orchestrator/edge_orchestrator/infrastructure/camera/raspberry_pi_camera.py +++ b/edge_orchestrator/edge_orchestrator/infrastructure/camera/raspberry_pi_camera.py @@ -1,24 +1,23 @@ +import importlib from pathlib import Path from typing import Dict, Union -import importlib from edge_orchestrator.domain.models.camera import Camera class RaspberryPiCamera(Camera): - def __init__(self, id: str, settings: Dict[str, Union[str, Dict]]): super().__init__(id, settings) self.id = id self.settings = settings def capture(self) -> bytes: - pi_camera = getattr(importlib.import_module('picamera'), 'PiCamera') + pi_camera = getattr(importlib.import_module("picamera"), "PiCamera") with pi_camera() as camera: camera.resolution = (640, 640) camera.capture("./test.jpg") img = Path("./test.jpg") - return img.open('rb').read() + return img.open("rb").read() def apply_settings(self, custom_settings: Dict): self.settings = custom_settings diff --git a/edge_orchestrator/edge_orchestrator/infrastructure/camera/usb_camera.py b/edge_orchestrator/edge_orchestrator/infrastructure/camera/usb_camera.py index 71f563dc..6b052ab2 100644 --- a/edge_orchestrator/edge_orchestrator/infrastructure/camera/usb_camera.py +++ b/edge_orchestrator/edge_orchestrator/infrastructure/camera/usb_camera.py @@ -1,26 +1,25 @@ +import subprocess from pathlib import Path from typing import Dict, Union -import subprocess from edge_orchestrator import logger from edge_orchestrator.domain.models.camera import Camera class UsbCamera(Camera): - def __init__(self, id: str, settings: Dict[str, Union[str, Dict]]): super().__init__(id, settings) self.id = id self.settings = settings def capture(self) -> bytes: - resolution = '640x640' - source = self.settings['source'] - img_save_path = f"{source}.jpg".replace('/', '') - cmd = f'fswebcam -r {resolution} -S 3 --jpeg 50 --save {img_save_path} -d {source}' + resolution = "640x640" + source = self.settings["source"] + img_save_path = f"{source}.jpg".replace("/", "") + cmd = f"fswebcam -r {resolution} -S 3 --jpeg 50 --save {img_save_path} -d {source}" cmd_feedback = subprocess.run([cmd], shell=True) logger.info(f"Camera exit code: {cmd_feedback.returncode}") - return Path(img_save_path).open('rb').read() + return 
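# --- Illustrative sketch (not part of the patch) -------------------------------
# FakeCamera above picks a random *.jpg or *.png file from a data folder. Minimal
# standalone version of that selection logic; the example folder path in the usage
# comment is an assumption.
import random
from pathlib import Path


def select_random_image(source: Path, extensions=("*.jpg", "*.png")) -> Path:
    candidates = []
    for extension in extensions:
        candidates += list(source.glob(extension))
    if not candidates:
        raise FileNotFoundError(f"No image found in {source}")
    return random.choice(candidates)


# usage (assumes such a folder with images exists):
# image_bytes = select_random_image(Path("data/fake_item")).read_bytes()
# --------------------------------------------------------------------------------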
Path(img_save_path).open("rb").read() def apply_settings(self, custom_settings: Dict): self.settings = custom_settings diff --git a/edge_orchestrator/edge_orchestrator/infrastructure/inventory/json_inventory.py b/edge_orchestrator/edge_orchestrator/infrastructure/inventory/json_inventory.py index f3b81266..1f1435e9 100644 --- a/edge_orchestrator/edge_orchestrator/infrastructure/inventory/json_inventory.py +++ b/edge_orchestrator/edge_orchestrator/infrastructure/inventory/json_inventory.py @@ -1,31 +1,30 @@ import json from pathlib import Path -from typing import List, Dict, Union +from typing import Dict, List, Union from edge_orchestrator.domain.ports.inventory import Inventory class JsonInventory(Inventory): - def __init__(self, inventory_path: Path): if not inventory_path.exists(): raise FileNotFoundError(f'No inventory file found at "{inventory_path}"') - with open(inventory_path, 'r') as inventory_file: + with open(inventory_path, "r") as inventory_file: content = json.load(inventory_file) - self.cameras = content['cameras'] - self.models = content['models'] - self.camera_rules = content['camera_rules'] - self.item_rules = content['item_rules'] + self.cameras = content["cameras"] + self.models = content["models"] + self.camera_rules = content["camera_rules"] + self.item_rules = content["item_rules"] def get_cameras(self) -> List[str]: - return self.inventory['cameras'] + return self.inventory["cameras"] def get_models(self) -> Dict[str, Dict[str, Union[str, int]]]: - return self.inventory['models'] + return self.inventory["models"] def get_camera_rules(self) -> List[str]: - return self.inventory['camera_rules'] + return self.inventory["camera_rules"] def get_item_rules(self) -> List[str]: - return self.inventory['item_rules'] + return self.inventory["item_rules"] diff --git a/edge_orchestrator/edge_orchestrator/infrastructure/metadata_storage/azure_container_metadata_storage.py b/edge_orchestrator/edge_orchestrator/infrastructure/metadata_storage/azure_container_metadata_storage.py index 4666155c..a755e947 100644 --- a/edge_orchestrator/edge_orchestrator/infrastructure/metadata_storage/azure_container_metadata_storage.py +++ b/edge_orchestrator/edge_orchestrator/infrastructure/metadata_storage/azure_container_metadata_storage.py @@ -12,24 +12,34 @@ class AzureContainerMetadataStorage(MetadataStorage): def __init__(self): - self.azure_container_name = os.getenv('AZURE_CONTAINER_NAME') - az_storage_connection_str = os.getenv('AZURE_STORAGE_CONNECTION_STRING') - self._blob_service_client = BlobServiceClient.from_connection_string(az_storage_connection_str) + self.azure_container_name = os.getenv("AZURE_CONTAINER_NAME") + az_storage_connection_str = os.getenv("AZURE_STORAGE_CONNECTION_STRING") + self._blob_service_client = BlobServiceClient.from_connection_string( + az_storage_connection_str + ) try: self._blob_service_client.create_container(self.azure_container_name) except ResourceExistsError: pass - self._container_client = self._blob_service_client.get_container_client(self.azure_container_name) - self._transport_params = {'client': self._blob_service_client} + self._container_client = self._blob_service_client.get_container_client( + self.azure_container_name + ) + self._transport_params = {"client": self._blob_service_client} def save_item_metadata(self, item: Item): - with open(f'azure://{self.azure_container_name}/{item.id}/metadata.json', - 'wb', transport_params=self._transport_params) as f: - f.write(json.dumps(item.get_metadata()).encode('utf-8')) + with open( + 
f"azure://{self.azure_container_name}/{item.id}/metadata.json", + "wb", + transport_params=self._transport_params, + ) as f: + f.write(json.dumps(item.get_metadata()).encode("utf-8")) def get_item_metadata(self, item_id: str) -> Dict: - with open(f'azure://{self.azure_container_name}/{item_id}/metadata.json', - 'rb', transport_params=self._transport_params) as f: + with open( + f"azure://{self.azure_container_name}/{item_id}/metadata.json", + "rb", + transport_params=self._transport_params, + ) as f: return json.loads(f.read()) def get_item_state(self, item_id: str) -> str: @@ -39,9 +49,12 @@ def get_item_state(self, item_id: str) -> str: def get_all_items_metadata(self) -> List[Dict]: metadata = [] for blob in self._container_client.list_blobs(): - if 'metadata.json' in blob['name']: - with open(f'azure://{self.azure_container_name}/{blob["name"]}', - 'rb', transport_params=self._transport_params) as f: + if "metadata.json" in blob["name"]: + with open( + f'azure://{self.azure_container_name}/{blob["name"]}', + "rb", + transport_params=self._transport_params, + ) as f: metadata.append(json.load(f)) metadata.append(json.load(f)) return metadata diff --git a/edge_orchestrator/edge_orchestrator/infrastructure/metadata_storage/filesystem_metadata_storage.py b/edge_orchestrator/edge_orchestrator/infrastructure/metadata_storage/filesystem_metadata_storage.py index 31fbccd4..f052fd5e 100644 --- a/edge_orchestrator/edge_orchestrator/infrastructure/metadata_storage/filesystem_metadata_storage.py +++ b/edge_orchestrator/edge_orchestrator/infrastructure/metadata_storage/filesystem_metadata_storage.py @@ -13,12 +13,12 @@ def __init__(self, src_directory_path: Path): def save_item_metadata(self, item: Item): (self.folder / item.id).mkdir(parents=True, exist_ok=True) filepath = _get_filepath(self.folder, item.id) - with filepath.open('w') as f: + with filepath.open("w") as f: json.dump(item.get_metadata(), f) def get_item_metadata(self, item_id: str) -> Dict: filepath = _get_filepath(self.folder, item_id) - with filepath.open('r') as f: + with filepath.open("r") as f: item_metadata = json.load(f) return item_metadata @@ -28,12 +28,12 @@ def get_item_state(self, item_id: str) -> str: def get_all_items_metadata(self) -> List[Dict]: metadata = [] - for metadata_path in self.folder.glob('**/metadata.json'): - with metadata_path.open('r') as f: + for metadata_path in self.folder.glob("**/metadata.json"): + with metadata_path.open("r") as f: metadata_item = json.load(f) metadata.append(metadata_item) return metadata -def _get_filepath(folder: Path, item_id: str, filename: str = 'metadata.json') -> Path: +def _get_filepath(folder: Path, item_id: str, filename: str = "metadata.json") -> Path: return folder / item_id / filename diff --git a/edge_orchestrator/edge_orchestrator/infrastructure/metadata_storage/gcp_metadata_storage.py b/edge_orchestrator/edge_orchestrator/infrastructure/metadata_storage/gcp_metadata_storage.py index 22e22373..aaa80559 100644 --- a/edge_orchestrator/edge_orchestrator/infrastructure/metadata_storage/gcp_metadata_storage.py +++ b/edge_orchestrator/edge_orchestrator/infrastructure/metadata_storage/gcp_metadata_storage.py @@ -1,6 +1,7 @@ -import os import json +import os from typing import Dict, List + from google.cloud import storage from edge_orchestrator.domain.models.item import Item @@ -10,14 +11,12 @@ class GCPMetadataStorage(MetadataStorage): def __init__(self): self.storage_client = storage.Client() - self.prefix = os.environ.get('EDGE_NAME', '') - self.bucket = 
self.storage_client.get_bucket(os.getenv('GCP_BUCKET_NAME')) + self.prefix = os.environ.get("EDGE_NAME", "") + self.bucket = self.storage_client.get_bucket(os.getenv("GCP_BUCKET_NAME")) def save_item_metadata(self, item: Item): item_metadata = json.dumps(item.get_metadata()) - blob = self.bucket.blob( - os.path.join(self.prefix, item.id, "metadata.json") - ) + blob = self.bucket.blob(os.path.join(self.prefix, item.id, "metadata.json")) blob.upload_from_string(item_metadata, content_type="application/json") def get_item_metadata(self, item_id: str) -> Dict: diff --git a/edge_orchestrator/edge_orchestrator/infrastructure/metadata_storage/mongodb_metadata_storage.py b/edge_orchestrator/edge_orchestrator/infrastructure/metadata_storage/mongodb_metadata_storage.py index 165f7f9e..28e27a21 100644 --- a/edge_orchestrator/edge_orchestrator/infrastructure/metadata_storage/mongodb_metadata_storage.py +++ b/edge_orchestrator/edge_orchestrator/infrastructure/metadata_storage/mongodb_metadata_storage.py @@ -1,5 +1,7 @@ -from typing import List, Dict +from typing import Dict, List + import pymongo + from edge_orchestrator.domain.models.item import Item from edge_orchestrator.domain.ports.metadata_storage import MetadataStorage @@ -7,24 +9,26 @@ class MongoDbMetadataStorage(MetadataStorage): def __init__(self, mongodb_uri: str): self.client = pymongo.MongoClient(mongodb_uri) - self.db = self.client['orchestratorDB'] - self.items_metadata = self.db['items'] + self.db = self.client["orchestratorDB"] + self.items_metadata = self.db["items"] def save_item_metadata(self, item: Item): - self.items_metadata.update_one({'_id': item.id}, {'$set': item.get_metadata(False)}, upsert=True) + self.items_metadata.update_one( + {"_id": item.id}, {"$set": item.get_metadata(False)}, upsert=True + ) def get_item_metadata(self, item_id: str) -> Dict: - mongo_output = self.items_metadata.find_one({'_id': item_id}) - mongo_output['id'] = mongo_output.pop('_id') + mongo_output = self.items_metadata.find_one({"_id": item_id}) + mongo_output["id"] = mongo_output.pop("_id") return mongo_output def get_item_state(self, item_id: str) -> str: - item = self.items_metadata.find_one({'_id': item_id}) + item = self.items_metadata.find_one({"_id": item_id}) return item["state"] def get_all_items_metadata(self) -> List[Dict]: items = [] for item in self.items_metadata.find(): - item['id'] = item.pop('_id') + item["id"] = item.pop("_id") items.append(item) return items diff --git a/edge_orchestrator/edge_orchestrator/infrastructure/model_forward/fake_model_forward.py b/edge_orchestrator/edge_orchestrator/infrastructure/model_forward/fake_model_forward.py index a04c42c2..33600d6c 100644 --- a/edge_orchestrator/edge_orchestrator/infrastructure/model_forward/fake_model_forward.py +++ b/edge_orchestrator/edge_orchestrator/infrastructure/model_forward/fake_model_forward.py @@ -1,48 +1,50 @@ import random from typing import Dict + import numpy as np from edge_orchestrator.domain.models.model_infos import ModelInfos, ModelTypes -from edge_orchestrator.domain.ports.model_forward import ModelForward, Labels +from edge_orchestrator.domain.ports.model_forward import Labels, ModelForward class FakeModelForward(ModelForward): - async def perform_inference(self, model: ModelInfos, binary_data: bytes, binary_name: str) -> Dict: + async def perform_inference( + self, model: ModelInfos, binary_data: bytes, binary_name: str + ) -> Dict: inference_output = {} if model.category == ModelTypes.CLASSIFICATION.value: inference_output = { - binary_name: - { - 
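# --- Illustrative sketch (not part of the patch) -------------------------------
# MongoDbMetadataStorage above upserts one document per item, using the item id as
# MongoDB's _id and mapping it back to "id" when reading. Sketch of that pattern;
# it assumes a MongoDB instance is reachable at the given URI, and the metadata
# content is a placeholder.
import pymongo

client = pymongo.MongoClient("mongodb://localhost:27017/", serverSelectionTimeoutMS=2000)
items = client["orchestratorDB"]["items"]

metadata = {"state": "Done", "decision": "OK"}
items.update_one({"_id": "item_1"}, {"$set": metadata}, upsert=True)

stored = items.find_one({"_id": "item_1"})
stored["id"] = stored.pop("_id")
print(stored)
# --------------------------------------------------------------------------------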
'label': random.choice([Labels.OK.value, Labels.KO.value]), - 'probability': np.random.uniform(0, 1) - } + binary_name: { + "label": random.choice([Labels.OK.value, Labels.KO.value]), + "probability": np.random.uniform(0, 1), + } } elif model.category == ModelTypes.OBJECT_DETECTION.value: inference_output = { - f'{binary_name}_object_1': { - 'location': [4, 112, 244, 156], - 'objectness': np.random.uniform(0, 1), + f"{binary_name}_object_1": { + "location": [4, 112, 244, 156], + "objectness": np.random.uniform(0, 1), + }, + f"{binary_name}_object_2": { + "location": [4, 112, 244, 156], + "objectness": np.random.uniform(0, 1), }, - f'{binary_name}_object_2': { - 'location': [4, 112, 244, 156], - 'objectness': np.random.uniform(0, 1) - } } elif model.category == ModelTypes.OBJECT_DETECTION_WITH_CLASSIFICATION.value: inference_output = { - f'{binary_name}_object_1': { - 'location': [4, 112, 244, 156], - 'objectness': np.random.uniform(0, 1), - 'label': random.choice([Labels.OK.value, Labels.KO.value]), - 'probability': np.random.uniform(0, 1) + f"{binary_name}_object_1": { + "location": [4, 112, 244, 156], + "objectness": np.random.uniform(0, 1), + "label": random.choice([Labels.OK.value, Labels.KO.value]), + "probability": np.random.uniform(0, 1), + }, + f"{binary_name}_object_2": { + "location": [4, 112, 244, 156], + "objectness": np.random.uniform(0, 1), + "label": random.choice([Labels.OK.value, Labels.KO.value]), + "probability": np.random.uniform(0, 1), }, - f'{binary_name}_object_2': { - 'location': [4, 112, 244, 156], - 'objectness': np.random.uniform(0, 1), - 'label': random.choice([Labels.OK.value, Labels.KO.value]), - 'probability': np.random.uniform(0, 1) - } } return inference_output diff --git a/edge_orchestrator/edge_orchestrator/infrastructure/model_forward/tf_serving_classification_wrapper.py b/edge_orchestrator/edge_orchestrator/infrastructure/model_forward/tf_serving_classification_wrapper.py index ddf5d0fc..09d6b191 100644 --- a/edge_orchestrator/edge_orchestrator/infrastructure/model_forward/tf_serving_classification_wrapper.py +++ b/edge_orchestrator/edge_orchestrator/infrastructure/model_forward/tf_serving_classification_wrapper.py @@ -10,44 +10,54 @@ class TFServingClassificationWrapper(ModelForward): - def __init__(self, base_url): self.base_url = base_url self.class_names = None - async def perform_inference(self, model: ModelInfos, binary_data: bytes, binary_name: str) -> dict: + async def perform_inference( + self, model: ModelInfos, binary_data: bytes, binary_name: str + ) -> dict: self.class_names = model.class_names processed_img = self.perform_pre_processing(model, binary_data) - payload = {'inputs': processed_img.tolist()} - model_url = f'{self.base_url}/v1/models/{model.name}/versions/{model.version}:predict' - logger.info(f'Getting prediction using: {model_url}') + payload = {"inputs": processed_img.tolist()} + model_url = ( + f"{self.base_url}/v1/models/{model.name}/versions/{model.version}:predict" + ) + logger.info(f"Getting prediction using: {model_url}") try: async with aiohttp.ClientSession() as session: async with session.post(model_url, json=payload) as response: json_data = await response.json() - logger.info('json DONE') - inference_output = self.perform_post_processing(model, json_data['outputs'], binary_name) + logger.info("json DONE") + inference_output = self.perform_post_processing( + model, json_data["outputs"], binary_name + ) return inference_output except Exception as e: logger.exception(e) - inference_output = 'NO_DECISION' + 
inference_output = "NO_DECISION" return inference_output def perform_pre_processing(self, model: ModelInfos, binary: bytes): - data = np.ndarray(shape=(1, model.image_resolution[0], model.image_resolution[1], 3), dtype=np.float32) + data = np.ndarray( + shape=(1, model.image_resolution[0], model.image_resolution[1], 3), + dtype=np.float32, + ) img = Image.open(io.BytesIO(binary)) - img = ImageOps.fit(img, (model.image_resolution[0], model.image_resolution[1]), Image.ANTIALIAS) + img = ImageOps.fit( + img, (model.image_resolution[0], model.image_resolution[1]), Image.ANTIALIAS + ) img_array = np.asarray(img) normalized_image_array = (img_array.astype(np.float32) / 127.0) - 1 data[0] = normalized_image_array return data - def perform_post_processing(self, model: ModelInfos, json_outputs: list, binary_name: str) -> dict: - + def perform_post_processing( + self, model: ModelInfos, json_outputs: list, binary_name: str + ) -> dict: return { - binary_name: - { - 'label': model.class_names[np.argmax(json_outputs[0])], - 'probability': float(np.max(json_outputs[0])) - } + binary_name: { + "label": model.class_names[np.argmax(json_outputs[0])], + "probability": float(np.max(json_outputs[0])), + } } diff --git a/edge_orchestrator/edge_orchestrator/infrastructure/model_forward/tf_serving_detection_and_classification_wrapper.py b/edge_orchestrator/edge_orchestrator/infrastructure/model_forward/tf_serving_detection_and_classification_wrapper.py index c136ac24..474c3c67 100644 --- a/edge_orchestrator/edge_orchestrator/infrastructure/model_forward/tf_serving_detection_and_classification_wrapper.py +++ b/edge_orchestrator/edge_orchestrator/infrastructure/model_forward/tf_serving_detection_and_classification_wrapper.py @@ -12,28 +12,31 @@ class TFServingDetectionClassificationWrapper(ModelForward): - def __init__(self, base_url, class_names_path: Path, image_shape=None): self.base_url = base_url self.image_shape = image_shape self.class_names_path = class_names_path - async def perform_inference(self, model: ModelInfos, binary_data: bytes, binary_name: str) -> Dict[str, Dict]: + async def perform_inference( + self, model: ModelInfos, binary_data: bytes, binary_name: str + ) -> Dict[str, Dict]: processed_img = self.perform_pre_processing(binary_data) - logger.debug(f'Processed image size: {processed_img.shape}') - payload = {'inputs': processed_img.tolist()} - model_url = f'{self.base_url}/v1/models/{model.name}/versions/{model.version}:predict' + logger.debug(f"Processed image size: {processed_img.shape}") + payload = {"inputs": processed_img.tolist()} + model_url = ( + f"{self.base_url}/v1/models/{model.name}/versions/{model.version}:predict" + ) try: async with aiohttp.ClientSession() as session: async with session.post(model_url, json=payload) as response: json_data = await response.json() - logger.info('json DONE') - inference_output = self.perform_post_processing(model, json_data['outputs']) + logger.info("json DONE") + inference_output = self.perform_post_processing(model, json_data["outputs"]) return inference_output except Exception as e: logger.exception(e) - inference_output = 'NO_DECISION' + inference_output = "NO_DECISION" return inference_output def perform_pre_processing(self, binary: bytes): @@ -49,32 +52,38 @@ def perform_post_processing(self, model: ModelInfos, json_outputs: dict) -> dict boxes_coordinates, objectness_scores, detection_classes = ( json_outputs[model.boxes_coordinates][0], json_outputs[model.objectness_scores][0], - json_outputs[model.detection_classes][0]) + 
json_outputs[model.detection_classes][0], + ) try: class_names = [c.strip() for c in open(self.class_names_path).readlines()] except Exception as e: logger.exception(e) - logger.info('cannot open class names files at location {}'.format(self.class_names_path)) + logger.info( + "cannot open class names files at location {}".format( + self.class_names_path + ) + ) for box_index, box_coordinates_in_current_image in enumerate(boxes_coordinates): # crop_image expects the box coordinates to be (xmin, ymin, xmax, ymax) # Mobilenet returns the coordinates as (ymin, xmin, ymax, xmax) # Hence, the switch here - box_coordinates_in_current_image = [int(box_coordinates_in_current_image[1] * self.image_shape[1]), - int(box_coordinates_in_current_image[0] * self.image_shape[0]), - int(box_coordinates_in_current_image[3] * self.image_shape[1]), - int(box_coordinates_in_current_image[2] * self.image_shape[0]) - ] + box_coordinates_in_current_image = [ + int(box_coordinates_in_current_image[1] * self.image_shape[1]), + int(box_coordinates_in_current_image[0] * self.image_shape[0]), + int(box_coordinates_in_current_image[3] * self.image_shape[1]), + int(box_coordinates_in_current_image[2] * self.image_shape[0]), + ] box_objectness_score_in_current_image = objectness_scores[box_index] boxes_detected_in_current_image_labels = detection_classes[box_index] if box_objectness_score_in_current_image >= model.objectness_threshold: - inference_output[f'object_{box_index + 1}'] = { - 'location': box_coordinates_in_current_image, - 'score': box_objectness_score_in_current_image, - 'label': class_names[int(boxes_detected_in_current_image_labels)] + inference_output[f"object_{box_index + 1}"] = { + "location": box_coordinates_in_current_image, + "score": box_objectness_score_in_current_image, + "label": class_names[int(boxes_detected_in_current_image_labels)], } return inference_output diff --git a/edge_orchestrator/edge_orchestrator/infrastructure/model_forward/tf_serving_detection_wrapper.py b/edge_orchestrator/edge_orchestrator/infrastructure/model_forward/tf_serving_detection_wrapper.py index 61ea10f0..5b6d0b26 100644 --- a/edge_orchestrator/edge_orchestrator/infrastructure/model_forward/tf_serving_detection_wrapper.py +++ b/edge_orchestrator/edge_orchestrator/infrastructure/model_forward/tf_serving_detection_wrapper.py @@ -12,35 +12,40 @@ class TFServingDetectionWrapper(ModelForward): - def __init__(self, base_url, class_names_path: Path, image_shape=None): self.base_url = base_url self.class_names_path = class_names_path self.image_shape = image_shape - async def perform_inference(self, model: ModelInfos, binary_data: bytes, binary_name: str) -> Dict[str, Dict]: + async def perform_inference( + self, model: ModelInfos, binary_data: bytes, binary_name: str + ) -> Dict[str, Dict]: processed_img = self.perform_pre_processing(model, binary_data) - logger.debug(f'Processed image size: {processed_img.shape}') - payload = {'inputs': processed_img.tolist()} - model_url = f'{self.base_url}/v1/models/{model.name}/versions/{model.version}:predict' - logger.info(f'Get prediction at {model_url}') + logger.debug(f"Processed image size: {processed_img.shape}") + payload = {"inputs": processed_img.tolist()} + model_url = ( + f"{self.base_url}/v1/models/{model.name}/versions/{model.version}:predict" + ) + logger.info(f"Get prediction at {model_url}") try: async with aiohttp.ClientSession() as session: async with session.post(model_url, json=payload) as response: json_data = await response.json() - logger.debug(f'response received 
{json_data}') - inference_output = self.perform_post_processing(model, json_data['outputs']) + logger.debug(f"response received {json_data}") + inference_output = self.perform_post_processing(model, json_data["outputs"]) return inference_output except Exception as e: logger.exception(e) - inference_output = 'NO_DECISION' + inference_output = "NO_DECISION" return inference_output def perform_pre_processing(self, model: ModelInfos, binary: bytes): img = Image.open(io.BytesIO(binary)) img_array = np.asarray(img) self.image_shape = img_array.shape[:2] - resized_image = img.resize((model.image_resolution[0], model.image_resolution[1]), Image.ANTIALIAS) + resized_image = img.resize( + (model.image_resolution[0], model.image_resolution[1]), Image.ANTIALIAS + ) img = np.expand_dims(resized_image, axis=0).astype(np.uint8) return img @@ -52,18 +57,26 @@ def perform_post_processing(self, model: ModelInfos, json_outputs: dict) -> dict boxes_coordinates, objectness_scores, detection_classes = ( json_outputs[model.boxes_coordinates][0], json_outputs[model.objectness_scores][0], - json_outputs[model.detection_classes][0] + json_outputs[model.detection_classes][0], ) try: class_names = [c.strip() for c in open(self.class_names_path).readlines()] except Exception as e: logger.exception(e) - logger.info('cannot open class names files at location {}'.format(self.class_names_path)) + logger.info( + "cannot open class names files at location {}".format( + self.class_names_path + ) + ) for class_to_detect in model.class_to_detect: - class_to_detect_position = np.where(np.array(class_names) == class_to_detect) - detection_class_positions = np.where(np.array(detection_classes) == float(class_to_detect_position[0] + 1)) + class_to_detect_position = np.where( + np.array(class_names) == class_to_detect + ) + detection_class_positions = np.where( + np.array(detection_classes) == float(class_to_detect_position[0] + 1) + ) for box_index in detection_class_positions[0]: box_coordinates_in_current_image = boxes_coordinates[box_index] @@ -80,12 +93,14 @@ def perform_post_processing(self, model: ModelInfos, json_outputs: dict) -> dict box_coordinates_in_current_image = [x_min, y_min, x_max, y_max] box_objectness_score_in_current_image = objectness_scores[box_index] - logger.debug(f"box_coordinates_in_current_image: {box_coordinates_in_current_image}") + logger.debug( + f"box_coordinates_in_current_image: {box_coordinates_in_current_image}" + ) if box_objectness_score_in_current_image >= model.objectness_threshold: - inference_output[f'object_{box_index + 1}'] = { - 'label': class_to_detect, - 'location': box_coordinates_in_current_image, - 'score': box_objectness_score_in_current_image + inference_output[f"object_{box_index + 1}"] = { + "label": class_to_detect, + "location": box_coordinates_in_current_image, + "score": box_objectness_score_in_current_image, } return inference_output diff --git a/edge_orchestrator/edge_orchestrator/infrastructure/model_forward/tf_serving_wrapper.py b/edge_orchestrator/edge_orchestrator/infrastructure/model_forward/tf_serving_wrapper.py index 7f776635..962cbdc6 100644 --- a/edge_orchestrator/edge_orchestrator/infrastructure/model_forward/tf_serving_wrapper.py +++ b/edge_orchestrator/edge_orchestrator/infrastructure/model_forward/tf_serving_wrapper.py @@ -4,12 +4,12 @@ from edge_orchestrator.infrastructure.model_forward.tf_serving_classification_wrapper import ( TFServingClassificationWrapper, ) +from 
edge_orchestrator.infrastructure.model_forward.tf_serving_detection_and_classification_wrapper import ( + TFServingDetectionClassificationWrapper, +) from edge_orchestrator.infrastructure.model_forward.tf_serving_detection_wrapper import ( TFServingDetectionWrapper, ) -from edge_orchestrator.infrastructure.model_forward.tf_serving_detection_and_classification_wrapper import ( - TFServingDetectionClassificationWrapper -) class TFServingWrapper(ModelForward): @@ -18,19 +18,21 @@ def __init__(self, serving_model_url, inventory, station_config): self.inventory = inventory self.station_config = station_config - async def perform_inference(self, - model: ModelInfos, - binary_data: bytes, - binary_name: str) -> dict: + async def perform_inference( + self, model: ModelInfos, binary_data: bytes, binary_name: str + ) -> dict: if model.category == ModelTypes.CLASSIFICATION.value: - return await TFServingClassificationWrapper(self.serving_model_url) \ - .perform_inference(model, binary_data, binary_name) + return await TFServingClassificationWrapper( + self.serving_model_url + ).perform_inference(model, binary_data, binary_name) elif model.category == ModelTypes.OBJECT_DETECTION.value: - return await TFServingDetectionWrapper(self.serving_model_url, model.class_names_path) \ - .perform_inference(model, binary_data, binary_name) + return await TFServingDetectionWrapper( + self.serving_model_url, model.class_names_path + ).perform_inference(model, binary_data, binary_name) elif model.category == ModelTypes.OBJECT_DETECTION_WITH_CLASSIFICATION.value: - return await TFServingDetectionClassificationWrapper(self.serving_model_url, model.class_names_path)\ - .perform_inference(model, binary_data, binary_name) + return await TFServingDetectionClassificationWrapper( + self.serving_model_url, model.class_names_path + ).perform_inference(model, binary_data, binary_name) else: logger.error( f"Enter a valid model category, model category entered and invalid : {model.category}" diff --git a/edge_orchestrator/edge_orchestrator/infrastructure/station_config/json_station_config.py b/edge_orchestrator/edge_orchestrator/infrastructure/station_config/json_station_config.py index 6de0fe1f..035dae50 100644 --- a/edge_orchestrator/edge_orchestrator/infrastructure/station_config/json_station_config.py +++ b/edge_orchestrator/edge_orchestrator/infrastructure/station_config/json_station_config.py @@ -1,7 +1,7 @@ -import os import json +import os from pathlib import Path -from typing import List, Dict, Union, Type +from typing import Dict, List, Type, Union from edge_orchestrator import logger from edge_orchestrator.domain.models.camera import Camera @@ -9,32 +9,37 @@ from edge_orchestrator.domain.ports.inventory import Inventory from edge_orchestrator.domain.ports.station_config import StationConfig from edge_orchestrator.infrastructure.camera.fake_camera import FakeCamera -from edge_orchestrator.infrastructure.camera.raspberry_pi_camera import RaspberryPiCamera +from edge_orchestrator.infrastructure.camera.raspberry_pi_camera import ( + RaspberryPiCamera, +) from edge_orchestrator.infrastructure.camera.usb_camera import UsbCamera class JsonStationConfig(StationConfig): - - def __init__(self, station_configs_folder: Path, inventory: Inventory, data_folder: Path): + def __init__( + self, station_configs_folder: Path, inventory: Inventory, data_folder: Path + ): self.inventory = inventory self.data_folder = data_folder if not station_configs_folder.exists(): - raise FileNotFoundError(f'No station config folder found at 
"{station_configs_folder}"') + raise FileNotFoundError( + f'No station config folder found at "{station_configs_folder}"' + ) self.station_configs_folder = station_configs_folder self.all_configs = {} self.load() self.active_config = None - config_name = os.environ.get('ACTIVE_CONFIG_NAME', None) + config_name = os.environ.get("ACTIVE_CONFIG_NAME", None) if config_name is not None: self.set_station_config(config_name) def load(self): self.all_configs = {} for config in self.station_configs_folder.glob("*.json"): - with open(config, 'r') as station_config_file: + with open(config, "r") as station_config_file: content = json.load(station_config_file) self.all_configs[config.with_suffix("").name] = content self._check_station_config_based_on_inventory(content) @@ -45,59 +50,66 @@ def set_station_config(self, config_name: str): self.active_config = self.all_configs[self.active_config_name] logger.info(f"Activated the configuration {self.active_config_name}") except KeyError: - raise KeyError(f'{config_name} is unknown. Valid configs are {list(self.all_configs.keys())}') + raise KeyError( + f"{config_name} is unknown. Valid configs are {list(self.all_configs.keys())}" + ) def get_model_pipeline_for_camera(self, camera_id: str) -> List[ModelInfos]: model_pipeline = [] - model_pipeline_config = self.active_config['cameras'].get(camera_id)['models_graph'] + model_pipeline_config = self.active_config["cameras"].get(camera_id)[ + "models_graph" + ] if model_pipeline_config: for model_id, model in model_pipeline_config.items(): model_infos = ModelInfos.from_model_graph_node( - camera_id, model_id, model, self.inventory, self.data_folder) + camera_id, model_id, model, self.inventory, self.data_folder + ) model_pipeline.append(model_infos) else: logger.info(f'No models found for camera "{camera_id}"') return model_pipeline def get_cameras(self) -> List[str]: - return list(self.active_config['cameras'].keys()) + return list(self.active_config["cameras"].keys()) def get_camera_type(self, camera_id: str) -> Type[Camera]: - camera_config = self.active_config['cameras'].get(camera_id) - if camera_config['type'] == 'fake': + camera_config = self.active_config["cameras"].get(camera_id) + if camera_config["type"] == "fake": return FakeCamera - elif camera_config['type'] == 'pi_camera': + elif camera_config["type"] == "pi_camera": return RaspberryPiCamera - elif camera_config['type'] == 'usb_camera': + elif camera_config["type"] == "usb_camera": return UsbCamera else: raise ValueError(f"Camera type ({camera_config['type']}) is not supported.") def get_camera_settings(self, camera_id: str) -> Dict[str, Union[str, int]]: camera_settings = {} - camera_config = self.active_config['cameras'].get(camera_id) + camera_config = self.active_config["cameras"].get(camera_id) if camera_config: - camera_settings['brightness'] = camera_config.get('brightness') - camera_settings['exposition'] = camera_config.get('exposition') - camera_settings['position'] = camera_config.get('position') - camera_settings['source'] = camera_config.get('source') + camera_settings["brightness"] = camera_config.get("brightness") + camera_settings["exposition"] = camera_config.get("exposition") + camera_settings["position"] = camera_config.get("position") + camera_settings["source"] = camera_config.get("source") return camera_settings def _check_station_config_based_on_inventory(self, content): self._check_business_rule(content, "station") - for camera_id, camera_conf in content['cameras'].items(): + for camera_id, camera_conf in 
content["cameras"].items(): camera_type = camera_conf["type"] if camera_type not in self.inventory.cameras: - raise ValueError(f'Camera type {camera_type} is not supported.') + raise ValueError(f"Camera type {camera_type} is not supported.") self._check_business_rule(camera_conf, "camera") for model_id, model_conf in camera_conf["models_graph"].items(): model = model_conf["metadata"] if model not in self.inventory.models: - raise ValueError(f'Model type {model} is not supported.') + raise ValueError(f"Model type {model} is not supported.") self._check_business_rule(model_conf, "model") def _check_business_rule(self, conf: Dict, conf_level: str): if "business_rule" in conf: business_rule = conf["business_rule"] if business_rule not in self.inventory.business_rules: - raise ValueError(f'{conf_level.capitalize()} business rule ({business_rule}) is not supported.') + raise ValueError( + f"{conf_level.capitalize()} business rule ({business_rule}) is not supported." + ) diff --git a/edge_orchestrator/edge_orchestrator/infrastructure/telemetry_sink/azure_iot_hub_telemetry_sink.py b/edge_orchestrator/edge_orchestrator/infrastructure/telemetry_sink/azure_iot_hub_telemetry_sink.py index 9b0c2e86..121057f7 100644 --- a/edge_orchestrator/edge_orchestrator/infrastructure/telemetry_sink/azure_iot_hub_telemetry_sink.py +++ b/edge_orchestrator/edge_orchestrator/infrastructure/telemetry_sink/azure_iot_hub_telemetry_sink.py @@ -1,13 +1,13 @@ import json from typing import Dict -from edge_orchestrator.domain.ports.telemetry_sink import TelemetrySink -from azure.iot.device.aio import IoTHubModuleClient from azure.iot.device import Message +from azure.iot.device.aio import IoTHubModuleClient +from edge_orchestrator.domain.ports.telemetry_sink import TelemetrySink -class AzureIotHubTelemetrySink(TelemetrySink): +class AzureIotHubTelemetrySink(TelemetrySink): def __init__(self): self.client = IoTHubModuleClient.create_from_edge_environment() diff --git a/edge_orchestrator/edge_orchestrator/infrastructure/telemetry_sink/fake_telemetry_sink.py b/edge_orchestrator/edge_orchestrator/infrastructure/telemetry_sink/fake_telemetry_sink.py index 73140783..71e4d68f 100644 --- a/edge_orchestrator/edge_orchestrator/infrastructure/telemetry_sink/fake_telemetry_sink.py +++ b/edge_orchestrator/edge_orchestrator/infrastructure/telemetry_sink/fake_telemetry_sink.py @@ -2,6 +2,5 @@ class FakeTelemetrySink(TelemetrySink): - async def send(self, message: str): pass diff --git a/edge_orchestrator/edge_orchestrator/infrastructure/telemetry_sink/postgresql_telemetry_sink.py b/edge_orchestrator/edge_orchestrator/infrastructure/telemetry_sink/postgresql_telemetry_sink.py index 4e45c038..b61d0924 100644 --- a/edge_orchestrator/edge_orchestrator/infrastructure/telemetry_sink/postgresql_telemetry_sink.py +++ b/edge_orchestrator/edge_orchestrator/infrastructure/telemetry_sink/postgresql_telemetry_sink.py @@ -1,24 +1,23 @@ +import time from datetime import datetime from random import randrange +from typing import Dict from urllib.parse import urlparse from uuid import uuid4 import psycopg2 -import time -from typing import Dict from edge_orchestrator import logger from edge_orchestrator.domain.ports.telemetry_sink import TelemetrySink class PostgresTelemetrySink(TelemetrySink): - def __init__(self, connection_url: str, timeout: int = 30, interval: int = 2): self.connection_url = connection_url self._connection = None self._timeout = timeout self._interval = interval - self._device_id = f'device_{randrange(42)}' + self._device_id = 
f"device_{randrange(42)}" @property def connection(self): @@ -26,41 +25,65 @@ def connection(self): return self._connection result = urlparse(self.connection_url) - username, password, hostname, port = result.username, result.password, result.hostname, result.port + username, password, hostname, port = ( + result.username, + result.password, + result.hostname, + result.port, + ) database = result.path[1:] nb_retry = self._timeout // self._interval for i in range(nb_retry): try: - self._connection = psycopg2.connect(dbname=database, user=username, password=password, - host=hostname, port=port) - logger.debug(f'Telemetry Postgres DB took ‘{i * self._interval}‘sec to start and be migrated') + self._connection = psycopg2.connect( + dbname=database, + user=username, + password=password, + host=hostname, + port=port, + ) + logger.debug( + f"Telemetry Postgres DB took ‘{i * self._interval}‘sec to start and be migrated" + ) return self._connection except psycopg2.OperationalError: time.sleep(self._interval) else: - raise TimeoutError(f'Unable to connect to Telemetry Postgres DB using {self.connection_url} after {self._timeout:.0f} seconds') # noqa + raise TimeoutError( + f"Unable to connect to Telemetry Postgres DB using {self.connection_url} after " + f"{self._timeout:.0f} seconds" + ) async def send(self, message: Dict): try: _id = uuid4().__str__() device_id = self._device_id - decision = message['decision'] + decision = message["decision"] timestamp = datetime.now() - item_id = message['item_id'] - config = message['config'] + item_id = message["item_id"] + config = message["config"] self._insert_message(_id, device_id, decision, timestamp, item_id, config) except psycopg2.DatabaseError as e: - logger.error(f'Message was not correctly inserted into telemetry table : {e}') + logger.error( + f"Message was not correctly inserted into telemetry table : {e}" + ) - def _insert_message(self, _id: str, device_id: str, decision: str, timestamp: datetime, item_id: str, - config: str): + def _insert_message( + self, + _id: str, + device_id: str, + decision: str, + timestamp: datetime, + item_id: str, + config: str, + ): with self.connection.cursor() as curs: curs.execute( - 'INSERT INTO iothub.telemetry ' - '(id, device_id, business_decision, timestamp, item_id, config) VALUES (%s, %s, %s, %s, %s, %s)', - (_id, device_id, decision, timestamp, item_id, config) + "INSERT INTO iothub.telemetry " + "(id, device_id, business_decision, timestamp, item_id, config) VALUES (%s, %s, %s, %s, %s, %s)", + (_id, device_id, decision, timestamp, item_id, config), ) self.connection.commit() - logger.warning(f'Telemetry message for item ‘{item_id}‘ stored with id ‘{_id}‘') + logger.warning(f"Telemetry message for item ‘{item_id}‘ stored with id ‘{_id}‘") diff --git a/edge_orchestrator/setup.cfg b/edge_orchestrator/setup.cfg index ab613840..db47cce0 100644 --- a/edge_orchestrator/setup.cfg +++ b/edge_orchestrator/setup.cfg @@ -13,8 +13,9 @@ exclude = .venv max-line-length = 120 inline-quotes = single -multiline-quotes = ''' +multiline-quotes = """ avoid-escape = True +extend-ignore = E203, W503 [tool:pytest] testpaths = tests/unit_tests diff --git a/edge_orchestrator/setup.py b/edge_orchestrator/setup.py index 30042dca..d97b7a85 100644 --- a/edge_orchestrator/setup.py +++ b/edge_orchestrator/setup.py @@ -1,60 +1,61 @@ from setuptools import setup, find_packages -long_description = "The edge_orchestrator orchestrates the following steps as soon as it is triggered: " \ - "image capture, image backup, metadata backup, model 
inference on images and saving results." +long_description = ( + "The edge_orchestrator orchestrates the following steps as soon as it is triggered: " + "image capture, image backup, metadata backup, model inference on images and saving results." +) setup( - name='edge_orchestrator', - version='0.2.0', - author='ROLO, BAPO, KSA, YDR', - author_email='rolo@octo.com', - description='vio-edge: orchestrator module', + name="edge_orchestrator", + version="0.2.0", + author="ROLO, BAPO, KSA, YDR", + author_email="rolo@octo.com", + description="vio-edge: orchestrator module", long_description=long_description, - long_description_content_type='text/markdown', - url='https://github.com/octo-technology/VIO', + long_description_content_type="text/markdown", + url="https://github.com/octo-technology/VIO", classifiers=[ - 'Programming Language :: Python :: 3', - 'License :: OSI Approved :: Apache2.0', - 'Operating System :: OS Independent', + "Programming Language :: Python :: 3", + "License :: OSI Approved :: Apache2.0", + "Operating System :: OS Independent", ], - packages=find_packages(exclude=['tests*', 'data']), - package_data={'edge_orchestrator': ['logger.cfg']}, + packages=find_packages(exclude=["tests*", "data"]), + package_data={"edge_orchestrator": ["logger.cfg"]}, # data_files=[('config', ['config/inventory.json'])], install_requires=[ - 'aiohttp==3.8.3', - 'azure-iot-device==2.12.0', - 'fastapi==0.80.0', - 'numpy==1.24.1', - 'Pillow==9.3.0', - 'psycopg2-binary==2.9.5', - 'pymongo==4.3.3', - 'uvicorn==0.20.0', - 'smart_open[azure]==6.3.0', - 'google-cloud-storage==2.2.1', - 'python-multipart==0.0.5' + "aiohttp==3.8.3", + "azure-iot-device==2.12.0", + "fastapi==0.80.0", + "numpy==1.24.1", + "Pillow==9.3.0", + "psycopg2-binary==2.9.5", + "pymongo==4.3.3", + "uvicorn==0.20.0", + "smart_open[azure]==6.3.0", + "google-cloud-storage==2.2.1", + "python-multipart==0.0.5", ], extras_require={ - 'dev': [ - 'alembic==1.9.2', - 'autopep8==2.0.1', - 'behave==1.2.6', - 'flake8==6.0.0', - 'freezegun==1.2.2', - 'pytest==7.2.1', - 'pytest-asyncio==0.20.3', - 'pytest-cov==4.0.0', - 'python-dotenv==0.21.1', - 'requests[security]==2.28.2', - 'testcontainers==3.7.1' + "dev": [ + "alembic==1.9.2", + "autopep8==2.0.1", + "behave==1.2.6", + "black==23.3.0", + "flake8==6.0.0", + "freezegun==1.2.2", + "pytest==7.2.1", + "pytest-asyncio==0.20.3", + "pytest-cov==4.0.0", + "python-dotenv==0.21.1", + "requests[security]==2.28.2", + "testcontainers==3.7.1", ], - 'raspberry': [ - 'picamera==1.13' - ] + "raspberry": ["picamera==1.13"], }, - python_requires='>=3.8.5', + python_requires=">=3.8.5", entry_points={ - 'console_scripts': [ - 'edge_orchestrator = edge_orchestrator.__main__:main', + "console_scripts": [ + "edge_orchestrator = edge_orchestrator.__main__:main", ], }, ) diff --git a/edge_orchestrator/tests/conftest.py b/edge_orchestrator/tests/conftest.py index ef0f3101..85598cbf 100644 --- a/edge_orchestrator/tests/conftest.py +++ b/edge_orchestrator/tests/conftest.py @@ -1,22 +1,31 @@ import os from pathlib import Path -os.environ['API_CONFIG'] = 'test' -TEST_DATA_FOLDER_PATH = Path(__file__).parent / 'data' -TEST_CONFIG_FOLDER_PATH = Path(__file__).parent / 'config' -TEST_STATION_CONFIGS_FOLDER_PATH = TEST_CONFIG_FOLDER_PATH / 'station_configs' -TEST_STATION_CONFIG_PATH = TEST_STATION_CONFIGS_FOLDER_PATH / 'station_config_TEST.json' -TEST_STATION_CONFIG_2_PATH = TEST_STATION_CONFIGS_FOLDER_PATH / 'station_config_TEST2.json' -TEST_INVENTORY_PATH = TEST_CONFIG_FOLDER_PATH / 'inventory_TEST.json' 
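
For readers skimming this patch: the hunks here and throughout are pure reformatting by black (pinned as black==23.3.0 in the dev extras added to setup.py), so the recurring transformations are quote normalization to double quotes, wrapping anything past the default 88-character line length onto one element per line, and adding a trailing comma to the exploded call or literal. A minimal before/after sketch of that behavior, using a hypothetical load_settings helper rather than code from this repository:

def load_settings(path, **camera_settings):
    # hypothetical helper, defined only so the sketch runs on its own
    return {"path": path, **camera_settings}

# Written the way much of this codebase looked before the patch: single quotes, one long line.
settings = load_settings('config/station_config_TEST.json', brightness=100, exposition=100, position='front_camera')

# The same call after running black (line length 88): double quotes, one argument
# per line, and a trailing comma so later diffs only touch the line that changes.
settings = load_settings(
    "config/station_config_TEST.json",
    brightness=100,
    exposition=100,
    position="front_camera",
)
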
+os.environ["API_CONFIG"] = "test" +TEST_DATA_FOLDER_PATH = Path(__file__).parent / "data" +TEST_CONFIG_FOLDER_PATH = Path(__file__).parent / "config" +TEST_STATION_CONFIGS_FOLDER_PATH = TEST_CONFIG_FOLDER_PATH / "station_configs" +TEST_STATION_CONFIG_PATH = TEST_STATION_CONFIGS_FOLDER_PATH / "station_config_TEST.json" +TEST_STATION_CONFIG_2_PATH = ( + TEST_STATION_CONFIGS_FOLDER_PATH / "station_config_TEST2.json" +) +TEST_INVENTORY_PATH = TEST_CONFIG_FOLDER_PATH / "inventory_TEST.json" ROOT_REPOSITORY_PATH = Path(__file__).parents[2] -pytest_plugins = ['tests.fixtures.binaries', 'tests.fixtures.cameras_metadata', - 'tests.fixtures.items', 'tests.fixtures.metadata', - 'tests.fixtures.containers', 'tests.fixtures.items_config'] +pytest_plugins = [ + "tests.fixtures.binaries", + "tests.fixtures.cameras_metadata", + "tests.fixtures.items", + "tests.fixtures.metadata", + "tests.fixtures.containers", + "tests.fixtures.items_config", +] -EDGE_DB_IMG = 'mongo:5.0.2' -HUB_MONITORING_DB_IMG = 'postgres:15.1' -EDGE_MODEL_SERVING = {'image_name': 'ghcr.io/octo-technology/vio/edge_model_serving:latest', - 'container_volume_path': '/tf_serving', - 'host_volume_path_suffix': 'edge_model_serving'} +EDGE_DB_IMG = "mongo:5.0.2" +HUB_MONITORING_DB_IMG = "postgres:15.1" +EDGE_MODEL_SERVING = { + "image_name": "ghcr.io/octo-technology/vio/edge_model_serving:latest", + "container_volume_path": "/tf_serving", + "host_volume_path_suffix": "edge_model_serving", +} EDGE_TFLITE_SERVING_IMG = "ghcr.io/octo-technology/vio/edge_tflite_serving:latest" diff --git a/edge_orchestrator/tests/data/fake_item/inputs.json b/edge_orchestrator/tests/data/fake_item/inputs.json index c345e98f..80156482 100644 --- a/edge_orchestrator/tests/data/fake_item/inputs.json +++ b/edge_orchestrator/tests/data/fake_item/inputs.json @@ -1,4 +1,4 @@ { - "serial_number": "serial_number_test", - "category": "category_test" -} \ No newline at end of file + "serial_number": "serial_number_test", + "category": "category_test" +} diff --git a/edge_orchestrator/tests/data/item_1/inputs.json b/edge_orchestrator/tests/data/item_1/inputs.json index c345e98f..80156482 100644 --- a/edge_orchestrator/tests/data/item_1/inputs.json +++ b/edge_orchestrator/tests/data/item_1/inputs.json @@ -1,4 +1,4 @@ { - "serial_number": "serial_number_test", - "category": "category_test" -} \ No newline at end of file + "serial_number": "serial_number_test", + "category": "category_test" +} diff --git a/edge_orchestrator/tests/data/item_2/inputs.json b/edge_orchestrator/tests/data/item_2/inputs.json index 7bfc4979..80156482 100644 --- a/edge_orchestrator/tests/data/item_2/inputs.json +++ b/edge_orchestrator/tests/data/item_2/inputs.json @@ -1,4 +1,4 @@ { - "serial_number": "serial_number_test", - "category": "category_test" + "serial_number": "serial_number_test", + "category": "category_test" } diff --git a/edge_orchestrator/tests/fixtures/binaries.py b/edge_orchestrator/tests/fixtures/binaries.py index 9a403791..61a45ee5 100644 --- a/edge_orchestrator/tests/fixtures/binaries.py +++ b/edge_orchestrator/tests/fixtures/binaries.py @@ -3,46 +3,63 @@ from tests.conftest import TEST_DATA_FOLDER_PATH -@fixture(scope='function') +@fixture(scope="function") def my_binaries_0(): - with (TEST_DATA_FOLDER_PATH / 'item_0' / 'camera_id1.jpg').open('br') as f1, \ - (TEST_DATA_FOLDER_PATH / 'item_0' / 'camera_id2.jpg').open('br') as f2: + with (TEST_DATA_FOLDER_PATH / "item_0" / "camera_id1.jpg").open("br") as f1, ( + TEST_DATA_FOLDER_PATH / "item_0" / "camera_id2.jpg" + ).open("br") as 
f2: picture_1 = f1.read() picture_2 = f2.read() - return {'camera_id1': picture_1, 'camera_id2': picture_2} + return {"camera_id1": picture_1, "camera_id2": picture_2} -@fixture(scope='function') +@fixture(scope="function") def my_binaries_1(): - with (TEST_DATA_FOLDER_PATH / 'item_1' / 'camera_id1.jpg').open('br') as f: + with (TEST_DATA_FOLDER_PATH / "item_1" / "camera_id1.jpg").open("br") as f: picture = f.read() - return {'camera_id1': picture, 'camera_id2': picture, 'camera_id3': picture, 'camera_id4': picture} + return { + "camera_id1": picture, + "camera_id2": picture, + "camera_id3": picture, + "camera_id4": picture, + } -@fixture(scope='function') +@fixture(scope="function") def my_binaries_2(): - with (TEST_DATA_FOLDER_PATH / 'item_2' / 'camera_id1.jpg').open('br') as f1, \ - (TEST_DATA_FOLDER_PATH / 'item_2' / 'camera_id2.jpg').open('br') as f2: + with (TEST_DATA_FOLDER_PATH / "item_2" / "camera_id1.jpg").open("br") as f1, ( + TEST_DATA_FOLDER_PATH / "item_2" / "camera_id2.jpg" + ).open("br") as f2: picture_2 = f1.read() picture_3 = f2.read() - return {'camera_id2': picture_2, 'camera_id3': picture_3} + return {"camera_id2": picture_2, "camera_id3": picture_3} -@fixture(scope='function') +@fixture(scope="function") def my_fake_binaries(): - with (TEST_DATA_FOLDER_PATH / 'fake_item' / 'image1.jpg').open('br') as f1, \ - (TEST_DATA_FOLDER_PATH / 'fake_item' / 'image5.jpg').open('br') as f2, \ - (TEST_DATA_FOLDER_PATH / 'fake_item' / 'image2.jpg').open('br') as f3, \ - (TEST_DATA_FOLDER_PATH / 'fake_item' / 'image7.jpg').open('br') as f4: + with (TEST_DATA_FOLDER_PATH / "fake_item" / "image1.jpg").open("br") as f1, ( + TEST_DATA_FOLDER_PATH / "fake_item" / "image5.jpg" + ).open("br") as f2, (TEST_DATA_FOLDER_PATH / "fake_item" / "image2.jpg").open( + "br" + ) as f3, ( + TEST_DATA_FOLDER_PATH / "fake_item" / "image7.jpg" + ).open( + "br" + ) as f4: picture_1 = f1.read() picture_2 = f2.read() picture_3 = f3.read() picture_4 = f4.read() - return {'camera_id1': picture_1, 'camera_id2': picture_2, 'camera_id3': picture_3, 'camera_id4': picture_4} + return { + "camera_id1": picture_1, + "camera_id2": picture_2, + "camera_id3": picture_3, + "camera_id4": picture_4, + } -@fixture(scope='function') +@fixture(scope="function") def my_fake_binaries_2(): - with (TEST_DATA_FOLDER_PATH / 'marker_images' / '160.jpg').open('br') as f: + with (TEST_DATA_FOLDER_PATH / "marker_images" / "160.jpg").open("br") as f: picture = f.read() - return {'camera_id3': picture} + return {"camera_id3": picture} diff --git a/edge_orchestrator/tests/fixtures/cameras_metadata.py b/edge_orchestrator/tests/fixtures/cameras_metadata.py index 2808e16b..de2044bd 100644 --- a/edge_orchestrator/tests/fixtures/cameras_metadata.py +++ b/edge_orchestrator/tests/fixtures/cameras_metadata.py @@ -1,22 +1,31 @@ from _pytest.fixtures import fixture -@fixture(scope='function') +@fixture(scope="function") def my_cameras_metadata_0(right_camera_metadata, left_camera_metadata): - return {'camera_1': right_camera_metadata, 'camera_2': left_camera_metadata} + return {"camera_1": right_camera_metadata, "camera_2": left_camera_metadata} -@fixture(scope='function') -def my_cameras_metadata_1(right_camera_metadata, left_camera_metadata, top_camera_metadata, bottom_camera_metadata): - return {'camera_id1': right_camera_metadata, 'camera_id2': left_camera_metadata, - 'camera_id3': top_camera_metadata, 'camera_id4': bottom_camera_metadata} +@fixture(scope="function") +def my_cameras_metadata_1( + right_camera_metadata, + left_camera_metadata, + 
top_camera_metadata, + bottom_camera_metadata, +): + return { + "camera_id1": right_camera_metadata, + "camera_id2": left_camera_metadata, + "camera_id3": top_camera_metadata, + "camera_id4": bottom_camera_metadata, + } -@fixture(scope='function') +@fixture(scope="function") def my_cameras_metadata_2(top_camera_metadata): - return {'camera_3': top_camera_metadata} + return {"camera_3": top_camera_metadata} -@fixture(scope='function') +@fixture(scope="function") def my_cameras_metadata_3(back_camera_metadata): - return {'camera_id3': back_camera_metadata} + return {"camera_id3": back_camera_metadata} diff --git a/edge_orchestrator/tests/fixtures/containers.py b/edge_orchestrator/tests/fixtures/containers.py index 2b9b8112..1d0402ae 100644 --- a/edge_orchestrator/tests/fixtures/containers.py +++ b/edge_orchestrator/tests/fixtures/containers.py @@ -1,20 +1,24 @@ import logging import os +from typing import Generator, Optional, Tuple, Union import docker import pymongo -from _pytest.fixtures import SubRequest -from _pytest.fixtures import fixture +from _pytest.fixtures import SubRequest, fixture from alembic import command as alembic_command from alembic.config import Config from testcontainers.core import config from testcontainers.core.container import DockerContainer from testcontainers.mongodb import MongoDbContainer from testcontainers.postgres import PostgresContainer -from typing import Union, Tuple, Generator, Optional -from tests.conftest import ROOT_REPOSITORY_PATH, EDGE_DB_IMG, HUB_MONITORING_DB_IMG, EDGE_MODEL_SERVING, \ - EDGE_TFLITE_SERVING_IMG +from tests.conftest import ( + EDGE_DB_IMG, + EDGE_MODEL_SERVING, + EDGE_TFLITE_SERVING_IMG, + HUB_MONITORING_DB_IMG, + ROOT_REPOSITORY_PATH, +) from tests.tf_serving_container import TfServingContainer config.MAX_TRIES = 5 @@ -25,22 +29,32 @@ def check_image_presence_or_pull_it_from_registry(image_name: str): image_tags = [tag for image in client.images.list() for tag in image.tags] if image_name not in image_tags: auth_config = None - logging.info(f'Pulling docker image {image_name} from registry when running tests for the first time...') - if image_name.startswith('ghcr.io/octo-technology'): - if os.environ.get('REGISTRY_USERNAME') and os.environ.get('REGISTRY_PASSWORD'): - auth_config = {'username': os.environ.get('REGISTRY_USERNAME'), - 'password': os.environ.get('REGISTRY_PASSWORD')} + logging.info( + f"Pulling docker image {image_name} from registry when running tests for the first time..." 
+ ) + if image_name.startswith("ghcr.io/octo-technology"): + if os.environ.get("REGISTRY_USERNAME") and os.environ.get( + "REGISTRY_PASSWORD" + ): + auth_config = { + "username": os.environ.get("REGISTRY_USERNAME"), + "password": os.environ.get("REGISTRY_PASSWORD"), + } else: - raise PermissionError('Please set your registry credentials with the following env vars: ' - 'REGISTRY_USERNAME & REGISTRY_PASSWORD') + raise PermissionError( + "Please set your registry credentials with the following env vars: " + "REGISTRY_USERNAME & REGISTRY_PASSWORD" + ) client.images.pull(image_name, auth_config=auth_config) -def start_test_db(image_name: str, connection_url: Optional[str]) -> Tuple[str, MongoDbContainer]: +def start_test_db( + image_name: str, connection_url: Optional[str] +) -> Tuple[str, MongoDbContainer]: container = None if connection_url is None: check_image_presence_or_pull_it_from_registry(image_name) - if 'mongo' in image_name: + if "mongo" in image_name: container = MongoDbContainer(image_name) else: container = PostgresContainer(image_name) @@ -54,96 +68,111 @@ def stop_test_container(container: DockerContainer): container.stop() -@fixture(scope='session') +@fixture(scope="session") def setup_test_mongo_db() -> str: - connection_url, mongo_db_container = start_test_db(image_name=EDGE_DB_IMG, - connection_url=os.environ.get('MONGO_DB_URI')) + connection_url, mongo_db_container = start_test_db( + image_name=EDGE_DB_IMG, connection_url=os.environ.get("MONGO_DB_URI") + ) yield connection_url stop_test_container(mongo_db_container) -@fixture(scope='function') +@fixture(scope="function") def test_mongo_db_uri(setup_test_mongo_db) -> str: yield setup_test_mongo_db client = pymongo.MongoClient(setup_test_mongo_db) - client.drop_database('orchestratorDB') + client.drop_database("orchestratorDB") -@fixture(scope='session') +@fixture(scope="session") def setup_test_postgres_db() -> str: - connection_url, postgres_db_container = start_test_db(image_name=HUB_MONITORING_DB_IMG, - connection_url=os.environ.get('POSTGRES_DB_URI')) + connection_url, postgres_db_container = start_test_db( + image_name=HUB_MONITORING_DB_IMG, + connection_url=os.environ.get("POSTGRES_DB_URI"), + ) apply_db_migrations(connection_url) yield connection_url stop_test_container(postgres_db_container) -@fixture(scope='function') +@fixture(scope="function") def test_postgres_db_uri(setup_test_postgres_db) -> str: yield setup_test_postgres_db -def start_test_tf_serving(image_name: str, starting_log: str, exposed_model_name: str, - tf_serving_host: Union[str, None] = os.environ.get('TENSORFLOW_SERVING_HOST'), - tf_serving_port: Union[int, None] = os.environ.get('TENSORFLOW_SERVING_PORT'), - host_volume_path: str = None, - container_volume_path: str = None) -> Tuple[str, TfServingContainer]: +def start_test_tf_serving( + image_name: str, + starting_log: str, + exposed_model_name: str, + tf_serving_host: Union[str, None] = os.environ.get("TENSORFLOW_SERVING_HOST"), + tf_serving_port: Union[int, None] = os.environ.get("TENSORFLOW_SERVING_PORT"), + host_volume_path: str = None, + container_volume_path: str = None, +) -> Tuple[str, TfServingContainer]: container = None if tf_serving_host is None or tf_serving_port is None: port_to_expose = 8501 check_image_presence_or_pull_it_from_registry(image_name) - container = TfServingContainer(image=image_name, - port_to_expose=port_to_expose, - env={'MODEL_NAME': exposed_model_name}, - host_volume_path=host_volume_path, - container_volume_path=container_volume_path) + container = 
TfServingContainer( + image=image_name, + port_to_expose=port_to_expose, + env={"MODEL_NAME": exposed_model_name}, + host_volume_path=host_volume_path, + container_volume_path=container_volume_path, + ) container.start(starting_log) tf_serving_host = container.get_container_host_ip() tf_serving_port = container.get_exposed_port(port_to_expose) - return f'http://{tf_serving_host}:{tf_serving_port}', container + return f"http://{tf_serving_host}:{tf_serving_port}", container -@fixture(scope='session') +@fixture(scope="session") def setup_test_tensorflow_serving(request: SubRequest) -> Generator[str, None, None]: connection_url, tensorflow_serving_container = start_test_tf_serving( - image_name=EDGE_MODEL_SERVING['image_name'], - starting_log=r'Entering the event loop ...', + image_name=EDGE_MODEL_SERVING["image_name"], + starting_log=r"Entering the event loop ...", exposed_model_name=request.param, - host_volume_path=((ROOT_REPOSITORY_PATH / EDGE_MODEL_SERVING['host_volume_path_suffix']).as_posix()), - container_volume_path=EDGE_MODEL_SERVING['container_volume_path'] + host_volume_path=( + ( + ROOT_REPOSITORY_PATH / EDGE_MODEL_SERVING["host_volume_path_suffix"] + ).as_posix() + ), + container_volume_path=EDGE_MODEL_SERVING["container_volume_path"], ) yield connection_url stop_test_container(tensorflow_serving_container) -@fixture(scope='session') +@fixture(scope="session") def setup_test_tflite_serving(request: SubRequest) -> Generator[str, None, None]: connection_url, tflite_serving_container = start_test_tf_serving( image_name=EDGE_TFLITE_SERVING_IMG, - starting_log=r'Uvicorn running on', + starting_log=r"Uvicorn running on", exposed_model_name=request.param, - tf_serving_host=os.environ.get('TFLITE_SERVING_HOST'), - tf_serving_port=os.environ.get('TFLITE_SERVING_PORT') + tf_serving_host=os.environ.get("TFLITE_SERVING_HOST"), + tf_serving_port=os.environ.get("TFLITE_SERVING_PORT"), ) yield connection_url stop_test_container(tflite_serving_container) -@fixture(scope='function') +@fixture(scope="function") def test_tensorflow_serving_base_url(setup_test_tensorflow_serving) -> str: return setup_test_tensorflow_serving -@fixture(scope='function') +@fixture(scope="function") def test_tflite_serving_base_url(setup_test_tflite_serving) -> str: return setup_test_tflite_serving def apply_db_migrations(connection_url: str) -> bool: - os.environ['DB_CONNECTION_URL'] = connection_url - path_to_migration = ROOT_REPOSITORY_PATH / 'hub_monitoring/db_migrations' - alembic_cfg = Config(path_to_migration / 'alembic.ini') - alembic_cfg.set_main_option('script_location', (path_to_migration / 'alembic').as_posix()) - alembic_command.upgrade(alembic_cfg, 'head') + os.environ["DB_CONNECTION_URL"] = connection_url + path_to_migration = ROOT_REPOSITORY_PATH / "hub_monitoring/db_migrations" + alembic_cfg = Config(path_to_migration / "alembic.ini") + alembic_cfg.set_main_option( + "script_location", (path_to_migration / "alembic").as_posix() + ) + alembic_command.upgrade(alembic_cfg, "head") return True diff --git a/edge_orchestrator/tests/fixtures/items.py b/edge_orchestrator/tests/fixtures/items.py index 22eda439..bcfd8511 100644 --- a/edge_orchestrator/tests/fixtures/items.py +++ b/edge_orchestrator/tests/fixtures/items.py @@ -6,36 +6,66 @@ from edge_orchestrator.domain.models.item import Item -@fixture(scope='function') -@freeze_time(lambda: dt.datetime(year=2021, month=5, day=19, hour=15, minute=0, second=0)) +@fixture(scope="function") +@freeze_time( + lambda: dt.datetime(year=2021, month=5, day=19, hour=15, 
minute=0, second=0) +) def my_item_0(my_cameras_metadata_0, my_binaries_0): - return Item(serial_number='123', category='tacos', cameras_metadata=my_cameras_metadata_0, - binaries=my_binaries_0) + return Item( + serial_number="123", + category="tacos", + cameras_metadata=my_cameras_metadata_0, + binaries=my_binaries_0, + ) -@fixture(scope='function') -@freeze_time(lambda: dt.datetime(year=2021, month=5, day=19, hour=15, minute=0, second=0)) +@fixture(scope="function") +@freeze_time( + lambda: dt.datetime(year=2021, month=5, day=19, hour=15, minute=0, second=0) +) def my_item_1(my_cameras_metadata_1, my_binaries_1): - return Item(serial_number='serial_number_test', category='category_test', cameras_metadata=my_cameras_metadata_1, - binaries=my_binaries_1) + return Item( + serial_number="serial_number_test", + category="category_test", + cameras_metadata=my_cameras_metadata_1, + binaries=my_binaries_1, + ) -@fixture(scope='function') -@freeze_time(lambda: dt.datetime(year=2021, month=5, day=19, hour=15, minute=0, second=0)) +@fixture(scope="function") +@freeze_time( + lambda: dt.datetime(year=2021, month=5, day=19, hour=15, minute=0, second=0) +) def my_fake_item(my_cameras_metadata_1, my_fake_binaries): - return Item(serial_number='serial_number_test', category='category_test', cameras_metadata=my_cameras_metadata_1, - binaries=my_fake_binaries) + return Item( + serial_number="serial_number_test", + category="category_test", + cameras_metadata=my_cameras_metadata_1, + binaries=my_fake_binaries, + ) -@fixture(scope='function') -@freeze_time(lambda: dt.datetime(year=2021, month=5, day=19, hour=15, minute=0, second=0)) +@fixture(scope="function") +@freeze_time( + lambda: dt.datetime(year=2021, month=5, day=19, hour=15, minute=0, second=0) +) def my_fake_item_2(my_cameras_metadata_3, my_fake_binaries_2): - return Item(serial_number='serial_number_test', category='category_test', cameras_metadata=my_cameras_metadata_3, - binaries=my_fake_binaries_2) + return Item( + serial_number="serial_number_test", + category="category_test", + cameras_metadata=my_cameras_metadata_3, + binaries=my_fake_binaries_2, + ) -@fixture(scope='function') -@freeze_time(lambda: dt.datetime(year=2021, month=5, day=19, hour=15, minute=0, second=0)) +@fixture(scope="function") +@freeze_time( + lambda: dt.datetime(year=2021, month=5, day=19, hour=15, minute=0, second=0) +) def my_item_2(my_cameras_metadata_2, my_binaries_2): - return Item(serial_number='123', category='tacos', cameras_metadata=my_cameras_metadata_2, - binaries=my_binaries_2) + return Item( + serial_number="123", + category="tacos", + cameras_metadata=my_cameras_metadata_2, + binaries=my_binaries_2, + ) diff --git a/edge_orchestrator/tests/fixtures/items_config.py b/edge_orchestrator/tests/fixtures/items_config.py index 3bb54b9e..530bfe0e 100644 --- a/edge_orchestrator/tests/fixtures/items_config.py +++ b/edge_orchestrator/tests/fixtures/items_config.py @@ -1,147 +1,133 @@ from _pytest.fixtures import fixture -@fixture(scope='function') +@fixture(scope="function") def test_items_config(): return { - 'test_item_category_A': { - 'camera_id1': { - 'business_rule': { - 'shape': 'square', - 'threshold': 0.02 - }, - 'models_graph': { - 'model_1': { - 'depends_on': [], - 'metadata': { - 'category': 'classification', - 'name': 'inception', - 'pb_file_path': 'modelforward/inception', - 'version': '1' - } + "test_item_category_A": { + "camera_id1": { + "business_rule": {"shape": "square", "threshold": 0.02}, + "models_graph": { + "model_1": { + "depends_on": [], + 
"metadata": { + "category": "classification", + "name": "inception", + "pb_file_path": "modelforward/inception", + "version": "1", + }, } }, - 'settings': { - 'exposition': 100, - 'position': 'front_camera', - 'type': 'fake' - }}, - 'camera_id2': { - 'business_rule': { - 'shape': 'square', - 'threshold': 0.02 + "settings": { + "exposition": 100, + "position": "front_camera", + "type": "fake", }, - 'models_graph': { - 'model_1': { - 'depends_on': [], - 'metadata': { - 'category': 'object_detection_with_classification', - 'name': 'yolov3_harnais', - 'pb_file_path': 'modelforward/yolov3_harnais', - 'version': '1' - } + }, + "camera_id2": { + "business_rule": {"shape": "square", "threshold": 0.02}, + "models_graph": { + "model_1": { + "depends_on": [], + "metadata": { + "category": "object_detection_with_classification", + "name": "yolov3_harnais", + "pb_file_path": "modelforward/yolov3_harnais", + "version": "1", + }, } }, - 'settings': { - 'exposition': 100, - 'position': 'left_camera', - 'type': 'fake' - } - }, - 'camera_id3': { - 'business_rule': { - 'shape': 'square', - 'threshold': 0.02 + "settings": { + "exposition": 100, + "position": "left_camera", + "type": "fake", }, - 'models_graph': { - 'model_1': { - 'depends_on': [], - 'metadata': { - 'category': 'object_detection', - 'name': 'yolov3_harnais', - 'pb_file_path': 'modelforward/yolov3_harnais', - 'version': '1' - } + }, + "camera_id3": { + "business_rule": {"shape": "square", "threshold": 0.02}, + "models_graph": { + "model_1": { + "depends_on": [], + "metadata": { + "category": "object_detection", + "name": "yolov3_harnais", + "pb_file_path": "modelforward/yolov3_harnais", + "version": "1", + }, + }, + "model_2": { + "depends_on": ["model_1"], + "metadata": { + "category": "classification", + "name": "inception", + "pb_file_path": "modelforward/inception", + "version": "1", + }, }, - 'model_2': { - 'depends_on': ['model_1'], - 'metadata': { - 'category': 'classification', - 'name': 'inception', - 'pb_file_path': 'modelforward/inception', - 'version': '1' - } - } }, - 'settings': { - 'exposition': 100, - 'position': 'right_camera', - 'type': 'fake' - } - }, - 'camera_id4': { - 'business_rule': { - 'shape': 'square', - 'threshold': 0.02 + "settings": { + "exposition": 100, + "position": "right_camera", + "type": "fake", }, - 'models_graph': { - 'model_1': { - 'depends_on': [], - 'metadata': { - 'category': 'object_detection', - 'name': 'yolov3_harnais', - 'pb_file_path': 'modelforward/yolov3_harnais', - 'version': '1' - } + }, + "camera_id4": { + "business_rule": {"shape": "square", "threshold": 0.02}, + "models_graph": { + "model_1": { + "depends_on": [], + "metadata": { + "category": "object_detection", + "name": "yolov3_harnais", + "pb_file_path": "modelforward/yolov3_harnais", + "version": "1", + }, + }, + "model_2": { + "depends_on": ["model_1"], + "metadata": { + "category": "classification", + "name": "inception", + "pb_file_path": "modelforward/inception", + "version": "1", + }, }, - 'model_2': { - 'depends_on': ['model_1'], - 'metadata': { - 'category': 'classification', - 'name': 'inception', - 'pb_file_path': 'modelforward/inception', - 'version': '1' - } - } }, - 'settings': { - 'exposition': 100, - 'position': 'back_camera', - 'type': 'fake' - } - } - }, - 'test_item_category_B': { - 'camera_id1': { - 'business_rule': { - 'shape': 'square', - 'threshold': 0.02 + "settings": { + "exposition": 100, + "position": "back_camera", + "type": "fake", }, - 'models_graph': { - 'model_1': { - 'depends_on': [], - 'metadata': { - 
'category': 'object_detection', - 'name': 'yolov3_harnais', - 'pb_file_path': 'modelforward/yolov3_harnais', - 'version': '1' - } + }, + }, + "test_item_category_B": { + "camera_id1": { + "business_rule": {"shape": "square", "threshold": 0.02}, + "models_graph": { + "model_1": { + "depends_on": [], + "metadata": { + "category": "object_detection", + "name": "yolov3_harnais", + "pb_file_path": "modelforward/yolov3_harnais", + "version": "1", + }, + }, + "model_2": { + "depends_on": ["model_1"], + "metadata": { + "category": "classification", + "name": "inception", + "pb_file_path": "modelforward/inception", + "version": "1", + }, }, - 'model_2': { - 'depends_on': ['model_1'], - 'metadata': { - 'category': 'classification', - 'name': 'inception', - 'pb_file_path': 'modelforward/inception', - 'version': '1' - } - } }, - 'settings': { - 'exposition': 100, - 'position': 'right_camera', - 'type': 'fake' - } + "settings": { + "exposition": 100, + "position": "right_camera", + "type": "fake", + }, } - } + }, } diff --git a/edge_orchestrator/tests/fixtures/metadata.py b/edge_orchestrator/tests/fixtures/metadata.py index cca1a75e..b024dcbd 100644 --- a/edge_orchestrator/tests/fixtures/metadata.py +++ b/edge_orchestrator/tests/fixtures/metadata.py @@ -1,26 +1,31 @@ from _pytest.fixtures import fixture -@fixture(scope='function') +@fixture(scope="function") def right_camera_metadata(): return {"brightness": 100, "exposition": 100, "position": "right"} -@fixture(scope='function') +@fixture(scope="function") def left_camera_metadata(): return {"brightness": 100, "exposition": 100, "position": "left"} -@fixture(scope='function') +@fixture(scope="function") def top_camera_metadata(): return {"brightness": 100, "exposition": 100, "position": "top"} -@fixture(scope='function') +@fixture(scope="function") def bottom_camera_metadata(): return {"brightness": 100, "exposition": 100, "position": "bottom"} -@fixture(scope='function') +@fixture(scope="function") def back_camera_metadata(): - return {'brightness': None, 'exposition': 100, 'position': 'back', 'source': 'marker_images'} + return { + "brightness": None, + "exposition": 100, + "position": "back", + "source": "marker_images", + } diff --git a/edge_orchestrator/tests/functional_tests/environment.py b/edge_orchestrator/tests/functional_tests/environment.py index 1f7eb63a..e465e95d 100644 --- a/edge_orchestrator/tests/functional_tests/environment.py +++ b/edge_orchestrator/tests/functional_tests/environment.py @@ -5,36 +5,57 @@ from behave.runner import Context from fastapi.testclient import TestClient -from tests.conftest import ROOT_REPOSITORY_PATH, TEST_DATA_FOLDER_PATH, EDGE_DB_IMG, HUB_MONITORING_DB_IMG, \ - EDGE_MODEL_SERVING -from tests.fixtures.containers import start_test_db, start_test_tf_serving, stop_test_container, apply_db_migrations +from tests.conftest import ( + EDGE_DB_IMG, + EDGE_MODEL_SERVING, + HUB_MONITORING_DB_IMG, + ROOT_REPOSITORY_PATH, + TEST_DATA_FOLDER_PATH, +) +from tests.fixtures.containers import ( + apply_db_migrations, + start_test_db, + start_test_tf_serving, + stop_test_container, +) def before_all(context: Context): context.test_directory = Path(__file__).parent.parent - context.mongo_db_uri, context.mongo_db_container = start_test_db(image_name=EDGE_DB_IMG, - connection_url=os.environ.get('MONGO_DB_URI')) - context.postgres_db_uri, context.postgres_db_container = start_test_db(image_name=HUB_MONITORING_DB_IMG, - connection_url=os.environ.get( - 'POSTGRES_DB_URI')) + context.mongo_db_uri, context.mongo_db_container = 
start_test_db( + image_name=EDGE_DB_IMG, connection_url=os.environ.get("MONGO_DB_URI") + ) + context.postgres_db_uri, context.postgres_db_container = start_test_db( + image_name=HUB_MONITORING_DB_IMG, + connection_url=os.environ.get("POSTGRES_DB_URI"), + ) apply_db_migrations(context.postgres_db_uri) - context.tensorflow_serving_url, context.tensorflow_serving_container = start_test_tf_serving( - image_name=EDGE_MODEL_SERVING['image_name'], - starting_log=r'Entering the event loop ...', + ( + context.tensorflow_serving_url, + context.tensorflow_serving_container, + ) = start_test_tf_serving( + image_name=EDGE_MODEL_SERVING["image_name"], + starting_log=r"Entering the event loop ...", exposed_model_name="marker_quality_control", - host_volume_path=((ROOT_REPOSITORY_PATH / EDGE_MODEL_SERVING['host_volume_path_suffix']).as_posix()), - container_volume_path=EDGE_MODEL_SERVING['container_volume_path']) - os.environ['API_CONFIG'] = 'test' - os.environ['MONGO_DB_URI'] = context.mongo_db_uri - os.environ['POSTGRES_DB_URI'] = context.postgres_db_uri - os.environ['SERVING_MODEL_URL'] = context.tensorflow_serving_url + host_volume_path=( + ( + ROOT_REPOSITORY_PATH / EDGE_MODEL_SERVING["host_volume_path_suffix"] + ).as_posix() + ), + container_volume_path=EDGE_MODEL_SERVING["container_volume_path"], + ) + os.environ["API_CONFIG"] = "test" + os.environ["MONGO_DB_URI"] = context.mongo_db_uri + os.environ["POSTGRES_DB_URI"] = context.postgres_db_uri + os.environ["SERVING_MODEL_URL"] = context.tensorflow_serving_url from edge_orchestrator.application.server import server + context.test_client = TestClient(server()) def after_all(context: Context): - rmtree(TEST_DATA_FOLDER_PATH / 'storage') + rmtree(TEST_DATA_FOLDER_PATH / "storage") if context.mongo_db_container: stop_test_container(context.mongo_db_container) if context.postgres_db_container: diff --git a/edge_orchestrator/tests/functional_tests/steps/common_steps.py b/edge_orchestrator/tests/functional_tests/steps/common_steps.py index 6ce76e4b..8cdad67b 100644 --- a/edge_orchestrator/tests/functional_tests/steps/common_steps.py +++ b/edge_orchestrator/tests/functional_tests/steps/common_steps.py @@ -1,10 +1,10 @@ import re -from typing import Dict, Union, Optional +from time import strptime +from typing import Dict, Optional, Union from behave import given from behave.runner import Context from starlette.status import HTTP_200_OK -from time import strptime @given("the app is up and running") @@ -13,48 +13,73 @@ def app_up_and_running(context: Context): assert response.status_code == HTTP_200_OK -def assert_metadata_almost_equal(actual_item_metadata: Dict[str, Union[Dict, str]], - expected_item_metadata: Dict[str, Union[Dict, str]]): - for expected_item_key, expected_item_value_or_pattern in expected_item_metadata.items(): - if expected_item_key == 'id': - assert re.match(expected_item_value_or_pattern, actual_item_metadata[expected_item_key]) - elif expected_item_key == 'received_time': - assert strptime(actual_item_metadata[expected_item_key], expected_item_value_or_pattern) - elif expected_item_key == 'inferences': - assert_classification_inference_almost_equal(actual_item_metadata[expected_item_key], - expected_item_value_or_pattern) - elif expected_item_key == 'decision': - assert_decision_is_valid(actual_item_metadata[expected_item_key], expected_item_value_or_pattern) - elif expected_item_key == 'state': - assert_state_is_valid(actual_item_metadata[expected_item_key], expected_item_value_or_pattern) +def assert_metadata_almost_equal( + 
actual_item_metadata: Dict[str, Union[Dict, str]], + expected_item_metadata: Dict[str, Union[Dict, str]], +): + for ( + expected_item_key, + expected_item_value_or_pattern, + ) in expected_item_metadata.items(): + if expected_item_key == "id": + assert re.match( + expected_item_value_or_pattern, actual_item_metadata[expected_item_key] + ) + elif expected_item_key == "received_time": + assert strptime( + actual_item_metadata[expected_item_key], expected_item_value_or_pattern + ) + elif expected_item_key == "inferences": + assert_classification_inference_almost_equal( + actual_item_metadata[expected_item_key], expected_item_value_or_pattern + ) + elif expected_item_key == "decision": + assert_decision_is_valid( + actual_item_metadata[expected_item_key], expected_item_value_or_pattern + ) + elif expected_item_key == "state": + assert_state_is_valid( + actual_item_metadata[expected_item_key], expected_item_value_or_pattern + ) else: - assert expected_item_value_or_pattern == actual_item_metadata[expected_item_key] + assert ( + expected_item_value_or_pattern + == actual_item_metadata[expected_item_key] + ) -def assert_classification_inference_almost_equal(actual_inference: Dict[str, Dict], - expected_item_value_or_pattern: Dict[str, Dict]): +def assert_classification_inference_almost_equal( + actual_inference: Dict[str, Dict], expected_item_value_or_pattern: Dict[str, Dict] +): for camera_id, cam_inferences in expected_item_value_or_pattern.items(): assert camera_id in actual_inference for model_id, inference in cam_inferences.items(): assert model_id in actual_inference[camera_id] - assert re.match(inference['full_image']['label'], - actual_inference[camera_id][model_id]['full_image']['label']) - assert re.match(inference['full_image']['probability'], - str(actual_inference[camera_id][model_id]['full_image'][ - 'probability'])) + assert re.match( + inference["full_image"]["label"], + actual_inference[camera_id][model_id]["full_image"]["label"], + ) + assert re.match( + inference["full_image"]["probability"], + str(actual_inference[camera_id][model_id]["full_image"]["probability"]), + ) def assert_state_is_valid(actual_state: str, expected_state: Optional[str] = None): from edge_orchestrator.domain.use_cases.supervisor import SupervisorState + if expected_state: assert re.match(expected_state, actual_state) else: assert actual_state in [state.value for state in SupervisorState] -def assert_decision_is_valid(actual_decision: str, expected_decision: Optional[str] = None): +def assert_decision_is_valid( + actual_decision: str, expected_decision: Optional[str] = None +): if expected_decision: assert re.match(expected_decision, actual_decision) else: from edge_orchestrator.domain.models.decision import Decision + assert actual_decision in [decision.value for decision in Decision] diff --git a/edge_orchestrator/tests/functional_tests/steps/supervisor_configs_routes.py b/edge_orchestrator/tests/functional_tests/steps/supervisor_configs_routes.py index b0cb6be3..93ac0417 100644 --- a/edge_orchestrator/tests/functional_tests/steps/supervisor_configs_routes.py +++ b/edge_orchestrator/tests/functional_tests/steps/supervisor_configs_routes.py @@ -1,6 +1,6 @@ import json -from behave import when, then, use_step_matcher +from behave import then, use_step_matcher, when from behave.runner import Context from starlette.status import HTTP_200_OK @@ -21,8 +21,9 @@ def client_requests_all_configurations(context: Context): @when("the client activates configuration '([a-zA-Z0-9-_]+)'") def 
client_set_active_configuration_as(context: Context, config_name: str): - context.response = context.test_client.post("/api/v1/configs/active", - json={"config_name": config_name}) + context.response = context.test_client.post( + "/api/v1/configs/active", json={"config_name": config_name} + ) assert context.response.status_code == HTTP_200_OK @@ -36,7 +37,7 @@ def active_configuration_is(context: Context): def client_receives_all_available_configuration(context: Context): configs = {} for row in context.table: - filepath = context.test_directory / row['config_filepath'] + filepath = context.test_directory / row["config_filepath"] with filepath.open("r") as f: configs[filepath.stem] = json.load(f) for config_name, config in context.response.json().items(): diff --git a/edge_orchestrator/tests/functional_tests/steps/supervisor_inventory_route.py b/edge_orchestrator/tests/functional_tests/steps/supervisor_inventory_route.py index f6821c44..81ee3405 100644 --- a/edge_orchestrator/tests/functional_tests/steps/supervisor_inventory_route.py +++ b/edge_orchestrator/tests/functional_tests/steps/supervisor_inventory_route.py @@ -1,6 +1,6 @@ import json -from behave import when, then, use_step_matcher +from behave import then, use_step_matcher, when from behave.runner import Context from starlette.status import HTTP_200_OK diff --git a/edge_orchestrator/tests/functional_tests/steps/supervisor_items_routes.py b/edge_orchestrator/tests/functional_tests/steps/supervisor_items_routes.py index 67933d92..8b83dbad 100644 --- a/edge_orchestrator/tests/functional_tests/steps/supervisor_items_routes.py +++ b/edge_orchestrator/tests/functional_tests/steps/supervisor_items_routes.py @@ -1,45 +1,49 @@ -from behave import given, when, then, use_step_matcher +from behave import given, then, use_step_matcher, when from behave.runner import Context -use_step_matcher('re') +use_step_matcher("re") @given("item '([a-zA-Z0-9-_]+)' is stored") def client_trigger_item_capture_and_storage(context: Context, item_id: str): - response = context.test_client.post('/api/v1/trigger', - json={'category': 'station_config_TEST', 'serial_number': 'serial_number'}) + response = context.test_client.post( + "/api/v1/trigger", + json={"category": "station_config_TEST", "serial_number": "serial_number"}, + ) assert response.status_code == 200 - context.item_id = response.json()['item_id'] + context.item_id = response.json()["item_id"] -@when('the client requests the items metadata list') +@when("the client requests the items metadata list") def client_get_items(context: Context): - context.response = context.test_client.get('/api/v1/items') + context.response = context.test_client.get("/api/v1/items") assert context.response.status_code == 200 @when("the item '([a-zA-Z0-9-_]+)' metadata is requested") def client_get_item_metadata(context: Context, item_id: str): - response = context.test_client.get('/api/v1/items/' + context.item_id) + response = context.test_client.get("/api/v1/items/" + context.item_id) assert response.status_code == 200 context.json_response = response.json() @when("one item '([a-zA-Z0-9-_]+)' binary from camera '([a-zA-Z0-9-_]+)' is requested") def client_get_item_binary(context: Context, item_id: str, camera_id: str): - response = context.test_client.get('/api/v1/items/' + context.item_id + '/binaries/' + camera_id) + response = context.test_client.get( + "/api/v1/items/" + context.item_id + "/binaries/" + camera_id + ) assert response.status_code == 200 context.binary_response = response.content -@then('the client 
receives the items metadata list') +@then("the client receives the items metadata list") def check_response(context: Context): assert isinstance(context.response.json(), list) @then("the item '([a-zA-Z0-9-_]+)' metadata is read") def check_item_metadata_is_read(context: Context, item_id: str): - response = context.test_client.get('/api/v1/items/' + context.item_id) + response = context.test_client.get("/api/v1/items/" + context.item_id) assert response.status_code == 200 response_json = response.json() assert response_json["serial_number"] == context.table[0]["serial_number"] diff --git a/edge_orchestrator/tests/functional_tests/steps/supervisor_trigger_route.py b/edge_orchestrator/tests/functional_tests/steps/supervisor_trigger_route.py index ff895e82..b28a3fc9 100644 --- a/edge_orchestrator/tests/functional_tests/steps/supervisor_trigger_route.py +++ b/edge_orchestrator/tests/functional_tests/steps/supervisor_trigger_route.py @@ -3,58 +3,67 @@ from urllib.parse import urlparse import psycopg2 -from behave import given, when, then, use_step_matcher +from behave import given, then, use_step_matcher, when from behave.runner import Context from starlette.status import HTTP_200_OK from edge_orchestrator.domain.models.decision import Decision -from tests.functional_tests.steps.common_steps import assert_metadata_almost_equal, assert_decision_is_valid, \ - assert_state_is_valid +from tests.functional_tests.steps.common_steps import ( + assert_decision_is_valid, + assert_metadata_almost_equal, + assert_state_is_valid, +) -use_step_matcher('re') +use_step_matcher("re") @given("the config '([a-zA-Z0-9-_]+)' is activated") def config_is_activated(context: Context, config_name: str): - with (context.test_directory / 'config' / 'station_configs' / f'{config_name}.json').open('r') as f: + with ( + context.test_directory / "config" / "station_configs" / f"{config_name}.json" + ).open("r") as f: config = json.load(f) - context.execute_steps(u''' + context.execute_steps( + """ When the client activates configuration '{station_config_TEST}' Then the active configuration is \"\"\" {config} \"\"\" - '''.format(station_config_TEST=config_name, config=json.dumps(config))) + """.format( + station_config_TEST=config_name, config=json.dumps(config) + ) + ) -@given('the following cameras are registered in the configuration') +@given("the following cameras are registered in the configuration") def following_cameras_are_registered_in_the_configuration(context: Context): - response = context.test_client.get('/api/v1/configs/active') + response = context.test_client.get("/api/v1/configs/active") assert response.status_code == HTTP_200_OK response_content = response.json() cameras = {} for row in context.table: - current_camera_conf = response_content['cameras'][row['camera_id']] - cameras[row['camera_id']] = current_camera_conf - assert current_camera_conf['type'] == row['camera_type'] - assert current_camera_conf['source'] == row['source'] + current_camera_conf = response_content["cameras"][row["camera_id"]] + cameras[row["camera_id"]] = current_camera_conf + assert current_camera_conf["type"] == row["camera_type"] + assert current_camera_conf["source"] == row["source"] context.cameras = cameras - assert len(response_content['cameras'].keys()) == len(context.table.rows) + assert len(response_content["cameras"].keys()) == len(context.table.rows) -@when('the client triggers a visual inspection') +@when("the client triggers a visual inspection") def client_triggers_visual_inspection(context: Context): - response = 
context.test_client.post('/api/v1/trigger') + response = context.test_client.post("/api/v1/trigger") assert response.status_code == HTTP_200_OK - context.item_id = response.json()['item_id'] + context.item_id = response.json()["item_id"] -@then('item metadata like the following are captured') +@then("item metadata like the following are captured") def item_metadata_like_following_are_captured(context: Context): - response = context.test_client.get(f'/api/v1/items/{context.item_id}') + response = context.test_client.get(f"/api/v1/items/{context.item_id}") assert response.status_code == HTTP_200_OK actual_item_metadata = response.json() @@ -62,74 +71,94 @@ def item_metadata_like_following_are_captured(context: Context): assert_metadata_almost_equal(actual_item_metadata, expected_item_metadata) -@then('the item binaries are stored') +@then("the item binaries are stored") def check_item_binaries_are_stored(context: Context): - response_1 = context.test_client.get(f'/api/v1/items/{context.item_id}/binaries') + response_1 = context.test_client.get(f"/api/v1/items/{context.item_id}/binaries") assert response_1.status_code == HTTP_200_OK response_1_content = response_1.json() for row in context.table: assert f'{row["binary_name"]}.{row["binary_extension"]}' in response_1_content - path_to_tests_images = context.test_directory / 'data' / context.cameras[row['binary_name']][ - 'source'] - tests_images = [filepath.open('rb').read() for filepath in path_to_tests_images.iterdir() - if filepath.suffix == '.jpg'] - - response_2 = context.test_client.get(f'/api/v1/items/{context.item_id}/binaries/{row["binary_name"]}') + path_to_tests_images = ( + context.test_directory + / "data" + / context.cameras[row["binary_name"]]["source"] + ) + tests_images = [ + filepath.open("rb").read() + for filepath in path_to_tests_images.iterdir() + if filepath.suffix == ".jpg" + ] + + response_2 = context.test_client.get( + f'/api/v1/items/{context.item_id}/binaries/{row["binary_name"]}' + ) assert response_2.status_code == HTTP_200_OK assert response_2.content in tests_images assert len(response_1_content) == len(context.table.rows) -@then('the item inference is computed') +@then("the item inference is computed") def check_inference_is_computed(context: Context): - response = context.test_client.get(f'/api/v1/items/{context.item_id}') + response = context.test_client.get(f"/api/v1/items/{context.item_id}") assert response.status_code == HTTP_200_OK expected_item_metadata = json.loads(context.text) - context.execute_steps(u''' + context.execute_steps( + """ Then item metadata like the following are captured \"\"\" {item_metadata} \"\"\" - '''.format(item_metadata=json.dumps(expected_item_metadata))) + """.format( + item_metadata=json.dumps(expected_item_metadata) + ) + ) -@then('the item decision is made') +@then("the item decision is made") def check_business_value_is_made(context: Context): - response = context.test_client.get(f'/api/v1/items/{context.item_id}') + response = context.test_client.get(f"/api/v1/items/{context.item_id}") assert response.status_code == HTTP_200_OK - assert_decision_is_valid(response.json()['decision']) + assert_decision_is_valid(response.json()["decision"]) -@then('the item state is set to done') +@then("the item state is set to done") def check_state_is_done(context: Context): - response = context.test_client.get(f'/api/v1/items/{context.item_id}') + response = context.test_client.get(f"/api/v1/items/{context.item_id}") assert response.status_code == HTTP_200_OK - 
assert_state_is_valid(response.json()['state'], 'Done') + assert_state_is_valid(response.json()["state"], "Done") -@then('the item metadata are stored') +@then("the item metadata are stored") def check_metadata_is_stored(context: Context): - response = context.test_client.get('/api/v1/items') + response = context.test_client.get("/api/v1/items") assert response.status_code == HTTP_200_OK - assert context.item_id in [item['id'] for item in response.json()] + assert context.item_id in [item["id"] for item in response.json()] @then("a telemetry message is stored") def check_telemetry_message_is_stored(context): result = urlparse(context.postgres_db_uri) - username, password, hostname, port = result.username, result.password, result.hostname, result.port + username, password, hostname, port = ( + result.username, + result.password, + result.hostname, + result.port, + ) database = result.path[1:] - connection = psycopg2.connect(dbname=database, user=username, password=password, - host=hostname, port=port) + connection = psycopg2.connect( + dbname=database, user=username, password=password, host=hostname, port=port + ) with connection.cursor() as curs: - curs.execute('SELECT * FROM iothub.telemetry WHERE item_id = %s;', (context.item_id,)) + curs.execute( + "SELECT * FROM iothub.telemetry WHERE item_id = %s;", (context.item_id,) + ) res = curs.fetchone() _id, device_id, decision, timestamp, item_id, config_res = res assert isinstance(_id, str) - assert device_id.startswith('device_') + assert device_id.startswith("device_") assert Decision(decision) assert isinstance(timestamp, datetime) assert isinstance(item_id, str) - assert config_res == 'station_config_TEST' + assert config_res == "station_config_TEST" diff --git a/edge_orchestrator/tests/functional_tests/supervisor_trigger_route.feature b/edge_orchestrator/tests/functional_tests/supervisor_trigger_route.feature index 39a6eb48..a61f3368 100644 --- a/edge_orchestrator/tests/functional_tests/supervisor_trigger_route.feature +++ b/edge_orchestrator/tests/functional_tests/supervisor_trigger_route.feature @@ -4,8 +4,8 @@ Feature: The client trigger a visual inspection and request the resulting metada Given the app is up and running And the config 'station_config_TEST' is activated And the following cameras are registered in the configuration - | camera_id | camera_type | source | - | camera_id3 | fake | marker_images | + | camera_id | camera_type | source | + | camera_id3 | fake | marker_images | When the client triggers a visual inspection Then item metadata like the following are captured """ diff --git a/edge_orchestrator/tests/integration_tests/application/conftest.py b/edge_orchestrator/tests/integration_tests/application/conftest.py index 2fb002cc..2473da88 100644 --- a/edge_orchestrator/tests/integration_tests/application/conftest.py +++ b/edge_orchestrator/tests/integration_tests/application/conftest.py @@ -1,3 +1,3 @@ import os -os.environ['API_CONFIG'] = 'default' +os.environ["API_CONFIG"] = "default" diff --git a/edge_orchestrator/tests/integration_tests/application/test_server.py b/edge_orchestrator/tests/integration_tests/application/test_server.py index 6aeecd34..8d8a37db 100644 --- a/edge_orchestrator/tests/integration_tests/application/test_server.py +++ b/edge_orchestrator/tests/integration_tests/application/test_server.py @@ -2,26 +2,30 @@ from fastapi.testclient import TestClient -from tests.conftest import TEST_DATA_FOLDER_PATH -from edge_orchestrator.application.server import server from edge_orchestrator.api_config import 
get_metadata_storage +from edge_orchestrator.application.server import server +from tests.conftest import TEST_DATA_FOLDER_PATH class TestServer: def test_upload_route__should_return_expected_logs_when_received_paylod_with_binary_image( - self, - caplog): + self, caplog + ): # Given client = TestClient(server()) - test_file = 'camera_id1.jpg' - test_file_path = TEST_DATA_FOLDER_PATH / 'item_2' / test_file - expected_logs = ["Starting Save Binaries", - "Entering try Save Binaries", - "End of Save Binaries"] + test_file = "camera_id1.jpg" + test_file_path = TEST_DATA_FOLDER_PATH / "item_2" / test_file + expected_logs = [ + "Starting Save Binaries", + "Entering try Save Binaries", + "End of Save Binaries", + ] # When with open(test_file_path, "rb") as f: - actual_response = client.post("/api/v1/upload", files={"image": ("filename", f, "image/jpeg")}) + actual_response = client.post( + "/api/v1/upload", files={"image": ("filename", f, "image/jpeg")} + ) actual_logs = [] for record in caplog.records: @@ -33,19 +37,28 @@ def test_upload_route__should_return_expected_logs_when_received_paylod_with_bin assert actual_logs == expected_logs def test_get_item_metadata__should_return_expected_paylod_when_received_specific_item_id( - self, - my_item_0, - caplog): + self, my_item_0, caplog + ): # Given metadata_storage = get_metadata_storage() metadata_storage.save_item_metadata(my_item_0) client = TestClient(server()) test_item_id = my_item_0.id - keys_expected = ['serial_number', 'category', 'station_config', 'cameras', 'received_time', 'inferences', - 'decision', 'state', 'error', 'id'] + keys_expected = [ + "serial_number", + "category", + "station_config", + "cameras", + "received_time", + "inferences", + "decision", + "state", + "error", + "id", + ] # When - actual_response = client.get(f'/api/v1/items/{test_item_id}') + actual_response = client.get(f"/api/v1/items/{test_item_id}") # Then assert actual_response.status_code == 200 diff --git a/edge_orchestrator/tests/integration_tests/infrastructure/metadata_storage/test_mongodb_metadata_storage.py b/edge_orchestrator/tests/integration_tests/infrastructure/metadata_storage/test_mongodb_metadata_storage.py index 11d54adc..a2da4d9f 100644 --- a/edge_orchestrator/tests/integration_tests/infrastructure/metadata_storage/test_mongodb_metadata_storage.py +++ b/edge_orchestrator/tests/integration_tests/infrastructure/metadata_storage/test_mongodb_metadata_storage.py @@ -1,12 +1,15 @@ -from edge_orchestrator.infrastructure.metadata_storage.mongodb_metadata_storage import MongoDbMetadataStorage +from edge_orchestrator.infrastructure.metadata_storage.mongodb_metadata_storage import ( + MongoDbMetadataStorage, +) class TestMongoDbItemStorage: - - def test_save_item_metadata_should_write_item_metadata_in_mongo_db(self, test_mongo_db_uri, my_item_0): + def test_save_item_metadata_should_write_item_metadata_in_mongo_db( + self, test_mongo_db_uri, my_item_0 + ): # Given metadata_storage = MongoDbMetadataStorage(mongodb_uri=test_mongo_db_uri) - my_item_0.id = 'd1adfc08-cb98-46d6-ae9c-b07c5d16a2ec' + my_item_0.id = "d1adfc08-cb98-46d6-ae9c-b07c5d16a2ec" # When metadata_storage.save_item_metadata(my_item_0) @@ -15,56 +18,73 @@ def test_save_item_metadata_should_write_item_metadata_in_mongo_db(self, test_mo all_items_metadata = [item for item in metadata_storage.items_metadata.find()] assert all_items_metadata == [ { - '_id': 'd1adfc08-cb98-46d6-ae9c-b07c5d16a2ec', - 'serial_number': '123', - 'category': 'tacos', - 'station_config': None, - 'cameras': { - 'camera_1': 
{'brightness': 100, 'exposition': 100, 'position': 'right'}, - 'camera_2': {'brightness': 100, 'exposition': 100, 'position': 'left'}}, - 'received_time': '2021-05-19 15:00:00', - 'inferences': {}, - 'decision': {}, - 'state': None, - 'error': None + "_id": "d1adfc08-cb98-46d6-ae9c-b07c5d16a2ec", + "serial_number": "123", + "category": "tacos", + "station_config": None, + "cameras": { + "camera_1": { + "brightness": 100, + "exposition": 100, + "position": "right", + }, + "camera_2": { + "brightness": 100, + "exposition": 100, + "position": "left", + }, + }, + "received_time": "2021-05-19 15:00:00", + "inferences": {}, + "decision": {}, + "state": None, + "error": None, } ] - def test_get_item_metadata_should_return_requested_item_metadata(self, test_mongo_db_uri, my_item_0): + def test_get_item_metadata_should_return_requested_item_metadata( + self, test_mongo_db_uri, my_item_0 + ): # Given - my_item_0.id = 'd1adfc08-cb98-46d6-ae9c-b07c5d16a2ec' + my_item_0.id = "d1adfc08-cb98-46d6-ae9c-b07c5d16a2ec" metadata_storage = MongoDbMetadataStorage(mongodb_uri=test_mongo_db_uri) - metadata_storage.items_metadata.update_one({'_id': my_item_0.id}, {'$set': my_item_0.get_metadata(False)}, - upsert=True) + metadata_storage.items_metadata.update_one( + {"_id": my_item_0.id}, {"$set": my_item_0.get_metadata(False)}, upsert=True + ) # When item_metadata = metadata_storage.get_item_metadata(my_item_0.id) # Then assert item_metadata == { - 'id': 'd1adfc08-cb98-46d6-ae9c-b07c5d16a2ec', - 'serial_number': '123', - 'category': 'tacos', - 'station_config': None, - 'cameras': { - 'camera_1': {'brightness': 100, 'exposition': 100, 'position': 'right'}, - 'camera_2': {'brightness': 100, 'exposition': 100, 'position': 'left'}}, - 'received_time': '2021-05-19 15:00:00', - 'inferences': {}, - 'decision': {}, - 'state': None, - 'error': None + "id": "d1adfc08-cb98-46d6-ae9c-b07c5d16a2ec", + "serial_number": "123", + "category": "tacos", + "station_config": None, + "cameras": { + "camera_1": {"brightness": 100, "exposition": 100, "position": "right"}, + "camera_2": {"brightness": 100, "exposition": 100, "position": "left"}, + }, + "received_time": "2021-05-19 15:00:00", + "inferences": {}, + "decision": {}, + "state": None, + "error": None, } - def test_get_all_items_metadata_should_return_all_items(self, test_mongo_db_uri, my_item_0, my_item_2): + def test_get_all_items_metadata_should_return_all_items( + self, test_mongo_db_uri, my_item_0, my_item_2 + ): # Given - my_item_0.id = 'd1adfc08-cb98-46d6-ae9c-b07c5d16a2ec' - my_item_2.id = 'af6b4922-8e4a-4dbc-ac9b-b5fd56ceaf25' + my_item_0.id = "d1adfc08-cb98-46d6-ae9c-b07c5d16a2ec" + my_item_2.id = "af6b4922-8e4a-4dbc-ac9b-b5fd56ceaf25" metadata_storage = MongoDbMetadataStorage(mongodb_uri=test_mongo_db_uri) - metadata_storage.items_metadata.update_one({'_id': my_item_0.id}, {'$set': my_item_0.get_metadata(False)}, - upsert=True) - metadata_storage.items_metadata.update_one({'_id': my_item_2.id}, {'$set': my_item_2.get_metadata(False)}, - upsert=True) + metadata_storage.items_metadata.update_one( + {"_id": my_item_0.id}, {"$set": my_item_0.get_metadata(False)}, upsert=True + ) + metadata_storage.items_metadata.update_one( + {"_id": my_item_2.id}, {"$set": my_item_2.get_metadata(False)}, upsert=True + ) # When item_metadata = metadata_storage.get_all_items_metadata() @@ -72,31 +92,44 @@ def test_get_all_items_metadata_should_return_all_items(self, test_mongo_db_uri, # Then assert item_metadata == [ { - 'id': 'd1adfc08-cb98-46d6-ae9c-b07c5d16a2ec', - 'serial_number': 
'123', - 'category': 'tacos', - 'station_config': None, - 'cameras': { - 'camera_1': {'brightness': 100, 'exposition': 100, 'position': 'right'}, - 'camera_2': {'brightness': 100, 'exposition': 100, 'position': 'left'}}, - 'received_time': '2021-05-19 15:00:00', - 'inferences': {}, - 'decision': {}, - 'state': None, - 'error': None + "id": "d1adfc08-cb98-46d6-ae9c-b07c5d16a2ec", + "serial_number": "123", + "category": "tacos", + "station_config": None, + "cameras": { + "camera_1": { + "brightness": 100, + "exposition": 100, + "position": "right", + }, + "camera_2": { + "brightness": 100, + "exposition": 100, + "position": "left", + }, + }, + "received_time": "2021-05-19 15:00:00", + "inferences": {}, + "decision": {}, + "state": None, + "error": None, }, { - 'id': 'af6b4922-8e4a-4dbc-ac9b-b5fd56ceaf25', - 'serial_number': '123', - 'category': 'tacos', - 'station_config': None, - 'cameras': { - 'camera_3': {'brightness': 100, 'exposition': 100, 'position': 'top'} + "id": "af6b4922-8e4a-4dbc-ac9b-b5fd56ceaf25", + "serial_number": "123", + "category": "tacos", + "station_config": None, + "cameras": { + "camera_3": { + "brightness": 100, + "exposition": 100, + "position": "top", + } }, - 'received_time': '2021-05-19 15:00:00', - 'inferences': {}, - 'decision': {}, - 'state': None, - 'error': None - } + "received_time": "2021-05-19 15:00:00", + "inferences": {}, + "decision": {}, + "state": None, + "error": None, + }, ] diff --git a/edge_orchestrator/tests/integration_tests/infrastructure/model_forward/test_tf_serving_detection_wrapper.py b/edge_orchestrator/tests/integration_tests/infrastructure/model_forward/test_tf_serving_detection_wrapper.py index c4ced6f6..bda5b6f3 100644 --- a/edge_orchestrator/tests/integration_tests/infrastructure/model_forward/test_tf_serving_detection_wrapper.py +++ b/edge_orchestrator/tests/integration_tests/infrastructure/model_forward/test_tf_serving_detection_wrapper.py @@ -1,6 +1,7 @@ -import pytest import os +import pytest + from edge_orchestrator.domain.models.model_infos import ModelInfos from edge_orchestrator.infrastructure.model_forward.tf_serving_detection_wrapper import ( TFServingDetectionWrapper, @@ -13,11 +14,13 @@ class TestTFServingDetectionWrapper: @pytest.mark.parametrize( "setup_test_tensorflow_serving", ["mobilenet_v1_640x640"], indirect=True ) - async def test_perform_inference_should_detected_a_cat(self, test_tensorflow_serving_base_url, my_binaries_0): + async def test_perform_inference_should_detected_a_cat( + self, test_tensorflow_serving_base_url, my_binaries_0 + ): # Given tf_serving_model_forwarder = TFServingDetectionWrapper( base_url=test_tensorflow_serving_base_url, - class_names_path=TEST_DATA_FOLDER_PATH / "test_detection_labels" + class_names_path=TEST_DATA_FOLDER_PATH / "test_detection_labels", ) model_inference_version = ModelInfos( @@ -36,11 +39,15 @@ async def test_perform_inference_should_detected_a_cat(self, test_tensorflow_ser class_names_path=os.path.join( TEST_DATA_FOLDER_PATH, "test_detection_labels" ), - objectness_threshold=0.5 + objectness_threshold=0.5, ) expected_model_output = { - 'object_1': {'label': 'cat', 'location': [370, 2, 738, 340], 'score': 0.652510464} + "object_1": { + "label": "cat", + "location": [370, 2, 738, 340], + "score": 0.652510464, + } } # When @@ -53,17 +60,20 @@ async def test_perform_inference_should_detected_a_cat(self, test_tensorflow_ser for object_id, output in actual_model_output.items(): assert output["label"] == expected_model_output[object_id]["label"] assert output["location"] == 
expected_model_output[object_id]["location"] - assert round(output["score"], 5) == round(expected_model_output[object_id]["score"], 5) + assert round(output["score"], 5) == round( + expected_model_output[object_id]["score"], 5 + ) @pytest.mark.parametrize( "setup_test_tensorflow_serving", ["mobilenet_v1_640x640"], indirect=True ) - async def test_perform_inference_should_detected_a_cat_and_a_dog(self, test_tensorflow_serving_base_url, - my_binaries_0): # noqa + async def test_perform_inference_should_detected_a_cat_and_a_dog( + self, test_tensorflow_serving_base_url, my_binaries_0 + ): # noqa # Given tf_serving_model_forwarder = TFServingDetectionWrapper( base_url=test_tensorflow_serving_base_url, - class_names_path=TEST_DATA_FOLDER_PATH / "test_detection_labels" + class_names_path=TEST_DATA_FOLDER_PATH / "test_detection_labels", ) model_inference_version = ModelInfos( @@ -82,12 +92,20 @@ async def test_perform_inference_should_detected_a_cat_and_a_dog(self, test_tens class_names_path=os.path.join( TEST_DATA_FOLDER_PATH, "test_detection_labels" ), - objectness_threshold=0.5 + objectness_threshold=0.5, ) expected_model_output = { - 'object_1': {'label': 'dog', 'location': [234, 13, 778, 911], 'score': 0.717056394}, - 'object_2': {'label': 'cat', 'location': [796, 124, 1371, 935], 'score': 0.682666183} + "object_1": { + "label": "dog", + "location": [234, 13, 778, 911], + "score": 0.717056394, + }, + "object_2": { + "label": "cat", + "location": [796, 124, 1371, 935], + "score": 0.682666183, + }, } # When @@ -100,4 +118,6 @@ async def test_perform_inference_should_detected_a_cat_and_a_dog(self, test_tens for object_id, output in actual_model_output.items(): assert output["label"] == expected_model_output[object_id]["label"] assert output["location"] == expected_model_output[object_id]["location"] - assert round(output["score"], 5) == round(expected_model_output[object_id]["score"], 5) + assert round(output["score"], 5) == round( + expected_model_output[object_id]["score"], 5 + ) diff --git a/edge_orchestrator/tests/integration_tests/infrastructure/model_forward/test_tflite_serving_detection_wrapper.py b/edge_orchestrator/tests/integration_tests/infrastructure/model_forward/test_tflite_serving_detection_wrapper.py index bbc6abe6..f46776b8 100644 --- a/edge_orchestrator/tests/integration_tests/infrastructure/model_forward/test_tflite_serving_detection_wrapper.py +++ b/edge_orchestrator/tests/integration_tests/infrastructure/model_forward/test_tflite_serving_detection_wrapper.py @@ -1,6 +1,7 @@ -import pytest import os +import pytest + from edge_orchestrator.domain.models.model_infos import ModelInfos from edge_orchestrator.infrastructure.model_forward.tf_serving_detection_wrapper import ( TFServingDetectionWrapper, @@ -14,12 +15,12 @@ class TestTFServingDetectionWrapper: "setup_test_tflite_serving", ["mobilenet_ssd_v2_coco"], indirect=True ) async def test_perform_inference_should_detect_a_bear( - self, test_tflite_serving_base_url, my_binaries_0 + self, test_tflite_serving_base_url, my_binaries_0 ): # Given tf_serving_model_forwarder = TFServingDetectionWrapper( base_url=test_tflite_serving_base_url, - class_names_path=TEST_DATA_FOLDER_PATH / "test_detection_labels" + class_names_path=TEST_DATA_FOLDER_PATH / "test_detection_labels", ) model_inference_version = ModelInfos( @@ -38,11 +39,15 @@ async def test_perform_inference_should_detect_a_bear( class_names_path=os.path.join( TEST_DATA_FOLDER_PATH, "test_detection_labels" ), - objectness_threshold=0.5 + objectness_threshold=0.5, ) 
expected_model_output = { - 'object_1': {'label': 'bear', 'location': [376, 4, 722, 339], 'score': 0.87890625} + "object_1": { + "label": "bear", + "location": [376, 4, 722, 339], + "score": 0.87890625, + } } # When @@ -55,18 +60,20 @@ async def test_perform_inference_should_detect_a_bear( for object_id, output in actual_model_output.items(): assert output["label"] == expected_model_output[object_id]["label"] assert output["location"] == expected_model_output[object_id]["location"] - assert round(output["score"], 5) == round(expected_model_output[object_id]["score"], 5) + assert round(output["score"], 5) == round( + expected_model_output[object_id]["score"], 5 + ) @pytest.mark.parametrize( "setup_test_tflite_serving", ["mobilenet_ssd_v2_coco"], indirect=True ) async def test_perform_inference_should_detect_a_cat_and_a_dog( - self, test_tflite_serving_base_url, my_binaries_0 + self, test_tflite_serving_base_url, my_binaries_0 ): # Given tf_serving_model_forwarder = TFServingDetectionWrapper( base_url=test_tflite_serving_base_url, - class_names_path=TEST_DATA_FOLDER_PATH / "test_detection_labels" + class_names_path=TEST_DATA_FOLDER_PATH / "test_detection_labels", ) model_inference_version = ModelInfos( @@ -85,12 +92,20 @@ async def test_perform_inference_should_detect_a_cat_and_a_dog( class_names_path=os.path.join( TEST_DATA_FOLDER_PATH, "test_detection_labels" ), - objectness_threshold=0.5 + objectness_threshold=0.5, ) expected_model_output = { - 'object_1': {'label': 'cat', 'location': [774, 132, 1377, 946], 'score': 0.93359375}, - 'object_2': {'label': 'dog', 'location': [225, 6, 796, 915], 'score': 0.91015625} + "object_1": { + "label": "cat", + "location": [774, 132, 1377, 946], + "score": 0.93359375, + }, + "object_2": { + "label": "dog", + "location": [225, 6, 796, 915], + "score": 0.91015625, + }, } # When @@ -103,4 +118,6 @@ async def test_perform_inference_should_detect_a_cat_and_a_dog( for object_id, output in actual_model_output.items(): assert output["label"] == expected_model_output[object_id]["label"] assert output["location"] == expected_model_output[object_id]["location"] - assert round(output["score"], 5) == round(expected_model_output[object_id]["score"], 5) + assert round(output["score"], 5) == round( + expected_model_output[object_id]["score"], 5 + ) diff --git a/edge_orchestrator/tests/integration_tests/infrastructure/station_config/test_json_station_config.py b/edge_orchestrator/tests/integration_tests/infrastructure/station_config/test_json_station_config.py index c4fcdbe4..b2e42e33 100644 --- a/edge_orchestrator/tests/integration_tests/infrastructure/station_config/test_json_station_config.py +++ b/edge_orchestrator/tests/integration_tests/infrastructure/station_config/test_json_station_config.py @@ -1,23 +1,34 @@ import datetime as dt +import os import pytest -import os from freezegun import freeze_time from edge_orchestrator.domain.models.model_infos import ModelInfos from edge_orchestrator.infrastructure.inventory.json_inventory import JsonInventory -from edge_orchestrator.infrastructure.station_config.json_station_config import JsonStationConfig -from tests.conftest import TEST_INVENTORY_PATH, TEST_STATION_CONFIGS_FOLDER_PATH, TEST_DATA_FOLDER_PATH +from edge_orchestrator.infrastructure.station_config.json_station_config import ( + JsonStationConfig, +) +from tests.conftest import ( + TEST_DATA_FOLDER_PATH, + TEST_INVENTORY_PATH, + TEST_STATION_CONFIGS_FOLDER_PATH, +) -@freeze_time(lambda: dt.datetime(year=2021, month=5, day=19, hour=15, minute=0, second=0)) 
+@freeze_time( + lambda: dt.datetime(year=2021, month=5, day=19, hour=15, minute=0, second=0) +) class TestJsonStationConfig: - - def test_get_models_for_camera_should_return_one_model_infos_when_camera_config_has_one_model(self): + def test_get_models_for_camera_should_return_one_model_infos_when_camera_config_has_one_model( + self, + ): # Given inventory = JsonInventory(TEST_INVENTORY_PATH) - json_station_config = JsonStationConfig(TEST_STATION_CONFIGS_FOLDER_PATH, inventory, TEST_DATA_FOLDER_PATH) - camera_id = 'camera_id3' + json_station_config = JsonStationConfig( + TEST_STATION_CONFIGS_FOLDER_PATH, inventory, TEST_DATA_FOLDER_PATH + ) + camera_id = "camera_id3" # When with pytest.raises(TypeError) as error: @@ -25,40 +36,46 @@ def test_get_models_for_camera_should_return_one_model_infos_when_camera_config_ assert str(error.value) == "'NoneType' object is not subscriptable" - def test_get_models_for_camera_should_return_two_model_infos_when_camera_config_has_two_models(self): + def test_get_models_for_camera_should_return_two_model_infos_when_camera_config_has_two_models( + self, + ): # Given inventory = JsonInventory(TEST_INVENTORY_PATH) - json_station_config = JsonStationConfig(TEST_STATION_CONFIGS_FOLDER_PATH, inventory, TEST_DATA_FOLDER_PATH) - json_station_config.set_station_config('station_config_TEST2') - camera_id = 'camera_id3' + json_station_config = JsonStationConfig( + TEST_STATION_CONFIGS_FOLDER_PATH, inventory, TEST_DATA_FOLDER_PATH + ) + json_station_config.set_station_config("station_config_TEST2") + camera_id = "camera_id3" expected = [ ModelInfos( - id='model_1', + id="model_1", depends_on=[], - name='mobilenet_v1_640x640', - category='object_detection', + name="mobilenet_v1_640x640", + category="object_detection", class_names=None, - class_names_path=os.path.join(TEST_DATA_FOLDER_PATH, 'test_detection_labels'), - version='1', - camera_id='camera_id3', - boxes_coordinates='detection_boxes', - objectness_scores='detection_scores', - number_of_boxes='num_detections', - class_to_detect='foo', - detection_classes='detection_classes', - objectness_threshold=0.5 + class_names_path=os.path.join( + TEST_DATA_FOLDER_PATH, "test_detection_labels" + ), + version="1", + camera_id="camera_id3", + boxes_coordinates="detection_boxes", + objectness_scores="detection_scores", + number_of_boxes="num_detections", + class_to_detect="foo", + detection_classes="detection_classes", + objectness_threshold=0.5, ), ModelInfos( - id='model_2', - depends_on=['model_1'], - name='inception', - category='classification', - class_names=['OK', 'KO'], - version='1', - camera_id='camera_id3', - image_resolution=[224, 224] - ) + id="model_2", + depends_on=["model_1"], + name="inception", + category="classification", + class_names=["OK", "KO"], + version="1", + camera_id="camera_id3", + image_resolution=[224, 224], + ), ] # When diff --git a/edge_orchestrator/tests/integration_tests/infrastructure/telemetry_sink/test_postgresql_telemetry_sink.py b/edge_orchestrator/tests/integration_tests/infrastructure/telemetry_sink/test_postgresql_telemetry_sink.py index 5e6d6b66..57bb0e17 100644 --- a/edge_orchestrator/tests/integration_tests/infrastructure/telemetry_sink/test_postgresql_telemetry_sink.py +++ b/edge_orchestrator/tests/integration_tests/infrastructure/telemetry_sink/test_postgresql_telemetry_sink.py @@ -4,19 +4,24 @@ import pytest from freezegun import freeze_time -from edge_orchestrator.infrastructure.telemetry_sink.postgresql_telemetry_sink import PostgresTelemetrySink +from 
edge_orchestrator.infrastructure.telemetry_sink.postgresql_telemetry_sink import ( + PostgresTelemetrySink, +) @pytest.mark.asyncio class TestPostgresTelemetrySink: - - @freeze_time(lambda: dt.datetime(year=2023, month=1, day=27, hour=15, minute=0, second=0)) - async def test_insert_and_select_query_given_one_telemetry_message(self, test_postgres_db_uri): + @freeze_time( + lambda: dt.datetime(year=2023, month=1, day=27, hour=15, minute=0, second=0) + ) + async def test_insert_and_select_query_given_one_telemetry_message( + self, test_postgres_db_uri + ): # Given telemetry_msg = { - 'decision': 'OK', - 'item_id': '999-1b2-888', - 'config': 'config1' + "decision": "OK", + "item_id": "999-1b2-888", + "config": "config1", } random.seed(42) telemetry_sink = PostgresTelemetrySink(test_postgres_db_uri) @@ -26,18 +31,18 @@ async def test_insert_and_select_query_given_one_telemetry_message(self, test_po # Then with telemetry_sink.connection.cursor() as curs: - curs.execute('SELECT * FROM iothub.telemetry') + curs.execute("SELECT * FROM iothub.telemetry") res = curs.fetchone() _id, device_id, decision, timestamp, item_id, config_res = res - assert device_id == 'device_40' - assert decision == 'OK' + assert device_id == "device_40" + assert decision == "OK" assert timestamp == dt.datetime(2023, 1, 27, 15, 0) - assert item_id == '999-1b2-888' - assert config_res == 'config1' + assert item_id == "999-1b2-888" + assert config_res == "config1" def test_timeout_error_should_be_raised_with_no_postgresql_db(self): # Given - vio = 'postgresql://vio:vio@localhost:2345/vio' + vio = "postgresql://vio:vio@localhost:2345/vio" telemetry_sink = PostgresTelemetrySink(vio, timeout=2, interval=2) # When @@ -45,4 +50,8 @@ def test_timeout_error_should_be_raised_with_no_postgresql_db(self): telemetry_sink.connection # Then - assert str(error.value) == "Unable to connect to Telemetry Postgres DB using postgresql://vio:vio@localhost:2345/vio after 2 seconds" # noqa + assert ( + str(error.value) + == "Unable to connect to Telemetry Postgres DB using postgresql://vio:vio@localhost:2345/vio " + "after 2 seconds" + ) diff --git a/edge_orchestrator/tests/tests_pyramid.py b/edge_orchestrator/tests/tests_pyramid.py index e72e4329..09720c5e 100644 --- a/edge_orchestrator/tests/tests_pyramid.py +++ b/edge_orchestrator/tests/tests_pyramid.py @@ -8,64 +8,89 @@ PARENT_DIR = Path(__file__).parent -@click.command('measure-tests-pyramid-and-create-gitlab-badges') -@click.option('--badges-generation/--no-badges-generation', default=False) +@click.command("measure-tests-pyramid-and-create-gitlab-badges") +@click.option("--badges-generation/--no-badges-generation", default=False) def measure_tests_pyramid_and_create_gitlab_badges(badges_generation: bool): - number_of_func_tests, number_of_int_tests, number_of_unit_tests = compute_number_of_each_test_type() - total_number_of_tests = number_of_unit_tests + number_of_int_tests + number_of_func_tests + ( + number_of_func_tests, + number_of_int_tests, + number_of_unit_tests, + ) = compute_number_of_each_test_type() + total_number_of_tests = ( + number_of_unit_tests + number_of_int_tests + number_of_func_tests + ) percentage_of_unit_tests = percentage(number_of_unit_tests, total_number_of_tests) percentage_of_int_tests = percentage(number_of_int_tests, total_number_of_tests) percentage_of_func_tests = percentage(number_of_func_tests, total_number_of_tests) - print(f'''\n - \033[96m /\\. - /|_\\\`. 
[*] functional tests: {number_of_func_tests}/{total_number_of_tests} --> {percentage_of_func_tests:.2f} % - \033[94m /__|_\\/\`. - /__|__|\\/.\`. [*] integration tests: {number_of_int_tests}/{total_number_of_tests} --> {percentage_of_int_tests:.2f} % - \033[92m /_|__|__|\\/\`/\`. - /|__|___|__\\/\`/ - /__|___|___|_\\/ [*] unit tests: {number_of_unit_tests}/{total_number_of_tests} --> {percentage_of_unit_tests:.2f} %\033[0m -\n''') # noqa - - check_if_pyramid_is_ok(number_of_unit_tests, number_of_int_tests, number_of_func_tests) + print( + f"""\n + \033[96m /\\ + /|_\\ [*] functional tests: {number_of_func_tests}/{total_number_of_tests} --> {percentage_of_func_tests:.2f} % + \033[94m /__|_\\ + /__|__|\\ [*] integration tests: {number_of_int_tests}/{total_number_of_tests} --> {percentage_of_int_tests:.2f} % + \033[92m /_|__|__|\\ + /|__|___|__\\ [*] unit tests: {number_of_unit_tests}/{total_number_of_tests} --> {percentage_of_unit_tests:.2f} % + /__|___|___|_\\\033[0m +\n""" # noqa + ) + + check_if_pyramid_is_ok( + number_of_unit_tests, number_of_int_tests, number_of_func_tests + ) if badges_generation: import anybadge - badges_dir = PARENT_DIR.parent / 'badges' + badges_dir = PARENT_DIR.parent / "badges" badges_dir.mkdir(parents=True, exist_ok=True) - unit_test_file = (badges_dir / 'badge_unit_tests.svg').as_posix() - int_test_file = (badges_dir / 'badge_int_tests.svg').as_posix() - func_test_file = (badges_dir / 'badge_func_tests.svg').as_posix() + unit_test_file = (badges_dir / "badge_unit_tests.svg").as_posix() + int_test_file = (badges_dir / "badge_int_tests.svg").as_posix() + func_test_file = (badges_dir / "badge_func_tests.svg").as_posix() func_color, int_color, unit_color = get_color_according_to_percentage( - percentage_of_func_tests, percentage_of_int_tests, percentage_of_unit_tests) - - anybadge.Badge(label='unit-tests-ratio', value=f'{percentage_of_unit_tests:.2f} %', - default_color=unit_color).write_badge(unit_test_file, overwrite=True) - anybadge.Badge(label='integration-tests-ratio', value=f'{percentage_of_int_tests:.2f} %', - default_color=int_color).write_badge(int_test_file, overwrite=True) - anybadge.Badge(label='functional-tests-ratio', value=f'{percentage_of_func_tests:.2f} %', - default_color=func_color).write_badge(func_test_file, overwrite=True) + percentage_of_func_tests, percentage_of_int_tests, percentage_of_unit_tests + ) + + anybadge.Badge( + label="unit-tests-ratio", + value=f"{percentage_of_unit_tests:.2f} %", + default_color=unit_color, + ).write_badge(unit_test_file, overwrite=True) + anybadge.Badge( + label="integration-tests-ratio", + value=f"{percentage_of_int_tests:.2f} %", + default_color=int_color, + ).write_badge(int_test_file, overwrite=True) + anybadge.Badge( + label="functional-tests-ratio", + value=f"{percentage_of_func_tests:.2f} %", + default_color=func_color, + ).write_badge(func_test_file, overwrite=True) def compute_number_of_each_test_type( - path_to_unit_tests: Path = PARENT_DIR / 'unit_tests', - path_to_integration_tests: Path = PARENT_DIR / 'integration_tests', - path_to_functional_tests: Path = PARENT_DIR / 'functional_tests', - test_function_pattern: re.Pattern = re.compile('def test_'), - test_scenario_pattern: re.Pattern = re.compile('Scenario:')) -> Tuple[int, int, int]: + path_to_unit_tests: Path = PARENT_DIR / "unit_tests", + path_to_integration_tests: Path = PARENT_DIR / "integration_tests", + path_to_functional_tests: Path = PARENT_DIR / "functional_tests", + test_function_pattern: re.Pattern = re.compile("def test_"), + 
test_scenario_pattern: re.Pattern = re.compile("Scenario:"), +) -> Tuple[int, int, int]: number_of_unit_tests = count_tests(path_to_unit_tests, test_function_pattern) number_of_int_tests = count_tests(path_to_integration_tests, test_function_pattern) - number_of_func_tests = count_tests(path_to_functional_tests, test_scenario_pattern, 'feature') + number_of_func_tests = count_tests( + path_to_functional_tests, test_scenario_pattern, "feature" + ) return number_of_func_tests, number_of_int_tests, number_of_unit_tests -def count_tests(path: Path, test_function_pattern: re.Pattern, file_extension='py') -> int: +def count_tests( + path: Path, test_function_pattern: re.Pattern, file_extension="py" +) -> int: counter = 0 - for filepath in path.glob(f'**/*.{file_extension}'): - with filepath.open('r') as f: + for filepath in path.glob(f"**/*.{file_extension}"): + with filepath.open("r") as f: one_line = f.readline() while one_line: if re.search(test_function_pattern, one_line): @@ -78,23 +103,29 @@ def percentage(number_of_unit_tests: int, total_number_of_tests: int) -> float: return number_of_unit_tests / total_number_of_tests * 100 -def get_color_according_to_percentage(percentage_of_func_tests, percentage_of_int_tests, percentage_of_unit_tests): - func_color, int_color, unit_color = 'green', 'green', 'green' +def get_color_according_to_percentage( + percentage_of_func_tests, percentage_of_int_tests, percentage_of_unit_tests +): + func_color, int_color, unit_color = "green", "green", "green" if percentage_of_func_tests <= 1 or percentage_of_func_tests >= 20: - func_color = 'brightred' + func_color = "brightred" if percentage_of_int_tests <= 10 or percentage_of_int_tests >= 40: - int_color = 'brightred' + int_color = "brightred" if percentage_of_unit_tests <= 40 or percentage_of_func_tests >= 90: - unit_color = 'brightred' + unit_color = "brightred" return func_color, int_color, unit_color -def check_if_pyramid_is_ok(number_of_unit_tests: int, number_of_int_tests: int, number_of_func_tests: int): - if number_of_func_tests > number_of_int_tests or \ - number_of_func_tests > number_of_unit_tests or \ - number_of_int_tests > number_of_unit_tests: - logging.warning('Tests pyramid is unbalanced') +def check_if_pyramid_is_ok( + number_of_unit_tests: int, number_of_int_tests: int, number_of_func_tests: int +): + if ( + number_of_func_tests > number_of_int_tests + or number_of_func_tests > number_of_unit_tests + or number_of_int_tests > number_of_unit_tests + ): + logging.warning("Tests pyramid is unbalanced") -if __name__ == '__main__': +if __name__ == "__main__": measure_tests_pyramid_and_create_gitlab_badges() diff --git a/edge_orchestrator/tests/tf_serving_container.py b/edge_orchestrator/tests/tf_serving_container.py index 43b64b2c..122ea409 100644 --- a/edge_orchestrator/tests/tf_serving_container.py +++ b/edge_orchestrator/tests/tf_serving_container.py @@ -5,22 +5,29 @@ class TfServingContainer(DockerContainer): - - def __init__(self, image: str, port_to_expose: int, env: Dict, host_volume_path: str = None, - container_volume_path: str = None): + def __init__( + self, + image: str, + port_to_expose: int, + env: Dict, + host_volume_path: str = None, + container_volume_path: str = None, + ): super(TfServingContainer, self).__init__(image) self.port_to_expose = port_to_expose self.with_exposed_ports(self.port_to_expose) for key, value in env.items(): self.with_env(key, value) if host_volume_path and container_volume_path: - self.with_volume_mapping(host=host_volume_path, 
container=container_volume_path) + self.with_volume_mapping( + host=host_volume_path, container=container_volume_path + ) @wait_container_is_ready() def _connect(self, default_starting_log: str): wait_for_logs(self, default_starting_log, timeout=10) - def start(self, starting_log: str = r'Uvicorn running on'): + def start(self, starting_log: str = r"Uvicorn running on"): super().start() self._connect(starting_log) return self diff --git a/edge_orchestrator/tests/unit_tests/domain/models/business_rules/test_camera_business_rules.py b/edge_orchestrator/tests/unit_tests/domain/models/business_rules/test_camera_business_rules.py index d66b1ecd..01d89d23 100644 --- a/edge_orchestrator/tests/unit_tests/domain/models/business_rules/test_camera_business_rules.py +++ b/edge_orchestrator/tests/unit_tests/domain/models/business_rules/test_camera_business_rules.py @@ -1,41 +1,62 @@ -from edge_orchestrator.domain.models.camera import get_last_inference_by_camera, get_camera_rule +from edge_orchestrator.domain.models.camera import ( + get_camera_rule, + get_last_inference_by_camera, +) from edge_orchestrator.domain.use_cases.supervisor import get_labels class TestCameraBusinessRule: def test_camera_decision_should_return_KO_when_expected_label_is_OK(self): # noqa # Given - inferences = {'camera_id3': {'model_id4': {'full_image': {'label': 'KO', 'probability': 0.999930501}}}} + inferences = { + "camera_id3": { + "model_id4": {"full_image": {"label": "KO", "probability": 0.999930501}} + } + } # When camera_decisions = {} for camera in inferences: - camera_rule_name = 'expected_label_rule' + camera_rule_name = "expected_label_rule" camera_rule_parameters = {"expected_label": ["OK"]} last_model_inferences = get_last_inference_by_camera(inferences[camera]) labels_of_last_model_inferences = get_labels(last_model_inferences) - item_camera_rule = get_camera_rule(camera_rule_name)(**camera_rule_parameters) - camera_decision = item_camera_rule.get_camera_decision(labels_of_last_model_inferences) + item_camera_rule = get_camera_rule(camera_rule_name)( + **camera_rule_parameters + ) + camera_decision = item_camera_rule.get_camera_decision( + labels_of_last_model_inferences + ) - camera_decisions[f'{camera}'] = camera_decision.value + camera_decisions[f"{camera}"] = camera_decision.value # Then - assert camera_decisions == {'camera_id3': 'KO'} + assert camera_decisions == {"camera_id3": "KO"} - def test_camera_decision_should_return_OK_when_minimum_one_person_is_detected(self): # noqa + def test_camera_decision_should_return_OK_when_minimum_one_person_is_detected( + self, + ): # noqa # Given inferences = { - 'camera_id3': { - 'model_id5': { - 'object_1': { - 'location': [155, 413, 381, 709], 'score': 0.773778856, 'label': 'person'}, - 'object_2': { - 'location': [422, 10, 719, 720], 'score': 0.709803939, 'label': 'bicycle'}, - 'object_3': { - 'location': [623, 430, 757, 648], 'score': 0.523171604, - 'label': 'couch'} + "camera_id3": { + "model_id5": { + "object_1": { + "location": [155, 413, 381, 709], + "score": 0.773778856, + "label": "person", + }, + "object_2": { + "location": [422, 10, 719, 720], + "score": 0.709803939, + "label": "bicycle", + }, + "object_3": { + "location": [623, 430, 757, 648], + "score": 0.523171604, + "label": "couch", + }, } } } @@ -43,77 +64,132 @@ def test_camera_decision_should_return_OK_when_minimum_one_person_is_detected(se # When camera_decisions = {} for camera in inferences: - camera_rule_name = 'min_nb_objects_rule' - camera_rule_parameters = { - "class_to_detect": ["person"], - 
"min_threshold": 1 - } + camera_rule_name = "min_nb_objects_rule" + camera_rule_parameters = {"class_to_detect": ["person"], "min_threshold": 1} last_model_inferences = get_last_inference_by_camera(inferences[camera]) labels_of_last_model_inferences = get_labels(last_model_inferences) - item_camera_rule = get_camera_rule(camera_rule_name)(**camera_rule_parameters) - camera_decision = item_camera_rule.get_camera_decision(labels_of_last_model_inferences) + item_camera_rule = get_camera_rule(camera_rule_name)( + **camera_rule_parameters + ) + camera_decision = item_camera_rule.get_camera_decision( + labels_of_last_model_inferences + ) - camera_decisions[f'{camera}'] = camera_decision.value + camera_decisions[f"{camera}"] = camera_decision.value # Then - assert camera_decisions == {'camera_id3': 'OK'} + assert camera_decisions == {"camera_id3": "OK"} - def test_camera_decision_should_return_OK_when_minimum_one_face_is_detected_with_two_object_detection_models(self): # noqa + def test_camera_decision_should_return_OK_when_minimum_one_face_is_detected_with_two_object_detection_models( + self, + ): # noqa # Given inferences = { - 'camera_id3': { - {'model_id1': { - 'object_1': {'label': 'person', 'location': [351, 110, 508, 361], 'score': 0.98046875}, - 'object_2': {'label': 'person', 'location': [233, 73, 385, 397], 'score': 0.91015625}, - 'object_3': {'label': 'person', 'location': [7, 3, 240, 509], 'score': 0.87890625}, - 'object_4': {'label': 'person', 'location': [493, 93, 678, 389], 'score': 0.87890625}, - 'object_5': {'label': 'person', 'location': [135, 35, 276, 417], 'score': 0.83984375}, - 'object_6': {'label': 'person', 'location': [520, 47, 804, 527], 'score': 0.58203125}}, - 'model_id6': {'object_1': {'label': 'face', 'location': [555, 97, 611, 207], 'score': 0.98046875}, - 'object_2': {'label': 'face', 'location': [645, 46, 727, 180], 'score': 0.5}}} + "camera_id3": { + { + "model_id1": { + "object_1": { + "label": "person", + "location": [351, 110, 508, 361], + "score": 0.98046875, + }, + "object_2": { + "label": "person", + "location": [233, 73, 385, 397], + "score": 0.91015625, + }, + "object_3": { + "label": "person", + "location": [7, 3, 240, 509], + "score": 0.87890625, + }, + "object_4": { + "label": "person", + "location": [493, 93, 678, 389], + "score": 0.87890625, + }, + "object_5": { + "label": "person", + "location": [135, 35, 276, 417], + "score": 0.83984375, + }, + "object_6": { + "label": "person", + "location": [520, 47, 804, 527], + "score": 0.58203125, + }, + }, + "model_id6": { + "object_1": { + "label": "face", + "location": [555, 97, 611, 207], + "score": 0.98046875, + }, + "object_2": { + "label": "face", + "location": [645, 46, 727, 180], + "score": 0.5, + }, + }, + } } } # When camera_decisions = {} - camera = 'camera_id3' - camera_rule_name = 'min_nb_objects_rule' - camera_rule_parameters = { - "class_to_detect": ["face"], - "min_threshold": 1 - } + camera = "camera_id3" + camera_rule_name = "min_nb_objects_rule" + camera_rule_parameters = {"class_to_detect": ["face"], "min_threshold": 1} - item_camera_rule = get_camera_rule(camera_rule_name)(**camera_rule_parameters) + item_camera_rule = get_camera_rule(camera_rule_name)( + **camera_rule_parameters + ) camera_decision = item_camera_rule.get_camera_decision(inferences[camera]) - camera_decisions[f'{camera}'] = camera_decision.value + camera_decisions[f"{camera}"] = camera_decision.value # Then - assert camera_decisions == {'camera_id3': 'OK'} - + assert camera_decisions == {"camera_id3": "OK"} - def 
test_camera_decision_should_return_OK_when_minimum_one_connected_cellphone_is_detected_with_one_object_detection_and_one_classification_model(self): # noqa + def test_camera_decision_should_return_OK_when_minimum_one_connected_cellphone_is_detected_with_one_object_detection_and_one_classification_model( # noqa + self, + ): # Given - inferences = {'camera_id1': {'model_id1': { - 'object_3': {'label': 'cell phone', 'location': [427, 227, 467, 278], 'score': 0.41796875}}, - 'model_id6': { - 'object_3': {'label': 'unconnected', 'probability': 0.9975850582122803}}}} + inferences = { + "camera_id1": { + "model_id1": { + "object_3": { + "label": "cell phone", + "location": [427, 227, 467, 278], + "score": 0.41796875, + } + }, + "model_id6": { + "object_3": { + "label": "unconnected", + "probability": 0.9975850582122803, + } + }, + } + } # When camera_decisions = {} - camera = 'camera_id1' - camera_rule_name = 'min_nb_objects_rule' + camera = "camera_id1" + camera_rule_name = "min_nb_objects_rule" camera_rule_parameters = { "class_to_detect": ["connected"], - "min_threshold": 1 + "min_threshold": 1, } - item_camera_rule = get_camera_rule(camera_rule_name)(**camera_rule_parameters) + item_camera_rule = get_camera_rule(camera_rule_name)( + **camera_rule_parameters + ) camera_decision = item_camera_rule.get_camera_decision(inferences[camera]) - camera_decisions[f'{camera}'] = camera_decision.value + camera_decisions[f"{camera}"] = camera_decision.value # Then - assert camera_decisions == {'camera_id1': 'KO'} + assert camera_decisions == {"camera_id1": "KO"} diff --git a/edge_orchestrator/tests/unit_tests/domain/models/business_rules/test_item_business_rules.py b/edge_orchestrator/tests/unit_tests/domain/models/business_rules/test_item_business_rules.py index 1c812a3a..f2c28194 100644 --- a/edge_orchestrator/tests/unit_tests/domain/models/business_rules/test_item_business_rules.py +++ b/edge_orchestrator/tests/unit_tests/domain/models/business_rules/test_item_business_rules.py @@ -3,12 +3,14 @@ class TestItemBusinessRule: - def test_item_decision_should_return_decision_ko_when_one_or_more_than_one_camera_decision_is_ko(self): # noqa + def test_item_decision_should_return_decision_ko_when_one_or_more_than_one_camera_decision_is_ko( + self, + ): # noqa # Given - camera_decisions = {'camera_id1': 'KO', 'camera_id2': 'OK', 'camera_id3': 'OK'} + camera_decisions = {"camera_id1": "KO", "camera_id2": "OK", "camera_id3": "OK"} # When - item_rule_name = 'min_threshold_KO_rule' + item_rule_name = "min_threshold_KO_rule" item_rule_parameters = {"threshold": 1} item_rule = get_item_rule(item_rule_name)(**item_rule_parameters) @@ -17,12 +19,14 @@ def test_item_decision_should_return_decision_ko_when_one_or_more_than_one_camer # Then assert item_decision == Decision.KO - def test_item_decision_should_return_decision_ok_when_more_than_50_pct_of_camera_decisions_are_ok(self): # noqa + def test_item_decision_should_return_decision_ok_when_more_than_50_pct_of_camera_decisions_are_ok( + self, + ): # noqa # Given - camera_decisions = {'camera_id1': 'OK', 'camera_id2': 'OK', 'camera_id3': 'OK'} + camera_decisions = {"camera_id1": "OK", "camera_id2": "OK", "camera_id3": "OK"} # When - item_rule_name = 'threshold_ratio_rule' + item_rule_name = "threshold_ratio_rule" item_rule_parameters = {"min_threshold": 0.5} item_rule = get_item_rule(item_rule_name)(**item_rule_parameters) diff --git a/edge_orchestrator/tests/unit_tests/domain/models/test_edge_station.py 
b/edge_orchestrator/tests/unit_tests/domain/models/test_edge_station.py index 26eba462..7c3dba3b 100644 --- a/edge_orchestrator/tests/unit_tests/domain/models/test_edge_station.py +++ b/edge_orchestrator/tests/unit_tests/domain/models/test_edge_station.py @@ -12,8 +12,9 @@ class TestEdgeStation: - - def test_register_cameras_raises_exception_when_no_active_configuration_is_set(self): + def test_register_cameras_raises_exception_when_no_active_configuration_is_set( + self, + ): # Given station_config: StationConfig = get_station_config() @@ -27,7 +28,7 @@ def test_register_cameras_raises_exception_when_no_active_configuration_is_set(s def test_capture_should_raise_exception_when_cameras_are_not_registered(self): # Given station_config: StationConfig = get_station_config() - station_config.set_station_config('station_config_TEST') + station_config.set_station_config("station_config_TEST") edge_station = EdgeStation(station_config, TEST_DATA_FOLDER_PATH) @@ -36,25 +37,25 @@ def test_capture_should_raise_exception_when_cameras_are_not_registered(self): edge_station.capture() assert str(error.value) == "'EdgeStation' object has no attribute 'cameras'" - @freeze_time(lambda: dt.datetime(year=2021, month=5, day=19, hour=15, minute=0, second=0)) - @patch.object(FakeCamera, 'capture') - @patch('edge_orchestrator.domain.models.item.generate_id') - def test_capture_should_instantiate_item_with_1_binary(self, - generate_id_mocked, - capture_mocked, - my_fake_item_2, - my_fake_binaries_2): + @freeze_time( + lambda: dt.datetime(year=2021, month=5, day=19, hour=15, minute=0, second=0) + ) + @patch.object(FakeCamera, "capture") + @patch("edge_orchestrator.domain.models.item.generate_id") + def test_capture_should_instantiate_item_with_1_binary( + self, generate_id_mocked, capture_mocked, my_fake_item_2, my_fake_binaries_2 + ): # Given # random.seed(123) - expected_id = 'my_fake_item_id' + expected_id = "my_fake_item_id" generate_id_mocked.return_value = expected_id capture_mocked.return_value = my_fake_binaries_2 my_fake_item_2.id = expected_id station_config: StationConfig = get_station_config() - station_config.set_station_config('station_config_TEST') + station_config.set_station_config("station_config_TEST") edge_station = EdgeStation(station_config, TEST_DATA_FOLDER_PATH) diff --git a/edge_orchestrator/tests/unit_tests/domain/models/test_item.py b/edge_orchestrator/tests/unit_tests/domain/models/test_item.py index 0da97a21..a411eed6 100644 --- a/edge_orchestrator/tests/unit_tests/domain/models/test_item.py +++ b/edge_orchestrator/tests/unit_tests/domain/models/test_item.py @@ -2,14 +2,16 @@ class TestItem: - def test_item_from_nothing_should_instantiate_empty_item_with_serial_number_and_category_hardcoded(self): + def test_item_from_nothing_should_instantiate_empty_item_with_serial_number_and_category_hardcoded( + self, + ): # When item = Item.from_nothing() # Then assert item.id is not None - assert item.serial_number == 'serial_number' - assert item.category == 'category' + assert item.serial_number == "serial_number" + assert item.category == "category" assert item.binaries == {} assert item.cameras_metadata == {} assert item.inferences == {} diff --git a/edge_orchestrator/tests/unit_tests/domain/test_supervisor.py b/edge_orchestrator/tests/unit_tests/domain/test_supervisor.py index d19cc069..bb9a59bd 100644 --- a/edge_orchestrator/tests/unit_tests/domain/test_supervisor.py +++ b/edge_orchestrator/tests/unit_tests/domain/test_supervisor.py @@ -8,16 +8,30 @@ from 
edge_orchestrator.domain.models.item import Item from edge_orchestrator.domain.models.model_infos import ModelInfos from edge_orchestrator.domain.use_cases.supervisor import Supervisor, crop_image -from edge_orchestrator.infrastructure.binary_storage.memory_binary_storage import MemoryBinaryStorage +from edge_orchestrator.infrastructure.binary_storage.memory_binary_storage import ( + MemoryBinaryStorage, +) from edge_orchestrator.infrastructure.inventory.json_inventory import JsonInventory -from edge_orchestrator.infrastructure.metadata_storage.memory_metadata_storage import MemoryMetadataStorage -from edge_orchestrator.infrastructure.model_forward.fake_model_forward import FakeModelForward +from edge_orchestrator.infrastructure.metadata_storage.memory_metadata_storage import ( + MemoryMetadataStorage, +) +from edge_orchestrator.infrastructure.model_forward.fake_model_forward import ( + FakeModelForward, +) from edge_orchestrator.infrastructure.station_config.json_station_config import ( JsonStationConfig, ) -from edge_orchestrator.infrastructure.telemetry_sink.fake_telemetry_sink import FakeTelemetrySink -from edge_orchestrator.infrastructure.telemetry_sink.postgresql_telemetry_sink import PostgresTelemetrySink -from tests.conftest import TEST_DATA_FOLDER_PATH, TEST_INVENTORY_PATH, TEST_STATION_CONFIGS_FOLDER_PATH +from edge_orchestrator.infrastructure.telemetry_sink.fake_telemetry_sink import ( + FakeTelemetrySink, +) +from edge_orchestrator.infrastructure.telemetry_sink.postgresql_telemetry_sink import ( + PostgresTelemetrySink, +) +from tests.conftest import ( + TEST_DATA_FOLDER_PATH, + TEST_INVENTORY_PATH, + TEST_STATION_CONFIGS_FOLDER_PATH, +) @pytest.mark.asyncio @@ -113,7 +127,7 @@ async def test_2_models_in_serie(self): "camera_id", ["camera_id1", "camera_id2", "camera_id3", "camera_id4"] ) async def test_get_prediction_for_camera_should_return_2_predicted_objects_by_one_object_detection_model( - self, model_config_mocked, camera_id, my_item_1 + self, model_config_mocked, camera_id, my_item_1 ): # Given np.random.seed(42) @@ -156,7 +170,7 @@ async def test_get_prediction_for_camera_should_return_2_predicted_objects_by_on "camera_id", ["camera_id1", "camera_id2", "camera_id3", "camera_id4"] ) async def test_get_prediction_for_camera_should_return_1_predicted_object_by_one_classification_model( - self, model_config_mocked, camera_id, my_item_1 + self, model_config_mocked, camera_id, my_item_1 ): # Given np.random.seed(42) @@ -191,11 +205,11 @@ async def test_get_prediction_for_camera_should_return_1_predicted_object_by_one "camera_id", ["camera_id1", "camera_id2", "camera_id3", "camera_id4"] ) async def test_get_prediction_for_camera_returns_2_objects_with_label_for_object_detection_followed_by_classif( - # noqa - self, - model_config_mocked, - camera_id, - my_item_1, + # noqa + self, + model_config_mocked, + camera_id, + my_item_1, ): # Given np.random.seed(42) @@ -258,11 +272,11 @@ async def test_get_prediction_for_camera_returns_2_objects_with_label_for_object "camera_id", ["camera_id1", "camera_id2", "camera_id3", "camera_id4"] ) async def test_get_prediction_for_camera_returns_2_objects_with_label_for_object_detection_with_classif_model( - # noqa - self, - model_config_mocked, - camera_id, - my_item_1, + # noqa + self, + model_config_mocked, + camera_id, + my_item_1, ): # Given np.random.seed(42) @@ -310,11 +324,11 @@ async def test_get_prediction_for_camera_returns_2_objects_with_label_for_object "camera_id", ["camera_id1", "camera_id2", "camera_id3", "camera_id4"] ) async def 
test_get_prediction_for_camera_should_return_1_output_by_model_and_among_them_1_is_classification( - # noqa - self, - model_config_mocked, - camera_id, - my_item_1, + # noqa + self, + model_config_mocked, + camera_id, + my_item_1, ): # Given np.random.seed(42) @@ -411,11 +425,11 @@ async def test_get_prediction_for_camera_should_return_1_output_by_model_and_amo "camera_id", ["camera_id1", "camera_id2", "camera_id3", "camera_id4"] ) async def test_get_prediction_for_camera_should_return_1_output_by_model_and_among_them_2_are_classification( - # noqa - self, - model_config_mocked, - camera_id, - my_item_1, + # noqa + self, + model_config_mocked, + camera_id, + my_item_1, ): # Given np.random.seed(42) @@ -537,7 +551,7 @@ def test_apply_crop_function_with_correct_box_should_resize_the_picture(self): assert actual == expected_cropped_picture def test_apply_crop_function_with_incorrect_box_should_log_an_error_and_return_the_same_picture( - self, caplog + self, caplog ): # Given original_picture = ( @@ -553,59 +567,82 @@ def test_apply_crop_function_with_incorrect_box_should_log_an_error_and_return_t # Then assert actual == original_picture assert ( - caplog.records[0].msg - == "Informations for cropping are incorrect, the initial picture is used" + caplog.records[0].msg + == "Informations for cropping are incorrect, the initial picture is used" ) assert caplog.records[1].msg == "xmin (=554) is greater than xmax (=553)" - @patch.object(PostgresTelemetrySink, 'send') - async def test_set_decision_should_send_final_decision_to_telemetry_sink(self, mock_send): + @patch.object(PostgresTelemetrySink, "send") + async def test_set_decision_should_send_final_decision_to_telemetry_sink( + self, mock_send + ): # Given - item = Item(serial_number='', category='', cameras_metadata={}, binaries={}) - item.id = 'item_id' + item = Item(serial_number="", category="", cameras_metadata={}, binaries={}) + item.id = "item_id" inventory = JsonInventory(TEST_INVENTORY_PATH) - station_config = JsonStationConfig(TEST_STATION_CONFIGS_FOLDER_PATH, - inventory, TEST_DATA_FOLDER_PATH) - station_config.set_station_config('station_config_TEST') - supervisor = Supervisor(station_config=station_config, metadata_storage=MemoryMetadataStorage(), - model_forward=FakeModelForward(), binary_storage=MemoryBinaryStorage()) + station_config = JsonStationConfig( + TEST_STATION_CONFIGS_FOLDER_PATH, inventory, TEST_DATA_FOLDER_PATH + ) + station_config.set_station_config("station_config_TEST") + supervisor = Supervisor( + station_config=station_config, + metadata_storage=MemoryMetadataStorage(), + model_forward=FakeModelForward(), + binary_storage=MemoryBinaryStorage(), + ) # When await supervisor.inspect(item) # Then - msg_dict = {'item_id': 'item_id', 'config': 'station_config_TEST', 'decision': 'OK'} + msg_dict = { + "item_id": "item_id", + "config": "station_config_TEST", + "decision": "OK", + } mock_send.assert_called_once_with(msg_dict) - async def test_inspect_should_log_information_about_item_processing(self, caplog, my_fake_item): + async def test_inspect_should_log_information_about_item_processing( + self, caplog, my_fake_item + ): # Given - expected_messages = ['Activated the configuration station_config_TEST', - 'Starting Capture', - 'Entering try Capture', - 'End of Capture', - 'Starting Save Binaries', - 'Entering try Save Binaries', - 'End of Save Binaries', - 'Starting Inference', - 'Entering try Inference', - 'Getting inference for model model_id4', - 'End of Inference', - 'Starting Decision', - 'Entering try Decision', 
- 'End of Decision'] + expected_messages = [ + "Activated the configuration station_config_TEST", + "Starting Capture", + "Entering try Capture", + "End of Capture", + "Starting Save Binaries", + "Entering try Save Binaries", + "End of Save Binaries", + "Starting Inference", + "Entering try Inference", + "Getting inference for model model_id4", + "End of Inference", + "Starting Decision", + "Entering try Decision", + "End of Decision", + ] inventory = JsonInventory(TEST_INVENTORY_PATH) - station_config = JsonStationConfig(TEST_STATION_CONFIGS_FOLDER_PATH, - inventory, TEST_DATA_FOLDER_PATH) - station_config.set_station_config('station_config_TEST') - supervisor = Supervisor(station_config=station_config, metadata_storage=MemoryMetadataStorage(), - model_forward=FakeModelForward(), binary_storage=MemoryBinaryStorage(), - telemetry_sink=FakeTelemetrySink()) + station_config = JsonStationConfig( + TEST_STATION_CONFIGS_FOLDER_PATH, inventory, TEST_DATA_FOLDER_PATH + ) + station_config.set_station_config("station_config_TEST") + supervisor = Supervisor( + station_config=station_config, + metadata_storage=MemoryMetadataStorage(), + model_forward=FakeModelForward(), + binary_storage=MemoryBinaryStorage(), + telemetry_sink=FakeTelemetrySink(), + ) # When with caplog.at_level(logging.INFO, logger="edge_orchestrator"): await supervisor.inspect(my_fake_item) # Then - actual_messages = [logger_msg for logger_name, logger_level, logger_msg in caplog.record_tuples if - logger_name == "edge_orchestrator"] + actual_messages = [ + logger_msg + for logger_name, logger_level, logger_msg in caplog.record_tuples + if logger_name == "edge_orchestrator" + ] assert expected_messages == actual_messages diff --git a/edge_orchestrator/tests/unit_tests/infrastructure/binary_storage/test_filesystem_binary_storage.py b/edge_orchestrator/tests/unit_tests/infrastructure/binary_storage/test_filesystem_binary_storage.py index 3c35c9ca..bde78ec6 100644 --- a/edge_orchestrator/tests/unit_tests/infrastructure/binary_storage/test_filesystem_binary_storage.py +++ b/edge_orchestrator/tests/unit_tests/infrastructure/binary_storage/test_filesystem_binary_storage.py @@ -2,60 +2,68 @@ from unittest.mock import patch from edge_orchestrator.domain.models.item import Item -from edge_orchestrator.infrastructure.binary_storage.filesystem_binary_storage import FileSystemBinaryStorage +from edge_orchestrator.infrastructure.binary_storage.filesystem_binary_storage import ( + FileSystemBinaryStorage, +) class TestFileSystemBinaryStorage: - @patch('edge_orchestrator.domain.models.item.generate_id') - def test_save_item_binaries_should_write_image_on_filesystem(self, generate_id_mocked, - tmpdir): + @patch("edge_orchestrator.domain.models.item.generate_id") + def test_save_item_binaries_should_write_image_on_filesystem( + self, generate_id_mocked, tmpdir + ): # Given - generate_id_mocked.return_value = 'my_item_id' - src_directory_path = Path(tmpdir.mkdir('binaries')) + generate_id_mocked.return_value = "my_item_id" + src_directory_path = Path(tmpdir.mkdir("binaries")) binary_storage = FileSystemBinaryStorage(src_directory_path) expected_picture = bytes([0, 1, 2, 3, 4]) - item = Item(serial_number='serial_number', category='category', cameras_metadata={}, - binaries={'camera_id': expected_picture}) + item = Item( + serial_number="serial_number", + category="category", + cameras_metadata={}, + binaries={"camera_id": expected_picture}, + ) # When binary_storage.save_item_binaries(item) # Then - path_to_my_picture = (src_directory_path / 
'my_item_id' / 'camera_id.jpg') + path_to_my_picture = src_directory_path / "my_item_id" / "camera_id.jpg" assert path_to_my_picture.is_file() - actual_picture = path_to_my_picture.open('rb').read() + actual_picture = path_to_my_picture.open("rb").read() assert actual_picture == expected_picture def test_get_item_binary_should_return_requested_item_binary(self, tmpdir): # Given - src_directory_path = Path(tmpdir.mkdir('binaries')) - (src_directory_path / 'my_item_id').mkdir() + src_directory_path = Path(tmpdir.mkdir("binaries")) + (src_directory_path / "my_item_id").mkdir() binary_storage = FileSystemBinaryStorage(src_directory_path) expected_picture = bytes([0, 1, 2, 3, 4]) - with (src_directory_path / 'my_item_id' / 'camera_id.jpg').open('wb') as f: + with (src_directory_path / "my_item_id" / "camera_id.jpg").open("wb") as f: f.write(expected_picture) # When - actual_binary = binary_storage.get_item_binary('my_item_id', 'camera_id') + actual_binary = binary_storage.get_item_binary("my_item_id", "camera_id") # Then assert actual_binary == expected_picture def test_get_item_binaries_should_return_all_item_binaries_names(self, tmpdir): # Given - src_directory_path = Path(tmpdir.mkdir('binaries')) - (src_directory_path / 'my_item_id').mkdir() + src_directory_path = Path(tmpdir.mkdir("binaries")) + (src_directory_path / "my_item_id").mkdir() binary_storage = FileSystemBinaryStorage(src_directory_path) expected_picture_1 = bytes([0, 1, 2, 3, 4]) expected_picture_2 = bytes([5, 6, 7, 8, 9]) - with (src_directory_path / 'my_item_id' / 'camera_id1.jpg').open('wb') as f_1, \ - (src_directory_path / 'my_item_id' / 'camera_id2.jpg').open('wb') as f_2: + with (src_directory_path / "my_item_id" / "camera_id1.jpg").open("wb") as f_1, ( + src_directory_path / "my_item_id" / "camera_id2.jpg" + ).open("wb") as f_2: f_1.write(expected_picture_1) f_2.write(expected_picture_2) # When - binaries_names = binary_storage.get_item_binaries('my_item_id') + binaries_names = binary_storage.get_item_binaries("my_item_id") # Then - assert set(binaries_names) == {'camera_id1.jpg', 'camera_id2.jpg'} + assert set(binaries_names) == {"camera_id1.jpg", "camera_id2.jpg"} diff --git a/edge_orchestrator/tests/unit_tests/infrastructure/binary_storage/test_gcp_binary_storage.py b/edge_orchestrator/tests/unit_tests/infrastructure/binary_storage/test_gcp_binary_storage.py index 7f1557fc..ad81fd48 100644 --- a/edge_orchestrator/tests/unit_tests/infrastructure/binary_storage/test_gcp_binary_storage.py +++ b/edge_orchestrator/tests/unit_tests/infrastructure/binary_storage/test_gcp_binary_storage.py @@ -1,17 +1,18 @@ -from unittest.mock import patch, Mock +from unittest.mock import Mock, patch from edge_orchestrator.domain.models.item import Item -from edge_orchestrator.infrastructure.binary_storage.gcp_binary_storage import GCPBinaryStorage - +from edge_orchestrator.infrastructure.binary_storage.gcp_binary_storage import ( + GCPBinaryStorage, +) from tests.conftest import TEST_DATA_FOLDER_PATH class TestGCPBinaryStorage: - @patch('edge_orchestrator.infrastructure.binary_storage.gcp_binary_storage.storage') + @patch("edge_orchestrator.infrastructure.binary_storage.gcp_binary_storage.storage") def test_save_item_binaries_should_write_image_in_gcp(self, mock_storage): # Given - test_camera_id = '1' - test_file_path = TEST_DATA_FOLDER_PATH / 'item_2' / 'camera_id1.jpg' + test_camera_id = "1" + test_file_path = TEST_DATA_FOLDER_PATH / "item_2" / "camera_id1.jpg" item = Item.from_nothing() with open(test_file_path, "rb") as f: 
item.binaries = {test_camera_id: f} @@ -25,15 +26,16 @@ def test_save_item_binaries_should_write_image_in_gcp(self, mock_storage): # Then mock_storage.Client.assert_called_once() - mock_bucket.blob.assert_called_once_with( - f"{item.id}/{test_camera_id}.jpg") - mock_bucket.blob.return_value.upload_from_string.assert_called_once_with(f, content_type="image/jpg") + mock_bucket.blob.assert_called_once_with(f"{item.id}/{test_camera_id}.jpg") + mock_bucket.blob.return_value.upload_from_string.assert_called_once_with( + f, content_type="image/jpg" + ) - @patch('edge_orchestrator.infrastructure.binary_storage.gcp_binary_storage.storage') + @patch("edge_orchestrator.infrastructure.binary_storage.gcp_binary_storage.storage") def test_get_item_binary_should_return_image(self, mock_storage): # Given - test_camera_id = '1' - test_file_path = TEST_DATA_FOLDER_PATH / 'item_2' / 'camera_id1.jpg' + test_camera_id = "1" + test_file_path = TEST_DATA_FOLDER_PATH / "item_2" / "camera_id1.jpg" item = Item.from_nothing() with open(test_file_path, "rb") as f: item.binaries = {test_camera_id: f} @@ -48,5 +50,4 @@ def test_get_item_binary_should_return_image(self, mock_storage): # Then mock_storage.Client.assert_called_once() - mock_bucket.get_blob.assert_called_once_with( - f"{item.id}/{test_camera_id}.jpg") + mock_bucket.get_blob.assert_called_once_with(f"{item.id}/{test_camera_id}.jpg") diff --git a/edge_orchestrator/tests/unit_tests/infrastructure/binary_storage/test_memory_binary_storage.py b/edge_orchestrator/tests/unit_tests/infrastructure/binary_storage/test_memory_binary_storage.py index 097b3555..4606d2db 100644 --- a/edge_orchestrator/tests/unit_tests/infrastructure/binary_storage/test_memory_binary_storage.py +++ b/edge_orchestrator/tests/unit_tests/infrastructure/binary_storage/test_memory_binary_storage.py @@ -1,35 +1,47 @@ from unittest.mock import patch from edge_orchestrator.domain.models.item import Item -from edge_orchestrator.infrastructure.binary_storage.memory_binary_storage import MemoryBinaryStorage +from edge_orchestrator.infrastructure.binary_storage.memory_binary_storage import ( + MemoryBinaryStorage, +) class TestMemoryBinaryStorage: - @patch('edge_orchestrator.domain.models.item.generate_id') + @patch("edge_orchestrator.domain.models.item.generate_id") def test_save_item_binaries_should_write_image_in_memory(self, generate_id_mocked): # Given - generate_id_mocked.return_value = 'my_item_id' + generate_id_mocked.return_value = "my_item_id" binary_storage = MemoryBinaryStorage() expected_picture = bytes([0, 1, 2, 3, 4]) - item = Item(serial_number='serial_number', category='category', cameras_metadata={}, - binaries={'my_picture_name': expected_picture}) + item = Item( + serial_number="serial_number", + category="category", + cameras_metadata={}, + binaries={"my_picture_name": expected_picture}, + ) # When binary_storage.save_item_binaries(item) # Then - assert binary_storage.binaries == {'my_item_id': {'my_picture_name': expected_picture}} + assert binary_storage.binaries == { + "my_item_id": {"my_picture_name": expected_picture} + } def test_get_item_binary_should_return_requested_item_binary(self): # Given binary_storage = MemoryBinaryStorage() expected_picture = bytes([0, 1, 2, 3, 4]) another_picture = bytes([5, 6, 7, 8, 9]) - binary_storage.binaries = {'my_item_id': {'my_picture_name_1': expected_picture, - 'my_picture_name_2': another_picture}} + binary_storage.binaries = { + "my_item_id": { + "my_picture_name_1": expected_picture, + "my_picture_name_2": another_picture, + } + } # 
When - binary = binary_storage.get_item_binary('my_item_id', 'my_picture_name_1') + binary = binary_storage.get_item_binary("my_item_id", "my_picture_name_1") # Then assert binary == expected_picture @@ -39,11 +51,15 @@ def test_get_item_binaries_should_return_all_item_binaries_names(self): binary_storage = MemoryBinaryStorage() expected_picture_1 = bytes([0, 1, 2, 3, 4]) expected_picture_2 = bytes([5, 6, 7, 8, 9]) - binary_storage.binaries = {'my_item_id': {'my_picture_name_1': expected_picture_1, - 'my_picture_name_2': expected_picture_2}} + binary_storage.binaries = { + "my_item_id": { + "my_picture_name_1": expected_picture_1, + "my_picture_name_2": expected_picture_2, + } + } # When - binaries_names = binary_storage.get_item_binaries('my_item_id') + binaries_names = binary_storage.get_item_binaries("my_item_id") # Then - assert binaries_names == ['my_picture_name_1', 'my_picture_name_2'] + assert binaries_names == ["my_picture_name_1", "my_picture_name_2"] diff --git a/edge_orchestrator/tests/unit_tests/infrastructure/camera/test_fake_camera.py b/edge_orchestrator/tests/unit_tests/infrastructure/camera/test_fake_camera.py index 287def3e..58d61592 100644 --- a/edge_orchestrator/tests/unit_tests/infrastructure/camera/test_fake_camera.py +++ b/edge_orchestrator/tests/unit_tests/infrastructure/camera/test_fake_camera.py @@ -3,7 +3,9 @@ class TestFakeCamera: - def test_select_random_image_should_return_random_image_from_input_images_folder(self): + def test_select_random_image_should_return_random_image_from_input_images_folder( + self, + ): # Given camera = FakeCamera("id", {"type": "top_camera", "source": "fake_item"}) camera.data_folder_path = TEST_DATA_FOLDER_PATH @@ -14,4 +16,4 @@ def test_select_random_image_should_return_random_image_from_input_images_folder print(image_path) # Then - assert image_path in list((TEST_DATA_FOLDER_PATH / 'fake_item').glob('*.jpg')) + assert image_path in list((TEST_DATA_FOLDER_PATH / "fake_item").glob("*.jpg")) diff --git a/edge_orchestrator/tests/unit_tests/infrastructure/metadata_storage/test_filesystem_metadata_storage.py b/edge_orchestrator/tests/unit_tests/infrastructure/metadata_storage/test_filesystem_metadata_storage.py index a0fab49b..8e0eb916 100644 --- a/edge_orchestrator/tests/unit_tests/infrastructure/metadata_storage/test_filesystem_metadata_storage.py +++ b/edge_orchestrator/tests/unit_tests/infrastructure/metadata_storage/test_filesystem_metadata_storage.py @@ -4,84 +4,100 @@ from edge_orchestrator.domain.models.item import Item from edge_orchestrator.domain.use_cases.supervisor import SupervisorState -from edge_orchestrator.infrastructure.metadata_storage.filesystem_metadata_storage import FileSystemMetadataStorage +from edge_orchestrator.infrastructure.metadata_storage.filesystem_metadata_storage import ( + FileSystemMetadataStorage, +) class TestFileSystemMetadataStorage: - - @patch('edge_orchestrator.domain.models.item.generate_id') - def test_save_item_metadata_should_write_metadata_on_filesystem(self, generate_id_mocked, tmpdir, - my_cameras_metadata_0): + @patch("edge_orchestrator.domain.models.item.generate_id") + def test_save_item_metadata_should_write_metadata_on_filesystem( + self, generate_id_mocked, tmpdir, my_cameras_metadata_0 + ): # Given - generate_id_mocked.return_value = 'my_item_id' - item = Item(serial_number='serial_number', category='category', cameras_metadata=my_cameras_metadata_0, - binaries={}) - src_directory_path = Path(tmpdir.mkdir('metadata')) + generate_id_mocked.return_value = "my_item_id" + item = 
Item( + serial_number="serial_number", + category="category", + cameras_metadata=my_cameras_metadata_0, + binaries={}, + ) + src_directory_path = Path(tmpdir.mkdir("metadata")) metadata_storage = FileSystemMetadataStorage(src_directory_path) expected_response = { - 'id': item.id, - 'serial_number': item.serial_number, - 'category': item.category, - 'station_config': item.station_config, - 'cameras': item.cameras_metadata, - 'received_time': item.received_time.strftime('%Y-%m-%d %H:%M:%S'), - 'inferences': item.inferences, - 'decision': item.decision, - 'state': item.state, - 'error': item.error_message + "id": item.id, + "serial_number": item.serial_number, + "category": item.category, + "station_config": item.station_config, + "cameras": item.cameras_metadata, + "received_time": item.received_time.strftime("%Y-%m-%d %H:%M:%S"), + "inferences": item.inferences, + "decision": item.decision, + "state": item.state, + "error": item.error_message, } # When metadata_storage.save_item_metadata(item) # Then - path_to_my_metadata = (src_directory_path / 'my_item_id' / 'metadata.json') + path_to_my_metadata = src_directory_path / "my_item_id" / "metadata.json" assert path_to_my_metadata.is_file() - actual_metadata = json.load(path_to_my_metadata.open('r')) + actual_metadata = json.load(path_to_my_metadata.open("r")) assert actual_metadata == expected_response - def test_get_item_metadata_should_return_requested_item_metadata(self, tmpdir, my_cameras_metadata_0): + def test_get_item_metadata_should_return_requested_item_metadata( + self, tmpdir, my_cameras_metadata_0 + ): # Given - src_directory_path = Path(tmpdir.mkdir('metadata')) + src_directory_path = Path(tmpdir.mkdir("metadata")) expected_metadata = my_cameras_metadata_0 - (src_directory_path / 'my_item_id').mkdir() - with (src_directory_path / 'my_item_id' / 'metadata.json').open('w') as f: + (src_directory_path / "my_item_id").mkdir() + with (src_directory_path / "my_item_id" / "metadata.json").open("w") as f: json.dump(expected_metadata, f) metadata_storage = FileSystemMetadataStorage(src_directory_path) # When - actual_metadata = metadata_storage.get_item_metadata('my_item_id') + actual_metadata = metadata_storage.get_item_metadata("my_item_id") # Then assert actual_metadata == expected_metadata - @patch('edge_orchestrator.domain.models.item.generate_id') - def test_get_item_state_should_return_expected_state(self, generate_id_mocked, tmpdir, my_cameras_metadata_0): + @patch("edge_orchestrator.domain.models.item.generate_id") + def test_get_item_state_should_return_expected_state( + self, generate_id_mocked, tmpdir, my_cameras_metadata_0 + ): # Given - generate_id_mocked.return_value = 'my_item_id' - src_directory_path = Path(tmpdir.mkdir('metadata')) - (src_directory_path / 'my_item_id').mkdir() - item = Item(serial_number='serial_number', category='category', cameras_metadata=my_cameras_metadata_0, - binaries={}) + generate_id_mocked.return_value = "my_item_id" + src_directory_path = Path(tmpdir.mkdir("metadata")) + (src_directory_path / "my_item_id").mkdir() + item = Item( + serial_number="serial_number", + category="category", + cameras_metadata=my_cameras_metadata_0, + binaries={}, + ) item.state = SupervisorState.DONE.value expected_state = item.state - with (src_directory_path / 'my_item_id' / 'metadata.json').open('w') as f: + with (src_directory_path / "my_item_id" / "metadata.json").open("w") as f: json.dump(item.get_metadata(), f) metadata_storage = FileSystemMetadataStorage(src_directory_path) # When - actual_state = 
metadata_storage.get_item_state('my_item_id') + actual_state = metadata_storage.get_item_state("my_item_id") # Then assert actual_state == expected_state - def test_get_all_items_metadata_should_return_expected_metadata_list(self, tmpdir, my_cameras_metadata_0, - my_cameras_metadata_1): - src_directory_path = Path(tmpdir.mkdir('metadata')) - (src_directory_path / 'my_item_id_1').mkdir() - (src_directory_path / 'my_item_id_2').mkdir() - with (src_directory_path / 'my_item_id_1' / 'metadata.json').open('w') as f1, \ - (src_directory_path / 'my_item_id_2' / 'metadata.json').open('w') as f2: + def test_get_all_items_metadata_should_return_expected_metadata_list( + self, tmpdir, my_cameras_metadata_0, my_cameras_metadata_1 + ): + src_directory_path = Path(tmpdir.mkdir("metadata")) + (src_directory_path / "my_item_id_1").mkdir() + (src_directory_path / "my_item_id_2").mkdir() + with (src_directory_path / "my_item_id_1" / "metadata.json").open("w") as f1, ( + src_directory_path / "my_item_id_2" / "metadata.json" + ).open("w") as f2: json.dump(my_cameras_metadata_0, f1) json.dump(my_cameras_metadata_1, f2) metadata_storage = FileSystemMetadataStorage(src_directory_path) @@ -90,5 +106,7 @@ def test_get_all_items_metadata_should_return_expected_metadata_list(self, tmpdi actual_items_metadata = metadata_storage.get_all_items_metadata() # Then - assert (actual_items_metadata == [my_cameras_metadata_0, my_cameras_metadata_1] or - actual_items_metadata == [my_cameras_metadata_1, my_cameras_metadata_0]) + assert actual_items_metadata == [ + my_cameras_metadata_0, + my_cameras_metadata_1, + ] or actual_items_metadata == [my_cameras_metadata_1, my_cameras_metadata_0] diff --git a/edge_orchestrator/tests/unit_tests/infrastructure/metadata_storage/test_memory_item_storage.py b/edge_orchestrator/tests/unit_tests/infrastructure/metadata_storage/test_memory_item_storage.py index ff63e875..39b5cd48 100644 --- a/edge_orchestrator/tests/unit_tests/infrastructure/metadata_storage/test_memory_item_storage.py +++ b/edge_orchestrator/tests/unit_tests/infrastructure/metadata_storage/test_memory_item_storage.py @@ -1,4 +1,6 @@ -from edge_orchestrator.infrastructure.metadata_storage.memory_metadata_storage import MemoryMetadataStorage +from edge_orchestrator.infrastructure.metadata_storage.memory_metadata_storage import ( + MemoryMetadataStorage, +) class TestMemoryItemStorage: @@ -12,19 +14,27 @@ def test_save_item_metadata_should_write_item_in_memory(self, my_item_0): # Then assert metadata_storage.items_metadata == { my_item_0.id: { - 'id': my_item_0.id, - 'serial_number': '123', - 'category': 'tacos', - 'station_config': None, - 'cameras': { - 'camera_1': {"brightness": 100, "exposition": 100, "position": "right"}, - 'camera_2': {"brightness": 100, "exposition": 100, "position": "left"} + "id": my_item_0.id, + "serial_number": "123", + "category": "tacos", + "station_config": None, + "cameras": { + "camera_1": { + "brightness": 100, + "exposition": 100, + "position": "right", + }, + "camera_2": { + "brightness": 100, + "exposition": 100, + "position": "left", + }, }, - 'received_time': '2021-05-19 15:00:00', - 'inferences': {}, - 'decision': {}, - 'state': None, - 'error': None + "received_time": "2021-05-19 15:00:00", + "inferences": {}, + "decision": {}, + "state": None, + "error": None, } } @@ -38,19 +48,19 @@ def test_get_item_metadata_should_return_requested_item_metadata(self, my_item_0 # Then assert actual_item == { - 'id': my_item_0.id, - 'serial_number': '123', - 'category': 'tacos', - 'station_config': 
None, - 'cameras': { - 'camera_1': {"brightness": 100, "exposition": 100, "position": "right"}, - 'camera_2': {"brightness": 100, "exposition": 100, "position": "left"} + "id": my_item_0.id, + "serial_number": "123", + "category": "tacos", + "station_config": None, + "cameras": { + "camera_1": {"brightness": 100, "exposition": 100, "position": "right"}, + "camera_2": {"brightness": 100, "exposition": 100, "position": "left"}, }, - 'received_time': '2021-05-19 15:00:00', - 'inferences': {}, - 'decision': {}, - 'state': None, - 'error': None + "received_time": "2021-05-19 15:00:00", + "inferences": {}, + "decision": {}, + "state": None, + "error": None, } def test_get_all_items_metadata_should_return_all_items(self, my_item_0, my_item_2): @@ -65,32 +75,44 @@ def test_get_all_items_metadata_should_return_all_items(self, my_item_0, my_item # Then assert list(actual_items) == [ { - 'id': my_item_0.id, - 'serial_number': '123', - 'category': 'tacos', - 'station_config': None, - 'cameras': { - 'camera_1': {"brightness": 100, "exposition": 100, "position": "right"}, - 'camera_2': {"brightness": 100, "exposition": 100, "position": "left"} + "id": my_item_0.id, + "serial_number": "123", + "category": "tacos", + "station_config": None, + "cameras": { + "camera_1": { + "brightness": 100, + "exposition": 100, + "position": "right", + }, + "camera_2": { + "brightness": 100, + "exposition": 100, + "position": "left", + }, }, - 'received_time': '2021-05-19 15:00:00', - 'inferences': {}, - 'decision': {}, - 'state': None, - 'error': None + "received_time": "2021-05-19 15:00:00", + "inferences": {}, + "decision": {}, + "state": None, + "error": None, }, { - 'id': my_item_2.id, - 'serial_number': '123', - 'category': 'tacos', - 'station_config': None, - 'cameras': { - 'camera_3': {"brightness": 100, "exposition": 100, "position": "top"} + "id": my_item_2.id, + "serial_number": "123", + "category": "tacos", + "station_config": None, + "cameras": { + "camera_3": { + "brightness": 100, + "exposition": 100, + "position": "top", + } }, - 'received_time': '2021-05-19 15:00:00', - 'inferences': {}, - 'decision': {}, - 'state': None, - 'error': None - } + "received_time": "2021-05-19 15:00:00", + "inferences": {}, + "decision": {}, + "state": None, + "error": None, + }, ] diff --git a/edge_orchestrator/tests/unit_tests/infrastructure/model_forward/test_fake_model_forwarder.py b/edge_orchestrator/tests/unit_tests/infrastructure/model_forward/test_fake_model_forwarder.py index 20cd14a6..52310fc7 100644 --- a/edge_orchestrator/tests/unit_tests/infrastructure/model_forward/test_fake_model_forwarder.py +++ b/edge_orchestrator/tests/unit_tests/infrastructure/model_forward/test_fake_model_forwarder.py @@ -3,9 +3,11 @@ import numpy as np import pytest -from edge_orchestrator.domain.ports.model_forward import Labels -from edge_orchestrator.infrastructure.model_forward.fake_model_forward import FakeModelForward from edge_orchestrator.domain.models.model_infos import ModelInfos +from edge_orchestrator.domain.ports.model_forward import Labels +from edge_orchestrator.infrastructure.model_forward.fake_model_forward import ( + FakeModelForward, +) @pytest.mark.asyncio @@ -15,20 +17,25 @@ async def test_perform_inference_should_return_classification_results(self): np.random.seed(42) random.seed(42) fake_model_forwarder = FakeModelForward() - model_inference_version = ModelInfos(id='model1', depends_on=[], name='inception', - category='classification', - version='1.2', camera_id='camera_id1', image_resolution=[640, 640]) + 
model_inference_version = ModelInfos( + id="model1", + depends_on=[], + name="inception", + category="classification", + version="1.2", + camera_id="camera_id1", + image_resolution=[640, 640], + ) binary_data = bytes([]) expected = { - 'full_image': { - 'label': Labels.OK.value, - 'probability': 0.3745401188473625 - } + "full_image": {"label": Labels.OK.value, "probability": 0.3745401188473625} } # When - actual = await fake_model_forwarder.perform_inference(model_inference_version, binary_data, 'full_image') + actual = await fake_model_forwarder.perform_inference( + model_inference_version, binary_data, "full_image" + ) # Then assert actual == expected @@ -38,55 +45,72 @@ async def test_perform_inference_should_return_object_detection_results(self): np.random.seed(42) random.seed(42) fake_model_forwarder = FakeModelForward() - model_inference_version = ModelInfos(id='model1', depends_on=[], name='mobilenet_v1_640x640', - category='object_detection', - version='1', camera_id='camera_id1', image_resolution=[640, 640]) + model_inference_version = ModelInfos( + id="model1", + depends_on=[], + name="mobilenet_v1_640x640", + category="object_detection", + version="1", + camera_id="camera_id1", + image_resolution=[640, 640], + ) binary_data = bytes([]) expected = { - 'full_image_object_1': { - 'location': [4, 112, 244, 156], - 'objectness': 0.3745401188473625 + "full_image_object_1": { + "location": [4, 112, 244, 156], + "objectness": 0.3745401188473625, + }, + "full_image_object_2": { + "location": [4, 112, 244, 156], + "objectness": 0.9507143064099162, }, - 'full_image_object_2': { - 'location': [4, 112, 244, 156], - 'objectness': 0.9507143064099162 - } } # When - actual = await fake_model_forwarder.perform_inference(model_inference_version, binary_data, 'full_image') + actual = await fake_model_forwarder.perform_inference( + model_inference_version, binary_data, "full_image" + ) # Then assert actual == expected - async def test_perform_inference_should_return_object_detection_with_classification_results(self): + async def test_perform_inference_should_return_object_detection_with_classification_results( + self, + ): # Given np.random.seed(42) random.seed(42) fake_model_forwarder = FakeModelForward() - model_inference_version = ModelInfos(id='model1', depends_on=[], name='mobilenet_v1_640x640_detect_classif', - category='object_detection_with_classification', - version='1.3', camera_id='camera_id1') + model_inference_version = ModelInfos( + id="model1", + depends_on=[], + name="mobilenet_v1_640x640_detect_classif", + category="object_detection_with_classification", + version="1.3", + camera_id="camera_id1", + ) binary_data = bytes([]) expected = { - 'full_image_object_1': { - 'label': 'OK', - 'objectness': 0.3745401188473625, - 'location': [4, 112, 244, 156], - 'probability': 0.9507143064099162 + "full_image_object_1": { + "label": "OK", + "objectness": 0.3745401188473625, + "location": [4, 112, 244, 156], + "probability": 0.9507143064099162, + }, + "full_image_object_2": { + "label": "OK", + "objectness": 0.7319939418114051, + "location": [4, 112, 244, 156], + "probability": 0.5986584841970366, }, - 'full_image_object_2': { - 'label': 'OK', - 'objectness': 0.7319939418114051, - 'location': [4, 112, 244, 156], - 'probability': 0.5986584841970366 - } } # When - actual = await fake_model_forwarder.perform_inference(model_inference_version, binary_data, 'full_image') + actual = await fake_model_forwarder.perform_inference( + model_inference_version, binary_data, "full_image" + ) # Then assert 
actual == expected diff --git a/edge_orchestrator/tests/unit_tests/infrastructure/model_forward/test_tf_serving_classification_wrapper.py b/edge_orchestrator/tests/unit_tests/infrastructure/model_forward/test_tf_serving_classification_wrapper.py index b6b11982..26303fb8 100644 --- a/edge_orchestrator/tests/unit_tests/infrastructure/model_forward/test_tf_serving_classification_wrapper.py +++ b/edge_orchestrator/tests/unit_tests/infrastructure/model_forward/test_tf_serving_classification_wrapper.py @@ -1,8 +1,7 @@ +from edge_orchestrator.domain.models.model_infos import ModelInfos from edge_orchestrator.infrastructure.model_forward.tf_serving_classification_wrapper import ( TFServingClassificationWrapper, ) -from edge_orchestrator.domain.models.model_infos import ModelInfos - from tests.conftest import TEST_DATA_FOLDER_PATH diff --git a/edge_orchestrator/tests/unit_tests/infrastructure/model_forward/test_tf_serving_detection_and_classification_wrapper.py b/edge_orchestrator/tests/unit_tests/infrastructure/model_forward/test_tf_serving_detection_and_classification_wrapper.py index 7e676d91..d5828055 100644 --- a/edge_orchestrator/tests/unit_tests/infrastructure/model_forward/test_tf_serving_detection_and_classification_wrapper.py +++ b/edge_orchestrator/tests/unit_tests/infrastructure/model_forward/test_tf_serving_detection_and_classification_wrapper.py @@ -1,17 +1,22 @@ -from edge_orchestrator.infrastructure.model_forward.tf_serving_detection_and_classification_wrapper import \ - TFServingDetectionClassificationWrapper from edge_orchestrator.domain.models.model_infos import ModelInfos +from edge_orchestrator.infrastructure.model_forward.tf_serving_detection_and_classification_wrapper import ( + TFServingDetectionClassificationWrapper, +) from tests.conftest import TEST_DATA_FOLDER_PATH class TestDetectionClassificationHelper: - def test_perform_pre_processing_should_return_an_image_as_an_array_with_the_expected_format(self): + def test_perform_pre_processing_should_return_an_image_as_an_array_with_the_expected_format( + self, + ): # Given model_forwarder = TFServingDetectionClassificationWrapper( - base_url='', - class_names_path=TEST_DATA_FOLDER_PATH / 'test_detection_labels' + base_url="", + class_names_path=TEST_DATA_FOLDER_PATH / "test_detection_labels", ) - binary = open(TEST_DATA_FOLDER_PATH / 'mask_images' / 'person_with_mask.jpg', 'br').read() + binary = open( + TEST_DATA_FOLDER_PATH / "mask_images" / "person_with_mask.jpg", "br" + ).read() expected_shape = (1, 720, 1080, 3) # When @@ -22,44 +27,46 @@ def test_perform_pre_processing_should_return_an_image_as_an_array_with_the_expe assert 0 <= actual.min() <= 255 assert 0 <= actual.max() <= 255 - def test_perform_post_processing_should_transform_the_standard_output_from_the_model_into_the_expected_format(self): + def test_perform_post_processing_should_transform_the_standard_output_from_the_model_into_the_expected_format( + self, + ): # Given model_forwarder = TFServingDetectionClassificationWrapper( - base_url='', - class_names_path=TEST_DATA_FOLDER_PATH / 'test_detection_foo_bar_baz_labels', - image_shape=[1, 1] + base_url="", + class_names_path=TEST_DATA_FOLDER_PATH + / "test_detection_foo_bar_baz_labels", + image_shape=[1, 1], ) json_outputs = { - 'detection_boxes': [[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]], - 'detection_scores': [[0.8, 0.7, 0.6]], - 'detection_classes': [[1., 1., 2.]] + "detection_boxes": [[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]], + "detection_scores": [[0.8, 0.7, 0.6]], + "detection_classes": [[1.0, 
1.0, 2.0]], } - model = ModelInfos('id1', 'name1', 'detection', '1', 'camera_id1', [224, 224], class_names=['OK', 'KO'], - boxes_coordinates='detection_boxes', objectness_scores='detection_scores', - number_of_boxes='num_detections', detection_classes='detection_classes', - class_to_detect='foo', objectness_threshold=0.5) + model = ModelInfos( + "id1", + "name1", + "detection", + "1", + "camera_id1", + [224, 224], + class_names=["OK", "KO"], + boxes_coordinates="detection_boxes", + objectness_scores="detection_scores", + number_of_boxes="num_detections", + detection_classes="detection_classes", + class_to_detect="foo", + objectness_threshold=0.5, + ) # crop_image expects the box coordinates to be (xmin, ymin, xmax, ymax) # Mobilenet returns the coordinates as (ymin, xmin, ymax, xmax) # Hence, the switch here expected = { - 'object_1': { - 'location': [2, 1, 4, 3], - 'score': 0.8, - 'label': 'bar' - }, - 'object_2': { - 'location': [6, 5, 8, 7], - 'score': 0.7, - 'label': 'bar' - }, - 'object_3': { - 'location': [10, 9, 12, 11], - 'score': 0.6, - 'label': 'baz' - } + "object_1": {"location": [2, 1, 4, 3], "score": 0.8, "label": "bar"}, + "object_2": {"location": [6, 5, 8, 7], "score": 0.7, "label": "bar"}, + "object_3": {"location": [10, 9, 12, 11], "score": 0.6, "label": "baz"}, } # When diff --git a/edge_orchestrator/tests/unit_tests/infrastructure/model_forward/test_tf_serving_detection_wrapper.py b/edge_orchestrator/tests/unit_tests/infrastructure/model_forward/test_tf_serving_detection_wrapper.py index 23d75dad..d44f115b 100644 --- a/edge_orchestrator/tests/unit_tests/infrastructure/model_forward/test_tf_serving_detection_wrapper.py +++ b/edge_orchestrator/tests/unit_tests/infrastructure/model_forward/test_tf_serving_detection_wrapper.py @@ -1,16 +1,22 @@ -from edge_orchestrator.infrastructure.model_forward.tf_serving_detection_wrapper import TFServingDetectionWrapper from edge_orchestrator.domain.models.model_infos import ModelInfos +from edge_orchestrator.infrastructure.model_forward.tf_serving_detection_wrapper import ( + TFServingDetectionWrapper, +) from tests.conftest import TEST_DATA_FOLDER_PATH class TestDetectionWrapperHelper: - def test_perform_pre_processing_should_return_an_image_as_an_array_with_the_expected_format(self): + def test_perform_pre_processing_should_return_an_image_as_an_array_with_the_expected_format( + self, + ): # Given model_forwarder = TFServingDetectionWrapper( - base_url='', - class_names_path=TEST_DATA_FOLDER_PATH / 'test_detection_labels' + base_url="", + class_names_path=TEST_DATA_FOLDER_PATH / "test_detection_labels", ) - binary = open(TEST_DATA_FOLDER_PATH / 'mask_images' / 'person_with_mask.jpg', 'br').read() + binary = open( + TEST_DATA_FOLDER_PATH / "mask_images" / "person_with_mask.jpg", "br" + ).read() expected_shape = (1, 720, 1080, 3) model = ModelInfos( @@ -30,36 +36,41 @@ def test_perform_pre_processing_should_return_an_image_as_an_array_with_the_expe assert 0 <= actual.min() <= 255 assert 0 <= actual.max() <= 255 - def test_perform_post_processing_should_transform_the_standard_output_from_the_model_into_the_expected_format(self): + def test_perform_post_processing_should_transform_the_standard_output_from_the_model_into_the_expected_format( + self, + ): # Given model_forwarder = TFServingDetectionWrapper( - base_url='', - class_names_path=TEST_DATA_FOLDER_PATH / 'test_detection_foo_bar_baz_labels', - image_shape=[1, 1] + base_url="", + class_names_path=TEST_DATA_FOLDER_PATH + / "test_detection_foo_bar_baz_labels", + image_shape=[1, 1], ) 
json_outputs = { - 'detection_boxes': [[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]], - 'detection_scores': [[0.8, 0.7, 0.6]], - 'detection_classes': [[1., 1., 2.]] + "detection_boxes": [[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]], + "detection_scores": [[0.8, 0.7, 0.6]], + "detection_classes": [[1.0, 1.0, 2.0]], } - model = ModelInfos('id1', 'name1', 'detection', '1', 'camera_id1', image_resolution=[640, 640], - class_names=['OK', 'KO'], boxes_coordinates='detection_boxes', - objectness_scores='detection_scores', number_of_boxes='num_detections', - detection_classes='detection_classes', class_to_detect=['foo'], - objectness_threshold=0.5) + model = ModelInfos( + "id1", + "name1", + "detection", + "1", + "camera_id1", + image_resolution=[640, 640], + class_names=["OK", "KO"], + boxes_coordinates="detection_boxes", + objectness_scores="detection_scores", + number_of_boxes="num_detections", + detection_classes="detection_classes", + class_to_detect=["foo"], + objectness_threshold=0.5, + ) expected = { - 'object_1': { - 'label': 'foo', - 'location': [2, 1, 4, 3], - 'score': 0.8 - }, - 'object_2': { - 'label': 'foo', - 'location': [6, 5, 8, 7], - 'score': 0.7 - } + "object_1": {"label": "foo", "location": [2, 1, 4, 3], "score": 0.8}, + "object_2": {"label": "foo", "location": [6, 5, 8, 7], "score": 0.7}, } # When