From 52988fadaa3cab6b101c1990387d2e3241b7e5f9 Mon Sep 17 00:00:00 2001 From: Glydric Date: Tue, 23 Jan 2024 12:39:24 +0100 Subject: [PATCH 1/4] fixes --- Backend/src/database/database.service.spec.ts | 2 +- Backend/src/database/database.service.ts | 21 ++++++++++++++++--- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/Backend/src/database/database.service.spec.ts b/Backend/src/database/database.service.spec.ts index b0b9e76..f093f8b 100644 --- a/Backend/src/database/database.service.spec.ts +++ b/Backend/src/database/database.service.spec.ts @@ -21,7 +21,7 @@ describe('DatabaseService', () => { }); it('should get aggregated data', async () => { - const aggregateData = await databaseService.aggregateCamera(); + const aggregateData = await databaseService.aggregateCamera('all'); expect(aggregateData).not.toBeNull(); }); diff --git a/Backend/src/database/database.service.ts b/Backend/src/database/database.service.ts index 559fe65..1fa726e 100644 --- a/Backend/src/database/database.service.ts +++ b/Backend/src/database/database.service.ts @@ -101,9 +101,24 @@ export class DatabaseService { else return res.toArray(); } - aggregateCamera(filter?: FiltersAvailable): Promise { + aggregateCamera(filter: FiltersAvailable): Promise { + console.log(this.getFilter(filter)); return this.DB.collection('cameras') - .aggregate() + .aggregate([ + { + $addFields: { + intrusionDetection: { + $cond: { + if: { + $ifNull: ['$intrusionDetection', false], + }, + then: true, + else: false, + }, + }, + }, + }, + ]) .match(this.getFilter(filter)) .group({ _id: '$cameraId', @@ -149,7 +164,7 @@ export class DatabaseService { }); } - private getFilter(filter?: FiltersAvailable) { + private getFilter(filter: FiltersAvailable) { switch (filter) { case 'intrusionDetection': return { From 9cfa9436da13266128c56d5617a1e273abd07fbc Mon Sep 17 00:00:00 2001 From: Glydric Date: Thu, 25 Jan 2024 15:24:59 +0100 Subject: [PATCH 2/4] improvements --- Backend/.env-openvidu | 2 - 
.../cameraStream/cameraStream.gateway.spec.ts | 1 - Backend/src/database/database.service.ts | 68 ++++++------------- README.md | 4 +- 4 files changed, 25 insertions(+), 50 deletions(-) diff --git a/Backend/.env-openvidu b/Backend/.env-openvidu index 52fb6e3..0ffb3e2 100644 --- a/Backend/.env-openvidu +++ b/Backend/.env-openvidu @@ -1,5 +1,3 @@ OPENVIDU_SECRET=MY_SECRET OPENVIDU_WEBHOOK=true OPENVIDU_WEBHOOK_ENDPOINT=https://localhost:8080/media-server - - diff --git a/Backend/src/cameraStream/cameraStream.gateway.spec.ts b/Backend/src/cameraStream/cameraStream.gateway.spec.ts index 1158e50..cc32db8 100644 --- a/Backend/src/cameraStream/cameraStream.gateway.spec.ts +++ b/Backend/src/cameraStream/cameraStream.gateway.spec.ts @@ -2,7 +2,6 @@ import { Test, TestingModule } from '@nestjs/testing'; import { CameraStreamGateway } from './cameraStream.gateway'; import { JwtModule } from '@nestjs/jwt'; -import { OpenVidu } from 'openvidu-node-client'; import { CSSOpenVidu } from './open-vidu.service'; import { DatabaseService } from '../database/database.service'; diff --git a/Backend/src/database/database.service.ts b/Backend/src/database/database.service.ts index 1fa726e..833a6c8 100644 --- a/Backend/src/database/database.service.ts +++ b/Backend/src/database/database.service.ts @@ -59,11 +59,29 @@ export class DatabaseService { ); } + async initDBNvr() { + const data = { + name: 'NVR', + }; + const filter = { + name: 'NVR', + ip: process.env.NVR_IP_ADDRESS, + channels: [0, 1, 2, 3, 4, 5, 6, 7], + }; + + const size = await this.DB.collection('General').countDocuments(filter); + + if (size == 0) { + await this.DB.collection('General').insertOne(data); + } + } + constructor() { const client = new MongoClient(url); this.DB = client.db('csd'); this.initDBUser(); + this.initDBNvr(); } async addData(data: DataType) { @@ -102,7 +120,6 @@ export class DatabaseService { } aggregateCamera(filter: FiltersAvailable): Promise { - console.log(this.getFilter(filter)); return 
this.DB.collection('cameras') .aggregate([ { @@ -221,54 +238,13 @@ export class DatabaseService { // TODO TESTME // Returns NVR info such as IP address and available channels async getNVRData(): Promise { - const array = await this.getOtherwiseInsert( - 'General', - { - name: 'NVR', - }, - { - name: 'NVR', - ip: process.env.NVR_IP_ADDRESS, - channels: [0, 1, 2, 3, 4, 5, 6, 7], - }, - ); + const array = await this.getRawDataArray('General', { + name: 'NVR', + }); + return { ip: array[0].ip, channels: array[0].channels, }; - - // try { - // const array = await this.getRawDataArray('General', { - // name: 'NVR', - // }); - // return array[0]; - // } catch (e) { - // if (e instanceof NotFoundException) { - // const data = { - // name: 'NVR', - // ip: process.env.NVR_IP_ADDRESS, - // channels: [0, 1, 2, 3, 4, 5, 6, 7], - // }; - // - // await this.DB.collection(`General`).insertOne(data); - // return data; - // } else { - // console.error(e); - // } - // } - } - - async getOtherwiseInsert( - name: string, - filter: Filter, - data: Document, - ): Promise[]> { - const size = await this.DB.collection(name).countDocuments(filter); - - if (size == 0) { - await this.DB.collection(name).insertOne(data); - } - - return await this.getRawDataArray(name, filter); } } diff --git a/README.md b/README.md index 1405059..5ad1e2a 100644 --- a/README.md +++ b/README.md @@ -76,7 +76,9 @@ BCRYPT_SALT=... 
# Put the openvidu URL of the component that you have launched earlier OPENVIDU_URL= http://localhost:4443/ # Put the OPENVIDU_SECRET of the component that you have launched earlier -OPENVIDU_SECRET=MY_SECRET +OPENVIDU_SECRET=MY_SECRET +# Put the IP address of the NVR +NVR_IP_ADDRESS= ``` Then you can run ```bash From 4b51b0bdf7e71921e1cb4dc982f8cdf3c3d2135c Mon Sep 17 00:00:00 2001 From: Glydric Date: Thu, 25 Jan 2024 19:41:19 +0100 Subject: [PATCH 3/4] fixed camera detection parallel --- .gitignore | 3 +- .../machineLearning.controller.ts | 2 +- ImageRecognition/main.py | 19 ++ ImageRecognition/requirements.txt | 5 +- ImageRecognition/src/ObjectDetectionYOLO.py | 220 ++++++------------ 5 files changed, 102 insertions(+), 147 deletions(-) create mode 100644 ImageRecognition/main.py diff --git a/.gitignore b/.gitignore index 6f25554..3b19a69 100644 --- a/.gitignore +++ b/.gitignore @@ -112,8 +112,9 @@ dist # TernJS port file .tern-port -# Stores VSCode versions used for testing VSCode extensions +# VSCode files .vscode-test +.vscode # yarn v2 .yarn/cache diff --git a/Backend/src/app/machineLearning/machineLearning.controller.ts b/Backend/src/app/machineLearning/machineLearning.controller.ts index c20b669..4b84707 100644 --- a/Backend/src/app/machineLearning/machineLearning.controller.ts +++ b/Backend/src/app/machineLearning/machineLearning.controller.ts @@ -114,7 +114,7 @@ export class MachineLearningController { new ParseFilePipeBuilder() .addFileTypeValidator({ fileType: 'image/jpeg' }) .addMaxSizeValidator({ - maxSize: 100000, // 100Kb + maxSize: 50000000, // 50MB }) .build({ errorHttpStatusCode: HttpStatus.UNPROCESSABLE_ENTITY, }) diff --git a/ImageRecognition/main.py b/ImageRecognition/main.py new file mode 100644 index 0000000..9488906 --- /dev/null +++ b/ImageRecognition/main.py @@ -0,0 +1,19 @@ +import concurrent.futures +from src.ObjectDetectionYOLO import detection + +if __name__ == "__main__": + with concurrent.futures.ThreadPoolExecutor() as executor: + 
args: (int, int) = [(i, i) for i in range(0, 8)] + + results = [executor.submit(detection, *arg) for arg in args] + concurrent.futures.wait(results) + + +cam1 = "rtsp://192.168.1.41:80/ch0_0.264" +cam2 = "rtsp://192.168.1.41:80/ch1_0.264" +cam3 = "rtsp://192.168.1.41:80/ch2_0.264" +cam4 = "rtsp://192.168.1.41:80/ch3_0.264" +cam5 = "rtsp://192.168.1.41:80/ch4_0.264" +cam6 = "rtsp://192.168.1.41:80/ch5_0.264" +cam7 = "rtsp://192.168.1.41:80/ch6_0.264" +cam8 = "rtsp://192.168.1.41:80/ch7_0.264" diff --git a/ImageRecognition/requirements.txt b/ImageRecognition/requirements.txt index 0113573..e2c0c14 100644 --- a/ImageRecognition/requirements.txt +++ b/ImageRecognition/requirements.txt @@ -1,5 +1,6 @@ opencv-python~=4.8.1.78 numpy~=1.26.1 ping3~=4.0.4 -python>=3.8 -PyTorch>=1.8 \ No newline at end of file +ultralytics>=8.1.5 +scikit-learn~=1.3.2 +torch>=1.8 diff --git a/ImageRecognition/src/ObjectDetectionYOLO.py b/ImageRecognition/src/ObjectDetectionYOLO.py index 42b3095..59e5f46 100644 --- a/ImageRecognition/src/ObjectDetectionYOLO.py +++ b/ImageRecognition/src/ObjectDetectionYOLO.py @@ -1,167 +1,101 @@ +import io from ultralytics import YOLO import cv2 -import math +import math import threading import requests from sklearn.metrics import mean_squared_error +import numpy as np -#model +# model model = YOLO("yolo-Wheights/yolov8n.pt") -#class of YOLO -classNames = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat", - "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", - "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", - "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", - "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", - "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", - "carrot", "hot dog", "pizza", "donut", "cake", 
"chair", "sofa", "pottedplant", "bed", - "diningtable", "toilet", "tvmonitor", "laptop", "mouse", "remote", "keyboard", "cell phone", - "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", - "teddy bear", "hair drier", "toothbrush" - ] - -cam1 = 'rtsp://192.168.1.41:80/ch0_0.264' -cam2 = 'rtsp://192.168.1.41:80/ch1_0.264' -cam3 = 'rtsp://192.168.1.41:80/ch2_0.264' -cam4 = 'rtsp://192.168.1.41:80/ch3_0.264' -cam5 = 'rtsp://192.168.1.41:80/ch4_0.264' -cam6 = 'rtsp://192.168.1.41:80/ch5_0.264' -cam7 = 'rtsp://192.168.1.41:80/ch6_0.264' -cam8 = 'rtsp://192.168.1.41:80/ch7_0.264' - last_sent_image = None +url = "https://localhost:8080" + def post_request(image, camera_id, status): global last_sent_image - url = 'http://localhost:8080' - - data = { - 'id': camera_id, - 'status': status, - } - - if last_sent_image is None: - last_sent_image = image - try: - response = requests.post(url, data=data, files={'file': (image, 'image/jpeg')}) - print("Status code is: ", response.status_code) - except requests.exceptions.RequestException as e: - print("There was an exception that occurred while handling your request.", e) - else: + if last_sent_image is not None: # Resizing flat_last_sent_image = last_sent_image.flatten() flat_image = image.flatten() + min_len = min(len(flat_last_sent_image), len(flat_image)) - r_last_sent_image = cv2.resize(flat_last_sent_image.reshape(1, -1), (min_len, 1)) + r_last_sent_image = cv2.resize( + flat_last_sent_image.reshape(1, -1), (min_len, 1) + ) r_image = cv2.resize(flat_image.reshape(1, -1), (min_len, 1)) - #mean squared error + # mean squared error mse = mean_squared_error(r_last_sent_image, r_image) print("MEAN SQUARED ERROR is: ", mse) - if mse > 105.85: - try: - response = requests.post(url, data=data, files={'file': (image, 'image/jpeg')}) - print("Status code is: ", response.status_code) - - if response.status_code == 200: - last_sent_image = image - except requests.exceptions.RequestException as e: 
- print("There was an exception that occurred while handling your request.", e) - - - - -#filename = url of cam -#file_index = index of the file that can be assigned to each thread. cam1 has file_index as 1, cam2 has file_index as 2... -def detection(filename, file_index): - cap = cv2.VideoCapture(filename) - global last_sent_image - status = 'offline' + if mse < 105.70: + return + + # image = cv2.resize(image, (0.5, 0.5)) + image_stream = np.array(image).tobytes() + + try: + response = requests.post( + f"{url}/{camera_id}", + files={"file": ("image.jpg", image_stream, "image/jpeg")}, + verify=False, # "Backend/src/ssl_certificate/server.pem", + ) + + if response.status_code == 201: + last_sent_image = image + elif response.status_code == 422: + print(response.content) + print(response.request.headers) + # else: + # print("Status code is: ", response.status_code) + except requests.exceptions.RequestException as e: + print( + "There was an exception that occurred while handling your request.", + e, + ) + + +# filename = url of cam +# file_index = index of the file that can be assigned to each thread. cam1 has file_index as 1, cam2 has file_index as 2... 
+def detection(camera_id: int, _: int): + print(f"capturing {camera_id}") + print(f"rtsp://192.168.1.41:80/ch{camera_id}_0.264") + + # FIXME camera capture takes to much time to load the camera connection (first time) + cap = cv2.VideoCapture(f"rtsp://192.168.1.41:80/ch{camera_id}_0.264") + status = "offline" + + print(f"started {camera_id}") while cap.isOpened(): - - success, img = cap.read() - if not success: - break - - status = 'online' - results = model(img, stream = True, classes = 0, conf=0.5) - foundPerson = False - #coordinates - for r in results: - boxes = r.boxes - for box in boxes: - #bounding box - x1, y1, x2, y2 = box.xyxy[0] - x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2) - cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 255), 3) - # confidence - confidence = math.ceil((box.conf[0]*100))/100 - print("Confidence --->",confidence) - # class name - cls = int(box.cls[0]) - print("Class name -->", classNames[cls]) - - foundPerson = True - - if(foundPerson): - _, img_encoded = cv2.imencode('.jpg', img) - post_request(img_encoded, file_index, status) - - - cap.release() - - - -detect_thread1 = threading.Thread(target=detection, - args=(cam1, 1), - daemon=True) - -detect_thread2 = threading.Thread(target=detection, - args=(cam2, 2), - daemon=True) + success, img = cap.read() + if not success: + continue + + print(f"captured image from {camera_id}") + + status = "online" + results = model(img, stream=True, classes=0, conf=0.5, verbose=False) + foundPerson = False + # coordinates + for r in results: + boxes = r.boxes + for box in boxes: + # bounding box + x1, y1, x2, y2 = box.xyxy[0] + x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2) + cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 255), 3) + # confidence + confidence = math.ceil((box.conf[0] * 100)) / 100 + print("Confidence --->", confidence) + + foundPerson = True + + if foundPerson: + _, img_encoded = cv2.imencode(".jpg", img) + post_request(img_encoded, camera_id, status) -detect_thread3 = 
threading.Thread(target=detection, - args=(cam3, 3), - daemon=True) - -detect_thread4 = threading.Thread(target=detection, - args=(cam4, 4), - daemon=True) - -detect_thread5 = threading.Thread(target=detection, - args=(cam5, 5), - daemon=True) - -detect_thread6 = threading.Thread(target=detection, - args=(cam6, 6), - daemon=True) - -detect_thread7 = threading.Thread(target=detection, - args=(cam7, 7), - daemon=True) - -detect_thread8 = threading.Thread(target=detection, - args=(cam8, 8), - daemon=True) - - -detect_thread1.start() -detect_thread2.start() -detect_thread3.start() -detect_thread4.start() -detect_thread5.start() -detect_thread6.start() -detect_thread7.start() -detect_thread8.start() - -detect_thread1.join() -detect_thread2.join() -detect_thread3.join() -detect_thread4.join() -detect_thread5.join() -detect_thread6.join() -detect_thread7.join() -detect_thread8.join() \ No newline at end of file + cap.release() From 13748375f4fd9fd78b88a23ebe6606a34af9b4a1 Mon Sep 17 00:00:00 2001 From: Glydric Date: Thu, 25 Jan 2024 20:00:02 +0100 Subject: [PATCH 4/4] removed useless --- ImageRecognition/src/ObjectDetectionYOLO.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/ImageRecognition/src/ObjectDetectionYOLO.py b/ImageRecognition/src/ObjectDetectionYOLO.py index 59e5f46..a7c13d7 100644 --- a/ImageRecognition/src/ObjectDetectionYOLO.py +++ b/ImageRecognition/src/ObjectDetectionYOLO.py @@ -1,8 +1,6 @@ -import io from ultralytics import YOLO import cv2 import math -import threading import requests from sklearn.metrics import mean_squared_error import numpy as np