diff --git a/ImageRecognition/main.py b/ImageRecognition/main.py
deleted file mode 100644
index 770a4a7..0000000
--- a/ImageRecognition/main.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import concurrent.futures
-
-from src.handleSystemStream import obtain_frames, handle_connection
-
-IPs = [
-    "172.20.14.97",
-    "172.20.14.128",
-    "172.20.14.89",
-    "172.20.14.47",
-    "172.20.14.197",
-    "172.20.14.198",
-    "172.20.14.140",
-    "172.20.14.58",
-]
-
-
-def start(debug: bool):
-    with concurrent.futures.ThreadPoolExecutor() as executor:
-        args: (str, str, bool) = [(
-            ip,
-            debug
-        ) for ip in IPs]
-
-        results = [executor.submit(obtain_frames, *arg) for arg in args]
-        concurrent.futures.wait(results)
-
-
-if __name__ == "__main__":
-    # handle_connection("172.20.14.97")
-    start(False)
diff --git a/ImageRecognition/send_frame.py b/ImageRecognition/send_frame.py
new file mode 100644
index 0000000..ec68298
--- /dev/null
+++ b/ImageRecognition/send_frame.py
@@ -0,0 +1,60 @@
+import cv2
+import socketio
+import threading
+
+# Camera stream URLs (one RTSP channel per camera)
+cameras = {
+    '1': 'rtsp://192.168.1.41:80/ch0_0.264',
+    '2': 'rtsp://192.168.1.41:80/ch1_0.264',
+    '3': 'rtsp://192.168.1.41:80/ch2_0.264',
+    '4': 'rtsp://192.168.1.41:80/ch3_0.264',
+    '5': 'rtsp://192.168.1.41:80/ch4_0.264',
+    '6': 'rtsp://192.168.1.41:80/ch5_0.264',
+    '7': 'rtsp://192.168.1.41:80/ch6_0.264',
+    '8': 'rtsp://192.168.1.41:80/ch7_0.264',
+}
+
+# Server settings
+server_url = 'http://localhost:8080'
+
+sio = socketio.Client()
+
+
+def convert_frame(frame):
+    # Encode the raw BGR frame as JPEG bytes for transport
+    jpeg_frame = cv2.imencode('.jpg', frame)[1].tobytes()
+    return jpeg_frame
+
+
+def send_frames(camera_id, capture, interval=0.1):
+    while True:
+        ret, frame = capture.read()
+        if ret:
+            frame_conv = convert_frame(frame)
+            sio.emit('message', {'id': camera_id, 'data': frame_conv})
+        # Throttle even on failed reads so a dead camera does not busy-loop
+        sio.sleep(interval)
+
+
+@sio.event
+def connect():
+    print("Server connected")
+
+
+@sio.event
+def disconnect():
+    print("Server disconnected")
+
+
+# Connect before spawning the sender threads so emits have a live socket
+sio.connect(server_url, transports=['websocket'])
+
+for camera_id, camera_url in cameras.items():
+    capture = cv2.VideoCapture(camera_url)
+    if not capture.isOpened():
+        print(f"Error opening camera {camera_id}")
+        continue
+
+    threading.Thread(target=send_frames, args=(camera_id, capture), daemon=True).start()
+
+sio.wait()
diff --git a/ImageRecognition/src/ObjectDetectionYOLO.py b/ImageRecognition/src/ObjectDetectionYOLO.py
index 5f409e2..cf69984 100644
--- a/ImageRecognition/src/ObjectDetectionYOLO.py
+++ b/ImageRecognition/src/ObjectDetectionYOLO.py
@@ -3,8 +3,6 @@ import math
 import threading
 import requests
 
-import numpy as np
-from skimage.metrics import structural_similarity as ssim
 from sklearn.metrics import mean_squared_error
 
 #model
@@ -32,6 +30,8 @@ cam8 = 'rtsp://192.168.1.41:80/ch7_0.264'
 
 
 
+TOKEN = '6929905186:AAEouI3G2sbfS-y6ZzkXrpNgPQRAPs5_v-g'
+channel_id = '792557360'
 last_sent_image = None
 
 def post_request(image):
diff --git a/ImageRecognition/src/convertStream.py b/ImageRecognition/src/convertStream.py
deleted file mode 100644
index d1b6d18..0000000
--- a/ImageRecognition/src/convertStream.py
+++ /dev/null
@@ -1,74 +0,0 @@
-import gi
-gi.require_version('Gst', '1.0')
-from gi.repository import GObject, Gst
-import numpy as np
-import cv2
-
-GObject.threads_init()
-Gst.init(None)
-
-def YUV_stream2RGB_frame(data):
-
-    w=640
-    h=368
-    size=w*h
-
-    stream=np.fromstring(data,np.uint8) #convert data form string to numpy array
-
-    #Y bytes will start form 0 and end in size-1
-    y=stream[0:size].reshape(h,w) # create the y channel same size as the image
-
-    #U bytes will start from size and end at size+size/4 as its size = framesize/4
-    u=stream[size:(size+(size/4))].reshape((h/2),(w/2))# create the u channel its size=framesize/4
-
-    #up-sample the u channel to be the same size as the y channel and frame using pyrUp func in opencv2
-    u_upsize=cv2.pyrUp(u)
-
-    #do the same for v channel
-    v=stream[(size+(size/4)):].reshape((h/2),(w/2))
-    v_upsize=cv2.pyrUp(v)
-
-    #create the 3-channel frame using cv2.merge func watch for the order
-    yuv=cv2.merge((y,u_upsize,v_upsize))
-
-    #Convert TO RGB format
-    rgb=cv2.cvtColor(yuv,cv2.cv.CV_YCrCb2RGB)
-
-    #show frame
-    cv2.imshow("show",rgb)
-    cv2.waitKey(5)
-
-def on_new_buffer(appsink):
-
-    sample = appsink.emit('pull-sample')
-    #get the buffer
-    buf=sample.get_buffer()
-    #extract data stream as string
-    data=buf.extract_dup(0,buf.get_size())
-    YUV_stream2RGB_frame(data)
-    return False
-
-def Init():
-
-    CLI="rtspsrc name=src location=rtsp://192.168.1.20:554/live/ch01_0 latency=10 !decodebin ! appsink name=sink"
-
-    #simplest way to create a pipline
-    pipline=Gst.parse_launch(CLI)
-
-    #getting the sink by its name set in CLI
-    appsink=pipline.get_by_name("sink")
-
-    #setting some important properties of appsnik
-    appsink.set_property("max-buffers",20) # prevent the app to consume huge part of memory
-    appsink.set_property('emit-signals',True) #tell sink to emit signals
-    appsink.set_property('sync',False) #no sync to make decoding as fast as possible
-
-    appsink.connect('new-sample', on_new_buffer) #connect signal to callable func
-
-def run():
-    pipline.set_state(Gst.State.PLAYING)
-    GObject.MainLoop.run()
-
-
-Init()
-run()
\ No newline at end of file
diff --git a/ImageRecognition/src/handleSystemStream.py b/ImageRecognition/src/handleSystemStream.py
deleted file mode 100644
index 2a4a66f..0000000
--- a/ImageRecognition/src/handleSystemStream.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import cv2
-import ffmpeg
-import numpy as np
-from ping3 import ping
-
-
-def handle_status(ip: str, is_available: bool):
-    if is_available:
-        print(f"Failed connection with ip: {ip}")
-    else:
-        print(f"Connected with ip: {ip}")
-
-    # TODO save log
-
-
-opts = {
-    "loglevel": "quiet",  # "r": "30", "f": "avfoundation"
-    "headers": 'Authorization: Basic YWRtaW46'
-}
-
-
-def handle_connection(ip: str, debug: bool):
-    # Run the command using subprocess
-    cmd = (ffmpeg
-           .input(f"http://{ip}/livestream/11", **opts)
-           .output("pipe:", format="rawvideo", pix_fmt="bgr24")
-           .run_async(pipe_stdout=True)
-           )
-    while True:
-        raw_frame = cmd.stdout.read(1920 * 1080 * 3)
-        if not raw_frame:
-            break
-        frame = np.frombuffer(raw_frame, np.uint8).reshape((1080, 1920, 3))
-
-        if debug:
-            cv2.imshow(f"VideoFrame{ip}", frame)
-            if cv2.waitKey(1) & 0xFF == ord("q"):
-                break
-
-
-def obtain_frames(ip: str, debug: bool):
-    while True:
-        available = ping(ip) is not None
-        handle_status(ip, available)
-
-        if available:
-            handle_connection(ip, debug)
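
Note: the receiving end at http://localhost:8080 is not part of this diff. For local testing, a minimal counterpart might look like the sketch below. It assumes python-socketio with eventlet; the event name 'message' and the {'id', 'data'} payload shape are taken from send_frame.py, while the handler and variable names (handle_frame, payload, arr) are illustrative, not part of the project.

# Sketch of a receiving server for send_frame.py (assumes python-socketio + eventlet).
import eventlet
import socketio
import numpy as np
import cv2

sio = socketio.Server(async_mode='eventlet')
app = socketio.WSGIApp(sio)

@sio.on('message')
def handle_frame(sid, payload):
    # payload['data'] holds the JPEG bytes produced by convert_frame()
    arr = np.frombuffer(payload['data'], np.uint8)
    frame = cv2.imdecode(arr, cv2.IMREAD_COLOR)
    if frame is not None:
        print(f"camera {payload['id']}: frame {frame.shape}")

@sio.event
def connect(sid, environ):
    print(f"Sender connected: {sid}")

@sio.event
def disconnect(sid):
    print(f"Sender disconnected: {sid}")

# Listen on the port that send_frame.py targets (localhost:8080)
eventlet.wsgi.server(eventlet.listen(('', 8080)), app)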