You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Hi, I am working on a cool autonomous machine powered by YOLO. I have connected 7 USB camera modules and they run at 50 FPS. However, when I run yolov4-tiny on each of their feeds, the FPS drops to 10. I put each camera in a different process using multiprocessing.Process, but the FPS is still low. It looks like multiprocessing only helps the CPU. It is depressing to see the FPS at 10 and the GPU usage at 40%.
Is there any way I can utilize 100% of my RTX 2070 Super with yolo? Is it something that I can do or it depends on the yolo development? Can I use multiprocessing with the GPU?
Thanks for your attention.
I put this script together from others on the internet. When I run one inference it runs at 10 FPS with 20% GPU usage. When I run two, each runs at 5 FPS, still with 20% GPU usage. How can I make the GPU give more performance, like multiprocessing does for the CPU?
import multiprocessing
import time
import cv2
import numpy as np
# In-screen FPS counter state: last frame timestamp and the
# exponentially smoothed FPS value drawn on each frame.
timeStamp = time.time()
fpsFilt = 0
reacted with thumbs up emoji reacted with thumbs down emoji reacted with laugh emoji reacted with hooray emoji reacted with confused emoji reacted with heart emoji reacted with rocket emoji reacted with eyes emoji
-
Hi, I am working on a cool autonomous machine powered by YOLO. I have connected 7 USB camera modules and they run at 50 FPS. However, when I run yolov4-tiny on each of their feeds, the FPS drops to 10. I put each camera in a different process using multiprocessing.Process, but the FPS is still low. It looks like multiprocessing only helps the CPU. It is depressing to see the FPS at 10 and the GPU usage at 40%.
Is there any way I can utilize 100% of my RTX 2070 Super with yolo? Is it something that I can do or it depends on the yolo development? Can I use multiprocessing with the GPU?
Thanks for your attention.
I put this script together from others on the internet. When I run one inference it runs at 10 FPS with 20% GPU usage. When I run two, each runs at 5 FPS, still with 20% GPU usage. How can I make the GPU give more performance, like multiprocessing does for the CPU?
import multiprocessing
import time
import cv2
import numpy as np
# In-screen FPS counter state: last frame timestamp and the
# exponentially smoothed FPS value drawn on each frame.
timeStamp = time.time()
fpsFilt = 0
# Code constants
th1 = 5
th2 = 100
frame_counter = 0
font = cv2.FONT_HERSHEY_SIMPLEX
reverse = 2560
whT = 320            # YOLO network input size (square, pixels)
confThreshold = 0.5  # minimum class confidence to keep a detection
nmsThreshold = 0.2   # non-max-suppression overlap threshold
# Fallback frame used when a camera read fails.
# NOTE(review): cv2.imread returns None when the file is missing — verify the path.
imgerror = cv2.imread('/home/kc/Downloads/darknet/C.jpg')
def Cam1(to_AI1):  # camera 1 capture loop
    """Capture frames from camera 1 and forward them to the inference process.

    Opens the fixed V4L device, configures MJPG at 1280x720, then loops
    forever pushing frames onto the multiprocessing queue ``to_AI1``.
    The queue is kept shallow (newest frame only) so the slower
    inference process never falls behind the camera.
    """
    dispW = 1280
    dispH = 720
    cap = cv2.VideoCapture('/dev/v4l/by-path/pci-0000:05:00.0-usb-0:2.3.3:1.0-video-index0')
    # MJPG is required to reach high FPS at this resolution over USB.
    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, dispW)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, dispH)
    try:
        while True:
            success, img = cap.read()
            if not success:
                # Read failed: substitute the module-level fallback image
                # so the consumer is never starved, and keep trying.
                img = imgerror
            # Drop stale frames: only enqueue when the consumer is caught up.
            if to_AI1.empty():
                to_AI1.put(img)
    finally:
        cap.release()
def Cam2(to_AI2):  # camera 2 capture loop
    """Capture frames from camera 2 and forward them to the inference process.

    Opens the fixed V4L device, configures MJPG at 1280x720, then loops
    forever pushing frames onto the multiprocessing queue ``to_AI2``.
    The queue is kept shallow (newest frame only) so the slower
    inference process never falls behind the camera.
    """
    dispW = 1280
    dispH = 720
    cap = cv2.VideoCapture('/dev/v4l/by-path/pci-0000:05:00.0-usb-0:2.4:1.0-video-index0')
    # MJPG is required to reach high FPS at this resolution over USB.
    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, dispW)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, dispH)
    try:
        while True:
            success, img = cap.read()
            if not success:
                # Read failed: substitute the module-level fallback image
                # so the consumer is never starved, and keep trying.
                img = imgerror
            # Drop stale frames: only enqueue when the consumer is caught up.
            if to_AI2.empty():
                to_AI2.put(img)
    finally:
        cap.release()
def inference1_AI(to_AI1):  # inference on frames from Cam1
    """Run YOLO inference on frames received from camera 1.

    Loads the Darknet model once, then loops: pull the newest frame off
    ``to_AI1``, run a forward pass, draw the surviving detections plus a
    smoothed FPS figure, and display the frame. Press 'q' in the display
    window to exit the loop.
    """
    global fpsFilt
    global timeStamp
    global frame_counter
    classesFile = "/home/kc/Downloads/darknet/data/coco.names"
    with open(classesFile, 'rt') as f:
        classNames = f.read().rstrip('\n').split('\n')
    print(classNames)
    modelConfiguration = "/home/kc/Downloads/darknet/yolov4.cfg"
    modelWeights = "/home/kc/Downloads/darknet/yolov4.weights"
    net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
    # The CPU backend is why GPU utilisation stayed low: run the forward
    # pass on the GPU instead. Requires OpenCV built with CUDA support;
    # OpenCV falls back to CPU if the CUDA backend is unavailable.
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
    while True:
        img = to_AI1.get()
        blob = cv2.dnn.blobFromImage(img, 1.0 / 255, (whT, whT), [0, 0, 0], swapRB=1, crop=False)
        net.setInput(blob)
        layer_names = net.getLayerNames()
        # np.array(...).flatten() handles both old ([[i]]) and new ([i])
        # return shapes of getUnconnectedOutLayers().
        out_names = [layer_names[i - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]
        outputs = net.forward(out_names)
        # Collect boxes above the confidence threshold, then NMS-filter.
        hT, wT = img.shape[:2]
        bbox, classIds, confs = [], [], []
        for output in outputs:
            for det in output:
                scores = det[5:]
                classId = int(np.argmax(scores))
                confidence = float(scores[classId])
                if confidence > confThreshold:
                    w, h = int(det[2] * wT), int(det[3] * hT)
                    x, y = int(det[0] * wT - w / 2), int(det[1] * hT - h / 2)
                    bbox.append([x, y, w, h])
                    classIds.append(classId)
                    confs.append(confidence)
        indices = cv2.dnn.NMSBoxes(bbox, confs, confThreshold, nmsThreshold)
        for i in np.array(indices).flatten():
            x, y, w, h = bbox[i]
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 255), 2)
            cv2.putText(img, f'{classNames[classIds[i]].upper()} {int(confs[i] * 100)}%',
                        (x, y - 10), font, 0.6, (255, 0, 255), 2)
        # Exponentially smoothed FPS overlay.
        now = time.time()
        fps = 1.0 / max(now - timeStamp, 1e-6)
        timeStamp = now
        fpsFilt = 0.9 * fpsFilt + 0.1 * fps
        frame_counter += 1
        cv2.putText(img, str(round(fpsFilt, 1)) + ' fps', (0, 30), font, 1, (0, 0, 255), 2)
        cv2.imshow('Cam1 AI', img)
        if cv2.waitKey(1) == ord('q'):
            break
    cv2.destroyAllWindows()
    print('after brake')
def inference2_AI(to_AI2):  # inference on frames from Cam2
    """Run YOLO inference on frames received from camera 2.

    Loads the Darknet model once, then loops: pull the newest frame off
    ``to_AI2``, run a forward pass, draw the surviving detections plus a
    smoothed FPS figure, and display the frame. Press 'q' in the display
    window to exit the loop.
    """
    global fpsFilt
    global timeStamp
    global frame_counter
    classesFile = "/home/kc/Downloads/darknet/data/coco.names"
    with open(classesFile, 'rt') as f:
        classNames = f.read().rstrip('\n').split('\n')
    print(classNames)
    modelConfiguration = "/home/kc/Downloads/darknet/yolov4.cfg"
    modelWeights = "/home/kc/Downloads/darknet/yolov4.weights"
    net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
    # The CPU backend is why GPU utilisation stayed low: run the forward
    # pass on the GPU instead. Requires OpenCV built with CUDA support;
    # OpenCV falls back to CPU if the CUDA backend is unavailable.
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
    while True:
        img = to_AI2.get()
        blob = cv2.dnn.blobFromImage(img, 1.0 / 255, (whT, whT), [0, 0, 0], swapRB=1, crop=False)
        net.setInput(blob)
        layer_names = net.getLayerNames()
        # np.array(...).flatten() handles both old ([[i]]) and new ([i])
        # return shapes of getUnconnectedOutLayers().
        out_names = [layer_names[i - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]
        outputs = net.forward(out_names)
        # Collect boxes above the confidence threshold, then NMS-filter.
        hT, wT = img.shape[:2]
        bbox, classIds, confs = [], [], []
        for output in outputs:
            for det in output:
                scores = det[5:]
                classId = int(np.argmax(scores))
                confidence = float(scores[classId])
                if confidence > confThreshold:
                    w, h = int(det[2] * wT), int(det[3] * hT)
                    x, y = int(det[0] * wT - w / 2), int(det[1] * hT - h / 2)
                    bbox.append([x, y, w, h])
                    classIds.append(classId)
                    confs.append(confidence)
        indices = cv2.dnn.NMSBoxes(bbox, confs, confThreshold, nmsThreshold)
        for i in np.array(indices).flatten():
            x, y, w, h = bbox[i]
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 255), 2)
            cv2.putText(img, f'{classNames[classIds[i]].upper()} {int(confs[i] * 100)}%',
                        (x, y - 10), font, 0.6, (255, 0, 255), 2)
        # Exponentially smoothed FPS overlay.
        now = time.time()
        fps = 1.0 / max(now - timeStamp, 1e-6)
        timeStamp = now
        fpsFilt = 0.9 * fpsFilt + 0.1 * fps
        frame_counter += 1
        cv2.putText(img, str(round(fpsFilt, 1)) + ' fps', (0, 30), font, 1, (0, 0, 255), 2)
        cv2.imshow('Cam2 AI', img)
        if cv2.waitKey(1) == ord('q'):
            break
    cv2.destroyAllWindows()
    print('after brake')
# Guarding the process wiring under __main__ is required for the
# multiprocessing "spawn" start method (Windows/macOS): without it the
# child processes re-execute this block on import and spawn recursively.
if __name__ == '__main__':
    to_AI1 = multiprocessing.Queue()
    to_AI2 = multiprocessing.Queue()
    # camera capture processes
    t1Cam = multiprocessing.Process(target=Cam1, args=(to_AI1,))
    t2Cam = multiprocessing.Process(target=Cam2, args=(to_AI2,))
    # inference (AI) processes
    t1AI = multiprocessing.Process(target=inference1_AI, args=(to_AI1,))
    t2AI = multiprocessing.Process(target=inference2_AI, args=(to_AI2,))
    # Stagger camera start-up slightly so the USB bus is not hit at once.
    t1Cam.start()
    time.sleep(.3)
    t2Cam.start()
    time.sleep(.3)
    t1AI.start()
    t2AI.start()
    # Wait for the inference processes to finish, then stop the
    # camera loops (they run forever and must be terminated).
    t1AI.join()
    t2AI.join()
    t1Cam.terminate()
    t2Cam.terminate()
    t1Cam.join()
    t2Cam.join()
Beta Was this translation helpful? Give feedback.
All reactions