# car_counter_BGsub.py
# -*- coding: utf-8 -*-
#
# Car counter BGsub
# =================
# Program counts cars on a video stream and uploads the
# counts to Thingspeak.com. It differentiates between the
# direction of travel
#
# In Debug Mode (postfix -d) a file output.avi is saved with marked image crop
#
# by Nico Buhl, 2021-22
#
# Inspired by
# https://www.youtube.com/watch?v=HXDD7-EnGBY
#
import cv2
import sys
from tracker_BGsub import EuclideanDistTracker
import time
import http.client
import numpy as np
from gpiozero import CPUTemperature
# Debug mode is enabled by passing "-d" as the single command-line argument.
debugmode = len(sys.argv) == 2 and sys.argv[1] == '-d'
if debugmode:
    print("Debug mode active!")
def transferTS(field_str):
    """POST the given field string to Thingspeak, appending the Pi's CPU
    temperature as field8.

    Parameters
    ----------
    field_str : str
        Pre-built ``"fieldN=value&..."`` query fragment.

    Returns
    -------
    bool
        True when the HTTP request completed, False when the connection
        failed (best-effort: the caller keeps counting either way).
    """
    key = "HERE_YOUR_THINGSPEAK_WRITEKEY"
    # Append CPU temperature (field8) and the Thingspeak write key.
    cputemp = round(CPUTemperature().temperature, 2)
    params = field_str + "&field8=" + str(cputemp) + "&key=" + key
    # Fixed garbled header name ("Content-typZZe") so the form body is
    # actually declared as urlencoded.
    headers = {"Content-type": "application/x-www-form-urlencoded",
               "Accept": "text/plain"}
    conn = http.client.HTTPConnection("api.thingspeak.com:80")
    try:
        conn.request("POST", "/update", params, headers)
        response = conn.getresponse()
        print(response.status, response.reason)
        status = True
    except (http.client.HTTPException, OSError):
        # Narrowed from a bare except; network errors are reported, not fatal.
        print("connection failed")
        status = False
    finally:
        # Close on every path (the original leaked the socket on failure).
        conn.close()
    return status
# ---- Main-loop state and configuration -----------------------------------
# Timestamps of the previous / current frame, used for the FPS readout
# printed in debug mode.
prev_frame_time = 0
new_frame_time = 0
# Interval between Thingspeak uploads, in seconds (5 minutes).
timetouploadTS = 60 * 5
# Picture crop (region of interest): X1, X2, Y1, Y2.
# Verify this in debug mode -- the crop is drawn on the saved video.
roixy = [ 470, 675, 200, 337 ]  # X1, X2, Y1, Y2
# Tracker that assigns IDs to detections and keeps the per-direction counts.
tracker = EuclideanDistTracker(roixy)
# Camera setup: request a 1280x720 stream.
cap = cv2.VideoCapture(0)
# Use named property constants instead of the magic IDs 3/4 (same values),
# consistent with the cap.get() calls below.
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
# If debug mode is active, record the annotated stream to output.avi.
if debugmode == True:
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) + 0.5)
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) + 0.5)
    size = (width, height)
    fourcc = cv2.VideoWriter_fourcc('X','V','I','D')
    out = cv2.VideoWriter('output.avi', fourcc, 20.0, size)
# Background subtractor for object detection from a stable camera.
object_detector = cv2.createBackgroundSubtractorMOG2(history=750, varThreshold=250) # History=100 Threshold=250
# Start timer for uploading measurements to Thingspeak.
last_TS_transfer = time.perf_counter()
# Main processing loop: grab a frame, detect moving blobs in the ROI,
# track them, and periodically upload the counts to Thingspeak.
while True:
    if debugmode == True:
        start_timer = time.perf_counter()
        print("===== NEW FRAME =====")
    # Grab a frame; camera is mounted upside down, so rotate 180 degrees.
    _,frame = cap.read()
    frame = cv2.rotate(frame, cv2.ROTATE_180)
    # Extract region of interest (configured in roixy as X1,X2,Y1,Y2).
    roi = frame[roixy[2]:roixy[3],roixy[0]:roixy[1]]
    # Foreground mask from the MOG2 background subtractor.
    mask = object_detector.apply(roi)
    # _, mask = cv2.threshold(mask, 240,255, cv2.THRESH_BINARY)
    # Dilate to merge fragmented foreground blobs before contour search.
    dilkernel = np.ones((5,5),np.uint8)
    mask = cv2.dilate(mask,dilkernel,iterations = 2)
    contours,_ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # NOTE(review): this local list is never used -- detections are appended
    # directly to tracker.detections below; confirm against tracker_BGsub.
    detections = []
    for cnt in contours:
        # Calculate area and remove small elements (noise / partial blobs).
        area = cv2.contourArea(cnt)
        if area > 5000:
            cv2.drawContours(roi, [cnt], -1, (0,0,255), 1)
            x,y,w,h = cv2.boundingRect(cnt)
            # Feed the bounding box to the tracker for this frame.
            tracker.detections.append([x,y,w,h])
    # Object tracking: assign IDs and update counts (tracker consumes the
    # detections appended above; update() takes no arguments).
    boxes_ids = tracker.update()
    # Show boxes and IDs (drawing only happens in debug mode).
    for box_id in boxes_ids:
        x, y, w, h, id = box_id
        if debugmode == True:
            cv2.putText(roi, str(id), (x,y-15), cv2.FONT_HERSHEY_PLAIN, 2, (255,0,0), 2)
            cv2.rectangle(roi,(x,y),(x+w,y+h),(0,255,0),3)
            # Calc center of rectangle for the marker and label.
            cx = (x + x + w) // 2
            cy = (y + y + h) // 2
            cv2.circle(roi, (cx,cy), 5, (0,255,0), 2)
            cv2.putText(roi, str(cx), (cx,cy-15), cv2.FONT_HERSHEY_PLAIN, 2, (0,255,0), 1)
    # Check timer for upload to Thingspeak.
    if time.perf_counter() - last_TS_transfer > timetouploadTS:
        print("SEND TO THINGSPEAK.com")
        # Cars per hour, extrapolated from the counts of this interval.
        # NOTE(review): assumes tracker.counter is a per-direction sequence
        # of two counts -- verify against tracker_BGsub.
        carrate = round(sum(tracker.counter) * 60*60 / timetouploadTS)
        field_url = "field1="+str(tracker.counter[0])+"&field2="+str(tracker.counter[1])+"&field3="+str(carrate)
        if transferTS(field_url) == True:
            # Only fold the interval counts into the total (and reset) after
            # a successful upload, so a failed upload retries with the data.
            tracker.counter_all += tracker.counter
            tracker.resetCounter()
            print("Counter all cars : "+str(tracker.counter_all))
        # Restart the interval timer whether or not the upload succeeded.
        last_TS_transfer = time.perf_counter()
    # End function transfer to TS
    if debugmode == True:
        # Show the counting line and the ROI rectangle on the full frame.
        cv2.rectangle(frame,[roixy[0],roixy[2]],[roixy[1],roixy[3]],
            color=(0,0,255),thickness=2)
        cv2.line(frame, (int((roixy[1]-roixy[0])/2)+roixy[0], 0),
            (int((roixy[1]-roixy[0])/2)+roixy[0], 720), (0, 0, 255), thickness=2)
        # Save annotated frame to output.avi ('out' exists only in debug mode).
        out.write(frame)
    # Time when we finish processing for this frame.
    new_frame_time = time.time()
    if debugmode == True:
        # Calculating the fps:
        # fps is the number of frames processed in the elapsed wall time;
        # there is typically an error of ~0.001 s in the measurement.
        fps = 1/(new_frame_time-prev_frame_time)
        print("fps : " + str(round(fps, 2)))
        prev_frame_time = new_frame_time