############## Playing Card Detector Functions ###############
#
# Author: Evan Juras
# Date: 9/5/17
# Description: Functions and classes for CardDetector.py that perform
#              various steps of the card detection algorithm

# Import necessary packages
import numpy as np
import cv2
import time

### Constants ###

# Adaptive threshold levels
BKG_THRESH = 60
CARD_THRESH = 30

# Width and height of card corner, where rank and suit are
CORNER_WIDTH = 32
CORNER_HEIGHT = 84

# Dimensions of rank train images
RANK_WIDTH = 70
RANK_HEIGHT = 125

# Dimensions of suit train images
SUIT_WIDTH = 70
SUIT_HEIGHT = 100

# Maximum allowable difference between a query image and a train image
# for the match to be accepted
RANK_DIFF_MAX = 2000
SUIT_DIFF_MAX = 700

# Contour area limits (in pixels) for a contour to be considered a card
CARD_MAX_AREA = 120000
CARD_MIN_AREA = 25000

font = cv2.FONT_HERSHEY_SIMPLEX

### Structures to hold query card and train card information ###

class Query_card:
    """Structure to store information about query cards in the camera image."""

    def __init__(self):
        self.contour = [] # Contour of card
        self.width, self.height = 0, 0 # Width and height of card
        self.corner_pts = [] # Corner points of card
        self.center = [] # Center point of card
        self.warp = [] # 200x300, flattened, grayed, blurred image
        self.rank_img = [] # Thresholded, sized image of card's rank
        self.suit_img = [] # Thresholded, sized image of card's suit
        self.best_rank_match = "Unknown" # Best matched rank
        self.best_suit_match = "Unknown" # Best matched suit
        self.rank_diff = 0 # Difference between rank image and best matched train rank image
        self.suit_diff = 0 # Difference between suit image and best matched train suit image

class Train_ranks:
    """Structure to store information about train rank images."""

    def __init__(self):
        self.img = [] # Thresholded, sized rank image loaded from hard drive
        self.name = "Placeholder"

class Train_suits:
    """Structure to store information about train suit images."""

    def __init__(self):
        self.img = [] # Thresholded, sized suit image loaded from hard drive
        self.name = "Placeholder"

### Functions ###

def load_ranks(filepath):
    """Loads rank images from directory specified by filepath. Stores
    them in a list of Train_ranks objects."""

    train_ranks = []
    i = 0

    for Rank in ['Ace','Two','Three','Four','Five','Six','Seven',
                 'Eight','Nine','Ten','Jack','Queen','King']:
        train_ranks.append(Train_ranks())
        train_ranks[i].name = Rank
        filename = Rank + '.jpg'
        train_ranks[i].img = cv2.imread(filepath+filename, cv2.IMREAD_GRAYSCALE)
        i = i + 1

    return train_ranks

def load_suits(filepath):
    """Loads suit images from directory specified by filepath. Stores
    them in a list of Train_suits objects."""

    train_suits = []
    i = 0

    for Suit in ['Spades','Diamonds','Clubs','Hearts']:
        train_suits.append(Train_suits())
        train_suits[i].name = Suit
        filename = Suit + '.jpg'
        train_suits[i].img = cv2.imread(filepath+filename, cv2.IMREAD_GRAYSCALE)
        i = i + 1

    return train_suits
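
# Note on training data: the two loaders above simply read '<Rank>.jpg' and
# '<Suit>.jpg' from the given directory, so that directory is expected to hold
# one thresholded, pre-sized image per rank (Ace.jpg ... King.jpg) and per suit
# (Spades.jpg, Diamonds.jpg, Clubs.jpg, Hearts.jpg). The directory name passed
# in (e.g. 'Card_Imgs/' -- an illustrative name, not dictated by this module)
# should include the trailing separator, since filepath and filename are joined
# by plain string concatenation.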

def preprocess_image(image):
    """Returns a grayed, blurred, and adaptively thresholded camera image."""

    gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray,(5,5),0)

    # The best threshold level depends on the ambient lighting conditions.
    # For bright lighting, a high threshold must be used to isolate the cards
    # from the background. For dim lighting, a low threshold must be used.
    # To make the card detector independent of lighting conditions, the
    # following adaptive threshold method is used.
    #
    # A background pixel at the center top of the image is sampled to determine
    # its intensity. The threshold is set at BKG_THRESH (60) higher than that,
    # which allows the threshold level to adapt to the lighting conditions.
    img_h, img_w = np.shape(image)[:2]
    bkg_level = gray[int(img_h/100)][int(img_w/2)]
    thresh_level = bkg_level + BKG_THRESH

    retval, thresh = cv2.threshold(blur,thresh_level,255,cv2.THRESH_BINARY)

    return thresh

def find_cards(thresh_image):
    """Finds all card-sized contours in a thresholded camera image.
    Returns the contours sorted from largest to smallest, along with a
    flag array marking which of those contours are cards."""

    # Find contours and sort their indices by contour size.
    # (Slicing with [-2:] keeps this compatible with both the OpenCV 3.x
    # and 4.x return signatures of findContours.)
    cnts, hier = cv2.findContours(thresh_image,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)[-2:]
    index_sort = sorted(range(len(cnts)), key=lambda i : cv2.contourArea(cnts[i]), reverse=True)

    # If there are no contours, do nothing
    if len(cnts) == 0:
        return [], []

    # Otherwise, initialize empty sorted contour and hierarchy lists
    cnts_sort = []
    hier_sort = []
    cnt_is_card = np.zeros(len(cnts),dtype=int)

    # Fill empty lists with sorted contour and sorted hierarchy. Now,
    # the indices of the contour list still correspond with those of
    # the hierarchy list. The hierarchy array can be used to check if
    # the contours have parents or not.
    for i in index_sort:
        cnts_sort.append(cnts[i])
        hier_sort.append(hier[0][i])

    # Determine which of the contours are cards by applying the
    # following criteria: 1) smaller area than the maximum card size,
    # 2) bigger area than the minimum card size, 3) have no parents,
    # and 4) have four corners
    for i in range(len(cnts_sort)):
        size = cv2.contourArea(cnts_sort[i])
        peri = cv2.arcLength(cnts_sort[i],True)
        approx = cv2.approxPolyDP(cnts_sort[i],0.01*peri,True)

        if ((size < CARD_MAX_AREA) and (size > CARD_MIN_AREA)
            and (hier_sort[i][3] == -1) and (len(approx) == 4)):
            cnt_is_card[i] = 1

    return cnts_sort, cnt_is_card

def preprocess_card(contour, image):
    """Uses contour to find information about the query card. Isolates rank
    and suit images from the card."""

    # Initialize new Query_card object
    qCard = Query_card()

    qCard.contour = contour

    # Find perimeter of card and use it to approximate corner points
    peri = cv2.arcLength(contour,True)
    approx = cv2.approxPolyDP(contour,0.01*peri,True)
    pts = np.float32(approx)
    qCard.corner_pts = pts

    # Find width and height of card's bounding rectangle
    x,y,w,h = cv2.boundingRect(contour)
    qCard.width, qCard.height = w, h

    # Find center point of card by taking x and y average of the four corners.
    average = np.sum(pts, axis=0)/len(pts)
    cent_x = int(average[0][0])
    cent_y = int(average[0][1])
    qCard.center = [cent_x, cent_y]

    # Warp card into 200x300 flattened image using perspective transform
    qCard.warp = flattener(image, pts, w, h)

    # Grab corner of warped card image and do a 4x zoom
    Qcorner = qCard.warp[0:CORNER_HEIGHT, 0:CORNER_WIDTH]
    Qcorner_zoom = cv2.resize(Qcorner, (0,0), fx=4, fy=4)

    # Sample known white pixel intensity to determine good threshold level
    white_level = Qcorner_zoom[15,int((CORNER_WIDTH*4)/2)]
    thresh_level = white_level - CARD_THRESH
    if (thresh_level <= 0):
        thresh_level = 1
    retval, query_thresh = cv2.threshold(Qcorner_zoom, thresh_level, 255, cv2.THRESH_BINARY_INV)

    # Split into top and bottom half (top shows rank, bottom shows suit)
    Qrank = query_thresh[20:185, 0:128]
    Qsuit = query_thresh[186:336, 0:128]

    # Find rank contour and bounding rectangle, isolate and find largest contour
    Qrank_cnts, hier = cv2.findContours(Qrank, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    Qrank_cnts = sorted(Qrank_cnts, key=cv2.contourArea, reverse=True)

    # Find bounding rectangle for largest contour, use it to resize query rank
    # image to match dimensions of the train rank image
    if len(Qrank_cnts) != 0:
        x1,y1,w1,h1 = cv2.boundingRect(Qrank_cnts[0])
        Qrank_roi = Qrank[y1:y1+h1, x1:x1+w1]
        Qrank_sized = cv2.resize(Qrank_roi, (RANK_WIDTH,RANK_HEIGHT), 0, 0)
        qCard.rank_img = Qrank_sized

    # Find suit contour and bounding rectangle, isolate and find largest contour
    Qsuit_cnts, hier = cv2.findContours(Qsuit, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    Qsuit_cnts = sorted(Qsuit_cnts, key=cv2.contourArea, reverse=True)

    # Find bounding rectangle for largest contour, use it to resize query suit
    # image to match dimensions of the train suit image
    if len(Qsuit_cnts) != 0:
        x2,y2,w2,h2 = cv2.boundingRect(Qsuit_cnts[0])
        Qsuit_roi = Qsuit[y2:y2+h2, x2:x2+w2]
        Qsuit_sized = cv2.resize(Qsuit_roi, (SUIT_WIDTH, SUIT_HEIGHT), 0, 0)
        qCard.suit_img = Qsuit_sized

    return qCard

def match_card(qCard, train_ranks, train_suits):
    """Finds best rank and suit matches for the query card. Differences
    the query card rank and suit images with the train rank and suit images.
    The best match is the rank or suit image that has the least difference."""

    best_rank_match_diff = 10000
    best_suit_match_diff = 10000
    best_rank_match_name = "Unknown"
    best_suit_match_name = "Unknown"
    i = 0

    # If no contours were found in the query card in the preprocess_card function,
    # the img size is zero, so skip the differencing process
    # (card will be left as Unknown)
    if (len(qCard.rank_img) != 0) and (len(qCard.suit_img) != 0):

        # Difference the query card rank image from each of the train rank images,
        # and store the result with the least difference
        for Trank in train_ranks:

            diff_img = cv2.absdiff(qCard.rank_img, Trank.img)
            rank_diff = int(np.sum(diff_img)/255)

            if rank_diff < best_rank_match_diff:
                best_rank_diff_img = diff_img
                best_rank_match_diff = rank_diff
                best_rank_name = Trank.name

        # Same process with suit images
        for Tsuit in train_suits:

            diff_img = cv2.absdiff(qCard.suit_img, Tsuit.img)
            suit_diff = int(np.sum(diff_img)/255)

            if suit_diff < best_suit_match_diff:
                best_suit_diff_img = diff_img
                best_suit_match_diff = suit_diff
                best_suit_name = Tsuit.name

    # Combine best rank match and best suit match to get query card's identity.
    # If the best matches have too high of a difference value, card identity
    # is still Unknown
    if (best_rank_match_diff < RANK_DIFF_MAX):
        best_rank_match_name = best_rank_name

    if (best_suit_match_diff < SUIT_DIFF_MAX):
        best_suit_match_name = best_suit_name

    # Return the identity of the card and the quality of the suit and rank match
    return best_rank_match_name, best_suit_match_name, best_rank_match_diff, best_suit_match_diff

def draw_results(image, qCard):
    """Draw the card name and center point on the camera image."""

    x = qCard.center[0]
    y = qCard.center[1]
    cv2.circle(image,(x,y),5,(255,0,0),-1)

    rank_name = qCard.best_rank_match
    suit_name = qCard.best_suit_match

    # Draw card name twice, so letters have black outline
    cv2.putText(image,(rank_name+' of'),(x-60,y-10),font,1,(0,0,0),3,cv2.LINE_AA)
    cv2.putText(image,(rank_name+' of'),(x-60,y-10),font,1,(50,200,200),2,cv2.LINE_AA)
    cv2.putText(image,suit_name,(x-60,y+25),font,1,(0,0,0),3,cv2.LINE_AA)
    cv2.putText(image,suit_name,(x-60,y+25),font,1,(50,200,200),2,cv2.LINE_AA)

    # Can draw difference value for troubleshooting purposes
    # (commented out during normal operation)
    #r_diff = str(qCard.rank_diff)
    #s_diff = str(qCard.suit_diff)
    #cv2.putText(image,r_diff,(x+20,y+30),font,0.5,(0,0,255),1,cv2.LINE_AA)
    #cv2.putText(image,s_diff,(x+20,y+50),font,0.5,(0,0,255),1,cv2.LINE_AA)

    return image

def flattener(image, pts, w, h):
    """Flattens an image of a card into a top-down 200x300 perspective.
    Returns the flattened, re-sized, grayed image.
    See www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/"""

    temp_rect = np.zeros((4,2), dtype = "float32")

    s = np.sum(pts, axis = 2)
    tl = pts[np.argmin(s)]
    br = pts[np.argmax(s)]

    diff = np.diff(pts, axis = -1)
    tr = pts[np.argmin(diff)]
    bl = pts[np.argmax(diff)]

    # Need to create an array listing points in order of
    # [top left, top right, bottom right, bottom left]
    # before doing the perspective transform
    if w <= 0.8*h: # If card is vertically oriented
        temp_rect[0] = tl
        temp_rect[1] = tr
        temp_rect[2] = br
        temp_rect[3] = bl

    if w >= 1.2*h: # If card is horizontally oriented
        temp_rect[0] = bl
        temp_rect[1] = tl
        temp_rect[2] = tr
        temp_rect[3] = br

    # If the card is 'diamond' oriented, a different algorithm
    # has to be used to identify which point is top left, top right,
    # bottom left, and bottom right.
    if w > 0.8*h and w < 1.2*h: # If card is diamond oriented
        # If furthest left point is higher than furthest right point,
        # card is tilted to the left.
        if pts[1][0][1] <= pts[3][0][1]:
            # If card is tilted to the left, approxPolyDP returns points
            # in this order: top right, top left, bottom left, bottom right
            temp_rect[0] = pts[1][0] # Top left
            temp_rect[1] = pts[0][0] # Top right
            temp_rect[2] = pts[3][0] # Bottom right
            temp_rect[3] = pts[2][0] # Bottom left

        # If furthest left point is lower than furthest right point,
        # card is tilted to the right
        if pts[1][0][1] > pts[3][0][1]:
            # If card is tilted to the right, approxPolyDP returns points
            # in this order: top left, bottom left, bottom right, top right
            temp_rect[0] = pts[0][0] # Top left
            temp_rect[1] = pts[3][0] # Top right
            temp_rect[2] = pts[2][0] # Bottom right
            temp_rect[3] = pts[1][0] # Bottom left

    maxWidth = 200
    maxHeight = 300

    # Create destination array, calculate perspective transform matrix,
    # and warp card image
    dst = np.array([[0,0],[maxWidth-1,0],[maxWidth-1,maxHeight-1],[0, maxHeight-1]], np.float32)
    M = cv2.getPerspectiveTransform(temp_rect,dst)
    warp = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
    warp = cv2.cvtColor(warp,cv2.COLOR_BGR2GRAY)

    return warp
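

# --------------------------------------------------------------------------- #
# Minimal smoke test (not part of the original module): a hypothetical sketch
# of how the functions above chain together on a single still image, assuming
# a test photo and training images exist at the paths named below.
# CardDetector.py is the real caller and handles the live camera loop/display.
if __name__ == '__main__':

    train_ranks = load_ranks('Card_Imgs/')    # assumed training image directory
    train_suits = load_suits('Card_Imgs/')

    image = cv2.imread('test_frame.jpg')      # assumed test photo of cards on a dark background
    if image is None:
        raise SystemExit('test_frame.jpg not found; point cv2.imread at your own test photo')

    # Threshold the frame, find card-sized contours, then identify each card
    thresh = preprocess_image(image)
    cnts_sort, cnt_is_card = find_cards(thresh)

    for i in range(len(cnts_sort)):
        if cnt_is_card[i] == 1:
            qCard = preprocess_card(cnts_sort[i], image)
            (qCard.best_rank_match, qCard.best_suit_match,
             qCard.rank_diff, qCard.suit_diff) = match_card(qCard, train_ranks, train_suits)
            image = draw_results(image, qCard)
            print(qCard.best_rank_match + ' of ' + qCard.best_suit_match)

    cv2.imwrite('detected_cards.jpg', image)  # annotated output image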