-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathboard_preparator.py
206 lines (152 loc) · 7.71 KB
/
board_preparator.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
import cv2
import numpy as np
import utils
class BoardPreparator():
    """Prepares raw video frames of a game board for analysis.

    Responsibilities: per-frame preprocessing (resize, optional blur,
    CLAHE equalization), ORB-based alignment of every frame to the first
    frame, splitting the board into left/right halves at a detected
    separating line, and KNN background subtraction seeded with an
    empty-board reference image.
    """

    def __init__(self, empty_board_path, debug=True):
        """Load and preprocess the empty-board reference image.

        Args:
            empty_board_path: path to an image of the board without pieces.
            debug: debug flag; stored for callers, not used internally here.
        """
        self.debug = debug
        self.MAX_FEATURES = 500          # ORB keypoint budget
        self.GOOD_MATCH_PERCENT = 0.15   # fraction of best matches kept
        self.orb = cv2.ORB_create(self.MAX_FEATURES)
        self.blur = False                # optional pre-blur, off by default
        self.equalize = "local"          # any truthy value enables CLAHE
        self.empty_board_image = cv2.imread(empty_board_path)
        self.empty_board_color, self.empty_board_gray = self.preprocess_each_frame(
            self.empty_board_image)

    def find_separating_line(self, frame):
        """Return the x coordinate dividing the two halves of the board.

        Convolves the blue channel with a positive 10x10 box filter and
        the red channel with a negative one; columns where the summed
        response stays strongly positive over many rows mark the divider.
        """
        g = np.ones((10, 10)) / 100
        g2 = -np.ones((10, 10)) / 100
        # NOTE: the original computed both filters twice; once is enough.
        fg_cv = cv2.filter2D(frame[:, :, 0], -1, g)
        fg_cv2 = cv2.filter2D(frame[:, :, 2].astype(g2.dtype), -1, g2)
        # Per column: count pixels whose clipped combined response exceeds 20.
        filtered = (np.maximum(np.zeros_like(fg_cv),
                               fg_cv + fg_cv2) > 20).sum(axis=0)
        # First column where that count exceeds 150, shifted 10 px left.
        line_x = np.where(filtered > 150)[0][0]
        return line_x - 10

    def equalize_color_image(self, frame):
        """Equalize a BGR image by applying CLAHE to the LAB lightness plane."""
        lab = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)
        lab_planes = np.array(cv2.split(lab))
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(3, 3))
        lab_planes[0] = clahe.apply(lab_planes[0])  # equalize L channel only
        lab = cv2.merge(lab_planes)
        return cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)

    def separate(self, frame):
        """Split a frame into (left, right) halves at the separating line."""
        return frame[:, :self.intersecting_line_x], frame[:, self.intersecting_line_x:]

    def preprocess_each_frame(self, frame):
        """Resize (x0.4), optionally blur and equalize; return (color, gray)."""
        frame = cv2.resize(frame, None, fx=0.4, fy=0.4)
        if self.blur:
            frame = cv2.GaussianBlur(frame, (3, 3), 0)
        if self.equalize:  # truthy -> CLAHE color equalization
            frame = self.equalize_color_image(frame)
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        return frame, frame_gray

    def alignImageToFirstFrame(self, im_gray, im_color):
        """Warp im_color onto the first frame via an ORB-feature homography.

        Returns the registered color image, or the input unchanged when
        no homography can be estimated.
        """
        # Reuse the detector built in __init__ (identical parameters).
        keypoints1, descriptors1 = self.orb.detectAndCompute(im_gray, None)
        matcher = cv2.DescriptorMatcher_create(
            cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
        matches = list(matcher.match(
            descriptors1, self.first_frame_desc, None))
        # Keep only the best GOOD_MATCH_PERCENT of matches by distance.
        matches.sort(key=lambda m: m.distance)
        numGoodMatches = int(len(matches) * self.GOOD_MATCH_PERCENT)
        matches = matches[:numGoodMatches]
        # Extract matched point coordinates in both images.
        points1 = np.zeros((len(matches), 2), dtype=np.float32)
        points2 = np.zeros((len(matches), 2), dtype=np.float32)
        for i, match in enumerate(matches):
            points1[i, :] = keypoints1[match.queryIdx].pt
            points2[i, :] = self.first_frame_key[match.trainIdx].pt
        h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
        if h is None:
            # Estimation failed (too few/degenerate matches); returning the
            # unaligned frame beats crashing in warpPerspective.
            return im_color
        height, width, channels = self.first_frame_color.shape
        return cv2.warpPerspective(im_color, h, (width, height))

    def initialize_background_subtractor(self, images=None):
        """Create a KNN background subtractor seeded with the empty board.

        Args:
            images: optional (color, gray) pair for reinitialization; None
                means first-time initialization from the stored empty-board
                image, aligned to the first frame.
        """
        foreground_knn = cv2.createBackgroundSubtractorKNN()
        if images is None:  # first-ever initialization
            empty_board_color = self.alignImageToFirstFrame(
                self.empty_board_gray, self.empty_board_color)
        else:  # reinitialization path; kept for API but unused (performed worse)
            empty_board_color, _ = images
        # Feed the empty board repeatedly so it becomes the background model.
        for _ in range(10):
            foreground_knn.apply(empty_board_color)
        return foreground_knn

    def initialize_first_frame(self, first_frame):
        """Store the first frame, its ORB features and the board split.

        Returns:
            (right_part_color, right_part_gray): the right half of the
            preprocessed first frame.
        """
        self.first_frame_color, self.first_frame_gray = self.preprocess_each_frame(
            first_frame)
        self.height, self.width = self.first_frame_gray.shape
        self.first_frame_key, self.first_frame_desc = self.orb.detectAndCompute(
            self.first_frame_gray, None)
        self.intersecting_line_x = self.find_separating_line(
            self.first_frame_color)
        self.foreground_knn = self.initialize_background_subtractor()
        _, right_part_color = self.separate(self.first_frame_color)
        _, right_part_gray = self.separate(self.first_frame_gray)
        return right_part_color, right_part_gray

    def reinitialize_first_frame(self, frame_color, frame_gray):
        """Refresh the alignment reference frame and its ORB features."""
        self.first_frame_color, self.first_frame_gray = frame_color, frame_gray
        self.first_frame_key, self.first_frame_desc = self.orb.detectAndCompute(
            self.first_frame_gray, None)
        # NOTE: subtractor reinitialization was tried but performed worse:
        # self.foreground_knn = self.initialize_background_subtractor((frame_color, frame_gray))

    def get_mask_of_left_mess(self):
        """Build self.mask, zeroing the messy background left of a Hough line."""
        edges = cv2.Canny(cv2.medianBlur(
            self.empty_board_color, 3), 200, 250, apertureSize=3)
        linesP = cv2.HoughLinesP(edges, 1, np.pi / 180, 100, None, 400, 20)
        if linesP is not None:
            line = linesP[0][0]
        else:
            # Fallback matching the reference image: Hough results differed
            # between machines even with identical input and OpenCV version.
            line = np.array([14, 424, 100, 14])
        # Interpolate a per-row boundary column between the line endpoints.
        # Plain int dtype: the original uint8 cast wrapped for coords > 260.
        bounding_points = np.linspace(
            line[2] - 5, line[3] - 5, self.empty_board_color.shape[0]).astype(int)
        mask = np.ones_like(self.empty_board_gray)
        for i, boundary in enumerate(bounding_points):
            mask[i, :boundary] = 0  # zero everything left of the boundary
        self.mask = mask

    def zero_mask(self, box):
        """Zero out self.mask inside an (x, y, w, h) box."""
        x, y, w, h = box
        self.mask[y:y+h, x:x+w] = 0

    def get_foreground(self, frame_color):
        """Return (left, right) binary foreground masks for a color frame."""
        foreground = self.foreground_knn.apply(
            cv2.GaussianBlur(frame_color, (3, 3), 0))
        # Morphological opening removes speckle; the static mask removes the
        # known-background region on the left of the board.
        foreground = cv2.morphologyEx(foreground, cv2.MORPH_OPEN, np.ones(
            (7, 7), dtype=np.uint8)) * self.mask
        # Keep only high-confidence foreground (value > 200).
        foreground = ((foreground > 200) * 255).astype(np.uint8)
        return self.separate(foreground)

    def process(self, frame, current_frame):
        """Main per-frame entry point.

        Preprocesses and aligns the frame, then returns
        (left_color, left_foreground, right_color, right_foreground).
        The alignment reference is refreshed every 300 frames.
        """
        frame_color, frame_gray = self.preprocess_each_frame(frame)
        frame_color = self.alignImageToFirstFrame(frame_gray, frame_color)
        left_part_color, right_part_color = self.separate(frame_color)
        left_foreground, right_foreground = self.get_foreground(frame_color)
        if current_frame % 300 == 0:
            self.reinitialize_first_frame(
                frame_color, cv2.cvtColor(frame_color, cv2.COLOR_BGR2GRAY))
        return left_part_color, left_foreground, right_part_color, right_foreground

    def initialize(self, first_frame):
        """One-time setup: build the left-mess mask, then init the first frame."""
        self.get_mask_of_left_mess()
        return self.initialize_first_frame(first_frame)