-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathImageSegmentation.py
More file actions
325 lines (272 loc) · 11 KB
/
ImageSegmentation.py
File metadata and controls
325 lines (272 loc) · 11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
""" ========================================================================
* ImageSegmentation.py
*
* Created on: May 2016
* Author: susan
*
 * The ImageSegmentation object contains information about the segmenting of
* the image using Watershed. It matches segments to each other by finding the
* best match of the four points.
*
========================================================================="""
import math
import cv2
import numpy as np
import FeatureType
import FoxQueue
import OutputLogger
class ImageSegmentation(FeatureType.FeatureType):
    """Holds data about the watershed segmentation of the input picture.

    NOTE(review): the similarity methods below (evaluateSimilarity and its
    helpers) operate on ``self.lines``, which is never assigned anywhere in
    this class -- they appear to be carried over from a HoughLines feature.
    Confirm that the base FeatureType (or a subclass) provides ``lines``
    before relying on them here.
    """

    def __init__(self, image, logger):
        """Takes in an image and a logger.  Initializes the base FeatureType,
        sets up the instance variables, and runs the watershed computation."""
        FeatureType.FeatureType.__init__(self, image, logger, 120.0, 200.0)
        self.foundWatersheds = 0             # count of significant watersheds kept
        self.wshedImage = self.image.copy()  # working copy used for display
        self._makeColorMapping()
        self._computeWatershed()

    def _computeWatershed(self):
        """Set up helper images, run the watershed algorithm, limit to the most
        significant watersheds, and display the intermediate results."""
        # Smooth and morphologically simplify so Canny only finds strong
        # region boundaries.
        workImg = cv2.GaussianBlur(self.image, (5, 5), sigmaX = 0, sigmaY = 0)
        element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        simplified = cv2.morphologyEx(workImg, cv2.MORPH_OPEN,
                                      element, iterations = 2)
        simplified = cv2.morphologyEx(simplified, cv2.MORPH_CLOSE,
                                      element, iterations = 2)
        grayImg = cv2.cvtColor(simplified, cv2.COLOR_BGR2GRAY)
        # Kept for the currently-disabled addWeighted blend at the bottom.
        img_gray = cv2.cvtColor(grayImg, cv2.COLOR_GRAY2BGR)
        # Dilated edges mark "sure background"; pixels far from any edge
        # (via the distance transform) become "sure segment" seeds.
        cannyImage = cv2.Canny(grayImg, 10, 70, 3)
        cannyImage = cv2.morphologyEx(cannyImage, cv2.MORPH_CLOSE, element)
        cannyImage = cv2.dilate(cannyImage, element)
        sureBG = cannyImage
        nonCanny = cv2.bitwise_not(cannyImage)
        distMat = cv2.distanceTransform(nonCanny, cv2.DIST_L1, 3)
        dMat = distMat.astype(np.uint8)
        ret, sureSegments = cv2.threshold(dMat, dMat.mean(), 255, 0)
        unknown = cv2.subtract(sureBG, sureSegments)
        ret, markers = cv2.connectedComponents(sureSegments)
        markers = markers + 1        # shift labels so background is 1, not zero
        markers[unknown == 255] = 0  # label 0 = "unknown", to be filled by watershed
        if markers.max() > 255:
            # More than 255 regions cannot be scaled into a uint8 display image.
            print("WARNING: TOO MANY REGIONS!!@")
        markerImg = (markers * 255) / markers.max()
        markerImg = markerImg.astype(np.uint8)
        markerImg1 = cv2.applyColorMap(markerImg, cv2.COLORMAP_HOT)
        cv2.imshow("SureBack", sureBG)
        cv2.moveWindow("SureBack", 0, 0)
        cv2.imshow("SureFront", sureSegments)
        cv2.moveWindow("SureFront", 400, 0)
        cv2.imshow("Unknown", unknown)
        cv2.moveWindow("Unknown", 0, 300)
        cv2.imshow("Working RES", workImg)
        cv2.moveWindow("Working RES", 400, 300)
        cv2.imshow("Markers Before", markerImg1)
        cv2.moveWindow("Markers Before", 0, 600)
        cv2.waitKey(0)
        markersAfter = cv2.watershed(workImg, markers)
        # Watershed marks boundary pixels with -1; paint them white.
        workImg[markersAfter == -1] = [255, 255, 255]
        if markersAfter.max() > 255:
            print("WARNING: TOO MANY REGIONS!!@")
        markerImg = (markersAfter * 255) / markersAfter.max()
        markerImg = markerImg.astype(np.uint8)
        markerImg2 = cv2.applyColorMap(markerImg, cv2.COLORMAP_JET)
        cv2.imshow("Markers After", markerImg2)
        cv2.moveWindow("Markers After", 400, 600)
        # TODO(review): finish the pipeline -- the original sketched calls to
        # self._findWatersheds(), self._collectSignifWatersheds(), and an
        # addWeighted blend of self.wshedImage with img_gray were left disabled.

    def _makeColorMapping(self):
        """Generate a random color for each slot in a table matrix (500 random
        colors).  Not yet implemented; original author unsure why it is needed."""
        pass

    def displayFeaturePics(self, windowName, startX, startY):
        """Given a window name and a starting location on the screen, this creates
        an image that represents the feature and displays it.  Not yet implemented."""
        pass

    def evaluateSimilarity(self, otherHL):
        """Given another feature object with a ``lines`` attribute (presumably a
        HoughLines object -- see class NOTE), evaluate the similarity.
        Starts by making a priority queue for each line here.
        It then adds each line in the otherLines to queues for the lines that match.
        It then uses a greedy algorithm to pick matches (not optimal, but efficient).
        Returns the normalized similarity value."""
        numLines = len(self.lines)
        otherLines = otherHL.lines
        numOtherLines = len(otherLines)
        # One priority queue per local line, holding candidate matches from
        # the other feature keyed by comparison score (lower = better).
        matchQueues = [None] * numLines
        for thisPos in range(numLines):
            matchQueues[thisPos] = FoxQueue.PriorityQueue()
            for thatPos in range(numOtherLines):
                nextScore = self._compareLines(self.lines[thisPos], otherLines[thatPos])
                if nextScore >= 0.0:  # negative score means "no match possible"
                    matchQueues[thisPos].insert(thatPos, nextScore)
        finalMatches = self._chooseFinalMatches(matchQueues)
        # Each match covers one line on each side, hence the 2x weighting;
        # every unmatched line (either side) contributes a flat 200 penalty.
        matchCount = 0
        totalScore = 0.0
        for i in range(numLines):
            if finalMatches[i] is not None:
                matchCount += 2
                (other, score) = finalMatches[i]
                totalScore += 2 * score
        unmatched = (numLines + numOtherLines) - matchCount
        penalty = 200 * unmatched
        aveScore = (totalScore + penalty) / (numLines + numOtherLines)
        return self._normalizeSimValue(aveScore)

    def _compareLines(self, lineA, lineB):
        """A helper for evaluateSimilarity, it takes two lines (each
        [x1, y1, x2, y2]) and computes their similarity.  Returns the combined
        endpoint-distance plus angle-difference score (lower = more similar),
        or -1.0 if the lines are too different to be a match."""
        [xa1, ya1, xa2, ya2] = lineA
        [xb1, yb1, xb2, yb2] = lineB
        # Endpoint distance pairing A-left/B-left with A-right/B-right...
        leftLeftX = abs(xa1 - xb1)
        leftLeftY = abs(ya1 - yb1)
        rightRightX = abs(xa2 - xb2)
        rightRightY = abs(ya2 - yb2)
        totalOne = math.sqrt(leftLeftX ** 2 + leftLeftY ** 2 +
                             rightRightX ** 2 + rightRightY ** 2)
        # ...and the crossed pairing, in case the endpoints are swapped.
        leftRightX = abs(xa1 - xb2)
        leftRightY = abs(ya1 - yb2)
        rightLeftX = abs(xa2 - xb1)
        rightLeftY = abs(ya2 - yb1)
        totalTwo = math.sqrt(leftRightX ** 2 + leftRightY ** 2 +
                             rightLeftX ** 2 + rightLeftY ** 2)
        endPtDiff = min(totalOne, totalTwo)
        if endPtDiff > 100:  # if too different already, no match!
            return -1.0
        angleA = self._calcAngle(lineA)
        angleB = self._calcAngle(lineB)
        angleDiff = abs(angleA - angleB)
        if angleDiff > 30:  # if too different then no match!
            return -1.0
        return endPtDiff + angleDiff

    def _calcAngle(self, line):
        """Given a line [x1, y1, x2, y2], it computes the angle of the line in
        degrees, in the range [0, 180)."""
        xDiff = line[2] - line[0]  # points are sorted by x value
        yDiff = line[1] - line[3]  # top of screen is zero
        # BUG FIX: original used numpy.pi, but numpy is imported as np,
        # which raised a NameError; math.pi is equivalent and math is imported.
        angle = math.atan2(yDiff, xDiff) * (180 / math.pi)
        # angle is in the range [-90.0,90). xDiff can never be negative by
        # sorting of points but yDiff may be. When yDiff is negative, the
        # slope is negative, angle is in 2nd quadrant. We want to convert
        # angle to be the range from [0.0, 180) by converting the negative
        # angles to be positive, 2nd-quadrant angles
        if yDiff < 0:
            angle = angle + 180
        return angle

    def _chooseFinalMatches(self, matchQueues):
        """Does greedy matching of lines from this to lines from the other, using
        the array of priority queues.  Returns a list parallel to self.lines where
        each entry is (otherIndex, score) or None if unmatched."""
        done = False
        usedThis = set()
        usedOther = set()
        finalMatches = [None] * len(self.lines)
        while not done:
            i = 0
            bestThis = -1
            bestOther = -1
            bestScore = 0.0
            while i < len(self.lines):
                if matchQueues[i].isEmpty():
                    # if queue for current line i is empty, skip it and go to the next i
                    i += 1
                else:
                    (j, score) = matchQueues[i].firstElement()
                    if j in usedOther:
                        # if the other line was already matched to a different one of
                        # my line segments, then remove it from this queue and try again
                        # NOTE: No increment of i here!
                        matchQueues[i].delete()
                    elif (bestThis < 0) or (score < bestScore):
                        # if this is the best score seen so far (or the first valid one)
                        # then update trackers and go to the next i
                        bestThis = i
                        bestOther = j
                        bestScore = score
                    else:
                        # this isn't an option, leave it alone and go on
                        i += 1
            if bestThis == -1:
                # if while loop ends without finding anything, then there's nothing
                # else to do
                done = True
            else:
                # We found a best score. Mark this segment and its match as used,
                # add pair to finalMatches, and clear this segment's queue.
                usedThis.add(bestThis)
                usedOther.add(bestOther)
                finalMatches[bestThis] = (bestOther, bestScore)
                matchQueues[bestThis].clear()
        return finalMatches
if __name__ == '__main__':
    # Interactive driver: repeatedly prompt for a frame number, load the
    # corresponding picture, and run the watershed segmentation on it.
    # (A second-picture similarity comparison was sketched here originally
    # but is not active.)
    picPattern = "PioneerPicsSummer08/foo{:0>4d}.jpeg"
    outputLog = OutputLogger.OutputLogger(True, True)
    while True:
        reply = raw_input("Enter number of first picture: ")
        if reply == 'q':
            break
        picFile = picPattern.format(int(reply))
        frame = cv2.imread(picFile)
        cv2.imshow("Original", frame)
        cv2.moveWindow("Original", 0, 0)
        segmenter = ImageSegmentation(frame, outputLog)
        cv2.waitKey(30)
    cv2.destroyAllWindows()