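"""
stich_it.py

Stitch two overlapping photographs ('11.jpg' and '12.jpg') into a panorama:
detect keypoints and descriptors (SIFT/SURF/BRISK/ORB), match them with brute
force or a k-NN ratio test, estimate a homography with RANSAC, warp one image
onto the other, and crop the result to its largest contour.
"""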
import cv2
import numpy as np
import matplotlib.pyplot as plt
import imageio  # only needed if reading the images from a URL (see the commented lines below)
import imutils

# Disable OpenCL to keep the OpenCV pipeline deterministic across systems
cv2.ocl.setUseOpenCL(False)

feature_extractor = 'orb'  # one of 'sift', 'surf', 'brisk', 'orb'
feature_matching = 'knn'   # one of 'bf', 'knn'
# Read the two input images; cv2.imread loads them in BGR channel order
img2 = cv2.imread('11.jpg')
# For online use:
#img2 = imageio.imread('https://github.com/Starks-AI/Image-Stitching/blob/main/images/11.jpg')
img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

img1 = cv2.imread('12.jpg')
#img1 = imageio.imread('https://github.com/Starks-AI/Image-Stitching/blob/main/images/12.jpg')
img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)

# OpenCV stores the channels as BGR, so convert to RGB before displaying with matplotlib
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, constrained_layout=False, figsize=(16, 9))
ax1.imshow(cv2.cvtColor(img2, cv2.COLOR_BGR2RGB))
ax1.set_xlabel("Image 1", fontsize=14)
ax2.imshow(cv2.cvtColor(img1, cv2.COLOR_BGR2RGB))
ax2.set_xlabel("Image 2", fontsize=14)
plt.show()
def detectAndDescribe(image, method):
    """Detect keypoints and extract feature descriptors with the chosen method."""
    if method == 'sift':
        # SIFT lives in the main module from OpenCV 4.4 onwards
        descriptor = cv2.SIFT_create()
    elif method == 'surf':
        # SURF requires an opencv-contrib build with the nonfree modules enabled
        descriptor = cv2.xfeatures2d.SURF_create()
    elif method == 'brisk':
        descriptor = cv2.BRISK_create()
    elif method == 'orb':
        descriptor = cv2.ORB_create()
    else:
        raise ValueError("Unknown feature extractor: {}".format(method))
    # get keypoints and descriptors
    (kps, features) = descriptor.detectAndCompute(image, None)
    return (kps, features)
kpsA, featuresA = detectAndDescribe(img2_gray, method=feature_extractor)
kpsB, featuresB = detectAndDescribe(img1_gray, method=feature_extractor)
# display the keypoints and features detected on both images
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(20, 8), constrained_layout=False)
ax1.imshow(cv2.drawKeypoints(img2_gray, kpsA, None, color=(0, 255, 0)))
ax1.set_xlabel("(a)", fontsize=14)
ax2.imshow(cv2.drawKeypoints(img1_gray, kpsB, None, color=(0, 255, 0)))
ax2.set_xlabel("(b)", fontsize=14)
plt.show()
def createMatcher(method, crossCheck):
    """Create a brute-force matcher with the norm appropriate for the descriptor type."""
    if method == 'sift' or method == 'surf':
        # SIFT/SURF produce float descriptors, compared with the L2 norm
        bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=crossCheck)
    elif method == 'orb' or method == 'brisk':
        # ORB/BRISK produce binary descriptors, compared with the Hamming norm
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=crossCheck)
    return bf
def matchKeyPointsBF(featuresA, featuresB, method):
    bf = createMatcher(method, crossCheck=True)
    # Match descriptors
    best_matches = bf.match(featuresA, featuresB)
    # Sort the matches by distance: the most similar pairs (smallest distance) come first
    rawMatches = sorted(best_matches, key=lambda x: x.distance)
    print("Raw matches (Brute force):", len(rawMatches))
    return rawMatches
def matchKeyPointsKNN(featuresA, featuresB, ratio, method):
    bf = createMatcher(method, crossCheck=False)
    # compute the raw matches and initialize the list of actual matches
    rawMatches = bf.knnMatch(featuresA, featuresB, 2)
    print("Raw matches (knn):", len(rawMatches))
    matches = []
    # loop over the raw matches and apply Lowe's ratio test: keep a match only if its
    # distance is clearly smaller than that of the second-best candidate
    for pair in rawMatches:
        if len(pair) == 2:
            m, n = pair
            if m.distance < n.distance * ratio:
                matches.append(m)
    return matches
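
# Note: the ratio used below (0.75) is a common choice for Lowe's ratio test,
# typically set somewhere between 0.7 and 0.8.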
print("Using: {} feature matcher".format(feature_matching))
fig = plt.figure(figsize=(20,8))
if feature_matching == 'bf':
matches = matchKeyPointsBF(featuresA, featuresB, method=feature_extractor)
img3 = cv2.drawMatches(img2,kpsA,img1,kpsB,matches[:100],
None,flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
elif feature_matching == 'knn':
matches = matchKeyPointsKNN(featuresA, featuresB, ratio=0.75, method=feature_extractor)
img3 = cv2.drawMatches(img2,kpsA,img1,kpsB,np.random.choice(matches,100),
None,flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
plt.imshow(img3)
plt.show()
def getHomography(kpsA, kpsB, featuresA, featuresB, matches, reprojThresh):
    # convert the keypoints to numpy arrays of (x, y) coordinates
    kpsA = np.float32([kp.pt for kp in kpsA])
    kpsB = np.float32([kp.pt for kp in kpsB])
    if len(matches) > 4:
        # construct the two sets of corresponding points
        ptsA = np.float32([kpsA[m.queryIdx] for m in matches])
        ptsB = np.float32([kpsB[m.trainIdx] for m in matches])
        # estimate the homography between the two point sets with RANSAC
        (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
                                         reprojThresh)
        return (matches, H, status)
    else:
        # a homography needs at least 4 correspondences
        return None
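
# Note: ptsA come from kpsA (the keypoints of img2) and ptsB from kpsB (img1), so the
# estimated H maps points from img2's image plane into img1's image plane. That is why
# img2 is the image warped by cv2.warpPerspective below, while img1 is pasted unchanged.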
M = getHomography(kpsA, kpsB, featuresA, featuresB, matches, reprojThresh=4)
if M is None:
    raise RuntimeError("Homography could not be computed - not enough matches!")
(matches, H, status) = M
print(H)

# Warp img2 into img1's frame on a canvas large enough to hold both images,
# then paste img1 (the reference image) into the top-left corner
width = img2.shape[1] + img1.shape[1]
height = img2.shape[0] + img1.shape[0]
result = cv2.warpPerspective(img2, H, (width, height))
result[0:img1.shape[0], 0:img1.shape[1]] = img1
# The warped canvas is mostly black; convert to grayscale and threshold it so that
# every non-black pixel (the panorama itself) becomes white
gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]
# find the external contours in the binary image
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# keep the contour with the largest area (the panorama itself)
c = max(cnts, key=cv2.contourArea)
# get a bounding box from that contour
(x, y, w, h) = cv2.boundingRect(c)
# crop the panorama to the bounding box, removing the unused black border
result = result[y:y + h, x:x + w]
# show the cropped panorama (converted from BGR to RGB for matplotlib)
plt.figure(figsize=(20, 10))
plt.imshow(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
plt.show()
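
# Optional: persist the stitched result to disk (a minimal sketch; 'panorama.jpg' is just
# an example output name). cv2.imwrite expects a BGR image, which `result` already is.
cv2.imwrite('panorama.jpg', result)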