-
Notifications
You must be signed in to change notification settings - Fork 6
Expand file tree
/
Copy pathStitching.py
More file actions
118 lines (97 loc) · 4.02 KB
/
Stitching.py
File metadata and controls
118 lines (97 loc) · 4.02 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
import cv2
import numpy as np
# np.random.seed(<int>) # you can use this line to set the fixed random seed if you are using np.random
import random
# random.seed(<int>) # you can use this line to set the fixed random seed if you are using random
def solution(left_img, right_img):
    """
    Stitch two images into a single panorama.

    :param left_img: BGR image (np.ndarray) forming the left side of the panorama
    :param right_img: BGR image (np.ndarray) forming the right side of the panorama
    :return: the stitched panorama image as a BGR np.ndarray
    """
    # Detect features in both images and find reliable correspondences.
    key_points1, descriptor1, key_points2, descriptor2 = get_keypoint(left_img, right_img)
    good_matches = match_keypoint(key_points1, key_points2, descriptor1, descriptor2)
    # Robustly estimate the homography mapping left-image points onto the right image.
    final_H = ransac(good_matches)

    rows1, cols1 = right_img.shape[:2]
    rows2, cols2 = left_img.shape[:2]
    # Corners of the right image (kept unwarped) and of the left image
    # after projection into the right image's coordinate frame.
    points1 = np.float32([[0, 0], [0, rows1], [cols1, rows1], [cols1, 0]]).reshape(-1, 1, 2)
    points = np.float32([[0, 0], [0, rows2], [cols2, rows2], [cols2, 0]]).reshape(-1, 1, 2)
    points2 = cv2.perspectiveTransform(points, final_H)
    list_of_points = np.concatenate((points1, points2), axis=0)

    # Bounding box of both images in the common coordinate frame
    # (padded by 0.5 px so int truncation never clips a corner).
    [x_min, y_min] = np.int32(list_of_points.min(axis=0).ravel() - 0.5)
    [x_max, y_max] = np.int32(list_of_points.max(axis=0).ravel() + 0.5)

    # Prepend a translation so the whole panorama lies in positive coordinates,
    # then warp the left image onto the output canvas.
    H_translation = np.array([[1, 0, -x_min], [0, 1, -y_min], [0, 0, 1]]).dot(final_H)
    output_img = cv2.warpPerspective(left_img, H_translation, (x_max - x_min, y_max - y_min))
    # Paste the unwarped right image on top at its translated position.
    output_img[-y_min:rows1 - y_min, -x_min:cols1 - x_min] = right_img
    return output_img
def get_keypoint(left_img, right_img):
    """
    Detect SIFT keypoints and compute their descriptors for both images.

    :param left_img: BGR image (np.ndarray)
    :param right_img: BGR image (np.ndarray)
    :return: (key_points1, descriptor1, key_points2, descriptor2) where the
             keypoints are cv2.KeyPoint lists and the descriptors are
             float32 matrices of shape (n_keypoints, 128)
    """
    l_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY)
    r_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY)
    # SIFT lives in mainline OpenCV since 4.4 (patent expired); the former
    # xfeatures2d.SURF_create path requires a non-free contrib build and
    # raises on standard installs, so it is avoided here.
    try:
        sift = cv2.SIFT_create()
    except AttributeError:  # older OpenCV builds (< 4.4)
        sift = cv2.xfeatures2d.SIFT_create()
    key_points1, descriptor1 = sift.detectAndCompute(l_img, None)
    key_points2, descriptor2 = sift.detectAndCompute(r_img, None)
    return key_points1, descriptor1, key_points2, descriptor2
def match_keypoint(key_points1, key_points2, descriptor1, descriptor2):
    """
    Match descriptors between the two images using Lowe's ratio test.

    For each left-image descriptor, its two nearest neighbours (Euclidean
    distance) among the right-image descriptors are found; the match is kept
    only when the closest is clearly better than the runner-up (ratio < 0.75).

    :param key_points1: keypoints of the left image (objects exposing .pt)
    :param key_points2: keypoints of the right image (objects exposing .pt)
    :param descriptor1: descriptor matrix of the left image, shape (n1, d)
    :param descriptor2: descriptor matrix of the right image, shape (n2, d)
    :return: list of [x_left, y_left, x_right, y_right] correspondences
    """
    good_matches = []
    d1 = np.asarray(descriptor1, dtype=np.float64)
    d2 = np.asarray(descriptor2, dtype=np.float64)
    # The ratio test needs at least two right-image candidates; the original
    # code crashed with a ValueError when len(descriptor2) < 2.
    if len(d1) == 0 or len(d2) < 2:
        return good_matches
    # Full (n1, n2) distance matrix in one vectorized pass instead of a
    # quadratic pure-Python loop. ||a-b||^2 = |a|^2 + |b|^2 - 2 a.b keeps the
    # intermediate at O(n1*n2) rather than O(n1*n2*d) memory.
    sq1 = np.sum(d1 * d1, axis=1)[:, None]
    sq2 = np.sum(d2 * d2, axis=1)[None, :]
    dists = np.sqrt(np.maximum(sq1 + sq2 - 2.0 * (d1 @ d2.T), 0.0))
    for i, row in enumerate(dists):
        # Stable sort reproduces the original tie-break (lower index wins).
        nearest, second = np.argsort(row, kind='stable')[:2]
        if row[nearest] < 0.75 * row[second]:
            left_pt = key_points1[i].pt
            right_pt = key_points2[nearest].pt
            good_matches.append([left_pt[0], left_pt[1], right_pt[0], right_pt[1]])
    return good_matches
def homography(points):
    """
    Estimate a 3x3 homography from point correspondences via the DLT.

    Each correspondence contributes two rows to the design matrix; the
    homography is the right singular vector of the smallest singular value,
    normalised so its bottom-right entry is 1.

    :param points: iterable of [x, y, X, Y] correspondences, (x, y) -> (X, Y)
    :return: 3x3 homography matrix (np.ndarray)
    """
    rows = []
    for x, y, X, Y in points:
        rows.append([x, y, 1, 0, 0, 0, -X * x, -X * y, -X])
        rows.append([0, 0, 0, x, y, 1, -Y * x, -Y * y, -Y])
    # The null-space direction of A is the flattened homography.
    _, _, vt = np.linalg.svd(np.array(rows))
    h = vt[-1].reshape(3, 3)
    return h / h[2, 2]
def ransac(good_pts):
    """
    Robustly estimate the best homography from noisy correspondences via RANSAC.

    Repeatedly fits a homography to a random minimal sample of 4
    correspondences and keeps the model with the largest inlier set
    (reprojection error below a fixed pixel threshold).

    :param good_pts: list of [x_left, y_left, x_right, y_right] correspondences
    :return: the best 3x3 homography found, or [] if fewer than 4
             correspondences are available
    """
    best_inliers = []
    final_H = []
    threshold = 5  # max reprojection error (pixels) for a point to be an inlier
    # A homography needs 4 correspondences; bail out instead of crashing.
    if len(good_pts) < 4:
        return final_H
    for _ in range(5000):
        # Sample WITHOUT replacement: random.choices (with replacement) could
        # pick the same correspondence twice, yielding a degenerate sample.
        sample = random.sample(good_pts, 4)
        H = homography(sample)
        inliers = []
        for pt in good_pts:
            src = np.array([pt[0], pt[1], 1.0]).reshape(3, 1)
            dst = np.array([pt[2], pt[3], 1.0]).reshape(3, 1)
            proj = np.dot(H, src)
            if proj[2] == 0:
                continue  # point maps to infinity under this (bad) model
            proj = proj / proj[2]
            if np.linalg.norm(dst - proj) < threshold:
                inliers.append(pt)
        if len(inliers) > len(best_inliers):
            best_inliers, final_H = inliers, H
            if len(best_inliers) == len(good_pts):
                break  # every correspondence is an inlier; cannot improve
    return final_H
if __name__ == "__main__":
    import os

    left_img = cv2.imread('left.jpg')
    right_img = cv2.imread('right.jpg')
    # cv2.imread returns None (no exception) for a missing/unreadable file;
    # fail loudly here instead of crashing deep inside solution().
    if left_img is None or right_img is None:
        raise FileNotFoundError("could not read 'left.jpg' and/or 'right.jpg'")
    result_img = solution(left_img, right_img)
    # cv2.imwrite fails silently when the target directory does not exist.
    os.makedirs('results', exist_ok=True)
    cv2.imwrite('results/task1_result.jpg', result_img)