I have used the RANSAC algorithm to find the homography and the warpPerspective operation to apply it to an image. Here is the code:
import cv2
import numpy as np

MIN_MATCH_COUNT = 10

# load both images as grayscale
img1 = cv2.imread('bus1.jpg', 0)
img2 = cv2.imread('bus2.jpg', 0)

# detect SIFT keypoints and compute descriptors
# (cv2.SIFT_create() in newer OpenCV versions)
sift = cv2.SIFT()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

# match descriptors with FLANN using a k-d tree index
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)

# keep only matches that pass Lowe's ratio test
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        good.append(m)

if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

    # estimate the homography with RANSAC (reprojection threshold 5.0)
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

    h, w = img1.shape
    result = cv2.warpPerspective(img2, M, (w, h))

    cv2.imshow('result', result)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
The output is not showing the whole image. What is wrong? How do I warp the image correctly?
You are computing the homography from img1 to img2, but you are applying it to img2, not img1.
Change result = cv2.warpPerspective(img2, M, (w,h))
to result = cv2.warpPerspective(img1, M, (2 * w, h))
(the 2 * w is used so that a larger part of the warped image is included in the result).
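Putting it together, here is a minimal sketch of the corrected warping step, reusing img1 and the homography M from the code above (the doubled width is just a rough choice of output canvas size, not something dictated by the algorithm):

h, w = img1.shape
# M maps points of img1 onto img2, so img1 is the image that must be warped
# dsize is given as (width, height); doubling the width keeps more of the warped image visible
result = cv2.warpPerspective(img1, M, (2 * w, h))
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()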