Tags: python, opencv, image-processing, computer-vision, homography

remove black dashed lines from image stitching


I am stitching multiple images. While stitching two images, a dashed black line appears along the seam, as shown below.

[stitched result showing the dashed black line]

Does anyone know how I can remove or get rid of this black dashed line?

Here is the main part of the stitching code. It stitches two images and is then called again with the result and the next image, until all images have been processed (the outer loop is sketched after the code block below):

import copy
import cv2
import numpy as np
import util  # helper module providing display() and drawMatches()

detector = cv2.xfeatures2d.SURF_create(400)
gray1 = cv2.cvtColor(image1,cv2.COLOR_BGR2GRAY)
ret1, mask1 = cv2.threshold(gray1,1,255,cv2.THRESH_BINARY)
kp1, descriptors1 = detector.detectAndCompute(gray1,mask1)

gray2 = cv2.cvtColor(image2,cv2.COLOR_BGR2GRAY)
ret2, mask2 = cv2.threshold(gray2,1,255,cv2.THRESH_BINARY)
kp2, descriptors2 = detector.detectAndCompute(gray2,mask2)

keypoints1Im = cv2.drawKeypoints(image1, kp1, None, color=(0,0,255), flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)
util.display("KEYPOINTS",keypoints1Im)
keypoints2Im = cv2.drawKeypoints(image2, kp2, None, color=(0,0,255), flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)
util.display("KEYPOINTS",keypoints2Im)

matcher = cv2.BFMatcher()
matches = matcher.knnMatch(descriptors2,descriptors1, k=2)

good = []
for m, n in matches:
    if m.distance < 0.55 * n.distance:
        good.append(m)

print (str(len(good)) + " Matches were Found")

if len(good) <= 10:
    return image1

matches = copy.copy(good)

matchDrawing = util.drawMatches(gray2,kp2,gray1,kp1,matches)
util.display("matches",matchDrawing)

src_pts = np.float32([ kp2[m.queryIdx].pt for m in matches ]).reshape(-1,1,2)
dst_pts = np.float32([ kp1[m.trainIdx].pt for m in matches ]).reshape(-1,1,2)

# try a partial affine (rigid) transform first; fall back to a homography if it fails
A = cv2.estimateRigidTransform(src_pts,dst_pts,fullAffine=False)

if A is None:
    HomogResult = cv2.findHomography(src_pts,dst_pts,method=cv2.RANSAC)
    H = HomogResult[0]

height1,width1 = image1.shape[:2]
height2,width2 = image2.shape[:2]

corners1 = np.float32(([0,0],[0,height1],[width1,height1],[width1,0]))
corners2 = np.float32(([0,0],[0,height2],[width2,height2],[width2,0]))

warpedCorners2 = np.zeros((4,2))

# project the corners of image2 into the frame of image1 using A (affine) or H (homography)
for i in range(0,4):
    cornerX = corners2[i,0]
    cornerY = corners2[i,1]
    if A is not None: #check if we're working with affine transform or perspective transform
        warpedCorners2[i,0] = A[0,0]*cornerX + A[0,1]*cornerY + A[0,2]
        warpedCorners2[i,1] = A[1,0]*cornerX + A[1,1]*cornerY + A[1,2]
    else:
        warpedCorners2[i,0] = (H[0,0]*cornerX + H[0,1]*cornerY + H[0,2])/(H[2,0]*cornerX + H[2,1]*cornerY + H[2,2])
        warpedCorners2[i,1] = (H[1,0]*cornerX + H[1,1]*cornerY + H[1,2])/(H[2,0]*cornerX + H[2,1]*cornerY + H[2,2])

allCorners = np.concatenate((corners1, warpedCorners2), axis=0)

[xMin, yMin] = np.int32(allCorners.min(axis=0).ravel() - 0.5)
[xMax, yMax] = np.int32(allCorners.max(axis=0).ravel() + 0.5)

# translate so that both images fit fully inside the new canvas
translation = np.float32(([1,0,-1*xMin],[0,1,-1*yMin],[0,0,1]))
warpedResImg = cv2.warpPerspective(image1, translation, (xMax-xMin, yMax-yMin))


if A is None:
    fullTransformation = np.dot(translation,H) #again, images must be translated to be 100% visible in new canvas
    warpedImage2 = cv2.warpPerspective(image2, fullTransformation, (xMax-xMin, yMax-yMin))

else:
    warpedImageTemp = cv2.warpPerspective(image2, translation, (xMax-xMin, yMax-yMin))
    warpedImage2 = cv2.warpAffine(warpedImageTemp, A, (xMax-xMin, yMax-yMin))

# binary merge: take warpedImage2 wherever it is non-zero, otherwise warpedResImg
result = np.where(warpedImage2 != 0, warpedImage2, warpedResImg)
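
For reference, the code above lives in a function that gets called repeatedly, roughly like this (a simplified sketch; stitchPair is a placeholder name for the function containing the code above):

result = images[0]
for nextImage in images[1:]:
    result = stitchPair(result, nextImage)  # stitch the running mosaic with the next image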

Please help me out. Thanks.

Edit:

Input image 1 (resized):

[input image 1]

Input image 2 (resized):

[input image 2]

Result (resized):

[stitched result with dashed black line]

Update:

Result after @fmw42's answer:

[result after applying the answer below]


Solution

  • The problem arises because, when you do the warping, the border pixels of the image get resampled/interpolated with the black background pixels. This leaves a border of varying non-zero values around your warped image, which shows up as your dashed dark line when merged with the other image, because your merge test is binary (tested with != 0).
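
    To see what is happening, here is a minimal illustration (assuming the default bilinear interpolation and black border value): shifting a bright strip by half a pixel blends the edge pixels with the black background, leaving dark but non-zero pixels that still pass the != 0 test.

        import numpy as np
        import cv2

        row = np.full((1, 5), 200, dtype=np.uint8)   # a 1x5 strip of bright pixels
        M = np.float32([[1, 0, 0.5], [0, 1, 0]])     # shift right by half a pixel
        shifted = cv2.warpAffine(row, M, (6, 1))     # INTER_LINEAR against a black border
        print(shifted)                               # edge pixels come out near 100: dark, but != 0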

    So one simple thing you can do is mask the warped image in Python/OpenCV to get its bounds against the black background, and then erode the mask. Then use the mask to erode the image boundary. This can be achieved with the following changes to your last lines of code:

    if A is None:
        fullTransformation = np.dot(translation,H) #again, images must be translated to be 100% visible in new canvas
        warpedImage2 = cv2.warpPerspective(image2, fullTransformation, (xMax-xMin, yMax-yMin))
    
    else:
        warpedImageTemp = cv2.warpPerspective(image2, translation, (xMax-xMin, yMax-yMin))
        warpedImage2 = cv2.warpAffine(warpedImageTemp, A, (xMax-xMin, yMax-yMin))
        mask2 = cv2.threshold(warpedImage2, 0, 255, cv2.THRESH_BINARY)[1]  # mask of non-black pixels
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
        mask2 = cv2.morphologyEx(mask2, cv2.MORPH_ERODE, kernel)           # erode away the interpolated border
        warpedImage2[mask2==0] = 0
    
    result = np.where(warpedImage2 != 0, warpedImage2, warpedResImg)
    

    I simply added the following code lines to your code:

    mask2 = cv2.threshold(warpedImage2, 0, 255, cv2.THRESH_BINARY)[1]
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
    mask2 = cv2.morphologyEx(mask2, cv2.MORPH_ERODE, kernel)
    warpedImage2[mask2==0] = 0
    

    You can increase the kernel size if desired to erode more.
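
    For example, a sketch with a larger kernel and an extra erosion pass (assuming the same mask2 and warpedImage2 as above) trims a wider band from the border:

        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))            # larger kernel erodes more
        mask2 = cv2.morphologyEx(mask2, cv2.MORPH_ERODE, kernel, iterations=2)  # extra iterations erode further
        warpedImage2[mask2==0] = 0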

    Here is the before and after. Note that I did not have SURF and tried to use ORB, which did not align well, so your roads do not align. But the mismatch due to misalignment actually emphasizes the issue, since it shows the dashed, jagged black border line. Whether ORB aligns properly here is not important: the masking does what I think you want and is extendable to the processing of all your images.

    [before/after comparison]

    The other thing that can be done, in combination with the above, is to feather the mask and then ramp-blend the two images using it. This is done by blurring the mask (a bit more) and then stretching the values so that the ramp occupies only the inner half of the blurred border, clipping the outer half to black so that the blend stays inside the valid image area. Then blend the two images with the ramped mask and its inverse, as follows, using the same code as above.

        if A is None:
            fullTransformation = np.dot(translation,H) #again, images must be translated to be 100% visible in new canvas
            warpedImage2 = cv2.warpPerspective(image2, fullTransformation, (xMax-xMin, yMax-yMin))
        
        else:
            warpedImageTemp = cv2.warpPerspective(image2, translation, (xMax-xMin, yMax-yMin))
            warpedImage2 = cv2.warpAffine(warpedImageTemp, A, (xMax-xMin, yMax-yMin))
            mask2 = cv2.threshold(warpedImage2, 0, 255, cv2.THRESH_BINARY)[1]
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
            mask2 = cv2.morphologyEx(mask2, cv2.MORPH_ERODE, kernel)
            warpedImage2[mask2==0] = 0
            mask2 = cv2.blur(mask2, (5,5))   # feather the mask edge
            mask2 = skimage.exposure.rescale_intensity(mask2, in_range=(127.5,255), out_range=(0,255)).astype(np.float64)   # stretch so the ramp stays inside the image area
        
        result = (warpedImage2 * mask2 +  warpedResImg * (255 - mask2))/255
        result = result.clip(0,255).astype(np.uint8)
    
    cv2.imwrite("image1_image2_merged3.png", result)
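
    If skimage is not available, that rescale_intensity step can be reproduced with NumPy alone (a sketch of an equivalent, assuming mask2 is the blurred uint8 mask from the code above):

        # map [127.5, 255] -> [0, 255] and clip everything below 127.5 to 0,
        # matching rescale_intensity(in_range=(127.5,255), out_range=(0,255))
        mask2 = (mask2.astype(np.float64) - 127.5) * (255.0 / 127.5)
        mask2 = np.clip(mask2, 0, 255)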
    

    The result when compared to the original composite is as follows:

    [comparison with the original composite]

    ADDITION

    I have corrected my ORB code to reverse the use of the images, and now it aligns. So here are all 3 techniques: the original, the one that uses only a binary mask, and the one that uses a ramped mask for blending (all as described above).

    [comparison of the three techniques]

    ADDITION2

    Here are the 3 requested images: original, binary masked, ramped mask blending.

    [original]

    [binary masked]

    [ramped mask blending]

    Here is my ORB code for the last version above.

    I tried to change as little as possible from your code, except that I had to use ORB and had to swap the names image1 and image2 near the end.

    import cv2
    import matplotlib.pyplot as plt
    import numpy as np
    import itertools
    from scipy.interpolate import UnivariateSpline
    from skimage.exposure import rescale_intensity
    
    
    image1 = cv2.imread("image1.jpg")
    image2 = cv2.imread("image2.jpg")
    
    gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
    
    # Detect ORB features and compute descriptors.
    MAX_FEATURES = 500
    GOOD_MATCH_PERCENT = 0.15
    orb = cv2.ORB_create(MAX_FEATURES)
    
    keypoints1, descriptors1 = orb.detectAndCompute(gray1, None)
    keypoints2, descriptors2 = orb.detectAndCompute(gray2, None)
    
    # Match features.
    matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors1, descriptors2, None)
    
    # Sort matches by score
    matches.sort(key=lambda x: x.distance, reverse=False)
    
    # Remove not so good matches
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]
    
    # Draw top matches
    imMatches = cv2.drawMatches(image1, keypoints1, image2, keypoints2, matches, None)
    cv2.imwrite("/Users/fred/desktop/image1_image2_matches.png", imMatches)
    
    # Extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)
    
    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt
    
    print(points1)
    print("")
    print(points2)
    
    A = cv2.estimateRigidTransform(points1,points2,fullAffine=False)
    #print(A)
    
    if A is None:
        HomogResult = cv2.findHomography(points1,points2,method=cv2.RANSAC)
        H = HomogResult[0]
    
    height1,width1 = image1.shape[:2]
    height2,width2 = image2.shape[:2]
    
    corners1 = np.float32(([0,0],[0,height1],[width1,height1],[width1,0]))
    corners2 = np.float32(([0,0],[0,height2],[width2,height2],[width2,0]))
    
    warpedCorners2 = np.zeros((4,2))
    
    # project corners2 into domain of image1 from A affine or H homography
    for i in range(0,4):
        cornerX = corners2[i,0]
        cornerY = corners2[i,1]
        if A is not None: #check if we're working with affine transform or perspective transform
            warpedCorners2[i,0] = A[0,0]*cornerX + A[0,1]*cornerY + A[0,2]
            warpedCorners2[i,1] = A[1,0]*cornerX + A[1,1]*cornerY + A[1,2]
        else:
            warpedCorners2[i,0] = (H[0,0]*cornerX + H[0,1]*cornerY + H[0,2])/(H[2,0]*cornerX + H[2,1]*cornerY + H[2,2])
            warpedCorners2[i,1] = (H[1,0]*cornerX + H[1,1]*cornerY + H[1,2])/(H[2,0]*cornerX + H[2,1]*cornerY + H[2,2])
    
    allCorners = np.concatenate((corners1, warpedCorners2), axis=0)
    
    [xMin, yMin] = np.int32(allCorners.min(axis=0).ravel() - 0.5)
    [xMax, yMax] = np.int32(allCorners.max(axis=0).ravel() + 0.5)
    
    translation = np.float32(([1,0,-1*xMin],[0,1,-1*yMin],[0,0,1]))
    warpedResImg = cv2.warpPerspective(image2, translation, (xMax-xMin, yMax-yMin))
    
    
    if A is None:
        fullTransformation = np.dot(translation,H) #again, images must be translated to be 100% visible in new canvas
        warpedImage2 = cv2.warpPerspective(image2, fullTransformation, (xMax-xMin, yMax-yMin))
    
    else:
        warpedImageTemp = cv2.warpPerspective(image1, translation, (xMax-xMin, yMax-yMin))
        warpedImage2 = cv2.warpAffine(warpedImageTemp, A, (xMax-xMin, yMax-yMin))
        mask2 = cv2.threshold(warpedImage2, 0, 255, cv2.THRESH_BINARY)[1]
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
        mask2 = cv2.morphologyEx(mask2, cv2.MORPH_ERODE, kernel)
        warpedImage2[mask2==0] = 0
        mask2 = cv2.blur(mask2, (5,5))
        mask2 = rescale_intensity(mask2, in_range=(127.5,255), out_range=(0,255)).astype(np.float64)
    
    result = (warpedImage2 * mask2 +  warpedResImg * (255 - mask2))/255
    result = result.clip(0,255).astype(np.uint8)
    
    cv2.imwrite("image1_image2_merged2.png", result)
    

    You had the following. Note where the names image1 and image2 are used, compared to my code above.

    warpedResImg = cv2.warpPerspective(image1, translation, (xMax-xMin, yMax-yMin))
    
    
    if A is None:
        fullTransformation = np.dot(translation,H) #again, images must be translated to be 100% visible in new canvas
        warpedImage2 = cv2.warpPerspective(image2, fullTransformation, (xMax-xMin, yMax-yMin))
    
    else:
        warpedImageTemp = cv2.warpPerspective(image2, translation, (xMax-xMin, yMax-yMin))
        warpedImage2 = cv2.warpAffine(warpedImageTemp, A, (xMax-xMin, yMax-yMin))
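
    A side note on compatibility: cv2.estimateRigidTransform was deprecated and removed in OpenCV 4.x, so on a recent OpenCV this code will fail at that call. cv2.estimateAffinePartial2D is the usual replacement for the fullAffine=False case, for example:

        # returns a 2x3 partial-affine matrix and an inlier mask; replaces
        # cv2.estimateRigidTransform(points1, points2, fullAffine=False) on OpenCV 4.x
        A, inliers = cv2.estimateAffinePartial2D(points1, points2, method=cv2.RANSAC)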