How can I take two images of an object from different angles and draw epipolar lines on one based on points from the other?
For example, I would like to be able to select a point on the left picture using a mouse, mark the point with a circle, and then draw an epipolar line on the right image corresponding to the marked point.
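For the point-selection part, something like this minimal sketch is what I have in mind (the file name and window name are placeholders):

import cv2

clicked_points = []

def on_mouse(event, x, y, flags, param):
    # mark the clicked point with a circle and remember its pixel coordinates
    if event == cv2.EVENT_LBUTTONDOWN:
        cv2.circle(img_left, (x, y), 5, (0, 0, 255), -1)
        clicked_points.append((x, y))

img_left = cv2.imread("left.png")  # placeholder file name
cv2.namedWindow("left image")
cv2.setMouseCallback("left image", on_mouse)
while True:
    cv2.imshow("left image", img_left)
    if cv2.waitKey(20) & 0xFF == 27:  # press Esc to quit
        break
cv2.destroyAllWindows()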
I have two XML files which contain a 3x3 camera matrix and a list of 3x4 projection matrices for each picture. The camera matrix is K, the projection matrix for the left picture is P_left, and the projection matrix for the right picture is P_right.
I have tried this approach:

1. Choose a pixel coordinate (x, y) in the left image (via mouse click)
2. Calculate a point p in the left image with K^-1 * (x, y, 1)
3. Calculate the pseudo-inverse matrix P+ of P_left (using np.linalg.pinv)
4. Calculate the epipole e' of the right image: P_right * (0, 0, 0, 1)
5. Calculate the skew-symmetric matrix e'_skew of e'
6. Calculate the fundamental matrix F: e'_skew * P_right * P+
7. Calculate the epipolar line l' on the right image: F * p
8. Calculate a point p' in the right image: P_right * P+ * p
9. Transform p' and l' back to pixel coordinates
10. Draw a line using cv2.line through p' and l'
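In code, that corresponds roughly to the following sketch (using NumPy, with K, P_left and P_right loaded from the XML files). Note that a fundamental matrix built from the full projection matrices already maps pixel coordinates, so the sketch uses p = (x, y, 1) directly, and the epipole formula e' = P_right * (0, 0, 0, 1) assumes the left camera sits at the origin, i.e. P_left = K [I | 0].

import numpy as np

def skew(e):
    # skew-symmetric matrix [e]_x such that skew(e) @ v == np.cross(e, v)
    return np.array([[0, -e[2], e[1]],
                     [e[2], 0, -e[0]],
                     [-e[1], e[0], 0]])

def fundamental_from_projections(P_left, P_right):
    # F = [e']_x * P_right * pinv(P_left), with e' = P_right * (0, 0, 0, 1)
    # (valid when the left camera centre is the origin, i.e. P_left = K [I | 0])
    P_plus = np.linalg.pinv(P_left)                     # 4x3 pseudo-inverse
    e_prime = P_right @ np.array([0.0, 0.0, 0.0, 1.0])  # epipole in the right image
    return skew(e_prime) @ P_right @ P_plus

def epipolar_line(F, x, y):
    # epipolar line l' = F * p for the clicked pixel (x, y); the result
    # (a, b, c) satisfies a*u + b*v + c = 0 in the right image
    p = np.array([x, y, 1.0])
    return F @ p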
I just did this a few days ago and it works just fine. Here's the method I used:

1. Calibrate your cameras using getCorners and calibrateCamera (you can find lots of tutorials on this, but it sounds like you already have this info).
2. Perform stereo calibration with stereoCalibrate(). It takes as parameters all of the camera and distortion matrices. You need this to determine the correlation between the two visual fields. You will get back several matrices: the rotation matrix R, translation vector T, essential matrix E and fundamental matrix F. (A rough sketch of this step is included after the code below.)
3. Undistort your images using getOptimalNewCameraMatrix and undistort(). This will get rid of a lot of camera aberrations and give you better results.
4. Call computeCorrespondEpilines to calculate the lines and plot them.

I will include some code below you can try out in Python. When I run it, I get images where the colored points have their corresponding epilines drawn in the other image.

Here's some code (Python 3). It uses two static images and static points, but you could easily select the points with the cursor. You can also refer to the OpenCV docs on calibration and stereo calibration here.
import cv2
import numpy as np
import os
# find object corners from chessboard pattern and create a correlation with image corners
def getCorners(images, chessboard_size, show=True):
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((chessboard_size[1] * chessboard_size[0], 3), np.float32)
    objp[:, :2] = np.mgrid[0:chessboard_size[0], 0:chessboard_size[1]].T.reshape(-1, 2) * 3.88  # multiply by 3.88 for large chessboard squares
    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane
    for image in images:
        frame = cv2.imread(image)
        # height, width, channels = frame.shape  # get image parameters
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, chessboard_size, None)  # find the chessboard corners
        if ret:  # if corners were found
            objpoints.append(objp)
            corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)  # refine corners
            imgpoints.append(corners2)  # add to corner array
            if show:
                # draw and display the corners
                frame = cv2.drawChessboardCorners(frame, chessboard_size, corners2, ret)
                cv2.imshow('frame', frame)
                cv2.waitKey(100)
    cv2.destroyAllWindows()  # close open windows
    return objpoints, imgpoints, gray.shape[::-1]
# perform undistortion on provided image
def undistort(image, mtx, dist):
    img = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
    image = os.path.splitext(image)[0]
    h, w = img.shape[:2]
    newcameramtx, _ = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
    dst = cv2.undistort(img, mtx, dist, None, newcameramtx)
    # save the undistorted image so the corners can be re-detected on it
    # (assumed step: the *_undistorted.bmp files are re-read in __main__ below)
    cv2.imwrite(os.path.basename(image) + "_undistorted.bmp", dst)
    return dst
# draw the provided points on the image
def drawPoints(img, pts, colors):
    for pt, color in zip(pts, colors):
        cv2.circle(img, tuple(map(int, pt[0])), 5, color, -1)

# draw the provided lines on the image
def drawLines(img, lines, colors):
    _, c, _ = img.shape
    for r, color in zip(lines, colors):
        x0, y0 = map(int, [0, -r[2]/r[1]])
        x1, y1 = map(int, [c, -(r[2]+r[0]*c)/r[1]])
        cv2.line(img, (x0, y0), (x1, y1), color, 1)
if __name__ == '__main__':
    # NOTE: chessboard_size, the colour list 'colors', the per-camera calibration
    # results (mtxL, distL, mtxR, distR), the fundamental matrix F from
    # stereoCalibrate and the helper combineSideBySide are assumed to be defined
    # elsewhere (they come from the calibration steps described above)

    # undistort our chosen images using the left and right camera and distortion matrices
    imgL = undistort("2L/2L34.bmp", mtxL, distL)
    imgR = undistort("2R/2R34.bmp", mtxR, distR)
    imgL = cv2.cvtColor(imgL, cv2.COLOR_GRAY2BGR)
    imgR = cv2.cvtColor(imgR, cv2.COLOR_GRAY2BGR)
    # use getCorners to get the new image locations of the chessboard corners (undistort will have moved them a little)
    _, imgpointsL, _ = getCorners(["2L34_undistorted.bmp"], chessboard_size, show=False)
    _, imgpointsR, _ = getCorners(["2R34_undistorted.bmp"], chessboard_size, show=False)
    # get 3 image points of interest from each image and draw them
    ptsL = np.asarray([imgpointsL[0][0], imgpointsL[0][10], imgpointsL[0][20]])
    ptsR = np.asarray([imgpointsR[0][5], imgpointsR[0][15], imgpointsR[0][25]])
    drawPoints(imgL, ptsL, colors[3:6])
    drawPoints(imgR, ptsR, colors[0:3])
    # find epilines corresponding to points in right image and draw them on the left image
    epilinesR = cv2.computeCorrespondEpilines(ptsR.reshape(-1, 1, 2), 2, F)
    epilinesR = epilinesR.reshape(-1, 3)
    drawLines(imgL, epilinesR, colors[0:3])
    # find epilines corresponding to points in left image and draw them on the right image
    epilinesL = cv2.computeCorrespondEpilines(ptsL.reshape(-1, 1, 2), 1, F)
    epilinesL = epilinesL.reshape(-1, 3)
    drawLines(imgR, epilinesL, colors[3:6])
    # combine the corresponding images into one and display them
    combineSideBySide(imgL, imgR, "epipolar_lines", save=True)
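The snippet above assumes the calibration results (mtxL, distL, mtxR, distR and F) already exist. Just as a rough sketch, producing them with calibrateCamera and stereoCalibrate would look something like this (the image lists and chessboard_size are placeholders, and it assumes both cameras saw the same chessboard views in the same order; in practice you would use many views):

# sketch only: how the calibration results used above might be obtained
images_left = ["2L/2L34.bmp"]    # placeholder lists of calibration images
images_right = ["2R/2R34.bmp"]
chessboard_size = (9, 6)         # placeholder pattern size

objpoints, imgpointsL, img_size = getCorners(images_left, chessboard_size, show=False)
_, imgpointsR, _ = getCorners(images_right, chessboard_size, show=False)

# calibrate each camera individually
_, mtxL, distL, _, _ = cv2.calibrateCamera(objpoints, imgpointsL, img_size, None, None)
_, mtxR, distR, _, _ = cv2.calibrateCamera(objpoints, imgpointsR, img_size, None, None)

# stereo calibration: keep the intrinsics fixed and recover R, T, E and F
_, mtxL, distL, mtxR, distR, R, T, E, F = cv2.stereoCalibrate(
    objpoints, imgpointsL, imgpointsR, mtxL, distL, mtxR, distR, img_size,
    flags=cv2.CALIB_FIX_INTRINSIC)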
Hopefully this helps!