Tags: opencv, image-processing, shader, distortion, leap-motion

Un-distort raw images received from the Leap Motion cameras


I've been working with the Leap for a long time now. SDK version 2.1+ allows us to access the cameras and get raw images. I want to use those images with OpenCV for square/circle detection and so on... the problem is I can't get those images undistorted. I read the docs, but don't quite get what they mean. Here's one thing I need to understand properly before going forward:

        distortion_data_ = image.distortion();
        for (int d = 0; d < image.distortionWidth() * image.distortionHeight(); d += 2)
        {
            float dX = distortion_data_[d];
            float dY = distortion_data_[d + 1];
            if(!((dX < 0) || (dX > 1)) && !((dY < 0) || (dY > 1)))
            {
               //what do i do now to undistort the image?
            }
        }
        data = image.data();
        mat.put(0, 0, data);
        //Imgproc.Canny(mat, mat, 100, 200);
        //mat = findSquare(mat);
        ok.showImage(mat);    

In the docs it says something like this: "The calibration map can be used to correct image distortion due to lens curvature and other imperfections. The map is a 64x64 grid of points. Each point consists of two 32-bit values...." (the rest is on the dev website).
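What that paragraph boils down to: each of the 64x64 grid points stores a normalized (x, y) coordinate saying where in the raw image to sample for that point, and values outside [0..1] mark points with no valid raw pixel. A minimal C++ sketch of a single lookup (Leap SDK 2.x API; gridX and gridY are hypothetical indices in the range 0..63):

    const float* map = image.distortion();
    const int stride = image.distortionWidth();       //64 points * 2 floats = 128 per grid row
    float dX = map[gridY * stride + gridX * 2];       //normalized x into the raw image
    float dY = map[gridY * stride + gridX * 2 + 1];   //normalized y into the raw image
    //if dX and dY are both inside [0..1], the matching raw pixel is
    //raw[(int)(dY * image.height()) * image.width() + (int)(dX * image.width())]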

Can someone explain this in detail, please? Or, alternatively, just post the Java code to undistort the images and give me an output Mat image so I can continue processing it (I'd still prefer a good explanation if possible).


Solution

  • Here's an example of how to do it without using OpenCV; an OpenCV-based variant follows after the code. The following seems to be faster than using the Leap::Image::warp() method (probably due to the additional function call overhead when using warp()):

    float destinationWidth = 320;
    float destinationHeight = 120;
    unsigned char destination[(int)destinationWidth][(int)destinationHeight];
    
    //define needed variables outside the inner loop
    float calX, calY, weightX, weightY, dX1, dX2, dX3, dX4, dY1, dY2, dY3, dY4, dX, dY;
    int x1, x2, y1, y2, denormalizedX, denormalizedY;
    int x, y;
    
    const unsigned char* raw = image.data();
    const float* distortion_buffer = image.distortion();
    
    //Local variables for values needed in loop
    const int distortionWidth = image.distortionWidth();
    const int width = image.width();
    const int height = image.height();
    
    for (x = 0; x < destinationWidth; x++) {
        for (y = 0; y < destinationHeight; y++) {
            //Calculate the position in the calibration map (still with a fractional part)
            calX = 63 * x/destinationWidth;
            calY = 63 * y/destinationHeight;
            //Save the fractional part to use as the weight for interpolation
            weightX = calX - truncf(calX);
            weightY = calY - truncf(calY);
    
            //Get the x,y coordinates of the closest calibration map points to the target pixel
            x1 = calX; //Note truncation to int
            y1 = calY;
            x2 = x1 + 1;
            y2 = y1 + 1;
    
            //Look up the x and y values for the 4 calibration map points around the target
            // (x1, y1)  ..  .. .. (x2, y1)
            //    ..                 ..
            //    ..    (x, y)       ..
            //    ..                 ..
            // (x1, y2)  ..  .. .. (x2, y2)
            dX1 = distortion_buffer[x1 * 2 + y1 * distortionWidth];
            dX2 = distortion_buffer[x2 * 2 + y1 * distortionWidth];
            dX3 = distortion_buffer[x1 * 2 + y2 * distortionWidth];
            dX4 = distortion_buffer[x2 * 2 + y2 * distortionWidth];
            dY1 = distortion_buffer[x1 * 2 + y1 * distortionWidth + 1];
            dY2 = distortion_buffer[x2 * 2 + y1 * distortionWidth + 1];
            dY3 = distortion_buffer[x1 * 2 + y2 * distortionWidth + 1];
            dY4 = distortion_buffer[x2 * 2 + y2 * distortionWidth + 1];
    
            //Bilinear interpolation of the looked-up values:
            // X value
            dX = dX1 * (1 - weightX) * (1- weightY) + dX2 * weightX * (1 - weightY) + dX3 * (1 - weightX) * weightY + dX4 * weightX * weightY;
    
            // Y value
            dY = dY1 * (1 - weightX) * (1- weightY) + dY2 * weightX * (1 - weightY) + dY3 * (1 - weightX) * weightY + dY4 * weightX * weightY;
    
            // Reject points outside the range [0..1]
            if((dX >= 0) && (dX <= 1) && (dY >= 0) && (dY <= 1)) {
                //Denormalize from [0..1] to [0..width] or [0..height]
                denormalizedX = dX * width;
                denormalizedY = dY * height;
    
                //look up the brightness value for the target pixel
                destination[x][y] = raw[denormalizedX + denormalizedY * width];
            } else {
                destination[x][y] = -1;
            }
        }
    }
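
Since the question asks for OpenCV, the same distortion buffer can also be packed into remap tables and handed to cv::remap, which then does the bilinear interpolation for you. The sketch below is only that, a sketch: it assumes the C++ OpenCV API, a valid Leap::Image called image, and a caller-chosen output size, and undistortLeapImage is an illustrative name rather than part of either SDK. The Java bindings expose the equivalent calls (Imgproc.resize, Imgproc.remap), so the same approach carries over.

    #include <opencv2/opencv.hpp>
    #include "Leap.h"
    
    cv::Mat undistortLeapImage(const Leap::Image& image, cv::Size dstSize)
    {
        //Wrap the 8-bit grayscale sensor buffer without copying it
        cv::Mat raw(image.height(), image.width(), CV_8UC1,
                    const_cast<unsigned char*>(image.data()));
    
        //Unpack the 64x64 calibration grid into separate x and y maps,
        //denormalized from [0..1] to raw-image pixel coordinates
        const int gridW = image.distortionWidth() / 2;   //64 points per row (2 floats each)
        const int gridH = image.distortionHeight();      //64 rows
        cv::Mat mapX(gridH, gridW, CV_32FC1);
        cv::Mat mapY(gridH, gridW, CV_32FC1);
        const float* d = image.distortion();
        for (int row = 0; row < gridH; row++) {
            for (int col = 0; col < gridW; col++) {
                int i = row * image.distortionWidth() + col * 2;
                mapX.at<float>(row, col) = d[i]     * image.width();
                mapY.at<float>(row, col) = d[i + 1] * image.height();
            }
        }
    
        //Resizing the maps to the output size performs the same bilinear
        //interpolation as the hand-written weightX/weightY math above
        cv::Mat bigMapX, bigMapY;
        cv::resize(mapX, bigMapX, dstSize, 0, 0, cv::INTER_LINEAR);
        cv::resize(mapY, bigMapY, dstSize, 0, 0, cv::INTER_LINEAR);
    
        //Sample the raw image through the maps; points whose distortion values
        //fall outside [0..1] land outside the raw image and come out as 0 here
        //(instead of the -1 marker used above)
        cv::Mat dst;
        cv::remap(raw, dst, bigMapX, bigMapY, cv::INTER_LINEAR,
                  cv::BORDER_CONSTANT, cv::Scalar(0));
        return dst;
    }

The returned Mat can then go straight into the Canny/findSquare style processing from the question.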