Tags: python, android, opencv, image-thresholding, adaptive-threshold

OpenCV output of Adaptive Threshold


I am a newbie in both Android and OpenCV. I am trying to take an image from the camera, convert it into the desired format, and pass it to a TFLite model.

Here is the code for capturing an image and applying image processing to it:

    public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
        mRGBA = inputFrame.rgba();
        frame = classifier.processMat(mRGBA);

        float mh = mRGBA.height();
        float cw = (float) Resources.getSystem().getDisplayMetrics().widthPixels;
        float scale = mh / cw * 0.7f;

        Mat temp = new Mat();
        Mat temp3= new Mat();

        if (!isDebug) {
            if (counter == CLASSIFY_INTERVAL) {
                Imgproc.cvtColor(frame, frame, Imgproc.COLOR_RGBA2GRAY);
                Core.rotate(frame, frame, Core.ROTATE_90_CLOCKWISE);
                Imgproc.GaussianBlur(frame, frame, new Size(5, 5), 0);

                Imgproc.adaptiveThreshold(frame, frame, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY_INV , 3, 2);

                Bitmap bmsp = null;

                runInterpreter();
                counter = 0;
            } else {
                counter++;
            }
        }

        Imgproc.rectangle(mRGBA,
                new Point(mRGBA.cols() / 2f - (mRGBA.cols() * scale / 2),
                        mRGBA.rows() / 2f - (mRGBA.cols() * scale / 2)),
                new Point(mRGBA.cols() / 2f + (mRGBA.cols() * scale / 2),
                        mRGBA.rows() / 2f + (mRGBA.cols() * scale / 2)),
                new Scalar(0, 255, 0), 1);
        if (isEdge) {
            mRGBA = classifier.debugMat(mRGBA);
        }

        System.gc();
        return mRGBA;
    }

My output looks like the image below, but I want the hand to be filled with white before passing it to the model. Can somebody suggest how?

(output image)

(original image)


Solution

  • The main issue is that the result of adaptiveThreshold has gaps in the external edge, so you can't use it as input to findContours.

    I think that using GaussianBlur makes things worse, because it blurs the edge between the hand and the background.

    You may use the following stages:

    • Convert the frame to grayscale.
    • Apply adaptiveThreshold with a large kernel size (I used size 51).
      A large kernel size keeps a thick edge line without gaps (except for a small gap at the fingernail).
    • Find the external contours.
      Keep the contour with the maximum area.
    • Draw that contour (filled with a solid value of 255) on a zeros image.
      There is a problem: the inner part of the hand is not filled, due to the weird shape of the contour.
    • To complete the filling:
      Find the center of the contour, and fill it using floodFill.

    Here is a Python code sample:

    import numpy as np
    import cv2
    
    frame = cv2.imread("hand.jpg")  # Read image from file (for testing).
    
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Use BGR to Gray conversion (not RGBA, because image is read from file)
    
    # Apply adaptiveThreshold with large filter size.
    thres_gray = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 51, 2)
    
    # Find contours (external contours)
    cnts, hier = cv2.findContours(thres_gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    
    # Find contour with the maximum area
    c = max(cnts, key=cv2.contourArea)
    
    res = np.zeros_like(gray)  # Create a new zeros image for storing the result.
    
    # Fill the contour with white color - draw the filled contour on res image.
    cv2.drawContours(res, [c], -1, 255, -1)
    
    # Compute the center of the contour
    # https://www.pyimagesearch.com/2016/02/01/opencv-center-of-contour/
    M = cv2.moments(c)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    
    # Use floodFill for filling the center of the contour
    cv2.floodFill(res, None, (cX, cY), 255)
    
    # Show images for testing
    cv2.imshow('thres_gray', thres_gray)
    cv2.imshow('res', res)
    cv2.waitKey()
    cv2.destroyAllWindows()
    
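    Note: the two-value unpacking of cv2.findContours above assumes OpenCV 4.x (or 2.x); in OpenCV 3.x the function returns three values, with the contours list as the second element.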

    Results:

    thres_gray: (image)

    res before floodFill: (image)

    res after floodFill: (image)


    Java implementation:

    package myproject;
    
    import org.opencv.core.Core;
    import org.opencv.core.Mat;
    import org.opencv.core.CvType;
    import org.opencv.core.Scalar;
    import org.opencv.core.Point;
    import org.opencv.core.MatOfPoint;
    import org.opencv.imgproc.Imgproc;
    import org.opencv.imgproc.Moments;
    import org.opencv.imgcodecs.Imgcodecs;
    import java.util.List;
    import java.util.ArrayList;
    
    class Sample {

    static { System.loadLibrary(Core.NATIVE_LIBRARY_NAME); }

    public static void main(String[] args) {
        Mat frame = Imgcodecs.imread("hand.jpg");
        
        Mat gray = new Mat();
        Mat thres_gray = new Mat();
        
        Imgproc.cvtColor(frame, gray, Imgproc.COLOR_BGR2GRAY);
        
        //Apply adaptiveThreshold with large filter size.
        Imgproc.adaptiveThreshold(gray, thres_gray, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY_INV, 51, 2);    
          
        List<MatOfPoint> contours = new ArrayList<>();
        Mat hierarchy = new Mat();
        
        //Find contours
        Imgproc.findContours(thres_gray, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_NONE);
    
        //Find contour with the maximum area
        //https://stackoverflow.com/questions/38759925/how-to-find-largest-contour-in-java-opencv
        double maxVal = 0;
        int maxValIdx = 0;
        for (int contourIdx = 0; contourIdx < contours.size(); contourIdx++)
        {
            double contourArea = Imgproc.contourArea(contours.get(contourIdx));
            if (maxVal < contourArea)
            {
                maxVal = contourArea;
                maxValIdx = contourIdx;
            }
        }
           
        Mat res = Mat.zeros(gray.size(), CvType.CV_8UC1);    //Create a new zeros image for storing the result.
        
        Imgproc.drawContours(res, contours, maxValIdx, new Scalar(255), -1);
        
        //Compute the center of the contour
        //https://www.pyimagesearch.com/2016/02/01/opencv-center-of-contour/
        Moments M = Imgproc.moments(contours.get(maxValIdx));
        int cX = (int)(M.get_m10() / M.get_m00());
        int cY = (int)(M.get_m01() / M.get_m00());    
    
        //Use floodFill for filling the center of the contour.    
        Mat mask = Mat.zeros(res.rows() + 2, res.cols() + 2, CvType.CV_8UC1);
        Imgproc.floodFill(res, mask, new Point(cX, cY), new Scalar(255));
           
        Imgcodecs.imwrite("res.png", res);
    }
    
    }
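
    If you want to plug the same steps into the question's onCameraFrame callback (instead of reading an image from a file), a minimal sketch could look like the helper below. It is not part of the original answer: the helper name fillHand and the empty-contour guard are illustrative additions, and it reuses the imports of the Java implementation above.

    //Hedged sketch: the method name and guards are illustrative, not from the original answer.
    static Mat fillHand(Mat gray) {
        //Large-kernel adaptive threshold keeps the outer edge of the hand closed.
        Mat thres = new Mat();
        Imgproc.adaptiveThreshold(gray, thres, 255,
                Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY_INV, 51, 2);

        //Find the external contours and keep the one with the maximum area (the hand).
        List<MatOfPoint> contours = new ArrayList<>();
        Imgproc.findContours(thres, contours, new Mat(),
                Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_NONE);

        Mat res = Mat.zeros(gray.size(), CvType.CV_8UC1);
        if (contours.isEmpty()) {
            return res;    //No contour found, return an empty mask.
        }

        int maxIdx = 0;
        double maxArea = 0;
        for (int i = 0; i < contours.size(); i++) {
            double area = Imgproc.contourArea(contours.get(i));
            if (area > maxArea) {
                maxArea = area;
                maxIdx = i;
            }
        }

        //Fill the largest contour, then flood-fill from its center to close the inner part.
        Imgproc.drawContours(res, contours, maxIdx, new Scalar(255), -1);

        Moments M = Imgproc.moments(contours.get(maxIdx));
        if (M.get_m00() != 0) {
            int cX = (int) (M.get_m10() / M.get_m00());
            int cY = (int) (M.get_m01() / M.get_m00());
            Mat mask = Mat.zeros(res.rows() + 2, res.cols() + 2, CvType.CV_8UC1);
            Imgproc.floodFill(res, mask, new Point(cX, cY), new Scalar(255));
        }

        return res;
    }

    In onCameraFrame you would then drop the GaussianBlur and the blockSize-3 adaptiveThreshold, and after the RGBA-to-gray conversion and rotation call frame = fillHand(frame); before runInterpreter(), so the model receives the hand filled with white.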