Tags: android, image-processing, android-camera, webrtc, yuv

How to overwrite onPreviewFrame data with static YUV frame?


My application overrides the onPreviewFrame callback to pass the current camera frame to a webrtc native function. This works perfectly; however, I want to be able to switch to sending a static frame instead of video, if that option has been selected in my app.

So far I have created a YUV NV21 image, which I am storing in the assets dir. All attempts to pass that frame down to the native function have resulted in purple/green stripes rather than the actual image.

This is what I have so far:

@Override
public void onPreviewFrame(byte[] data, Camera camera) {
    previewBufferLock.lock();

    if (mFrameProvider.isEnabled()) {
        mFrameProvider.overwriteWithPreviewFrame(data, expectedFrameSize);
    }

    if (isCaptureRunning) {
        if (data.length == expectedFrameSize) {
             ProvideCameraFrame(data, expectedFrameSize, context);
             cameraUtils.addCallbackBuffer(camera, data);
        }
    }
    previewBufferLock.unlock();
}


@Override
public byte[] overwriteWithPreviewFrame(byte[] data, int expectedFrameSize) {
   if (mFrameData == null) {
       loadPreviewFrame();
   }

   // Copy as much of the static frame as fits into the preview buffer.
   int length = Math.min(expectedFrameSize, mFrameData.length);
   System.arraycopy(mFrameData, 0, data, 0, length);

   return data;
}

And

private void loadPreviewFrame() {
    try {
        InputStream open = mContext.getResources().getAssets().open(PREVIEW_FRAME_FILE);

        mFrameData = IOUtils.toByteArray(open);
        open.close();
    } catch (Exception e) {
        Log.e("FrameProvider", "Failed to load preview frame from assets", e);
    }
}

I have also tried converting the image to a bitmap first. So the question is: how can I open a YUV frame from assets and convert it into a suitable format to pass to the native methods?

This results in the following output:

[Screenshot: the preview renders as purple/green stripes instead of the image]


Solution

  • After a long fight with the Android API, I managed to get this working.

    There were two issues causing the green/purple output:

    1. Loss of data: the generated YUV frame was larger than the original preview frame at the same resolution, so around 30% of the image data passed down to the native code was missing. (A quick buffer-size sanity check is sketched below.)

    2. Wrong resolution: the native code required the resolution of the preview frame, not that of the camera.
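
    To make the first issue easier to catch, the buffer size can be checked against the preview resolution before anything is handed to native code. Below is a minimal sketch (the helper name is mine, not part of the original code), relying on the fact that an NV21 frame is exactly width * height * 3/2 bytes:

    // Hypothetical helper: check that a buffer matches the NV21 size implied
    // by the preview resolution (width * height bytes of Y, plus half that
    // again for the interleaved VU plane).
    static boolean isValidNv21Size(byte[] frame, Camera.Size previewSize) {
        int expected = previewSize.width * previewSize.height * 3 / 2;
        return frame.length == expected;
    }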

    Below is a working solution for anyone who wishes to add a static frame. The updated code:

    @Override
    public byte[] getPreviewFrameData(int width, int height) {
        if (mPreviewFrameData == null) {
            loadPreviewFrame(width, height);
        }
    
        return mPreviewFrameData;
    }
    
    private void loadPreviewFrame(int width, int height) {
        try {
            Bitmap previewImage = BitmapFactory.decodeResource(mContext.getResources(), R.drawable.frame);
            Bitmap resizedPreviewImage = Bitmap.createScaledBitmap(previewImage, width, height, false);
    
            BitmapConverter bitmapConverter = new BitmapConverter();
            mPreviewFrameData = bitmapConverter.convertToNV21(resizedPreviewImage);
        } catch (Exception e) {
            Log.e("DisabledCameraFrameProvider", "Failed to loadPreviewFrame");
        }
    }
    
    class BitmapConverter {
        byte[] convertToNV21(Bitmap bitmap) {
            int inputWidth = bitmap.getWidth();
            int inputHeight = bitmap.getHeight();
    
            int[] argb = new int[inputWidth * inputHeight];
    
            bitmap.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);
    
            // NV21 holds a full-resolution Y plane followed by an interleaved VU
            // plane subsampled 2x2, so the buffer is width * height * 3/2 bytes.
            byte[] yuv = new byte[inputWidth * inputHeight * 3 / 2];
            encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
    
            bitmap.recycle();
    
            return yuv;
        }
    
        // Converts ARGB pixels to NV21 (YUV420 semi-planar): a full Y plane
        // first, then interleaved V/U samples at quarter resolution.
        void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
            final int frameSize = width * height;

            int yIndex = 0;
            int uvIndex = frameSize; // the VU plane starts right after the Y plane
    
            int R, G, B, Y, U, V;
            int index = 0;
            for (int j = 0; j < height; j++) {
                for (int i = 0; i < width; i++) {
                    R = (argb[index] & 0xff0000) >> 16;
                    G = (argb[index] & 0xff00) >> 8;
                    B = (argb[index] & 0xff);
    
                    // Fixed-point RGB -> YUV conversion (BT.601 coefficients).
                    Y = ( (  66 * R + 129 * G +  25 * B + 128) >> 8) +  16;
                    U = ( ( -38 * R -  74 * G + 112 * B + 128) >> 8) + 128;
                    V = ( ( 112 * R -  94 * G -  18 * B + 128) >> 8) + 128;
    
                    yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
                    // Emit one V/U pair per 2x2 pixel block; NV21 stores V before U.
                    if (j % 2 == 0 && index % 2 == 0) {
                        yuv420sp[uvIndex++] = (byte)((V<0) ? 0 : ((V > 255) ? 255 : V));
                        yuv420sp[uvIndex++] = (byte)((U<0) ? 0 : ((U > 255) ? 255 : U));
                    }
    
                    index ++;
                }
            }
        }
    }
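
    One caveat worth flagging (an assumption about resource handling, not something stated in the original answer): BitmapFactory.decodeResource() applies density scaling by default, so the decoded bitmap may not come out at the drawable's raw pixel dimensions. If that matters, scaling can be disabled before the explicit resize:

    BitmapFactory.Options options = new BitmapFactory.Options();
    options.inScaled = false; // keep the drawable's raw pixel size
    Bitmap previewImage = BitmapFactory.decodeResource(mContext.getResources(), R.drawable.frame, options);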
    

    Then, finally, in your callback:

    public void onPreviewFrame(byte[] data, Camera camera) {
    
         byte[] bytes = data;
    
         if (!mProvider.isVideoEnabled()) {
             Camera.Size previewSize = camera.getParameters().getPreviewSize();
             bytes = mProvider.getPreviewFrameData(previewSize.width, previewSize.height);
         }
    
         ProvideCameraFrame(bytes, bytes.length, context);
    }
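
    If, as in the original question, the pipeline uses setPreviewCallbackWithBuffer(), the preview buffer must still be handed back to the camera even when the static frame is sent instead:

    // Return the buffer regardless of which byte array was forwarded to the
    // native code; otherwise the camera stops delivering preview callbacks.
    camera.addCallbackBuffer(data);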
    

    The key was to scale the image to the camera preview size and to convert it into the NV21 (YUV) colour space.
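
    To verify the generated frame independently of the native code, one debugging option (a sketch, not part of the solution above) is to round-trip the buffer through android.graphics.YuvImage:

    // Compress the NV21 buffer to JPEG and decode it back; if the decoded
    // bitmap renders correctly, the buffer's size and layout are sound.
    YuvImage yuvImage = new YuvImage(mPreviewFrameData, ImageFormat.NV21, width, height, null);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    yuvImage.compressToJpeg(new Rect(0, 0, width, height), 90, out);
    Bitmap check = BitmapFactory.decodeByteArray(out.toByteArray(), 0, out.size());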