Currently, I am planning an app that uses the RAW data provided by the camera on Android devices (Pixel 2).
I use ARCore to get the frame data as follows:
final Frame frame = session.update();
final Camera camera = frame.getCamera();
Image image = frame.acquireCameraImage();
This does not seem to be the raw data I want.
How can I get raw image data with ARCore? Or is there another way to do that?
I have obtained a bitmap before, but it is not a raw image. By "raw" I mean the original data (RGB RAW data) from the CMOS sensor without any processing, such as gamma correction, AWB, AE, or any other post-processing.
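To make it concrete, what I am after corresponds to ImageFormat.RAW_SENSOR in the Camera2 API; ARCore's acquireCameraImage() returns a YUV_420_888 image, which has already been through the ISP. Below is a minimal sketch of requesting such a stream with plain Camera2, assuming a device that advertises the RAW capability. RawReaderFactory is just a hypothetical name for illustration, and whether this can be combined with an ARCore session (e.g. via the Shared Camera API) is exactly what I am asking:
import android.graphics.ImageFormat;
import android.hardware.camera2.CameraAccessException;
import android.hardware.camera2.CameraCharacteristics;
import android.hardware.camera2.CameraManager;
import android.media.ImageReader;
import android.util.Size;

// Hypothetical helper (illustration only): checks the RAW capability and creates an
// ImageReader for ImageFormat.RAW_SENSOR frames (unprocessed Bayer data, before AWB/AE/gamma).
public final class RawReaderFactory {

    public static ImageReader createRawReader(CameraManager manager, String cameraId)
            throws CameraAccessException {
        CameraCharacteristics chars = manager.getCameraCharacteristics(cameraId);

        // The device must advertise the RAW capability, otherwise RAW_SENSOR output is unavailable.
        int[] caps = chars.get(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES);
        boolean hasRaw = false;
        if (caps != null) {
            for (int c : caps) {
                if (c == CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_RAW) {
                    hasRaw = true;
                }
            }
        }
        if (!hasRaw) {
            return null;
        }

        // Pick the first advertised RAW_SENSOR size (usually the full sensor resolution).
        Size rawSize = chars.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP)
                .getOutputSizes(ImageFormat.RAW_SENSOR)[0];
        return ImageReader.newInstance(rawSize.getWidth(), rawSize.getHeight(),
                ImageFormat.RAW_SENSOR, /* maxImages= */ 2);
    }
}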
Convert the Android Image object to a byte array:
Image image = null;
try {
    image = frame.acquireCameraImage();
} catch (NotYetAvailableException e) {
    e.printStackTrace();
}
if (image == null) {
    return; // No camera image available for this frame yet; adjust to your control flow
}

int h = image.getHeight();
int w = image.getWidth();
Bitmap outputBitmap = Bitmap.createBitmap(w, h, Bitmap.Config.ARGB_8888);

// YUV_420_888 uses 12 bits per pixel, so the buffer needs w * h * 3 / 2 bytes
int pixelSizeBits = ImageFormat.getBitsPerPixel(ImageFormat.YUV_420_888);
byte[] yuvByteArray = new byte[h * w * pixelSizeBits / 8];

YuvToByteArray yuvToByteArray = new YuvToByteArray();
yuvToByteArray.setPixelCount(h * w);
yuvToByteArray.imageToByteArray(image, yuvByteArray);
image.close();
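A quick sanity check may help here. Based on the converter below (which, as its comments note, writes NV21 order: a full-resolution Y plane followed by interleaved V/U samples), the buffer should hold exactly w * h * 3 / 2 bytes, which is the layout ScriptIntrinsicYuvToRGB expects for NV21 input; this check is just a sketch, not part of the original code:
// Sanity check (assumption): the converter below packs the planes in NV21 order
// (Y plane first, then interleaved V/U), so the buffer size must be w * h * 3 / 2.
int expectedSize = w * h * 3 / 2;
if (yuvByteArray.length != expectedSize) {
    android.util.Log.w("YuvConversion", "Unexpected YUV buffer size: " + yuvByteArray.length);
}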
YuvToByteArray.kt
/*
* Copyright 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.memex.Helper
import android.graphics.ImageFormat
import android.graphics.Rect
import android.media.Image
class YuvToByteArray {

    var pixelCount: Int = -1

    fun imageToByteArray(image: Image, outputBuffer: ByteArray) {
        assert(image.format == ImageFormat.YUV_420_888)

        val imageCrop = Rect(0, 0, image.width, image.height)
        val imagePlanes = image.planes

        imagePlanes.forEachIndexed { planeIndex, plane ->
            // How many values are read in input for each output value written
            // Only the Y plane has a value for every pixel, U and V have half the resolution i.e.
            //
            // Y Plane            U Plane    V Plane
            // ===============    =======    =======
            // Y Y Y Y Y Y Y Y    U U U U    V V V V
            // Y Y Y Y Y Y Y Y    U U U U    V V V V
            // Y Y Y Y Y Y Y Y    U U U U    V V V V
            // Y Y Y Y Y Y Y Y    U U U U    V V V V
            // Y Y Y Y Y Y Y Y
            // Y Y Y Y Y Y Y Y
            // Y Y Y Y Y Y Y Y
            val outputStride: Int

            // The index in the output buffer the next value will be written at
            // For Y it's zero, for U and V we start at the end of Y and interleave them i.e.
            //
            // First chunk        Second chunk
            // ===============    ===============
            // Y Y Y Y Y Y Y Y    V U V U V U V U
            // Y Y Y Y Y Y Y Y    V U V U V U V U
            // Y Y Y Y Y Y Y Y    V U V U V U V U
            // Y Y Y Y Y Y Y Y    V U V U V U V U
            // Y Y Y Y Y Y Y Y
            // Y Y Y Y Y Y Y Y
            // Y Y Y Y Y Y Y Y
            var outputOffset: Int

            when (planeIndex) {
                0 -> {
                    outputStride = 1
                    outputOffset = 0
                }
                1 -> {
                    outputStride = 2
                    // For NV21 format, U is in odd-numbered indices
                    outputOffset = pixelCount + 1
                }
                2 -> {
                    outputStride = 2
                    // For NV21 format, V is in even-numbered indices
                    outputOffset = pixelCount
                }
                else -> {
                    // Image contains more than 3 planes, something strange is going on
                    return@forEachIndexed
                }
            }

            val planeBuffer = plane.buffer
            val rowStride = plane.rowStride
            val pixelStride = plane.pixelStride

            // We have to divide the width and height by two if it's not the Y plane
            val planeCrop = if (planeIndex == 0) {
                imageCrop
            } else {
                Rect(
                    imageCrop.left / 2,
                    imageCrop.top / 2,
                    imageCrop.right / 2,
                    imageCrop.bottom / 2
                )
            }

            val planeWidth = planeCrop.width()
            val planeHeight = planeCrop.height()

            // Intermediate buffer used to store the bytes of each row
            val rowBuffer = ByteArray(plane.rowStride)

            // Size of each row in bytes
            val rowLength = if (pixelStride == 1 && outputStride == 1) {
                planeWidth
            } else {
                // Take into account that the stride may include data from pixels other than this
                // particular plane and row, and that could be between pixels and not after every
                // pixel:
                //
                // |---- Pixel stride ----|                    Row ends here --> |
                // | Pixel 1 | Other Data | Pixel 2 | Other Data | ... | Pixel N |
                //
                // We need to get (N-1) * (pixel stride bytes) per row + 1 byte for the last pixel
                (planeWidth - 1) * pixelStride + 1
            }

            for (row in 0 until planeHeight) {
                // Move buffer position to the beginning of this row
                planeBuffer.position(
                    (row + planeCrop.top) * rowStride + planeCrop.left * pixelStride
                )

                if (pixelStride == 1 && outputStride == 1) {
                    // When there is a single stride value for pixel and output, we can just copy
                    // the entire row in a single step
                    planeBuffer.get(outputBuffer, outputOffset, rowLength)
                    outputOffset += rowLength
                } else {
                    // When either pixel or output have a stride > 1 we must copy pixel by pixel
                    planeBuffer.get(rowBuffer, 0, rowLength)
                    for (col in 0 until planeWidth) {
                        outputBuffer[outputOffset] = rowBuffer[col * pixelStride]
                        outputOffset += outputStride
                    }
                }
            }
        }
    }
}
Function to get an ARGB bitmap from the YUV byte array:
public Bitmap createImageBitmap(byte[] bytes, Bitmap outputBitmap,
                                RenderScript renderScript,
                                ScriptIntrinsicYuvToRGB intrinsicYuvToRGB, Type typeYUV) {
    // Input allocation holds the flat NV21 byte array; output allocation is backed by the bitmap
    Allocation inputAllocation = Allocation.createSized(renderScript,
            typeYUV.getElement(), bytes.length);
    Allocation outputAllocation = Allocation.createFromBitmap(renderScript, outputBitmap);

    // Copy the YUV bytes in, run the YUV->RGB intrinsic, and copy the result back to the bitmap
    inputAllocation.copyFrom(bytes);
    intrinsicYuvToRGB.setInput(inputAllocation);
    intrinsicYuvToRGB.forEach(outputAllocation);
    outputAllocation.copyTo(outputBitmap);

    return outputBitmap;
}
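For completeness, here is one possible way to construct renderScript, intrinsicYuvToRGB, and typeYUV with the framework android.renderscript classes. This is only a sketch: these APIs are deprecated since Android 12, and the toolkit linked below is the recommended replacement. The names context, w, and h are assumed from the surrounding code.
import android.renderscript.Element;
import android.renderscript.RenderScript;
import android.renderscript.ScriptIntrinsicYuvToRGB;
import android.renderscript.Type;

// Sketch: build the helper objects once and reuse them for every frame.
RenderScript renderScript = RenderScript.create(context); // 'context' is assumed to be your Activity/Application context
ScriptIntrinsicYuvToRGB intrinsicYuvToRGB =
        ScriptIntrinsicYuvToRGB.create(renderScript, Element.U8_4(renderScript));
// The input Type describes a flat byte buffer large enough for one NV21 frame (w * h * 3 / 2 bytes)
Type typeYUV = new Type.Builder(renderScript, Element.U8(renderScript))
        .setX(w * h * 3 / 2)
        .create();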
How to use this:
// yuvByteArray and outputBitmap from the first code segment
// renderScript, intrinsicYuvToRGB, and typeYUV are provided here: https://github.com/android/renderscript-intrinsics-replacement-toolkit
Bitmap rgbBitmap = createImageBitmap(yuvByteArray, outputBitmap, renderScript, intrinsicYuvToRGB, typeYUV);
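If you would rather avoid RenderScript entirely, a rough alternative sketch is to wrap the NV21 byte array in android.graphics.YuvImage and decode it through JPEG. This path is lossy and slower, so treat it as a fallback rather than the main conversion:
// Sketch: NV21 byte array -> Bitmap without RenderScript (lossy, goes through JPEG)
YuvImage yuvImage = new YuvImage(yuvByteArray, ImageFormat.NV21, w, h, null);
ByteArrayOutputStream out = new ByteArrayOutputStream();
yuvImage.compressToJpeg(new Rect(0, 0, w, h), 100, out);
byte[] jpegBytes = out.toByteArray();
Bitmap rgbBitmap2 = BitmapFactory.decodeByteArray(jpegBytes, 0, jpegBytes.length);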