Down scale issue on NV21 -> ARGB -> NV21 conversion
I have to feed a YUV (NV21) byte array to a recognition solution, and I would like to downscale the preview frames to cut processing time.
Based on solutions gathered here and there on SO, I managed to get the conversion working at a 1:1 ratio, and the frame is recognized. But as soon as I scale the intermediate bitmap down, I get no result, even when I only shrink it to 95%.
Any help would be greatly appreciated.
So, every 400 ms I convert the preview frame asynchronously: I convert it to ARGB with RenderScript, scale it down, and then convert it back.
// Camera callback
@Override
public void onPreviewFrame(byte[] frame, Camera camera) {
    if (camera != null) {
        // Debounce
        if ((System.currentTimeMillis() - mStart) > 400) {
            mStart = System.currentTimeMillis();
            Camera.Size size = camera.getParameters().getPreviewSize();
            new FrameScaleAsyncTask(frame, size.width, size.height).execute();
        }
    }
    if (mCamera != null) {
        mCamera.addCallbackBuffer(mBuffer);
    }
}
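The callback above hands mBuffer back to the camera, which implies the buffer-based callback API. For context, here is a minimal sketch of that setup; the buffer size formula is standard for NV21, but the field names and placement are my assumptions, not code from the original post:
// Hypothetical setup for the callback-buffer pattern used above
Camera.Parameters params = mCamera.getParameters();
params.setPreviewFormat(ImageFormat.NV21); // NV21 is the default preview format
mCamera.setParameters(params);
Camera.Size previewSize = params.getPreviewSize();
// One NV21 frame occupies width * height * 12 / 8 bytes
int bufferSize = previewSize.width * previewSize.height
        * ImageFormat.getBitsPerPixel(ImageFormat.NV21) / 8;
mBuffer = new byte[bufferSize];
mCamera.addCallbackBuffer(mBuffer);
mCamera.setPreviewCallbackWithBuffer(this);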
// In FrameScaleAsyncTask
@Override
protected Void doInBackground(Void... params) {
    // Create YUV type for in-allocation
    Type yuvType = new Type.Builder(mRenderScript, Element.U8(mRenderScript))
            .setX(mFrame.length)
            .create();
    mAllocationIn = Allocation.createTyped(mRenderScript, yuvType, Allocation.USAGE_SCRIPT);
    // Create ARGB-8888 type for out-allocation
    Type rgbType = new Type.Builder(mRenderScript, Element.RGBA_8888(mRenderScript))
            .setX(mWidth)
            .setY(mHeight)
            .create();
    mAllocationOut = Allocation.createTyped(mRenderScript, rgbType, Allocation.USAGE_SCRIPT);
    // Copy frame data into in-allocation
    mAllocationIn.copyFrom(mFrame);
    // Set script input and fire!
    mScript.setInput(mAllocationIn);
    mScript.forEach(mAllocationOut);
    // Create a bitmap of camera preview size (see camera setup) and copy the out-allocation to it
    Bitmap bitmap = Bitmap.createBitmap(mWidth, mHeight, Bitmap.Config.ARGB_8888);
    mAllocationOut.copyTo(bitmap);
    // Scale bitmap down; 1.0 works, anything below (even 0.95) breaks recognition
    double scaleRatio = 1;
    Bitmap scaledBitmap = Bitmap.createScaledBitmap(
            bitmap,
            (int) (bitmap.getWidth() * scaleRatio),
            (int) (bitmap.getHeight() * scaleRatio),
            false
    );
    bitmap.recycle();
    int scaledWidth = scaledBitmap.getWidth();
    int scaledHeight = scaledBitmap.getHeight();
    // Put bitmap pixels into an int array
    int[] pixels = new int[scaledWidth * scaledHeight];
    scaledBitmap.getPixels(pixels, 0, scaledWidth, 0, 0, scaledWidth, scaledHeight);
    // Re-encode the ARGB pixels as NV21
    mFrame = new byte[pixels.length * 3 / 2];
    ImageHelper.encodeYUV420SPAlt(mFrame, pixels, scaledWidth, scaledHeight);
    return null;
}
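The task assumes mRenderScript and mScript are created elsewhere. Since the setInput()/forEach() calls match the ScriptIntrinsicYuvToRGB API, a plausible initialization would be the following; this is my guess at code the post doesn't show:
// Hypothetical initialization of the RenderScript objects used in doInBackground()
mRenderScript = RenderScript.create(context); // 'context' stands in for whatever Context is at hand
// Stock intrinsic that converts an NV21 allocation to RGBA_8888
mScript = ScriptIntrinsicYuvToRGB.create(mRenderScript, Element.U8_4(mRenderScript));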
The RGB-to-YUV algorithm (see: this answer):
public static void encodeYUV420SPAlt(byte[] yuv420sp, int[] argb, int width, int height) {
    final int frameSize = width * height;
    int yIndex = 0;
    int uvIndex = frameSize;
    int a, R, G, B, Y, U, V;
    int index = 0;
    for (int j = 0; j < height; j++) {
        for (int i = 0; i < width; i++) {
            a = (argb[index] & 0xff000000) >>> 24; // a is not used, obviously
            R = (argb[index] & 0xff0000) >> 16;
            G = (argb[index] & 0xff00) >> 8;
            B = (argb[index] & 0xff);
            // well-known RGB to YUV algorithm
            Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
            U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
            V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
            // NV21 has a plane of Y and an interleaved VU plane, each chroma value
            // sampled by a factor of 2: for every 4 Y pixels there are 1 V and 1 U.
            // Note the sampling is every other pixel AND every other scanline.
            yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
            if (j % 2 == 0 && index % 2 == 0) {
                yuv420sp[uvIndex++] = (byte) ((V < 0) ? 0 : ((V > 255) ? 255 : V));
                yuv420sp[uvIndex++] = (byte) ((U < 0) ? 0 : ((U > 255) ? 255 : U));
            }
            index++;
        }
    }
}
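One pitfall worth ruling out (my observation, not a conclusion from the thread): NV21 assumes even width and height. The encoder above writes 2 * ceil(width/2) * ceil(height/2) chroma bytes, but the buffer is sized width * height * 3 / 2, so an odd scaled dimension misaligns the planes and can even overflow the array. Clamping the scaled size to even values before createScaledBitmap() rules this out:
// Force even dimensions before scaling: NV21 chroma is subsampled 2x2,
// so odd sizes break the plane layout a consumer expects
int scaledWidth = ((int) (bitmap.getWidth() * scaleRatio)) & ~1;
int scaledHeight = ((int) (bitmap.getHeight() * scaleRatio)) & ~1;
Bitmap scaledBitmap = Bitmap.createScaledBitmap(bitmap, scaledWidth, scaledHeight, false);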
In the end I resized the image directly in C++ (as an OpenCV Mat). It was easier and faster.
Size size(correctedWidth, correctedHeight);
Mat dst;
resize(image, dst, size);
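For anyone staying on the Java side, here is a rough equivalent using the OpenCV Java bindings; the round trip through RGBA and back through encodeYUV420SPAlt() is my sketch, not the author's actual JNI code (OpenCV has no direct RGB-to-NV21 conversion):
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;

public static byte[] resizeNv21(byte[] nv21, int width, int height,
                                int dstWidth, int dstHeight) {
    // NV21 layout: a width x height Y plane followed by an interleaved VU plane
    Mat yuv = new Mat(height + height / 2, width, CvType.CV_8UC1);
    yuv.put(0, 0, nv21);
    // Decode to RGBA, resize, then pull the bytes back out
    Mat rgba = new Mat();
    Imgproc.cvtColor(yuv, rgba, Imgproc.COLOR_YUV2RGBA_NV21);
    Imgproc.resize(rgba, rgba, new Size(dstWidth, dstHeight));
    byte[] rgbaBytes = new byte[dstWidth * dstHeight * 4];
    rgba.get(0, 0, rgbaBytes);
    // Repack the RGBA bytes as ARGB ints and reuse the encoder shown above
    int[] pixels = new int[dstWidth * dstHeight];
    for (int i = 0; i < pixels.length; i++) {
        int r = rgbaBytes[i * 4] & 0xff;
        int g = rgbaBytes[i * 4 + 1] & 0xff;
        int b = rgbaBytes[i * 4 + 2] & 0xff;
        pixels[i] = 0xff000000 | (r << 16) | (g << 8) | b;
    }
    byte[] out = new byte[dstWidth * dstHeight * 3 / 2];
    ImageHelper.encodeYUV420SPAlt(out, pixels, dstWidth, dstHeight);
    return out;
}
Note that dstWidth and dstHeight should be even here too, for the reasons above.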