Custom byteArray data to WebRTC videoTrack
I need to use WebRTC for Android to send specific cropped (face) video to the videoChannel. I was able to manipulate the Camera1Session class to crop the face. For now, I am setting it on an ImageView.
listenForBytebufferFrames() of Camera1Session.java:
private void listenForBytebufferFrames() {
    this.camera.setPreviewCallbackWithBuffer(new PreviewCallback() {
        public void onPreviewFrame(byte[] data, Camera callbackCamera) {
            Camera1Session.this.checkIsOnCameraThread();
            if (callbackCamera != Camera1Session.this.camera) {
                Logging.e("Camera1Session", "Callback from a different camera. This should never happen.");
            } else if (Camera1Session.this.state != Camera1Session.SessionState.RUNNING) {
                Logging.d("Camera1Session", "Bytebuffer frame captured but camera is no longer running.");
            } else {
                mFrameProcessor.setNextFrame(data, callbackCamera);
                long captureTimeNs = TimeUnit.MILLISECONDS.toNanos(SystemClock.elapsedRealtime());
                if (!Camera1Session.this.firstFrameReported) {
                    int startTimeMs = (int) TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - Camera1Session.this.constructionTimeNs);
                    Camera1Session.camera1StartTimeMsHistogram.addSample(startTimeMs);
                    Camera1Session.this.firstFrameReported = true;
                }
                // Wrap the NV21 preview buffer and run it through the face detector.
                ByteBuffer byteBuffer1 = ByteBuffer.wrap(data);
                Frame outputFrame = new Frame.Builder()
                        .setImageData(byteBuffer1,
                                Camera1Session.this.captureFormat.width,
                                Camera1Session.this.captureFormat.height,
                                ImageFormat.NV21)
                        .setTimestampMillis(mFrameProcessor.mPendingTimeMillis)
                        .setId(mFrameProcessor.mPendingFrameId)
                        .setRotation(3)
                        .build();
                int w = outputFrame.getMetadata().getWidth();
                int h = outputFrame.getMetadata().getHeight();
                SparseArray<Face> detectedFaces = mDetector.detect(outputFrame);
                if (detectedFaces.size() > 0) {
                    Face face = detectedFaces.valueAt(0);
                    ByteBuffer byteBufferRaw = outputFrame.getGrayscaleImageData();
                    byte[] byteBuffer = byteBufferRaw.array();
                    // Crop the face region out of the frame via a JPEG round trip.
                    YuvImage yuvimage = new YuvImage(byteBuffer, ImageFormat.NV21, w, h, null);
                    ByteArrayOutputStream baos = new ByteArrayOutputStream();
                    // My crop logic to get face co-ordinates
                    yuvimage.compressToJpeg(new Rect(left, top, right, bottom), 80, baos);
                    final byte[] jpegArray = baos.toByteArray();
                    Bitmap bitmap = BitmapFactory.decodeByteArray(jpegArray, 0, jpegArray.length);
                    Activity currentActivity = getActivity();
                    if (currentActivity instanceof CallActivity) {
                        ((CallActivity) currentActivity).setBitmapToImageView(bitmap); // face on ImageView is set just fine
                    }
                    Camera1Session.this.events.onByteBufferFrameCaptured(Camera1Session.this, data, Camera1Session.this.captureFormat.width, Camera1Session.this.captureFormat.height, Camera1Session.this.getFrameOrientation(), captureTimeNs);
                    Camera1Session.this.camera.addCallbackBuffer(data);
                } else {
                    Camera1Session.this.events.onByteBufferFrameCaptured(Camera1Session.this, data, Camera1Session.this.captureFormat.width, Camera1Session.this.captureFormat.height, Camera1Session.this.getFrameOrientation(), captureTimeNs);
                    Camera1Session.this.camera.addCallbackBuffer(data);
                }
            }
        }
    });
}
jpegArray is the final byteArray that I need to stream via WebRTC, and I tried it like this:
Camera1Session.this.events.onByteBufferFrameCaptured(Camera1Session.this, jpegArray, (int) face.getWidth(), (int) face.getHeight(), Camera1Session.this.getFrameOrientation(), captureTimeNs);
Camera1Session.this.camera.addCallbackBuffer(jpegArray);
Setting them like this gives me the following error:
../../webrtc/sdk/android/src/jni/androidvideotracksource.cc line 82
Check failed: length >= width * height + 2 * uv_width * ((height + 1) / 2) (2630 vs. 460800)
I assume this is because androidvideotracksource does not get the byteArray length it expects, since the frame is now cropped.
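For reference, the failing check is just the minimum NV21 buffer size for the reported dimensions; here is a quick back-of-the-envelope sketch of mine, reusing the formula from the error message:
// Sketch: minimum buffer length androidvideotracksource accepts for an NV21 frame.
static int minNv21Length(int width, int height) {
    int uvWidth = (width + 1) / 2;
    return width * height + 2 * uvWidth * ((height + 1) / 2);
}
// minNv21Length(640, 480) == 460800, which matches the expected value in the error,
// while the cropped JPEG was only 2630 bytes, so the check can never pass.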
Can someone point me in the right direction on how to accomplish this? Is this the correct way/place to manipulate the data and feed it into the videoTrack?
EDIT: A Bitmap made from byteArray data, unlike the one made from byteArray jpegArray, does not give me a camera preview on the ImageView. Maybe that is because they are packed differently?
WebRTC in particular, and video streaming in general, presume that the video has fixed dimensions. If you want to crop the detected face, your options are either to pad the cropped image, e.g. with black pixels (WebRTC does not use transparency), and crop the video on the receiver side, or, if you are not in control of the receiver, to resize the cropped region to fill the expected width * height frame (you should also keep the expected aspect ratio).
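A minimal sketch of the padding option, assuming you already have the cropped face as a Bitmap and know the fixed frame size (the result would still have to be converted back to NV21 before it reaches WebRTC; uses android.graphics.Bitmap, Canvas, Color and RectF):
// Sketch only: letterbox a cropped face into a fixed-size black frame, keeping aspect ratio.
private static Bitmap padToFrame(Bitmap faceBitmap, int frameWidth, int frameHeight) {
    Bitmap frame = Bitmap.createBitmap(frameWidth, frameHeight, Bitmap.Config.ARGB_8888);
    Canvas canvas = new Canvas(frame);
    canvas.drawColor(Color.BLACK); // WebRTC has no alpha channel, so pad with black
    // Uniform scale so the face fits inside the frame without distortion.
    float scale = Math.min((float) frameWidth / faceBitmap.getWidth(),
                           (float) frameHeight / faceBitmap.getHeight());
    float dstW = faceBitmap.getWidth() * scale;
    float dstH = faceBitmap.getHeight() * scale;
    float left = (frameWidth - dstW) / 2f;
    float top = (frameHeight - dstH) / 2f;
    canvas.drawBitmap(faceBitmap, null, new RectF(left, top, left + dstW, top + dstH), null);
    return frame;
}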
Note that the JPEG compress/decompress that you use to crop the original is rather inefficient. Some other options can be found in Image crop and resize in Android.
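For illustration only (my own sketch, not taken from the linked post), the face region can also be cut straight out of the NV21 buffer, which avoids the JPEG round trip entirely; it assumes left, top and the crop size are all even so the crop stays aligned to NV21's 2x2 chroma grid:
// Sketch: crop an NV21 frame instead of the compressToJpeg/decodeByteArray round trip.
private static byte[] cropNV21(byte[] src, int srcWidth, int srcHeight,
                               int left, int top, int cropWidth, int cropHeight) {
    byte[] dst = new byte[cropWidth * cropHeight * 3 / 2];
    // Copy the Y plane row by row.
    for (int row = 0; row < cropHeight; row++) {
        System.arraycopy(src, (top + row) * srcWidth + left, dst, row * cropWidth, cropWidth);
    }
    // Copy the interleaved VU plane (one VU row covers two Y rows).
    int srcUvStart = srcWidth * srcHeight;
    int dstUvStart = cropWidth * cropHeight;
    for (int row = 0; row < cropHeight / 2; row++) {
        System.arraycopy(src, srcUvStart + (top / 2 + row) * srcWidth + left,
                         dst, dstUvStart + row * cropWidth, cropWidth);
    }
    return dst;
}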
Could we use WebRTC's DataChannel to exchange custom data, i.e. the cropped face "image" in your case, and do the corresponding computation at the receiving end using any third-party library such as OpenGL? The reason I suggest this is that the WebRTC video feed received from the channel is a real-time stream, not a byte array. By its inherent architecture, WebRTC video is not meant for cropping video. If we want to crop or enhance the video, we have to use an AR library to do that job.
We can always leverage WebRTC's data channel to exchange custom data. Using the video channel for this is not recommended, because it is a real-time stream, not a bytearray. Please revert in case of any queries.
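As a rough sketch of that suggestion, using the org.webrtc DataChannel API (peerConnection, the per-frame jpegArray and remoteChannel are assumed to exist; remoteChannel stands for the DataChannel handed to the remote peer's PeerConnection.Observer, and large JPEGs may need chunking since SCTP messages have practical size limits):
// Sketch: ship each cropped face JPEG over a WebRTC data channel instead of the video track.
// Uses org.webrtc.DataChannel / PeerConnection and java.nio.ByteBuffer.
DataChannel faceChannel = peerConnection.createDataChannel("face-frames", new DataChannel.Init());

// Sender side, once per cropped frame:
faceChannel.send(new DataChannel.Buffer(ByteBuffer.wrap(jpegArray), true /* binary */));

// Remote side: the channel arrives via PeerConnection.Observer#onDataChannel;
// register an observer on it and decode the bytes in onMessage.
remoteChannel.registerObserver(new DataChannel.Observer() {
    @Override public void onBufferedAmountChange(long previousAmount) {}
    @Override public void onStateChange() {}
    @Override public void onMessage(DataChannel.Buffer buffer) {
        byte[] bytes = new byte[buffer.data.remaining()];
        buffer.data.get(bytes);
        // e.g. BitmapFactory.decodeByteArray(bytes, 0, bytes.length) and render it.
    }
});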
Well, it turned out to be a problem with how the original byte[] data is packed versus how byte[] jpegArray is packed. Changing the packing and scaling it the way AlexCohn suggested worked for me. I found help for the packing in another post on Stack Overflow. Here is the code for it:
private byte[] getNV21(int left, int top, int inputWidth, int inputHeight, Bitmap scaled) {
    int[] argb = new int[inputWidth * inputHeight];
    scaled.getPixels(argb, 0, inputWidth, left, top, inputWidth, inputHeight);
    byte[] yuv = new byte[inputWidth * inputHeight * 3 / 2];
    encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
    scaled.recycle();
    return yuv;
}

private void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
    final int frameSize = width * height;
    int yIndex = 0;
    int uvIndex = frameSize;
    int a, R, G, B, Y, U, V;
    int index = 0;
    for (int j = 0; j < height; j++) {
        for (int i = 0; i < width; i++) {
            a = (argb[index] & 0xff000000) >> 24; // a is not used obviously
            R = (argb[index] & 0xff0000) >> 16;
            G = (argb[index] & 0xff00) >> 8;
            B = (argb[index] & 0xff) >> 0;
            // well known RGB to YUV algorithm
            Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
            U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
            V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
            // NV21 has a plane of Y and interleaved planes of VU each sampled by a factor of 2
            // meaning for every 4 Y pixels there are 1 V and 1 U. Note the sampling is every other
            // pixel AND every other scanline.
            yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
            if (j % 2 == 0 && index % 2 == 0) {
                yuv420sp[uvIndex++] = (byte) ((V < 0) ? 0 : ((V > 255) ? 255 : V));
                yuv420sp[uvIndex++] = (byte) ((U < 0) ? 0 : ((U > 255) ? 255 : U));
            }
            index++;
        }
    }
}
I pass this byte[] data to onByteBufferFrameCaptured and the callback:
Camera1Session.this.events.onByteBufferFrameCaptured(
        Camera1Session.this,
        data,
        w,
        h,
        Camera1Session.this.getFrameOrientation(),
        captureTimeNs);
Camera1Session.this.camera.addCallbackBuffer(data);
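(For the size check in androidvideotracksource to pass, w and h here presumably have to match the dimensions the NV21 buffer was actually built for, i.e. the inputWidth and inputHeight passed to getNV21().)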
Before that, I had to scale the bitmap, which is quite simple:
int width = bitmapToScale.getWidth();
int height = bitmapToScale.getHeight();
Matrix matrix = new Matrix();
// Cast to float so the scale factors are not truncated by integer division.
matrix.postScale(newWidth / (float) width, newHeight / (float) height);
Bitmap scaledBitmap = Bitmap.createBitmap(bitmapToScale, 0, 0, bitmapToScale.getWidth(), bitmapToScale.getHeight(), matrix, true);
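If it helps, the same scaling can also be done with the built-in helper (assuming newWidth and newHeight are ints):
// Equivalent one-liner using Android's built-in scaler with filtering enabled.
Bitmap scaledBitmap = Bitmap.createScaledBitmap(bitmapToScale, newWidth, newHeight, true);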