Android org.webrtc.VideoRenderer.I420Frame arrays from an image
I kept hoping some code for this would turn up on the Internet, but found nothing ;)
I am running this GitHub example.
The incoming WebRTC I420Frame object seems to hold 3 yuvPlanes
arrays.
A typical Android camera app receives the PreviewCallback.onPreviewFrame byte[] as a single byte array.
My job is to stream an image as I420 at a regular interval.
Can somebody help me with how to generate the I420Frame yuvPlanes from a single byte[] array, such as a JPEG/PNG file?
It is pretty critical. All answers appreciated.
PreviewCallback.onPreviewFrame()
will never return a JPEG or PNG stream. You should check your camera's getSupportedPreviewFormats()
list (note that this may differ between the front and rear cameras). You are guaranteed to have NV21 in this list. If you are lucky, you can choose YV12, available since API level 12 (note that some devices, e.g. the Amazon Fire HD (2012), lie about this and cannot actually deliver a YV12 stream).
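A minimal sketch of that capability check (plain android.hardware.Camera API; choosePreviewFormat is just an illustrative helper name, not part of WebRTC):

import android.graphics.ImageFormat;
import android.hardware.Camera;
import java.util.List;

// Prefer YV12 when the camera claims to support it, otherwise fall back
// to NV21, which every camera is required to offer.
static int choosePreviewFormat(Camera camera) {
    Camera.Parameters params = camera.getParameters();
    List<Integer> formats = params.getSupportedPreviewFormats();
    int format = formats.contains(ImageFormat.YV12) ? ImageFormat.YV12
                                                    : ImageFormat.NV21; // always present
    params.setPreviewFormat(format);
    camera.setParameters(params);
    return format;
}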
Constructing an I420Frame from a YV12 byte array is easy:
private VideoRenderer.I420Frame mFrame;
void onPreviewFrame(byte[] yv12_data, Camera camera) {
    if (mFrame == null) {
        Camera.Parameters params = camera.getParameters(); // this is an expensive call, don't repeat it on every frame!
        assert(params.getPreviewFormat() == ImageFormat.YV12);
        int width = params.getPreviewSize().width;
        int stride_y = 16 + ((width-1)/16)*16;       // Y stride: width rounded up to a multiple of 16
        int stride_uv = 16 + ((stride_y/2-1)/16)*16; // chroma stride, per the YV12 layout rules
        int height = params.getPreviewSize().height;
        mFrame = new VideoRenderer.I420Frame(width, height, 0, new int[]{stride_y, stride_uv, stride_uv}, new ByteBuffer[3], 0);
    }
    // YV12 stores its planes in the order Y, V, U; each chroma plane is stride_uv*height/2 bytes.
    mFrame.yuvPlanes[0] = ByteBuffer.wrap(yv12_data, 0, mFrame.yuvStrides[0]*mFrame.height); // Y
    mFrame.yuvPlanes[1] = ByteBuffer.wrap(yv12_data, mFrame.yuvStrides[0]*mFrame.height + mFrame.yuvStrides[2]*mFrame.height/2, mFrame.yuvStrides[1]*mFrame.height/2); // U (follows the V plane)
    mFrame.yuvPlanes[2] = ByteBuffer.wrap(yv12_data, mFrame.yuvStrides[0]*mFrame.height, mFrame.yuvStrides[2]*mFrame.height/2); // V (comes right after Y)
    // ... do something with the frame
}
For NV21, the U and V planes must be allocated and filled separately:
private VideoRenderer.I420Frame mFrame;
void onPreviewFrame(byte[] nv21_data, Camera camera) {
    if (mFrame == null) {
        Camera.Parameters params = camera.getParameters(); // this is an expensive call, don't repeat it on every frame!
        assert(params.getPreviewFormat() == ImageFormat.NV21);
        int width = params.getPreviewSize().width;
        int height = params.getPreviewSize().height;
        mFrame = new VideoRenderer.I420Frame(width, height, 0, new int[]{width, width/2, width/2}, new ByteBuffer[3], 0);
        mFrame.yuvPlanes[1] = ByteBuffer.wrap(new byte[width*height/4]);
        mFrame.yuvPlanes[2] = ByteBuffer.wrap(new byte[width*height/4]);
    }
    mFrame.yuvPlanes[0] = ByteBuffer.wrap(nv21_data, 0, mFrame.width*mFrame.height); // Y
    // NV21 interleaves the chroma samples as VUVU...; de-interleave them into the U and V planes.
    for (int to = 0, from = mFrame.width*mFrame.height; from < mFrame.width*mFrame.height*3/2; to++, from += 2) {
        mFrame.yuvPlanes[1].put(to, nv21_data[from+1]); // U
        mFrame.yuvPlanes[2].put(to, nv21_data[from]);   // V
    }
    // ... do something with the frame
}
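If your source really is a single byte[] holding a JPEG or PNG, as in the question, there is no shortcut: decode it to a Bitmap and convert RGB to YUV yourself. A minimal sketch, assuming the common BT.601 integer approximation and no stride padding (jpegToI420 is a hypothetical helper, not a WebRTC API):

import android.graphics.Bitmap;
import android.graphics.BitmapFactory;

// Decode a JPEG/PNG byte[] into tightly packed I420 planes.
// Chroma is subsampled 2x2 by taking the top-left pixel of each block.
static byte[][] jpegToI420(byte[] encoded) {
    Bitmap bmp = BitmapFactory.decodeByteArray(encoded, 0, encoded.length);
    int w = bmp.getWidth(), h = bmp.getHeight(); // assumed even for 4:2:0
    int[] argb = new int[w * h];
    bmp.getPixels(argb, 0, w, 0, 0, w, h);
    byte[] y = new byte[w * h];
    byte[] u = new byte[w * h / 4];
    byte[] v = new byte[w * h / 4];
    for (int row = 0; row < h; row++) {
        for (int col = 0; col < w; col++) {
            int p = argb[row * w + col];
            int r = (p >> 16) & 0xFF, g = (p >> 8) & 0xFF, b = p & 0xFF;
            y[row * w + col] = (byte) ((66*r + 129*g + 25*b + 128 >> 8) + 16);
            if (row % 2 == 0 && col % 2 == 0) { // one chroma sample per 2x2 block
                int i = (row / 2) * (w / 2) + col / 2;
                u[i] = (byte) ((-38*r - 74*g + 112*b + 128 >> 8) + 128);
                v[i] = (byte) ((112*r - 94*g - 18*b + 128 >> 8) + 128);
            }
        }
    }
    return new byte[][]{ y, u, v };
}

The three planes can then be wrapped with ByteBuffer.wrap() using strides { w, w/2, w/2 }, exactly as in the NV21 path above.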
The same approach, adapted here for C#-style (Xamarin) bindings with the preview size hard-coded to 640x480:

I420Frame onPreviewFrame(byte[] yv12_data)
{
    if (mFrame == null)
    {
        //Camera.Parameters params = camera.getParameters(); // this is an expensive call, don't repeat it on every frame!
        //assert(params.getPreviewFormat() == ImageFormat.YV12);
        int width = 640;
        int stride_y = 16 + ((width - 1) / 16) * 16;
        int stride_uv = 16 + ((stride_y / 2 - 1) / 16) * 16;
        int height = 480;
        mFrame = new VideoRenderer.I420Frame(width, height, new int[] { stride_y, stride_uv, stride_uv }, new ByteBuffer[3]);
    }
    // YV12 plane order is Y, V, U; each chroma plane is stride_uv * height / 2 bytes.
    mFrame.YuvPlanes[0] = ByteBuffer.Wrap(yv12_data, 0, mFrame.YuvStrides[0] * mFrame.Height); // Y
    mFrame.YuvPlanes[2] = ByteBuffer.Wrap(yv12_data, mFrame.YuvStrides[0] * mFrame.Height, mFrame.YuvStrides[2] * mFrame.Height / 2); // V
    mFrame.YuvPlanes[1] = ByteBuffer.Wrap(yv12_data, mFrame.YuvStrides[0] * mFrame.Height + mFrame.YuvStrides[2] * mFrame.Height / 2, mFrame.YuvStrides[1] * mFrame.Height / 2); // U
    return mFrame;
    // ... do something with the frame
}
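For completeness, here is how such a frame would typically be consumed; the renderFrame() callback shown matches my reading of the old libjingle-era org.webrtc Java API this question targets, so treat the signature as an assumption:

// Hypothetical consumer: rebuild the frame (using the onPreviewFrame()
// helper above) and hand it to a renderer callback, which takes
// ownership of the buffers for the duration of the draw.
void deliverFrame(VideoRenderer.Callbacks renderer, byte[] yv12_data) {
    VideoRenderer.I420Frame frame = onPreviewFrame(yv12_data);
    renderer.renderFrame(frame);
}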