How to decode an h264 byte stream on iOS 6+?
I am developing an iOS application to display an h264 video stream with AAC audio.
The stream is a custom one that does not use HLS or RTSP/RTMP, so I have my own code to handle receiving the data.
The data I receive comes in two parts: header data and frame data (audio and video). I would like to support iOS 6+, but will settle for a higher minimum version if I have to.
My initial idea was to convert the frame data from a byte array to a UIImage and then continuously update a UIImageView with new frames. The problem with this is that the frames still have to be decoded first.
I looked at ffmpeg, but every example I have seen requires either a URL or a local file, which does not work for me. I have also read that there may be some licensing issues when using ffmpeg.
I also looked at openh264. I think that could be an option, but since I am developing for iOS, I would still run into those licensing issues.
Edit:
I managed to get this working on iOS 8+ using VideoToolbox.
My problem was that I receive more data from my stream than in the example.
I am still looking for a way to do this on iOS 6 and 7.
So my question is: how should I handle the decoding and displaying of my frames?
I eventually got this working with FFmpeg, without using the GPL license.
This is how I set it up:
I downloaded the FFmpeg iOS libraries from SourceForge. (You can also build them from scratch by downloading the build script from: https://github.com/kewlbear/FFmpeg-iOS-build-script)
In the code I added a check to see which OS version I am running on:
uint8_t *data = (unsigned char*)buf;
float version = [[[UIDevice currentDevice] systemVersion] floatValue];

if (version >= 8.0)
{
    [self receivedRawVideoFrame:data withSize:ret];
}
else if (version >= 6.0 && version < 8.0)
{
    [self altDecodeFrame:data withSize:ret isConfigured:configured];
}
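floatValue only looks at the leading numeric part of the version string (e.g. "7.1.2" becomes 7.1), which is fine for a major-version gate like this. If you ever want a stricter check, a numeric string comparison is one alternative; this is just a sketch, nothing in the rest of the code depends on it:

// Sketch of a stricter version check using a numeric string comparison
// (an alternative to floatValue; not required for the major-version gate above).
NSString *systemVersion = [[UIDevice currentDevice] systemVersion];
BOOL isAtLeastiOS8 = ([systemVersion compare:@"8.0" options:NSNumericSearch] != NSOrderedAscending);
BOOL isAtLeastiOS6 = ([systemVersion compare:@"6.0" options:NSNumericSearch] != NSOrderedAscending);

if (isAtLeastiOS8) {
    // VideoToolbox path
} else if (isAtLeastiOS6) {
    // FFmpeg path
}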
You can find the implementation of the VideoToolbox part there.
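For context, the piece that iOS 8+ path hinges on is building a CMVideoFormatDescription from the SPS and PPS NAL units. The following is only a sketch: the sps/pps pointers and sizes are assumed to point at the parameter sets without their 4-byte start codes, and error handling is trimmed.

#import <CoreMedia/CoreMedia.h>

// Minimal sketch (iOS 8+ path): create a CMVideoFormatDescription from raw SPS/PPS.
// `sps`/`pps` are assumed to point at the parameter sets WITHOUT their Annex B start codes.
- (CMVideoFormatDescriptionRef)createFormatDescriptionWithSPS:(const uint8_t *)sps
                                                      spsSize:(size_t)spsSize
                                                          pps:(const uint8_t *)pps
                                                      ppsSize:(size_t)ppsSize
{
    const uint8_t * const parameterSets[2] = { sps, pps };
    const size_t parameterSetSizes[2] = { spsSize, ppsSize };

    CMVideoFormatDescriptionRef formatDesc = NULL;
    OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(kCFAllocatorDefault,
                                                                          2,                 // number of parameter sets
                                                                          parameterSets,
                                                                          parameterSetSizes,
                                                                          4,                 // NALU length header size used for the samples
                                                                          &formatDesc);
    if (status != noErr) {
        NSLog(@"CMVideoFormatDescriptionCreateFromH264ParameterSets failed: %d", (int)status);
        return NULL;
    }
    // The caller owns the returned description (CFRelease it when done);
    // it can then be used with a VTDecompressionSession or an AVSampleBufferDisplayLayer.
    return formatDesc;
}

The FFmpeg path I use on iOS 6 and 7 is the altDecodeFrame method below.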
- (void)altDecodeFrame:(uint8_t *)frame_bytes withSize:(int)frameSize isConfigured:(Boolean)configured
{
    if (!configured) {
        uint8_t *header = NULL;

        // I know what my H.264 data source's NALUs look like, so I know the start code index is always 0.
        // If you don't know where it starts, you can use a loop similar to how I find the 2nd and 3rd start codes
        // (see the helper sketch after this method).
        int startCodeIndex = 0;
        int secondStartCodeIndex = 0;
        int thirdStartCodeIndex = 0;
        int fourthStartCodeIndex = 0;

        int nalu_type = (frame_bytes[startCodeIndex + 4] & 0x1F);

        // NALU type 7 is the SPS parameter NALU
        if (nalu_type == 7)
        {
            // find where the PPS start code begins (the 0x00 00 00 01 code),
            // from which we also get the length of the SPS
            for (int i = startCodeIndex + 4; i < startCodeIndex + 40; i++)
            {
                if (frame_bytes[i] == 0x00 && frame_bytes[i+1] == 0x00 && frame_bytes[i+2] == 0x00 && frame_bytes[i+3] == 0x01)
                {
                    secondStartCodeIndex = i;
                    _spsSize = secondStartCodeIndex;    // includes the 4-byte start code in the size
                    break;
                }
            }

            // find what the second NALU type is
            nalu_type = (frame_bytes[secondStartCodeIndex + 4] & 0x1F);
        }

        // type 8 is the PPS parameter NALU
        if (nalu_type == 8)
        {
            // find where the NALU after this one starts so we know how long the PPS parameter is
            for (int i = _spsSize + 4; i < _spsSize + 30; i++)
            {
                if (frame_bytes[i] == 0x00 && frame_bytes[i+1] == 0x00 && frame_bytes[i+2] == 0x00 && frame_bytes[i+3] == 0x01)
                {
                    thirdStartCodeIndex = i;
                    _ppsSize = thirdStartCodeIndex - _spsSize;
                    break;
                }
            }

            // allocate enough data to fit the SPS and PPS parameters into our data object
            header = malloc(_ppsSize + _spsSize);
            // copy in the actual SPS and PPS values, including their 4-byte start codes
            memcpy(header, &frame_bytes[0], _ppsSize + _spsSize);

            NSLog(@"refresh codec context");
            avcodec_close(self.codec_context);

            int result;
            // I know I have an H.264 stream, so that is the codec I look for
            AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
            self.codec_context = avcodec_alloc_context3(codec);

            if (header != NULL) {
                // set the extra data for decoding before opening the codec;
                // the codec context takes ownership of the buffer, so give it an
                // av_malloc'ed copy (with padding) instead of the malloc'ed header
                self.codec_context->extradata = av_malloc(_spsSize + _ppsSize + FF_INPUT_BUFFER_PADDING_SIZE);
                memcpy(self.codec_context->extradata, header, _spsSize + _ppsSize);
                self.codec_context->extradata_size = _spsSize + _ppsSize;
                self.codec_context->flags |= CODEC_FLAG_GLOBAL_HEADER;
                free(header);
            }

            // open codec
            result = avcodec_open2(self.codec_context, codec, NULL);
            if (result < 0) {
                NSLog(@"avcodec_open2 returned %i", result);
            }

            // allocate the picture data.
            // My frame data is in PIX_FMT_YUV420P format, but I will be converting that later on.
            avpicture_alloc(&_pictureData, PIX_FMT_RGB24, 1280, 720);

            // After my SPS and PPS data I receive a SEI NALU
            nalu_type = (frame_bytes[thirdStartCodeIndex + 4] & 0x1F);
        }

        // type 6 is the SEI NALU
        if (nalu_type == 6)
        {
            for (int i = _spsSize + _ppsSize + 4; i < _spsSize + _ppsSize + 30; i++)
            {
                if (frame_bytes[i] == 0x00 && frame_bytes[i+1] == 0x00 && frame_bytes[i+2] == 0x00 && frame_bytes[i+3] == 0x01)
                {
                    fourthStartCodeIndex = i;
                    _seiSize = fourthStartCodeIndex - (_spsSize + _ppsSize);
                    break;
                }
            }
            // do stuff here
            // [...]
            nalu_type = (frame_bytes[fourthStartCodeIndex + 4] & 0x1F);
        }
    }

    // I had some issues with a large build-up of memory, so I created an autoreleasepool
    @autoreleasepool {
        _frm = av_frame_alloc();

        int result;
        // fill the packet with the frame data
        av_init_packet(&_pkt);
        _pkt.data = frame_bytes;
        _pkt.size = frameSize;
        _pkt.flags = AV_PKT_FLAG_KEY;

        int got_frame;
        // decode the frame
        result = avcodec_decode_video2(self.codec_context, _frm, &got_frame, &_pkt);
        if (result < 0) {
            NSLog(@"avcodec_decode_video2 returned %i", result);
        }

        if (got_frame)
        {
            // Here we will convert from YUV420P to RGB24
            static int sws_flags = SWS_FAST_BILINEAR;
            struct SwsContext *img_convert_ctx = sws_getContext(self.codec_context->width, self.codec_context->height, self.codec_context->pix_fmt, 1280, 720, PIX_FMT_RGB24, sws_flags, NULL, NULL, NULL);

            sws_scale(img_convert_ctx, (const uint8_t* const*)_frm->data, _frm->linesize, 0, _frm->height, _pictureData.data, _pictureData.linesize);
            sws_freeContext(img_convert_ctx);

            self.lastImage = [self imageFromAVPicture:_pictureData width:_frm->width height:_frm->height];
            av_frame_unref(_frm);
        }

        if (!self.lastImage) {
            av_free_packet(&_pkt);
            av_frame_free(&_frm);
            return;
        }

        // Normally we render on the AVSampleBufferDisplayLayer, so hide that.
        // Add a UIImageView and display the image there.
        dispatch_sync(dispatch_get_main_queue(), ^{
            if (![[[self viewController] avSbdLayer] isHidden]) {
                [[[self viewController] avSbdLayer] setHidden:true];
                self.imageView = [[UIImageView alloc] initWithFrame:[[[self viewController] view] bounds]];
                [[[self viewController] view] addSubview:self.imageView];
            }
            [[self imageView] setImage:self.lastImage];
        });

        // free the allocated data
        av_free_packet(&_pkt);
        av_frame_free(&_frm);
    }
}
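As the comment at the top of altDecodeFrame says, you can scan for the start codes instead of hard-coding their positions. A small helper along these lines would do it (nextStartCode is a hypothetical name, not something the code above defines):

// Hypothetical helper: return the index of the next 0x00 00 00 01 start code
// at or after `from`, or -1 if none is found within `length` bytes.
static int nextStartCode(const uint8_t *bytes, int from, int length)
{
    for (int i = from; i + 3 < length; i++) {
        if (bytes[i] == 0x00 && bytes[i+1] == 0x00 && bytes[i+2] == 0x00 && bytes[i+3] == 0x01) {
            return i;
        }
    }
    return -1;
}

With it, secondStartCodeIndex becomes nextStartCode(frame_bytes, startCodeIndex + 4, frameSize), and the hard-coded + 40 / + 30 scan windows go away.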
This is how I create a UIImage from the AVPicture:
- (UIImage *)imageFromAVPicture:(AVPicture)pict width:(int)width height:(int)height {
    CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
    CFDataRef data = CFDataCreateWithBytesNoCopy(kCFAllocatorDefault, pict.data[0], pict.linesize[0] * height, kCFAllocatorNull);
    CGDataProviderRef provider = CGDataProviderCreateWithCFData(data);
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGImageRef cgImage = CGImageCreate(width,
                                       height,
                                       8,
                                       24,
                                       pict.linesize[0],
                                       colorSpace,
                                       bitmapInfo,
                                       provider,
                                       NULL,
                                       NO,
                                       kCGRenderingIntentDefault);
    CGColorSpaceRelease(colorSpace);

    UIImage *image = [UIImage imageWithCGImage:cgImage];
    CGImageRelease(cgImage);
    CGDataProviderRelease(provider);
    CFRelease(data);

    return image;
}
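One thing to keep in mind with this helper: CFDataCreateWithBytesNoCopy with kCFAllocatorNull wraps the AVPicture buffer without copying it, so the resulting UIImage keeps pointing at memory that the next sws_scale call overwrites. If that ever shows up as tearing or mixed frames, a copying variant is a one-line change; just a sketch, assuming the same RGB24 AVPicture layout:

// Sketch: copy the pixel bytes so the UIImage no longer references the reused AVPicture buffer.
CFDataRef data = CFDataCreate(kCFAllocatorDefault, pict.data[0], pict.linesize[0] * height);
// ...the rest of imageFromAVPicture stays the same, and CFRelease(data) still applies.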
If anyone has another (or better) solution, please let me know.