编码为 h264 无法使用 ffmpeg c api 发送一些帧
Encoding to h264 failed to send some frames using ffmpeg c api
使用 FFMPEG C API,我正在尝试将生成的图像推送为 MP4 格式。
当我逐帧推送时,复用在 avcodec_receive_packet(...) 上失败,返回 AVERROR(EAGAIN);从第一帧起都是如此,过了一会儿才开始把帧添加进视频,但最前面的那些帧没有被加入。
我的意思是:推送第 1 到 13 帧时出错;从第 14 帧到最后(第 36 帧)时,帧被添加进了视频,但视频里编码出的图像并不是第 14 到 36 帧的内容,而是第 1 到 23 帧的内容。
我不明白:这是帧率(我想要 12 fps)的问题,还是关键帧/中间帧(key/inter-frame)的问题?
下面是该 class 中各个部分的代码。
注意:
- m_filename = "C:\tmp\test.mp4"
- m_framerate = 12
- m_width = 1080
- m_height = 1080
ctor
// Allocate the temporary buffer that hold the our generated image in RGB.
picture_rgb24 = av_frame_alloc();
picture_rgb24->pts = 0;
picture_rgb24->data[0] = NULL;
picture_rgb24->linesize[0] = -1;
picture_rgb24->format = AV_PIX_FMT_RGB24;
picture_rgb24->height = m_height;
picture_rgb24->width = m_width;
if ((_ret = av_image_alloc(picture_rgb24->data, picture_rgb24->linesize, m_width, m_height, (AVPixelFormat)picture_rgb24->format, 24)) < 0)
throw ...
// Allocate the temporary frame that will be convert from RGB to YUV using ffmpeg api.
frame_yuv420 = av_frame_alloc();
frame_yuv420->pts = 0;
frame_yuv420->data[0] = NULL;
frame_yuv420->linesize[0] = -1;
frame_yuv420->format = AV_PIX_FMT_YUV420P;
frame_yuv420->width = m_height;
frame_yuv420->height = m_width;
if ((_ret = av_image_alloc(frame_yuv420->data, frame_yuv420->linesize, m_width, m_height, (AVPixelFormat)frame_yuv420->format, 32)) < 0)
throw ...
init_muxer(); // see below.
m_inited = true;
m_pts_increment = av_rescale_q(1, { 1, m_framerate }, ofmt_ctx->streams[0]->time_base);
// Context that convert the RGB24 to YUV420P format (using this instead of filter similar to GIF).
swsCtx = sws_getContext(m_width, m_height, AV_PIX_FMT_RGB24, m_width, m_height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, 0, 0, 0);
init_muxer:
AVOutputFormat* oformat = av_guess_format(nullptr, m_filename.c_str(), nullptr);
if (!oformat) throw ...
_ret = avformat_alloc_output_context2(&ofmt_ctx, oformat, nullptr, m_filename.c_str());
if (_ret) throw ...
AVCodec *codec = avcodec_find_encoder(oformat->video_codec);
if (!codec) throw ...
AVStream *stream = avformat_new_stream(ofmt_ctx, codec);
if (!stream) throw ...
o_codec_ctx = avcodec_alloc_context3(codec);
if (!o_codec_ctx) throw ...
stream->codecpar->codec_id = oformat->video_codec;
stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
stream->codecpar->width = m_width;
stream->codecpar->height = m_height;
stream->codecpar->format = AV_PIX_FMT_YUV420P;
stream->codecpar->bit_rate = 400000;
avcodec_parameters_to_context(o_codec_ctx, stream->codecpar);
o_codec_ctx->time_base = { 1, m_framerate };
// Using gop_size == 0, we want 'intra' frame, so no b-frame will be generated.
o_codec_ctx->max_b_frames = 0;
o_codec_ctx->gop_size = 0;
o_codec_ctx->b_quant_offset = 0;
//o_codec_ctx->framerate = { m_framerate , 1 };
if (stream->codecpar->codec_id == AV_CODEC_ID_H264)
av_opt_set(o_codec_ctx, "preset", "ultrafast", 0); // Lossless H.264
else if (stream->codecpar->codec_id == AV_CODEC_ID_H265)
av_opt_set(o_codec_ctx, "preset", "ultrafast", 0); // Lossless H.265
avcodec_parameters_from_context(stream->codecpar, o_codec_ctx);
if ((_ret = avcodec_open2(o_codec_ctx, codec, NULL)) < 0)
throw ...
if ((_ret = avio_open(&ofmt_ctx->pb, m_filename.c_str(), AVIO_FLAG_WRITE)) < 0)
throw ...
if ((_ret = avformat_write_header(ofmt_ctx, NULL)) < 0)
throw ...
av_dump_format(ofmt_ctx, 0, m_filename.c_str(), 1);
add_frame:
// loop to transfer our image format to ffmpeg one.
for (int y = 0; y < m_height; y++)
{
for (int x = 0; x < m_width; x++)
{
picture_rgb24->data[0][idx] = ...;
picture_rgb24->data[0][idx + 1] = ...;
picture_rgb24->data[0][idx + 2] = ...;
}
}
// From RGB to YUV
sws_scale(swsCtx, (const uint8_t * const *)picture_rgb24->data, picture_rgb24->linesize, 0, m_height, frame_yuv420->data, frame_yuv420->linesize);
// mux the YUV frame
muxing_one_frame(frame_yuv420);
// Increment the FPS of the picture for the next add-up to the buffer.
picture_rgb24->pts += m_pts_increment;
frame_yuv420->pts += m_pts_increment;
muxing_one_frame:
int ret = avcodec_send_frame(o_codec_ctx, frame);
AVPacket *pkt = av_packet_alloc();
av_init_packet(pkt);
while (ret >= 0) {
ret = avcodec_receive_packet(o_codec_ctx, pkt);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break;
av_write_frame(ofmt_ctx, pkt);
}
av_packet_unref(pkt);
close_file:
av_write_trailer(ofmt_ctx);
avio_close(ofmt_ctx->pb);
请阅读文档:https://ffmpeg.org/doxygen/3.3/group__lavc__encdec.html
avcodec_receive_packet 返回 AVERROR(EAGAIN) 并不是错误:编码器有内部延迟,它表示还需要更多输入帧才能产出数据包,此时应继续调用 avcodec_send_frame 送入下一帧。结束时必须调用 avcodec_send_frame(ctx, NULL) 进入冲刷(flush)模式,并循环调用 avcodec_receive_packet 直到返回 AVERROR_EOF,把缓存中的数据包全部写出,然后再写文件尾;否则最后被缓存的那些帧就会丢失。
或者参考以下这些已被多次回答过的相同问题:
ffmpeg avcodec_receive_packet return -11
FFmpeg - avcodec_receive_frame returns AVERROR(EAGAIN)
ffmpeg function avcodec_receive_frame always return EAGAIN error
使用 FFMPEG C API,我正在尝试将生成的图像推送为 MP4 格式。
当我逐帧推送时,复用在 avcodec_receive_packet(...) 上失败,返回 AVERROR(EAGAIN);从第一帧起都是如此,过了一会儿才开始把帧添加进视频,但最前面的那些帧没有被加入。
我的意思是:推送第 1 到 13 帧时出错;从第 14 帧到最后(第 36 帧)时,帧被添加进了视频,但视频里编码出的图像并不是第 14 到 36 帧的内容,而是第 1 到 23 帧的内容。
我不明白:这是帧率(我想要 12 fps)的问题,还是关键帧/中间帧(key/inter-frame)的问题?
下面是该 class 中各个部分的代码。注意:
- m_filename = "C:\tmp\test.mp4"
- m_framerate = 12
- m_width = 1080
- m_height = 1080
ctor
// Allocate the temporary buffer that hold the our generated image in RGB.
picture_rgb24 = av_frame_alloc();
picture_rgb24->pts = 0;
picture_rgb24->data[0] = NULL;
picture_rgb24->linesize[0] = -1;
picture_rgb24->format = AV_PIX_FMT_RGB24;
picture_rgb24->height = m_height;
picture_rgb24->width = m_width;
if ((_ret = av_image_alloc(picture_rgb24->data, picture_rgb24->linesize, m_width, m_height, (AVPixelFormat)picture_rgb24->format, 24)) < 0)
throw ...
// Allocate the temporary frame that will be convert from RGB to YUV using ffmpeg api.
frame_yuv420 = av_frame_alloc();
frame_yuv420->pts = 0;
frame_yuv420->data[0] = NULL;
frame_yuv420->linesize[0] = -1;
frame_yuv420->format = AV_PIX_FMT_YUV420P;
frame_yuv420->width = m_height;
frame_yuv420->height = m_width;
if ((_ret = av_image_alloc(frame_yuv420->data, frame_yuv420->linesize, m_width, m_height, (AVPixelFormat)frame_yuv420->format, 32)) < 0)
throw ...
init_muxer(); // see below.
m_inited = true;
m_pts_increment = av_rescale_q(1, { 1, m_framerate }, ofmt_ctx->streams[0]->time_base);
// Context that convert the RGB24 to YUV420P format (using this instead of filter similar to GIF).
swsCtx = sws_getContext(m_width, m_height, AV_PIX_FMT_RGB24, m_width, m_height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, 0, 0, 0);
init_muxer:
AVOutputFormat* oformat = av_guess_format(nullptr, m_filename.c_str(), nullptr);
if (!oformat) throw ...
_ret = avformat_alloc_output_context2(&ofmt_ctx, oformat, nullptr, m_filename.c_str());
if (_ret) throw ...
AVCodec *codec = avcodec_find_encoder(oformat->video_codec);
if (!codec) throw ...
AVStream *stream = avformat_new_stream(ofmt_ctx, codec);
if (!stream) throw ...
o_codec_ctx = avcodec_alloc_context3(codec);
if (!o_codec_ctx) throw ...
stream->codecpar->codec_id = oformat->video_codec;
stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
stream->codecpar->width = m_width;
stream->codecpar->height = m_height;
stream->codecpar->format = AV_PIX_FMT_YUV420P;
stream->codecpar->bit_rate = 400000;
avcodec_parameters_to_context(o_codec_ctx, stream->codecpar);
o_codec_ctx->time_base = { 1, m_framerate };
// Using gop_size == 0, we want 'intra' frame, so no b-frame will be generated.
o_codec_ctx->max_b_frames = 0;
o_codec_ctx->gop_size = 0;
o_codec_ctx->b_quant_offset = 0;
//o_codec_ctx->framerate = { m_framerate , 1 };
if (stream->codecpar->codec_id == AV_CODEC_ID_H264)
av_opt_set(o_codec_ctx, "preset", "ultrafast", 0); // Lossless H.264
else if (stream->codecpar->codec_id == AV_CODEC_ID_H265)
av_opt_set(o_codec_ctx, "preset", "ultrafast", 0); // Lossless H.265
avcodec_parameters_from_context(stream->codecpar, o_codec_ctx);
if ((_ret = avcodec_open2(o_codec_ctx, codec, NULL)) < 0)
throw ...
if ((_ret = avio_open(&ofmt_ctx->pb, m_filename.c_str(), AVIO_FLAG_WRITE)) < 0)
throw ...
if ((_ret = avformat_write_header(ofmt_ctx, NULL)) < 0)
throw ...
av_dump_format(ofmt_ctx, 0, m_filename.c_str(), 1);
add_frame:
// loop to transfer our image format to ffmpeg one.
for (int y = 0; y < m_height; y++)
{
for (int x = 0; x < m_width; x++)
{
picture_rgb24->data[0][idx] = ...;
picture_rgb24->data[0][idx + 1] = ...;
picture_rgb24->data[0][idx + 2] = ...;
}
}
// From RGB to YUV
sws_scale(swsCtx, (const uint8_t * const *)picture_rgb24->data, picture_rgb24->linesize, 0, m_height, frame_yuv420->data, frame_yuv420->linesize);
// mux the YUV frame
muxing_one_frame(frame_yuv420);
// Increment the FPS of the picture for the next add-up to the buffer.
picture_rgb24->pts += m_pts_increment;
frame_yuv420->pts += m_pts_increment;
muxing_one_frame:
int ret = avcodec_send_frame(o_codec_ctx, frame);
AVPacket *pkt = av_packet_alloc();
av_init_packet(pkt);
while (ret >= 0) {
ret = avcodec_receive_packet(o_codec_ctx, pkt);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break;
av_write_frame(ofmt_ctx, pkt);
}
av_packet_unref(pkt);
close_file:
av_write_trailer(ofmt_ctx);
avio_close(ofmt_ctx->pb);
请阅读文档:https://ffmpeg.org/doxygen/3.3/group__lavc__encdec.html
avcodec_receive_packet 返回 AVERROR(EAGAIN) 并不是错误:编码器有内部延迟,它表示还需要更多输入帧才能产出数据包,此时应继续调用 avcodec_send_frame 送入下一帧。结束时必须调用 avcodec_send_frame(ctx, NULL) 进入冲刷(flush)模式,并循环调用 avcodec_receive_packet 直到返回 AVERROR_EOF,把缓存中的数据包全部写出,然后再写文件尾;否则最后被缓存的那些帧就会丢失。
或者参考以下这些已被多次回答过的相同问题:
ffmpeg avcodec_receive_packet return -11
FFmpeg - avcodec_receive_frame returns AVERROR(EAGAIN)
ffmpeg function avcodec_receive_frame always return EAGAIN error