C++ FFmpeg create mp4 file
I am trying to create an mp4 video file using FFmpeg and C++, but the result is a broken file (the Windows player reports "Can't play ... 0xc00d36c4"). If I create a .h264 file instead, it plays fine with 'ffplay' and converts to mp4 successfully from the command line.
My code:
int main() {
char *filename = "tmp.mp4";
AVOutputFormat *fmt;
AVFormatContext *fctx;
AVCodecContext *cctx;
AVStream *st;
av_register_all();
avcodec_register_all();
//auto detect the output format from the name
fmt = av_guess_format(NULL, filename, NULL);
if (!fmt) {
cout << "Error av_guess_format()" << endl; system("pause"); exit(1);
}
if (avformat_alloc_output_context2(&fctx, fmt, NULL, filename) < 0) {
cout << "Error avformat_alloc_output_context2()" << endl; system("pause"); exit(1);
}
//stream creation + parameters
st = avformat_new_stream(fctx, 0);
if (!st) {
cout << "Error avformat_new_stream()" << endl; system("pause"); exit(1);
}
st->codecpar->codec_id = fmt->video_codec;
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->width = 352;
st->codecpar->height = 288;
st->time_base.num = 1;
st->time_base.den = 25;
AVCodec *pCodec = avcodec_find_encoder(st->codecpar->codec_id);
if (!pCodec) {
cout << "Error avcodec_find_encoder()" << endl; system("pause"); exit(1);
}
cctx = avcodec_alloc_context3(pCodec);
if (!cctx) {
cout << "Error avcodec_alloc_context3()" << endl; system("pause"); exit(1);
}
avcodec_parameters_to_context(cctx, st->codecpar);
cctx->bit_rate = 400000;
cctx->width = 352;
cctx->height = 288;
cctx->time_base.num = 1;
cctx->time_base.den = 25;
cctx->gop_size = 12;
cctx->pix_fmt = AV_PIX_FMT_YUV420P;
if (st->codecpar->codec_id == AV_CODEC_ID_H264) {
av_opt_set(cctx->priv_data, "preset", "ultrafast", 0);
}
if (fctx->oformat->flags & AVFMT_GLOBALHEADER) {
cctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
avcodec_parameters_from_context(st->codecpar, cctx);
av_dump_format(fctx, 0, filename, 1);
//OPEN FILE + WRITE HEADER
if (avcodec_open2(cctx, pCodec, NULL) < 0) {
cout << "Error avcodec_open2()" << endl; system("pause"); exit(1);
}
if (!(fmt->flags & AVFMT_NOFILE)) {
if (avio_open(&fctx->pb, filename, AVIO_FLAG_WRITE) < 0) {
cout << "Error avio_open()" << endl; system("pause"); exit(1);
}
}
if (avformat_write_header(fctx, NULL) < 0) {
cout << "Error avformat_write_header()" << endl; system("pause"); exit(1);
}
//CREATE DUMMY VIDEO
AVFrame *frame = av_frame_alloc();
frame->format = cctx->pix_fmt;
frame->width = cctx->width;
frame->height = cctx->height;
av_image_alloc(frame->data, frame->linesize, cctx->width, cctx->height, cctx->pix_fmt, 32);
AVPacket pkt;
double video_pts = 0;
for (int i = 0; i < 50; i++) {
video_pts = (double)cctx->time_base.num / cctx->time_base.den * 90 * i;
for (int y = 0; y < cctx->height; y++) {
for (int x = 0; x < cctx->width; x++) {
frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
if (y < cctx->height / 2 && x < cctx->width / 2) {
/* Cb and Cr */
frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
}
}
}
av_init_packet(&pkt);
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.pts = frame->pts = video_pts;
pkt.data = NULL;
pkt.size = 0;
pkt.stream_index = st->index;
if (avcodec_send_frame(cctx, frame) < 0) {
cout << "Error avcodec_send_frame()" << endl; system("pause"); exit(1);
}
if (avcodec_receive_packet(cctx, &pkt) == 0) {
//cout << "Write frame " << to_string((int) pkt.pts) << endl;
av_interleaved_write_frame(fctx, &pkt);
av_packet_unref(&pkt);
}
}
//DELAYED FRAMES
for (;;) {
avcodec_send_frame(cctx, NULL);
if (avcodec_receive_packet(cctx, &pkt) == 0) {
//cout << "-Write frame " << to_string((int)pkt.pts) << endl;
av_interleaved_write_frame(fctx, &pkt);
av_packet_unref(&pkt);
}
else {
break;
}
}
//FINISH
av_write_trailer(fctx);
if (!(fmt->flags & AVFMT_NOFILE)) {
if (avio_close(fctx->pb) < 0) {
cout << "Error avio_close()" << endl; system("pause"); exit(1);
}
}
av_frame_free(&frame);
avcodec_free_context(&cctx);
avformat_free_context(fctx);
system("pause");
return 0;
}
Program output:
Output #0, mp4, to 'tmp.mp4':
Stream #0:0: Video: h264, yuv420p, 352x288, q=2-31, 400 kb/s, 25 tbn
[libx264 @ 0000021c4a995ba0] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2 AVX FMA3 BMI2 AVX2
[libx264 @ 0000021c4a995ba0] profile Constrained Baseline, level 2.0
[libx264 @ 0000021c4a995ba0] 264 - core 152 r2851 ba24899 - H.264/MPEG-4 AVC codec - Copyleft 2003-2017 - http://www.videolan.org/x264.html - options: cabac=0 ref=1 deblock=0:0:0 analyse=0:0 me=dia subme=0 psy=1 psy_rd=1.00:0.00 mixed_ref=0 me_range=16 chroma_me=1 trellis=0 8x8dct=0 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=0 threads=6 lookahead_threads=1 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=0 weightp=0 keyint=12 keyint_min=1 scenecut=0 intra_refresh=0 rc=abr mbtree=0 bitrate=400 ratetol=1.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=0
[libx264 @ 0000021c4a995ba0] frame I:5 Avg QP: 7.03 size: 9318
[libx264 @ 0000021c4a995ba0] frame P:45 Avg QP: 4.53 size: 4258
[libx264 @ 0000021c4a995ba0] mb I I16..4: 100.0% 0.0% 0.0%
[libx264 @ 0000021c4a995ba0] mb P I16..4: 0.0% 0.0% 0.0% P16..4: 100.0% 0.0% 0.0% 0.0% 0.0% skip: 0.0%
[libx264 @ 0000021c4a995ba0] final ratefactor: 9.11
[libx264 @ 0000021c4a995ba0] coded y,uvDC,uvAC intra: 18.9% 21.8% 14.5% inter: 7.8% 100.0% 15.5%
[libx264 @ 0000021c4a995ba0] i16 v,h,dc,p: 4% 5% 5% 86%
[libx264 @ 0000021c4a995ba0] i8c dc,h,v,p: 2% 9% 6% 82%
[libx264 @ 0000021c4a995ba0] kb/s:264.68
If I try to play the mp4 file with 'ffplay', it prints:
[mov,mp4,m4a,3gp,3g2,mj2 @ 00000000026bf900] Could not find codec parameters for stream 0 (Video: h264 (avc1 / 0x31637661), none, 352x288, 138953 kb/s): unspecified pixel format
[h264 @ 00000000006c6ae0] non-existing PPS 0 referenced
[h264 @ 00000000006c6ae0] decode_slice_header error
[h264 @ 00000000006c6ae0] no frame!
I have spent a lot of time on this without managing to find the problem. What could be the cause?
Thanks for any help!
You need to populate the codec context's extradata and extradata_size fields.
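In terms of the raw fields, roughly the following has to end up on the stream before avformat_write_header() is called. This is only a sketch using the question's variable names; in practice, calling avcodec_parameters_from_context(st->codecpar, cctx) after avcodec_open2() performs the same copy for you.
// Sketch: after avcodec_open2() with AV_CODEC_FLAG_GLOBAL_HEADER set,
// cctx->extradata holds the encoder's SPS/PPS. The mp4 muxer reads them from
// st->codecpar, so they must be copied over before the header is written.
if (cctx->extradata_size > 0) {
    st->codecpar->extradata = (uint8_t *)av_mallocz(cctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
    memcpy(st->codecpar->extradata, cctx->extradata, cctx->extradata_size);
    st->codecpar->extradata_size = cctx->extradata_size;
}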
Working solution:
#include "VideoCapture.h"
#define VIDEO_TMP_FILE "tmp.h264"
#define FINAL_FILE_NAME "record.mp4"
using namespace std;
void VideoCapture::Init(int width, int height, int fpsrate, int bitrate) {
fps = fpsrate;
int err;
if (!(oformat = av_guess_format(NULL, VIDEO_TMP_FILE, NULL))) {
Debug("Failed to define output format", 0);
return;
}
if ((err = avformat_alloc_output_context2(&ofctx, oformat, NULL, VIDEO_TMP_FILE)) < 0) {
Debug("Failed to allocate output context", err);
Free();
return;
}
if (!(codec = avcodec_find_encoder(oformat->video_codec))) {
Debug("Failed to find encoder", 0);
Free();
return;
}
if (!(videoStream = avformat_new_stream(ofctx, codec))) {
Debug("Failed to create new stream", 0);
Free();
return;
}
if (!(cctx = avcodec_alloc_context3(codec))) {
Debug("Failed to allocate codec context", 0);
Free();
return;
}
videoStream->codecpar->codec_id = oformat->video_codec;
videoStream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
videoStream->codecpar->width = width;
videoStream->codecpar->height = height;
videoStream->codecpar->format = AV_PIX_FMT_YUV420P;
videoStream->codecpar->bit_rate = bitrate * 1000;
videoStream->time_base = { 1, fps };
avcodec_parameters_to_context(cctx, videoStream->codecpar);
cctx->time_base = { 1, fps };
cctx->max_b_frames = 2;
cctx->gop_size = 12;
if (videoStream->codecpar->codec_id == AV_CODEC_ID_H264) {
av_opt_set(cctx, "preset", "ultrafast", 0);
}
if (ofctx->oformat->flags & AVFMT_GLOBALHEADER) {
cctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
avcodec_parameters_from_context(videoStream->codecpar, cctx);
if ((err = avcodec_open2(cctx, codec, NULL)) < 0) {
Debug("Failed to open codec", err);
Free();
return;
}
if (!(oformat->flags & AVFMT_NOFILE)) {
if ((err = avio_open(&ofctx->pb, VIDEO_TMP_FILE, AVIO_FLAG_WRITE)) < 0) {
Debug("Failed to open file", err);
Free();
return;
}
}
if ((err = avformat_write_header(ofctx, NULL)) < 0) {
Debug("Failed to write header", err);
Free();
return;
}
av_dump_format(ofctx, 0, VIDEO_TMP_FILE, 1);
}
void VideoCapture::AddFrame(uint8_t *data) {
int err;
if (!videoFrame) {
videoFrame = av_frame_alloc();
videoFrame->format = AV_PIX_FMT_YUV420P;
videoFrame->width = cctx->width;
videoFrame->height = cctx->height;
if ((err = av_frame_get_buffer(videoFrame, 32)) < 0) {
Debug("Failed to allocate picture", err);
return;
}
}
if (!swsCtx) {
swsCtx = sws_getContext(cctx->width, cctx->height, AV_PIX_FMT_RGB24, cctx->width, cctx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, 0, 0, 0);
}
int inLinesize[1] = { 3 * cctx->width };
// From RGB to YUV
sws_scale(swsCtx, (const uint8_t * const *)&data, inLinesize, 0, cctx->height, videoFrame->data, videoFrame->linesize);
videoFrame->pts = frameCounter++;
if ((err = avcodec_send_frame(cctx, videoFrame)) < 0) {
Debug("Failed to send frame", err);
return;
}
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
if (avcodec_receive_packet(cctx, &pkt) == 0) {
pkt.flags |= AV_PKT_FLAG_KEY;
av_interleaved_write_frame(ofctx, &pkt);
av_packet_unref(&pkt);
}
}
void VideoCapture::Finish() {
//DELAYED FRAMES
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
for (;;) {
avcodec_send_frame(cctx, NULL);
if (avcodec_receive_packet(cctx, &pkt) == 0) {
av_interleaved_write_frame(ofctx, &pkt);
av_packet_unref(&pkt);
}
else {
break;
}
}
av_write_trailer(ofctx);
if (!(oformat->flags & AVFMT_NOFILE)) {
int err = avio_close(ofctx->pb);
if (err < 0) {
Debug("Failed to close file", err);
}
}
Free();
Remux();
}
void VideoCapture::Free() {
if (videoFrame) {
av_frame_free(&videoFrame);
}
if (cctx) {
avcodec_free_context(&cctx);
}
if (ofctx) {
avformat_free_context(ofctx);
}
if (swsCtx) {
sws_freeContext(swsCtx);
}
}
void VideoCapture::Remux() {
AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
int err;
if ((err = avformat_open_input(&ifmt_ctx, VIDEO_TMP_FILE, 0, 0)) < 0) {
Debug("Failed to open input file for remuxing", err);
goto end;
}
if ((err = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
Debug("Failed to retrieve input stream information", err);
goto end;
}
if ((err = avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, FINAL_FILE_NAME))) {
Debug("Failed to allocate output context", err);
goto end;
}
AVStream *inVideoStream = ifmt_ctx->streams[0];
AVStream *outVideoStream = avformat_new_stream(ofmt_ctx, NULL);
if (!outVideoStream) {
Debug("Failed to allocate output video stream", 0);
goto end;
}
outVideoStream->time_base = { 1, fps };
avcodec_parameters_copy(outVideoStream->codecpar, inVideoStream->codecpar);
outVideoStream->codecpar->codec_tag = 0;
if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
if ((err = avio_open(&ofmt_ctx->pb, FINAL_FILE_NAME, AVIO_FLAG_WRITE)) < 0) {
Debug("Failed to open output file", err);
goto end;
}
}
if ((err = avformat_write_header(ofmt_ctx, 0)) < 0) {
Debug("Failed to write header to output file", err);
goto end;
}
AVPacket videoPkt;
int ts = 0;
while (true) {
if ((err = av_read_frame(ifmt_ctx, &videoPkt)) < 0) {
break;
}
videoPkt.stream_index = outVideoStream->index;
videoPkt.pts = ts;
videoPkt.dts = ts;
videoPkt.duration = av_rescale_q(videoPkt.duration, inVideoStream->time_base, outVideoStream->time_base);
ts += videoPkt.duration;
videoPkt.pos = -1;
if ((err = av_interleaved_write_frame(ofmt_ctx, &videoPkt)) < 0) {
Debug("Failed to mux packet", err);
av_packet_unref(&videoPkt);
break;
}
av_packet_unref(&videoPkt);
}
av_write_trailer(ofmt_ctx);
end:
if (ifmt_ctx) {
avformat_close_input(&ifmt_ctx);
}
if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
avio_closep(&ofmt_ctx->pb);
}
if (ofmt_ctx) {
avformat_free_context(ofmt_ctx);
}
}
Header file:
#define VIDEOCAPTURE_API __declspec(dllexport)
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <cstring>
#include <math.h>
#include <string.h>
#include <algorithm>
#include <string>
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavcodec/avfft.h>
#include <libavdevice/avdevice.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
// libav resample
#include <libavutil/opt.h>
#include <libavutil/common.h>
#include <libavutil/channel_layout.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include <libavutil/time.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>
#include <libavutil/file.h>
// hwaccel
#include "libavcodec/vdpau.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_vdpau.h"
// lib swresample
#include <libswscale/swscale.h>
std::ofstream logFile;
void Log(std::string str) {
logFile.open("Logs.txt", std::ofstream::app);
logFile.write(str.c_str(), str.size());
logFile.close();
}
typedef void(*FuncPtr)(const char *);
FuncPtr ExtDebug;
char errbuf[32];
void Debug(std::string str, int err) {
Log(str + " " + std::to_string(err));
if (err < 0) {
av_strerror(err, errbuf, sizeof(errbuf));
str += errbuf;
}
Log(str);
ExtDebug(str.c_str());
}
void avlog_cb(void *, int level, const char * fmt, va_list vargs) {
static char message[8192];
vsnprintf_s(message, sizeof(message), fmt, vargs);
Log(message);
}
class VideoCapture {
public:
VideoCapture() {
oformat = NULL;
ofctx = NULL;
videoStream = NULL;
videoFrame = NULL;
swsCtx = NULL;
frameCounter = 0;
// Initialize libavcodec
av_register_all();
av_log_set_callback(avlog_cb);
}
~VideoCapture() {
Free();
}
void Init(int width, int height, int fpsrate, int bitrate);
void AddFrame(uint8_t *data);
void Finish();
private:
AVOutputFormat *oformat;
AVFormatContext *ofctx;
AVStream *videoStream;
AVFrame *videoFrame;
AVCodec *codec;
AVCodecContext *cctx;
SwsContext *swsCtx;
int frameCounter;
int fps;
void Free();
void Remux();
};
VIDEOCAPTURE_API VideoCapture* Init(int width, int height, int fps, int bitrate) {
VideoCapture *vc = new VideoCapture();
vc->Init(width, height, fps, bitrate);
return vc;
};
VIDEOCAPTURE_API void AddFrame(uint8_t *data, VideoCapture *vc) {
vc->AddFrame(data);
}
VIDEOCAPTURE_API void Finish(VideoCapture *vc) {
vc->Finish();
}
VIDEOCAPTURE_API void SetDebug(FuncPtr fp) {
ExtDebug = fp;
};
}
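For completeness, a minimal sketch of how a client might drive the exported functions above. Everything apart from Init/AddFrame/Finish/SetDebug is an assumption; AddFrame() expects tightly packed RGB24 data with the dimensions passed to Init().
// Hypothetical caller of the exported VideoCapture API (sketch only).
#include <vector>
#include "VideoCapture.h"
static void myLogger(const char *msg) { std::cout << msg << std::endl; }
int main() {
    SetDebug(myLogger);                         // set first: Debug() calls this pointer unconditionally
    const int w = 640, h = 480;
    VideoCapture *vc = Init(w, h, 30, 2000);    // 30 fps, ~2000 kb/s target
    std::vector<uint8_t> rgb(w * h * 3, 128);   // one packed RGB24 frame, mid gray
    for (int i = 0; i < 100; ++i)
        AddFrame(rgb.data(), vc);               // encode 100 identical frames
    Finish(vc);                                 // drain the encoder, write the trailer, remux to mp4
    return 0;                                   // note: the API exposes no matching destroy call
}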
I had exactly the same problem (error 0xc00d36c4). This thread and its answers helped me solve it. The fix was just the following change to the question's code:
if (ofctx->oformat->flags & AVFMT_GLOBALHEADER) {
cctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
if (fctx->oformat->flags & AVFMT_GLOBALHEADER) {
cctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
//avcodec_parameters_from_context(st->codecpar, cctx); //cut this line
av_dump_format(fctx, 0, filename, 1);
//OPEN FILE + WRITE HEADER
if (avcodec_open2(cctx, pCodec, NULL) < 0) {
cout << "Error avcodec_open2()" << endl; system("pause"); exit(1);
}
avcodec_parameters_from_context(st->codecpar, cctx); //move to here
if (!(fmt->flags & AVFMT_NOFILE)) {
if (avio_open(&fctx->pb, filename, AVIO_FLAG_WRITE) < 0) {
cout << "Error avio_open()" << endl; system("pause"); exit(1);
}
}
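One way to confirm that the reordering did its job, again using the question's variable names, is to inspect the stream's extradata just before the header is written (a debugging aid, not part of the original fix):
// Sanity check (sketch): place immediately before avformat_write_header(fctx, NULL).
// With the corrected ordering, avcodec_parameters_from_context() has copied the
// encoder's SPS/PPS into the stream, so extradata_size should now be non-zero.
if (st->codecpar->extradata_size == 0) {
    cout << "Warning: stream has no extradata, the mp4 header will be incomplete" << endl;
}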
The current solution is wrong. At best it is a workaround: it first encodes the video to an .h264 file and then remuxes it into an mp4 container, which is not necessary.
The real solution is to remove:
cctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
Here is my complete solution:
#include <iostream>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libavutil/time.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
}
AVFrame* videoFrame = nullptr;
AVCodecContext* cctx = nullptr;
SwsContext* swsCtx = nullptr;
int frameCounter = 0;
AVFormatContext* ofctx = nullptr;
AVOutputFormat* oformat = nullptr;
int fps = 30;
int width = 1920;
int height = 1080;
int bitrate = 2000;
static void pushFrame(uint8_t* data) {
int err;
if (!videoFrame) {
videoFrame = av_frame_alloc();
videoFrame->format = AV_PIX_FMT_YUV420P;
videoFrame->width = cctx->width;
videoFrame->height = cctx->height;
if ((err = av_frame_get_buffer(videoFrame, 32)) < 0) {
std::cout << "Failed to allocate picture" << err << std::endl;
return;
}
}
if (!swsCtx) {
swsCtx = sws_getContext(cctx->width, cctx->height, AV_PIX_FMT_RGB24, cctx->width,
cctx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, 0, 0, 0);
}
int inLinesize[1] = { 3 * cctx->width };
// From RGB to YUV
sws_scale(swsCtx, (const uint8_t* const*)&data, inLinesize, 0, cctx->height,
videoFrame->data, videoFrame->linesize);
videoFrame->pts = (1.0 / 30.0) * 90000 * (frameCounter++);
std::cout << videoFrame->pts << " " << cctx->time_base.num << " " <<
cctx->time_base.den << " " << frameCounter << std::endl;
if ((err = avcodec_send_frame(cctx, videoFrame)) < 0) {
std::cout << "Failed to send frame" << err << std::endl;
return;
}
AV_TIME_BASE;
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
pkt.flags |= AV_PKT_FLAG_KEY;
if (avcodec_receive_packet(cctx, &pkt) == 0) {
static int counter = 0;
if (counter == 0) {
FILE* fp = fopen("dump_first_frame1.dat", "wb");
fwrite(pkt.data, pkt.size, 1, fp);
fclose(fp);
}
std::cout << "pkt key: " << (pkt.flags & AV_PKT_FLAG_KEY) << " " <<
pkt.size << " " << (counter++) << std::endl;
uint8_t* size = ((uint8_t*)pkt.data);
std::cout << "first: " << (int)size[0] << " " << (int)size[1] <<
" " << (int)size[2] << " " << (int)size[3] << " " << (int)size[4] <<
" " << (int)size[5] << " " << (int)size[6] << " " << (int)size[7] <<
std::endl;
av_interleaved_write_frame(ofctx, &pkt);
av_packet_unref(&pkt);
}
}
static void finish() {
//DELAYED FRAMES
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
for (;;) {
avcodec_send_frame(cctx, NULL);
if (avcodec_receive_packet(cctx, &pkt) == 0) {
av_interleaved_write_frame(ofctx, &pkt);
av_packet_unref(&pkt);
}
else {
break;
}
}
av_write_trailer(ofctx);
if (!(oformat->flags & AVFMT_NOFILE)) {
int err = avio_close(ofctx->pb);
if (err < 0) {
std::cout << "Failed to close file" << err << std::endl;
}
}
}
static void free() {
if (videoFrame) {
av_frame_free(&videoFrame);
}
if (cctx) {
avcodec_free_context(&cctx);
}
if (ofctx) {
avformat_free_context(ofctx);
}
if (swsCtx) {
sws_freeContext(swsCtx);
}
}
int main(int argc, char* argv[])
{
av_register_all();
avcodec_register_all();
oformat = av_guess_format(nullptr, "test.mp4", nullptr);
if (!oformat)
{
std::cout << "can't create output format" << std::endl;
return -1;
}
//oformat->video_codec = AV_CODEC_ID_H265;
int err = avformat_alloc_output_context2(&ofctx, oformat, nullptr, "test.mp4");
if (err)
{
std::cout << "can't create output context" << std::endl;
return -1;
}
AVCodec* codec = nullptr;
codec = avcodec_find_encoder(oformat->video_codec);
if (!codec)
{
std::cout << "can't create codec" << std::endl;
return -1;
}
AVStream* stream = avformat_new_stream(ofctx, codec);
if (!stream)
{
std::cout << "can't find format" << std::endl;
return -1;
}
cctx = avcodec_alloc_context3(codec);
if (!cctx)
{
std::cout << "can't create codec context" << std::endl;
return -1;
}
stream->codecpar->codec_id = oformat->video_codec;
stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
stream->codecpar->width = width;
stream->codecpar->height = height;
stream->codecpar->format = AV_PIX_FMT_YUV420P;
stream->codecpar->bit_rate = bitrate * 1000;
avcodec_parameters_to_context(cctx, stream->codecpar);
cctx->time_base = (AVRational){ 1, 1 };
cctx->max_b_frames = 2;
cctx->gop_size = 12;
cctx->framerate = (AVRational){ fps, 1 };
//must remove the following
//cctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
if (stream->codecpar->codec_id == AV_CODEC_ID_H264) {
av_opt_set(cctx, "preset", "ultrafast", 0);
}
else if (stream->codecpar->codec_id == AV_CODEC_ID_H265)
{
av_opt_set(cctx, "preset", "ultrafast", 0);
}
avcodec_parameters_from_context(stream->codecpar, cctx);
if ((err = avcodec_open2(cctx, codec, NULL)) < 0) {
std::cout << "Failed to open codec" << err << std::endl;
return -1;
}
if (!(oformat->flags & AVFMT_NOFILE)) {
if ((err = avio_open(&ofctx->pb, "test.mp4", AVIO_FLAG_WRITE)) < 0) {
std::cout << "Failed to open file" << err << std::endl;
return -1;
}
}
if ((err = avformat_write_header(ofctx, NULL)) < 0) {
std::cout << "Failed to write header" << err << std::endl;
return -1;
}
av_dump_format(ofctx, 0, "test.mp4", 1);
uint8_t* frameraw = new uint8_t[1920 * 1080 * 4];
memset(frameraw, 222, 1920 * 1080 * 4);
for (int i = 0;i < 60;++i) {
pushFrame(frameraw);
}
delete[] frameraw;
finish();
free();
return 0;
}
Here is how I debugged the problem:
I started from the asker's own answer and compared the packets produced by direct encoding with the packets produced by remuxing. During the first encoding pass the first packet was missing the header, while after remuxing the first packet did contain it.
Then I noticed AV_CODEC_FLAG_GLOBAL_HEADER. Judging by its name, it writes a single global header and strips the headers from the individual packets.
So I removed it and everything worked. I have described my findings in more detail here.
The code, with a makefile, is here: https://github.com/shi-yan/videosamples
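As a side note, the first eight bytes printed by the "first:" line in pushFrame() can be interpreted with a small helper like the one below. This is a hedged sketch: it assumes the packets use H.264 Annex B framing (start code followed by a NAL header), which is what libx264 emits but is not stated explicitly above.
// Sketch: classify the first NAL unit in a packet, assuming H.264 Annex B framing
// (00 00 01 or 00 00 00 01 start codes) as produced by libx264.
// Without AV_CODEC_FLAG_GLOBAL_HEADER the first packet begins with the SPS (type 7)
// and PPS (type 8); with the flag set those are moved into extradata instead.
static void printFirstNalType(const AVPacket* pkt) {
    if (pkt->size < 5 || pkt->data[0] != 0 || pkt->data[1] != 0)
        return;                                  // not Annex B framed
    int offset = (pkt->data[2] == 1) ? 3 : 4;    // 3- or 4-byte start code
    int nalType = pkt->data[offset] & 0x1F;      // 5 = IDR slice, 6 = SEI, 7 = SPS, 8 = PPS
    std::cout << "first NAL type: " << nalType << std::endl;
}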