FFMPEG API Mp4 H264 Encoding/Muxing - unspecified pixel format

I am working on a C++ project using ffmpeg, and I have to generate an h264-encoded mp4 file.

My problem is that the file is generated, but VLC shows no image when playing it back, and analyzing it with ffprobe gives me the following error (full log below):

unspecified pixel format

ffprobe version N-93020-g3224d6691c Copyright (c) 2007-2019 the FFmpeg developers
  built with gcc 8.2.1 (GCC) 20181201
  configuration: --disable-static --enable-shared --enable-gpl --enable-version3 --enable-sdl2 --enable-fontconfig --enable-gnutls --enable-iconv --enable-libass --enable-libbluray --enable-libfreetype --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libopus --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libtheora --enable-libtwolame --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxml2 --enable-libzimg --enable-lzma --enable-zlib --enable-gmp --enable-libvidstab --enable-libvorbis --enable-libvo-amrwbenc --enable-libmysofa --enable-libspeex --enable-libxvid --enable-libaom --enable-libmfx --enable-amf --enable-ffnvcodec --enable-cuvid --enable-d3d11va --enable-nvenc --enable-nvdec --enable-dxva2 --enable-avisynth --enable-libopenmpt
  libavutil      56. 26.100 / 56. 26.100
  libavcodec     58. 44.100 / 58. 44.100
  libavformat    58. 26.100 / 58. 26.100
  libavdevice    58.  6.101 / 58.  6.101
  libavfilter     7. 48.100 /  7. 48.100
  libswscale      5.  4.100 /  5.  4.100
  libswresample   3.  4.100 /  3.  4.100
  libpostproc    55.  4.100 / 55.  4.100
[h264 @ 02a46240] non-existing PPS 0 referenced
[h264 @ 02a46240] decode_slice_header error
[h264 @ 02a46240] no frame!
...
[h264 @ 02a46240] non-existing PPS 0 referenced
[h264 @ 02a46240] decode_slice_header error
[h264 @ 02a46240] no frame!
[mov,mp4,m4a,3gp,3g2,mj2 @ 02a35380] decoding for stream 0 failed
[mov,mp4,m4a,3gp,3g2,mj2 @ 02a35380] Could not find codec parameters for stream 0 (Video: h264 (avc1 / 0x31637661), none, 352x288, 320 kb/s): unspecified pixel format
Consider increasing the value for the 'analyzeduration' and 'probesize' options
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from 'C:\Users\Fabrice\Desktop\video\Test.mp4':
  Metadata:
    major_brand     : isom
    minor_version   : 512
    compatible_brands: isomiso2avc1mp41
    encoder         : Lavf58.26.100
  Duration: 00:00:09.00, start: 0.000000, bitrate: 323 kb/s
    Stream #0:0(und): Video: h264 (avc1 / 0x31637661), none, 352x288, 320 kb/s, 25.11 fps, 25 tbr, 12800 tbn, 25600 tbc (default)
    Metadata:
      handler_name    : VideoHandler

Here is the code I use to generate my mp4 file. It is based on ffmpeg's example (see: FFMPEG Muxing sample), which I tried to adapt without using any deprecated functions. It works with webm/vp8 encoding, but not with mp4/h264.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libavformat/avformat.h>
#include <libavutil/error.h> 
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
}

#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "swscale.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")

/* 10 seconds stream duration */
#define STREAM_DURATION   10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

//#pragma warning(disable : 4996) // TODO: remove

static int sws_flags = SWS_BICUBIC;

/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *formatContext, AVCodec **codec, enum AVCodecID codecId, AVCodecContext **codecCtx)
{
    AVStream *stream;

    // Get the encoder codec
    *codec = avcodec_find_encoder(codecId);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
            avcodec_get_name(codecId));
        exit(1);
    }

    // Get the stream for codec
    stream = avformat_new_stream(formatContext, *codec);
    if (!stream) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    stream->id = formatContext->nb_streams - 1;

    (*codecCtx) = avcodec_alloc_context3(*codec);

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_VIDEO:
        stream->codecpar->codec_id = codecId;
        stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
        stream->codecpar->bit_rate = 400000;
        stream->codecpar->width = 352;
        stream->codecpar->height = 288;
        stream->codecpar->format = STREAM_PIX_FMT;
        stream->time_base = { 1, STREAM_FRAME_RATE };

        avcodec_parameters_to_context((*codecCtx), stream->codecpar);
        (*codecCtx)->gop_size = 12; /* emit one intra frame every twelve frames at most */
        (*codecCtx)->max_b_frames = 2;
        (*codecCtx)->time_base = { 1, STREAM_FRAME_RATE };
        if ((*codecCtx)->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            (*codecCtx)->mb_decision = 2;
        }
        break;

    default:
        break;
    }
    
    //if (stream->codecpar->codec_id == AV_CODEC_ID_H264) {
    //  av_opt_set(codecCtx, "preset", "ultrafast", 0);
    //}
    //(*codecCtx)->flags |= AV_CODEC_FLAG_LOW_DELAY;

    /* Some formats want stream headers to be separate. */
    if (formatContext->oformat->flags & AVFMT_GLOBALHEADER)
        (*codecCtx)->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;


    int ret = avcodec_parameters_from_context(stream->codecpar, (*codecCtx));
    if (ret < 0) {
        char error[255];
        av_strerror(ret, error, 255);
        fprintf(stderr, "avcodec_parameters_from_context returned (%d) - %s", ret, error);
        return NULL;
    }

    return stream;
}

/**************************************************************/
/* video output */

static AVFrame *frame_video;
static int frame_count;

static void open_video(AVCodec *codec, AVStream *stream, AVCodecContext *codecCtx)
{
    int ret;

    /* open the codec */
    ret = avcodec_open2(codecCtx, codec, NULL);
    if (ret < 0) {
        char error[255];
        av_strerror(ret, error, 255);
        fprintf(stderr, "Could not open video codec: %s\n", error);
        exit(1);
    }

    /* allocate and init a re-usable frame */
    frame_video = av_frame_alloc();
    if (!frame_video) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    frame_video->format = codecCtx->pix_fmt;
    frame_video->width = codecCtx->width;
    frame_video->height = codecCtx->height;

    ret = av_frame_get_buffer(frame_video, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate the video frame data\n");
        exit(1);
    }
}

/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
{
    int x, y, i;

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}

int timestamp = 0;
static void write_video_frame(AVFormatContext *formatContext, AVStream *stream, AVCodecContext *codecCtx)
{
    int ret;
    static struct SwsContext *sws_ctx;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* No more frames to compress. The codec has a latency of a few
         * frames if using B-frames, so we get the last frames by
         * passing the same picture again. */
    }
    else {
        if (codecCtx->pix_fmt != AV_PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
             * to the codec pixel format if needed */
            if (!sws_ctx) {
                sws_ctx = sws_getContext(codecCtx->width, codecCtx->height, AV_PIX_FMT_YUV420P,
                    codecCtx->width, codecCtx->height, codecCtx->pix_fmt,
                    sws_flags, NULL, NULL, NULL);
                if (!sws_ctx) {
                    fprintf(stderr, "Could not initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(frame_video, frame_count, codecCtx->width, codecCtx->height);
            sws_scale(sws_ctx, (const uint8_t * const *)frame_video->data, frame_video->linesize,
                0, codecCtx->height, frame_video->data, frame_video->linesize);
        }
        else {
            fill_yuv_image(frame_video, frame_count, codecCtx->width, codecCtx->height);
        }
    }

    frame_video->format = AV_PIX_FMT_YUV420P;
    frame_video->width = codecCtx->width;
    frame_video->height = codecCtx->height;

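    /* 0x0020 is the old AVFMT_RAWPICTURE flag, which recent FFmpeg headers
     * no longer define; this branch never triggers for the mp4 muxer. */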
    if (formatContext->oformat->flags & 0x0020) {
        /* Raw video case - directly store the picture in the packet */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index = stream->index;
        pkt.data = frame_video->data[0];
        pkt.size = sizeof(AVPicture);

        ret = av_interleaved_write_frame(formatContext, &pkt);
    }
    else {
        AVPacket pkt = { 0 };
        av_init_packet(&pkt);

        /* encode the image */
        fprintf(stderr, "\nFrame type : %c\n", av_get_picture_type_char(frame_video->pict_type));
        fprintf(stderr, "Frame pts: %lld, \n", frame_video->pts);
        fprintf(stderr, "Codec timebase: %d/%d\n", codecCtx->time_base.num, codecCtx->time_base.den);
        fprintf(stderr, "Stream timebase: %d/%d\n", stream->time_base.num, stream->time_base.den);
        fprintf(stderr, "Rescale: %lld\n\n", av_rescale_q(1, codecCtx->time_base, stream->time_base));
        ret = avcodec_send_frame(codecCtx, frame_video);
        if (ret < 0) {
            char error[255];
            av_strerror(ret, error, 255);
            fprintf(stderr, "Error encoding video frame: %s\n", error);
            exit(1);
        }
        /* AVERROR(EAGAIN) here means the encoder buffered the frame; a
         * complete implementation would loop on avcodec_receive_packet()
         * until it returns EAGAIN, and flush with a NULL frame at the end. */
        ret = avcodec_receive_packet(codecCtx, &pkt);
        if (!ret && pkt.size) {
            pkt.stream_index = stream->index;
            fprintf(stderr, "Packet flags : %d\n", pkt.flags);
            fprintf(stderr, "Packet pts: %lld\n", pkt.pts);
            fprintf(stderr, "Packet dts: %lld\n", pkt.dts);
            fprintf(stderr, "Packet duration: %lld\n", pkt.duration);
            fprintf(stderr, "Packet pos: %lld\n\n", pkt.pos);
            
            /* Write the compressed frame to the media file. */
            ret = av_interleaved_write_frame(formatContext, &pkt);
        }
        else {
            ret = 0;
        }
    }
    if (ret != 0) {
        char error[255];
        av_strerror(ret, error, 255);
        fprintf(stderr, "Error while writing video frame: %s\n", error);
        exit(1);
    }
    frame_count++;
}

static void close_video(AVFormatContext *oc, AVStream *st)
{
    av_frame_free(&frame_video); /* also frees the buffers from av_frame_get_buffer() */
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    // The output media
    char filename[100];
    const char *mediaFormat = "mp4"; AVCodecID mediaVideoCodec = AV_CODEC_ID_H264;
    //const char *mediaFormat="webm"; AVCodecID mediaVideoCodec = AV_CODEC_ID_VP8;
    AVOutputFormat *formatOut;
    AVFormatContext *formatCtx;

    // The video stream
    AVStream *stream_video;
    AVCodec *codec_video = nullptr;
    AVCodecContext *codecCtx_video = nullptr;
    double time_video = 0;

    // Return code
    int ret;

    strcpy_s(filename, "C:\\Test.");
    strcat_s(filename, mediaFormat);

    // allocate the output media context
    avformat_alloc_output_context2(&formatCtx, NULL, NULL, filename);
    if (!formatCtx) {
        return 1;
    }
    formatOut = formatCtx->oformat;

    // Add the video stream using H264 codec
    stream_video = NULL;
    stream_video = add_stream(formatCtx, &codec_video, mediaVideoCodec, &codecCtx_video);

    // Open video codec and allocate the necessary encode buffers
    if (stream_video)
        open_video(codec_video, stream_video, codecCtx_video);

    av_dump_format(formatCtx, 0, filename, 1);

    // Open the output media file, if needed
    if (!(formatOut->flags & AVFMT_NOFILE)) {
        ret = avio_open(&formatCtx->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            char error[255];
            av_strerror(ret, error, 255);
            fprintf(stderr, "Could not open '%s': %s\n", filename, error);
            return 1;
        }
    }

    // Write media header
    ret = avformat_write_header(formatCtx, NULL);
    if (ret < 0) {
        char error[255];
        av_strerror(ret, error, 255);
        fprintf(stderr, "Error occurred when opening output file: %s\n", error);
        return 1;
    }

    if (frame_video)
        frame_video->pts = 0;
    for (;;) {
        // Compute video time from last added video frame
        time_video = ((double)frame_video->pts) * av_q2d(stream_video->time_base);

        // Stop media if enough time
        if (!stream_video || time_video >= STREAM_DURATION)
            break;

        // Add a video frame
        write_video_frame(formatCtx, stream_video, codecCtx_video);

        // Increase frame pts according to time base
        frame_video->pts += av_rescale_q(1, codecCtx_video->time_base, stream_video->time_base);
    }

    // Write media trailer
    av_write_trailer(formatCtx);

    /* Close each codec. */
    if (stream_video)
        close_video(formatCtx, stream_video);

    if (!(formatOut->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(formatCtx->pb);

    /* free the stream */
    avformat_free_context(formatCtx);

    return 0;
}

What am I missing? Which part is giving me this error?

I had to comment out these lines to get it working:

if (formatContext->oformat->flags & AVFMT_GLOBALHEADER)
    (*codecCtx)->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
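
Note that the FFmpeg muxing example keeps AV_CODEC_FLAG_GLOBAL_HEADER set and instead calls avcodec_parameters_from_context() after avcodec_open2(), so the SPS/PPS extradata that x264 generates when the flag is set actually reaches the muxer and ends up in the mp4's avcC box. A minimal sketch of that ordering, reusing the names from the code above:

// Sketch: set the flag before opening the encoder, copy the parameters after.
// Copying before avcodec_open2() leaves stream->codecpar->extradata empty,
// which is what produces "non-existing PPS 0 referenced" on playback.
if (formatContext->oformat->flags & AVFMT_GLOBALHEADER)
    codecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

ret = avcodec_open2(codecCtx, codec, NULL);   // encoder fills codecCtx->extradata here
if (ret >= 0)
    ret = avcodec_parameters_from_context(stream->codecpar, codecCtx);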

Here is the complete code with some corrections:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libavformat/avformat.h>
#include <libavutil/error.h> 
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
}

#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "swscale.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")

/* 100 seconds stream duration */
#define STREAM_DURATION   100.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

//#pragma warning(disable : 4996) // TODO: remove

static int sws_flags = SWS_BICUBIC;

static int __WritePacket(void* opaque, uint8_t* buf, int buf_size)
{
    FILE *f = (FILE *)opaque;
    
    fprintf(stderr, "written: %zu\n", fwrite(buf, sizeof(uint8_t), buf_size, f));
    fflush(f);

    return buf_size;
}

/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *formatContext, AVCodec **codec, enum AVCodecID codecId, AVCodecContext **codecCtx)
{
    AVStream *stream;

    // Get the encoder codec
    *codec = avcodec_find_encoder(codecId);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
            avcodec_get_name(codecId));
        exit(1);
    }

    // Get the stream for codec
    stream = avformat_new_stream(formatContext, *codec);
    if (!stream) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    stream->id = formatContext->nb_streams - 1;

    (*codecCtx) = avcodec_alloc_context3(*codec);

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_VIDEO:
        stream->codecpar->codec_id = codecId;
        stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
        stream->codecpar->bit_rate = 400000;
        stream->codecpar->width = 352;
        stream->codecpar->height = 288;
        stream->codecpar->format = STREAM_PIX_FMT;
        stream->codecpar->codec_tag = 0x31637661; // MKTAG('a','v','c','1')
        stream->codecpar->video_delay = 0;
        stream->time_base = { 1, STREAM_FRAME_RATE };

        avcodec_parameters_to_context((*codecCtx), stream->codecpar);
        (*codecCtx)->gop_size = 12; /* emit one intra frame every twelve frames at most */
        (*codecCtx)->max_b_frames = 2;
        (*codecCtx)->time_base = { 1, STREAM_FRAME_RATE };
        (*codecCtx)->framerate = { STREAM_FRAME_RATE, 1 };
        (*codecCtx)->pix_fmt = STREAM_PIX_FMT;
        if ((*codecCtx)->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            (*codecCtx)->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    // Setting this option makes the video stream unreadable.
//  if (formatContext->oformat->flags & AVFMT_GLOBALHEADER)
//      (*codecCtx)->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;


    int ret = avcodec_parameters_from_context(stream->codecpar, (*codecCtx));
    if (ret < 0) {
        char error[255];
        av_strerror(ret, error, 255);
        fprintf(stderr, "avcodec_parameters_from_context returned (%d) - %s", ret, error);
        return NULL;
    }

    return stream;
}

/**************************************************************/
/* video output */

static AVFrame *frame_video;
static int frame_count;

static void open_video(AVCodec *codec, AVStream *stream, AVCodecContext *codecCtx)
{
    int ret;

    /* open the codec */
    ret = avcodec_open2(codecCtx, codec, NULL);
    if (ret < 0) {
        char error[255];
        av_strerror(ret, error, 255);
        fprintf(stderr, "Could not open video codec: %s\n", error);
        exit(1);
    }

    /* allocate and init a re-usable frame */
    frame_video = av_frame_alloc();
    if (!frame_video) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    frame_video->format = codecCtx->pix_fmt;
    frame_video->width = codecCtx->width;
    frame_video->height = codecCtx->height;

    ret = av_frame_get_buffer(frame_video, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate the video frame data\n");
        exit(1);
    }
}

/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
{
    int x, y, i;

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}

int timestamp = 0;
static void write_video_frame(AVFormatContext *formatContext, AVStream *stream, AVCodecContext *codecCtx)
{
    int ret;
    static struct SwsContext *sws_ctx;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* No more frames to compress. The codec has a latency of a few
         * frames if using B-frames, so we get the last frames by
         * passing the same picture again. */
    }
    else {
        if (codecCtx->pix_fmt != STREAM_PIX_FMT) {
            /* as we only generate a YUV420P picture, we must convert it
             * to the codec pixel format if needed */
            if (!sws_ctx) {
                sws_ctx = sws_getContext(codecCtx->width, codecCtx->height, STREAM_PIX_FMT,
                    codecCtx->width, codecCtx->height, codecCtx->pix_fmt,
                    sws_flags, NULL, NULL, NULL);
                if (!sws_ctx) {
                    fprintf(stderr, "Could not initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(frame_video, frame_count, codecCtx->width, codecCtx->height);
            sws_scale(sws_ctx, (const uint8_t * const *)frame_video->data, frame_video->linesize,
                0, codecCtx->height, frame_video->data, frame_video->linesize);
        }
        else {
            fill_yuv_image(frame_video, frame_count, codecCtx->width, codecCtx->height);
        }
    }

    frame_video->format = STREAM_PIX_FMT;
    frame_video->width = codecCtx->width;
    frame_video->height = codecCtx->height;

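    /* 0x0020 is the old AVFMT_RAWPICTURE flag, which recent FFmpeg headers
     * no longer define; this branch never triggers for the mp4 muxer. */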
    if (formatContext->oformat->flags & 0x0020) {
        /* Raw video case - directly store the picture in the packet */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index = stream->index;
        pkt.data = frame_video->data[0];
        pkt.size = sizeof(AVPicture);

        ret = av_interleaved_write_frame(formatContext, &pkt);
    }
    else {
        AVPacket packet = { 0 };
        av_init_packet(&packet);

        /* encode the image */
        fprintf(stderr, "\nFrame type : %c\n", av_get_picture_type_char(frame_video->pict_type));
        fprintf(stderr, "Frame pts: %lld, \n", frame_video->pts);
        fprintf(stderr, "Codec timebase: %d/%d\n", codecCtx->time_base.num, codecCtx->time_base.den);
        fprintf(stderr, "Stream timebase: %d/%d\n", stream->time_base.num, stream->time_base.den);
        fprintf(stderr, "Rescale: %lld\n\n", av_rescale_q(1, codecCtx->time_base, stream->time_base));
        ret = avcodec_send_frame(codecCtx, frame_video);
        if (ret < 0) {
            char error[255];
            av_strerror(ret, error, 255);
            fprintf(stderr, "Error encoding video frame: %s\n", error);
            exit(1);
        }
        /* AVERROR(EAGAIN) here means the encoder buffered the frame; a
         * complete implementation would loop on avcodec_receive_packet()
         * until it returns EAGAIN, and flush with a NULL frame at the end
         * (see the drain-loop sketch after this listing). */
        ret = avcodec_receive_packet(codecCtx, &packet);
        if (!ret && packet.size) {
            packet.stream_index = stream->index;
            fprintf(stderr, "Packet flags : %d\n", packet.flags);
            fprintf(stderr, "Packet pts: %lld\n", packet.pts);
            fprintf(stderr, "Packet dts: %lld\n", packet.dts);
            fprintf(stderr, "Packet duration: %lld\n", packet.duration);
            fprintf(stderr, "Packet pos: %lld\n\n", packet.pos);

            /* Debug dump of the raw packet. Note that "wb" truncates the
             * file on every call, so only the last packet is kept. */
            FILE *f = nullptr;
            fopen_s(&f, "C:\\Users\\Fabrice\\Desktop\\video\\Test.h264", "wb");
            fwrite(packet.data, sizeof(uint8_t), packet.size, f);
            fclose(f);

            /* Write the compressed frame to the media file. */
            ret = av_interleaved_write_frame(formatContext, &packet);
        }
        else {
            ret = 0;
        }
    }
    if (ret != 0) {
        char error[255];
        av_strerror(ret, error, 255);
        fprintf(stderr, "Error while writing video frame: %s\n", error);
        exit(1);
    }
    frame_count++;
}

static void close_video(AVFormatContext *oc, AVStream *st)
{
    av_frame_free(&frame_video); /* also frees the buffers from av_frame_get_buffer() */
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{

    // The output media
    char filename[100];
    const char *mediaFormat = "mp4"; AVCodecID mediaVideoCodec = AV_CODEC_ID_H264;
    //const char *mediaFormat="webm"; AVCodecID mediaVideoCodec = AV_CODEC_ID_VP8;
    AVOutputFormat *formatOut;
    AVFormatContext *formatCtx;

    // The video stream
    AVStream *stream_video;
    AVCodec *codec_video = nullptr;
    AVCodecContext *codecCtx_video = nullptr;
    double time_video = 0;

    // Return code
    int ret;

    strcpy_s(filename, "C:\\Users\\Fabrice\\Desktop\\video\\Test.");
    strcat_s(filename, mediaFormat);

    remove("C:\\Users\\Fabrice\\Desktop\\video\\Test.h264");
    remove(filename);

    // allocate the output media context
    avformat_alloc_output_context2(&formatCtx, NULL, NULL, filename);
    if (!formatCtx) {
        return 1;
    }
    formatOut = formatCtx->oformat;

    // Add the video stream using H264 codec
    stream_video = NULL;
    stream_video = add_stream(formatCtx, &codec_video, mediaVideoCodec, &codecCtx_video);

    // Open video codec and allocate the necessary encode buffers
    if (stream_video)
        open_video(codec_video, stream_video, codecCtx_video);

    av_dump_format(formatCtx, 0, filename, 1);

    /*// Open the output media file, if needed
    if (!(formatOut->flags & AVFMT_NOFILE)) {
        ret = avio_open(&formatCtx->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            char error[255];
            av_strerror(ret, error, 255);
            fprintf(stderr, "Could not open '%s': %s\n", filename, error);
            return 1;
        }
    }*/

    uint8_t *ioBuffer = (uint8_t*)av_malloc(4096);
    if (!ioBuffer) {
        return 1;
    }
    
    FILE *f = nullptr;
    fopen_s(&f, filename, "wb");

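    // avio_alloc_context(buffer, size, write_flag = 1, opaque = f, read, write, seek):
    // no seek callback is provided, so the muxer cannot go back to patch the
    // moov atom at the end; hence the fragmented-mp4 options below.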
    AVIOContext *ioCtx = avio_alloc_context(ioBuffer, 4096, 1, f, NULL, __WritePacket, NULL);
    if (!ioCtx) {
        return 1;
    }
    formatCtx->pb = ioCtx;
    formatCtx->flush_packets = 1;

    fprintf(stderr, "Stream timebase: %d/%d\n", stream_video->time_base.num, stream_video->time_base.den);

    // Fragmented mp4
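    // frag_keyframe starts a new fragment at each keyframe, and empty_moov
    // writes an up-front moov atom without sample tables, so the file is
    // playable without seeking back at trailer time.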
    AVDictionary* opts = NULL;
    av_dict_set(&opts, "movflags", "frag_keyframe+empty_moov", 0);

    // Write media header
    ret = avformat_write_header(formatCtx, &opts);
    if (ret < 0) {
        char error[255];
        av_strerror(ret, error, 255);
        fprintf(stderr, "Error occurred when opening output file: %s\n", error);
        return 1;
    }
    fprintf(stderr, "Stream timebase: %d/%d\n", stream_video->time_base.num, stream_video->time_base.den);

    if (frame_video)
        frame_video->pts = 0;
    for (;;) {
        // Compute video time from last added video frame
        time_video = ((double)frame_video->pts) * av_q2d(stream_video->time_base);

        // Stop media if enough time
        if (!stream_video || time_video >= STREAM_DURATION)
            break;

        // Add a video frame
        write_video_frame(formatCtx, stream_video, codecCtx_video);

        // Increase frame pts according to time base
        frame_video->pts += av_rescale_q(1, codecCtx_video->time_base, stream_video->time_base);
    }

    // Write media trailer
    av_write_trailer(formatCtx);

    /* Close each codec. */
    if (stream_video)
        close_video(formatCtx, stream_video);

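    /* Release the custom AVIO context. The internal buffer may have been
     * reallocated by lavf, so free ioCtx->buffer rather than the original
     * ioBuffer pointer, then close the output file. */
    av_freep(&ioCtx->buffer);
    avio_context_free(&ioCtx);
    fclose(f);
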
    /* free the stream */
    avformat_free_context(formatCtx);

    return 0;
}
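
A final remark on the send/receive pattern used in write_video_frame: avcodec_send_frame() and avcodec_receive_packet() are not one-to-one, so with max_b_frames = 2 a single receive per send can silently drop packets, and the encoder should be drained before av_write_trailer(). Below is a minimal sketch of such a drain loop; flush_encoder is a hypothetical helper, not part of the code above, and it assumes (as in this program) that frame timestamps are already expressed in the stream time base:

static void flush_encoder(AVFormatContext *formatContext, AVStream *stream, AVCodecContext *codecCtx)
{
    AVPacket pkt = { 0 };
    av_init_packet(&pkt);

    /* A NULL frame puts the encoder into draining mode. */
    avcodec_send_frame(codecCtx, NULL);

    /* Receive until the encoder is empty (avcodec_receive_packet returns AVERROR_EOF). */
    while (avcodec_receive_packet(codecCtx, &pkt) == 0) {
        pkt.stream_index = stream->index;
        av_interleaved_write_frame(formatContext, &pkt);
        av_packet_unref(&pkt);
    }
}

Calling this right before av_write_trailer(formatCtx) emits the frames the encoder is still buffering.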