"moov atom not found" 使用 av_interleaved_write_frame 而不是 avio_write

"moov atom not found" when using av_interleaved_write_frame but not avio_write

我正在尝试组合一个 class,它可以获取任意帧并使用 ffmpeg 3.3.3 API 从中构建视频。我一直很难找到一个好的例子,因为现有的例子似乎仍在使用已过时的函数,所以我尝试依据 headers 中的文档,并参考一些似乎在使用新版 API 的 GitHub 仓库。

如果我使用 av_interleaved_write_frame 将编码数据包写入输出,则 ffprobe 输出以下内容:

[mov,mp4,m4a,3gp,3g2,mj2 @ 0000000002760120] moov atom not found0
X:\Diagnostics.mp4: Invalid data found when processing input

ffplay无法播放使用该方法生成的文件

如果我改为调用 avio_write,则 ffprobe 会输出:

Input #0, h264, from 'X:\Diagnostics.mp4':
  Duration: N/A, bitrate: N/A
    Stream #0:0: Video: h264 (Main), yuv420p(progressive), 672x380 [SAR 1:1 DAR 168:95], 25 fps, 25 tbr, 1200k tbn, 50 tbc

ffplay 可以大部分 播放这个文件,直到它接近尾声,当它输出:

Input #0, h264, from 'X:\Diagnostics.mp4':    0KB sq=    0B f=0/0
  Duration: N/A, bitrate: N/A
    Stream #0:0: Video: h264 (Main), yuv420p(progressive), 672x380 [SAR 1:1 DAR 168:95], 25 fps, 25 tbr, 1200k tbn, 50 tbc
[h264 @ 000000000254ef80] error while decoding MB 31 22, bytestream -65
[h264 @ 000000000254ef80] concealing 102 DC, 102 AC, 102 MV errors in I frame
    nan M-V:    nan fd=   1 aq=    0KB vq=    0KB sq=    0B f=0/0

VLC 无法通过这两种方法播放文件。第二种方法的文件显示单个黑框,然后隐藏视频输出。第一个不显示任何内容。他们都没有给出视频时长。

有人知道这里发生了什么吗?我假设我的解决方案接近工作,因为我收到了大量有效帧。

代码:

// Entry point: encode 100 generated frames into Output.mp4 at 672x380 @ 25 fps.
// FIX: `void main()` is not valid standard C++ — main must return int.
int main()
{
    OutputStream Stream( "Output.mp4", 672, 380, 25, true );
    Stream.Initialize();

    int i = 100;
    while( i-- )
    {
        //... Generate a frame

        Stream.WriteFrame( Frame );
    }
    Stream.CloseFile();

    return 0; // report success to the host environment
}

// Construct an output stream description; no libav objects are created here —
// all of that is deferred to Initialize().
OutputStream::OutputStream( const std::string& Path, unsigned int Width, unsigned int Height, int Framerate, bool IsBGR )
: Stream()
, FrameIndex( 0 )
{
    auto& ID = *m_InternalData;

    // Codec selection: always H.264; tag 0 lets the muxer pick the right one.
    ID.CodecID  = AV_CODEC_ID_H264;
    ID.CodecTag = 0;

    // Destination path plus the geometry of the incoming frames.
    ID.Path   = Path;
    ID.Width  = Width;
    ID.Height = Height;

    // Frame rate expressed as a rational: Framerate frames per 1 second.
    ID.Framerate.num = Framerate;
    ID.Framerate.den = 1;

    // Source frames are packed 24-bit, in either BGR or RGB byte order.
    ID.PixelFormat = IsBGR ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_RGB24;

    // Square pixels (1:1 sample aspect ratio).
    ID.AspectRatio.num = 1;
    ID.AspectRatio.den = 1;
}

// Set up the muxer, encoder and pixel-format converter, open the output file
// and write the container header. Returns Success or a CameraStreamError.
//
// FIX: AV_CODEC_FLAG_GLOBAL_HEADER was previously set AFTER avcodec_open2().
// The flag only has an effect at open time: without it the H.264 encoder never
// produces global extradata (SPS/PPS), so the MP4 muxer has nothing to put in
// the moov atom's sample description — one cause of unplayable output.
CameraStreamError OutputStream::Initialize()
{
    // Global libav setup: log hook, codec/format registration, networking.
    av_log_set_callback( &InputStream::LogCallback );
    av_register_all();
    avformat_network_init();

    auto& ID = *m_InternalData;

    av_init_packet( &ID.Packet );

    // Choose the muxer from the output path's extension (".mp4" -> mov muxer).
    int Result = avformat_alloc_output_context2( &ID.FormatContext, nullptr, nullptr, ID.Path.c_str() );
    if( Result < 0 || !ID.FormatContext )
    {
        STREAM_ERROR( UnknownError );
    }

    AVCodec* Encoder = avcodec_find_encoder( ID.CodecID );
    if( !Encoder )
    {
        STREAM_ERROR( NoH264Support );
    }

    AVStream* OutStream = avformat_new_stream( ID.FormatContext, Encoder );
    if( !OutStream )
    {
        STREAM_ERROR( UnknownError );
    }

    ID.CodecContext = avcodec_alloc_context3( Encoder );
    if( !ID.CodecContext )
    {
        STREAM_ERROR( NoH264Support );
    }

    // Encoder clock: one tick per frame (time_base = 1/framerate).
    ID.CodecContext->time_base = av_inv_q(ID.Framerate);

    {
        // Describe the stream via its parameters, then seed the encoder
        // context from that description.
        AVCodecParameters* CodecParams = OutStream->codecpar;

        CodecParams->width = ID.Width;
        CodecParams->height = ID.Height;
        CodecParams->format = AV_PIX_FMT_YUV420P;
        CodecParams->codec_id = ID.CodecID;
        CodecParams->codec_type = AVMEDIA_TYPE_VIDEO;
        CodecParams->profile = FF_PROFILE_H264_MAIN;
        CodecParams->level = 40; // H.264 level 4.0

        Result = avcodec_parameters_to_context( ID.CodecContext, CodecParams );
        if( Result < 0 )
        {
            STREAM_ERROR( EncoderCreationError );
        }
    }

    if( ID.IsVideo )
    {
        ID.CodecContext->width = ID.Width;
        ID.CodecContext->height = ID.Height;
        ID.CodecContext->sample_aspect_ratio = ID.AspectRatio;
        ID.CodecContext->time_base = av_inv_q(ID.Framerate);

        // Prefer the encoder's native pixel format when it advertises one.
        if( Encoder->pix_fmts )
        {
            ID.CodecContext->pix_fmt = Encoder->pix_fmts[0];
        }
        else
        {
            ID.CodecContext->pix_fmt = ID.PixelFormat;
        }
    }
    //Snip

    // Must happen BEFORE avcodec_open2() so the encoder emits its SPS/PPS as
    // global extradata instead of in-band (required by MP4/MOV containers).
    if( ID.FormatContext->oformat->flags & AVFMT_GLOBALHEADER )
    {
        ID.CodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }

    Result = avcodec_open2( ID.CodecContext, Encoder, nullptr );
    if( Result < 0 )
    {
        STREAM_ERROR( EncoderCreationError );
    }

    // Copy the now-complete encoder configuration (including extradata)
    // back onto the stream for the muxer to use.
    Result = avcodec_parameters_from_context( OutStream->codecpar, ID.CodecContext );
    if( Result < 0 )
    {
        STREAM_ERROR( EncoderCreationError );
    }

    OutStream->time_base = ID.CodecContext->time_base;
    OutStream->avg_frame_rate= av_inv_q(OutStream->time_base);

    // Open the output file unless the muxer does its own I/O.
    if( !( ID.FormatContext->oformat->flags & AVFMT_NOFILE ) )
    {
        Result = avio_open( &ID.FormatContext->pb, ID.Path.c_str(), AVIO_FLAG_WRITE );
        if( Result < 0 )
        {
            STREAM_ERROR( FileNotWriteable );
        }
    }

    Result = avformat_write_header( ID.FormatContext, nullptr );
    if( Result < 0 )
    {
        STREAM_ERROR( WriteFailed );
    }

    // Scratch frame in the encoder's own format, filled by sws_scale below.
    ID.Output = std::make_unique<FFMPEG::Frame>( ID.CodecContext->width, ID.CodecContext->height, ID.CodecContext->pix_fmt );

    ID.ConversionContext = sws_getCachedContext(
        ID.ConversionContext,
        ID.Width,
        ID.Height,
        ID.PixelFormat,
        ID.CodecContext->width,
        ID.CodecContext->height,
        ID.CodecContext->pix_fmt,
        SWS_BICUBIC,
        NULL,
        NULL,
        NULL );
    // FIX: sws_getCachedContext can fail (returns null) — check it instead of
    // crashing later inside sws_scale.
    if( !ID.ConversionContext )
    {
        STREAM_ERROR( UnknownError );
    }

    return CameraStreamError::Success;
}

// Convert one source frame to the encoder's pixel format, submit it to the
// encoder, and mux any packets the encoder has ready.
//
// FIX: previously a send error other than 0 or EAGAIN fell through both `if`
// blocks and the function returned Success, silently dropping the frame.
CameraStreamError OutputStream::WriteFrame( FFMPEG::Frame* Frame )
{
    auto& ID = *m_InternalData;

    ID.Output->Prepare();

    // Colour-space/format conversion into the reusable output frame.
    int OutputSliceSize = sws_scale( m_InternalData->ConversionContext, Frame->GetFrame()->data, Frame->GetFrame()->linesize, 0, Frame->GetHeight(), ID.Output->GetFrame()->data, ID.Output->GetFrame()->linesize );
    if( OutputSliceSize <= 0 )
    {
        STREAM_ERROR( WriteFailed );
    }

    // pts counts frames in the encoder time_base (1/framerate per tick).
    ID.Output->GetFrame()->pts = ID.CodecContext->frame_number;

    int Result = avcodec_send_frame( GetData().CodecContext, ID.Output->GetFrame() );
    if( Result == AVERROR(EAGAIN) )
    {
        // Encoder input queue full: drain pending packets, then retry once.
        CameraStreamError ResultErr = SendAll();
        if( ResultErr != CameraStreamError::Success )
        {
            return ResultErr;
        }
        Result = avcodec_send_frame( GetData().CodecContext, ID.Output->GetFrame() );
    }

    if( Result == 0 )
    {
        // Frame accepted — mux whatever packets are now available.
        CameraStreamError ResultErr = SendAll();
        if( ResultErr != CameraStreamError::Success )
        {
            return ResultErr;
        }
    }
    else
    {
        // Any remaining non-zero result is a genuine encode failure.
        STREAM_ERROR( WriteFailed );
    }

    FrameIndex++;

    return CameraStreamError::Success;
}

// Drain every packet the encoder currently has ready and hand each one to the
// muxer via av_interleaved_write_frame (which rescues proper interleaving and
// lets the muxer build the container index — unlike raw avio_write).
//
// FIX: the original else-if chain was inverted — a genuine receive error
// (neither EAGAIN nor EOF) matched `Result != AVERROR(EAGAIN)` first, hit
// `continue`, exited the loop and was reported as Success.
CameraStreamError OutputStream::SendAll( void )
{
    auto& ID = *m_InternalData;

    int Result;
    do
    {
        AVPacket TempPacket = {};
        av_init_packet( &TempPacket );

        Result = avcodec_receive_packet( GetData().CodecContext, &TempPacket );
        if( Result == 0 )
        {
            // Timestamps come out in the encoder time_base; rescale them to
            // the muxer stream's time_base before writing.
            av_packet_rescale_ts( &TempPacket, ID.CodecContext->time_base, ID.FormatContext->streams[0]->time_base );

            TempPacket.stream_index = ID.FormatContext->streams[0]->index;

            Result = av_interleaved_write_frame( ID.FormatContext, &TempPacket );
            if( Result < 0 )
            {
                STREAM_ERROR( WriteFailed );
            }

            av_packet_unref( &TempPacket );
        }
        else if( Result == AVERROR(EAGAIN) || Result == AVERROR_EOF )
        {
            // No packet available: encoder needs more input, or is fully
            // flushed. Either way we are done draining — not an error.
            break;
        }
        else
        {
            // Real receive failure.
            STREAM_ERROR( WriteFailed );
        }
    } while ( Result == 0 );

    return CameraStreamError::Success;
}

// Flush the encoder, finalize the container and close the output file.
CameraStreamError OutputStream::CloseFile()
{
    auto& ID = *m_InternalData;

    // Sending a null frame switches the encoder into draining mode; keep
    // draining packets until it reports AVERROR_EOF.
    for( ;; )
    {
        int FlushResult = avcodec_send_frame( ID.CodecContext, nullptr );

        if( FlushResult == AVERROR_EOF )
        {
            break; // Encoder fully drained.
        }

        if( FlushResult != 0 )
        {
            STREAM_ERROR( WriteFailed );
        }

        CameraStreamError StrError = SendAll();
        if( StrError != CameraStreamError::Success )
        {
            return StrError;
        }
    }

    // Finalize the container — for MP4 this is what writes the moov atom.
    int Result = av_write_trailer( ID.FormatContext );
    if( Result < 0 )
    {
        STREAM_ERROR( WriteFailed );
    }

    // Close the underlying file unless the muxer manages its own I/O.
    if( !( ID.FormatContext->oformat->flags & AVFMT_NOFILE ) )
    {
        Result = avio_close( ID.FormatContext->pb );
        if( Result < 0 )
        {
            STREAM_ERROR( WriteFailed );
        }
    }

    return CameraStreamError::Success;
}

请注意,我简化了一些事情并内联了一些其他地方的内容。我还删除了所有关闭代码,因为文件关闭后发生的任何事情都是无关紧要的。

这里有完整的仓库:https://github.com/IanNorris/Witness 如果你克隆它,有问题的是 'Diagnostics' 输出;Output 文件是好的。注意代码中有两个指向 X:\ 的硬编码路径。

您的 avio_write() 文件不是 MP4 文件,它只是按顺序压缩的 H.264 数据包,也称为 AnnexB H.264。

要使用 av_interleaved_write_frame() 向容器中写入文件,您还需要在写入所有压缩的 video/audio 数据包之前调用 avformat_write_header(),并在之后调用 av_write_trailer()。否则,该文件将不会包含全局 headers(例如 MP4 中的 moov 块),并且不会被外部应用程序识别为有效文件,正如此处的错误确实表明的那样。

另请参阅 muxing documentation 中更详细的说明。