How to convert from GL_RGB to AVFrame

For my project I need to convert the RGB (GL_RGB) image produced by glReadPixels into an AVFrame. Googling, I only found examples of the opposite direction, but in this case I need to go from GL_RGB to an AVFrame.

Here is my code.

This is how I set up the codec:

/* Allocate resources and write header data to the output file. */
void ffmpeg_encoder_start(AVCodecID codec_id, int fps, int width, int height) {
    const AVCodec *codec;
    int ret;
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        std::cerr << "Codec not found" << std::endl;
        exit(1);
    }
    c = avcodec_alloc_context3(codec);
    if (!c) {
        std::cerr << "Could not allocate video codec context" << std::endl;
        exit(1);
    }
    c->bit_rate = 400000;
    c->width = width;
    c->height = height;
    c->time_base.num = 1;
    c->time_base.den = fps;
    c->keyint_min = 600;
    c->pix_fmt = AV_PIX_FMT_YUV420P;
    if (avcodec_open2(c, codec, NULL) < 0) {
        std::cerr << "Could not open codec" << std::endl;
        exit(1);
    }
    frame = av_frame_alloc();
    if (!frame) {
        std::cerr << "Could not allocate video frame" << std::endl;
        exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width  = c->width;
    frame->height = c->height;
    ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height, c->pix_fmt, 32);
    if (ret < 0) {
        std::cerr << "Could not allocate raw picture buffer" << std::endl;
        exit(1);
    }
}

Grabbing the pixels and setting the new frame:

BYTE* pixels = new BYTE[3 * DEFAULT_MONITOR.maxResolution.width * DEFAULT_MONITOR.maxResolution.height];

glReadPixels(0, 0, DEFAULT_MONITOR.maxResolution.width, DEFAULT_MONITOR.maxResolution.height, GL_RGB, GL_UNSIGNED_BYTE, pixels);
screenSrc->setNextFrame(pixels, DEFAULT_MONITOR.maxResolution.width, DEFAULT_MONITOR.maxResolution.height);
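
One detail that may be worth double-checking here (an assumption on my part, since the surrounding capture code is not shown): glReadPixels honors GL_PACK_ALIGNMENT, which defaults to 4, so with GL_RGB and a width that is not a multiple of 4 each row gets padded and the tightly packed 3 * width * height buffer above would be too small. Requesting byte-aligned rows rules that out:

glPixelStorei(GL_PACK_ALIGNMENT, 1); // no row padding; rows are exactly 3 * width bytes
glReadPixels(0, 0, DEFAULT_MONITOR.maxResolution.width, DEFAULT_MONITOR.maxResolution.height, GL_RGB, GL_UNSIGNED_BYTE, pixels);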

And here is my conversion function:

static void ffmpeg_encoder_set_frame_yuv_from_rgb(uint8_t *rgb) {
    const int in_linesize[1] = { 3 * c->width };
    sws_context = sws_getCachedContext(sws_context,
            c->width, c->height, AV_PIX_FMT_RGB24,
            c->width, c->height, AV_PIX_FMT_YUV420P,
            0, 0, 0, 0);
    sws_scale(sws_context, (const uint8_t * const *)&rgb, in_linesize, 0,
            c->height, frame->data, frame->linesize);
}

All of the code can be found here. Here is the line that leads to the segmentation fault.

Unfortunately, the function gives me a segmentation fault. Do you have any idea how to solve this?

The second argument of sws_scale is an array of pointers: const uint8_t *const srcSlice[].

Instead of the pointer cast (const uint8_t * const *)&rgb, put the rgb pointer into an array of pointers:

const uint8_t* const src_data[] = { rgb };

sws_scale(sws_context, src_data, in_linesize, 0,
            c->height, frame->data, frame->linesize);    
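
Folded back into the original function, a minimal corrected version might look like this (a sketch, assuming the same global c, frame and sws_context as in the question):

static void ffmpeg_encoder_set_frame_yuv_from_rgb(uint8_t *rgb) {
    const int in_linesize[1] = { 3 * c->width };   // tightly packed RGB24 rows
    const uint8_t* const src_data[1] = { rgb };    // array of plane pointers instead of a cast
    sws_context = sws_getCachedContext(sws_context,
            c->width, c->height, AV_PIX_FMT_RGB24,
            c->width, c->height, AV_PIX_FMT_YUV420P,
            0, 0, 0, 0);
    sws_scale(sws_context, src_data, in_linesize, 0,
            c->height, frame->data, frame->linesize);
}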

A complete example of converting RGB to YUV420P:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

extern "C"
{
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
}


int main()
{
    //Use FFmpeg for building raw RGB image (used as input).
    //ffmpeg -y -f lavfi -i testsrc=size=192x108:rate=1 -vcodec rawvideo -pix_fmt rgb24 -frames 1 -f rawvideo rgb_image.bin
    
    const int width = 192;
    const int height = 108;
    uint8_t* rgb_in = new uint8_t[width * height * 3];

    const enum AVPixelFormat out_pix_fmt = AV_PIX_FMT_YUV420P;

    //Read input image for binary file (for testing)
    ////////////////////////////////////////////////////////////////////////////
    FILE* f = fopen("rgb_image.bin", "rb"); //For using fopen in Visual Studio, define: _CRT_SECURE_NO_WARNINGS (or use fopen_s).
    if (f == nullptr)
    {
        printf("Error: cannot open rgb_image.bin\n");
        return -1;
    }
    fread(rgb_in, 1, width * height * 3, f);
    fclose(f);
    ////////////////////////////////////////////////////////////////////////////    

    //Allocate output buffers:
    ////////////////////////////////////////////////////////////////////////////
    // YUV420p data is separated in three planes
    // 1. Y - intensity plane, resolution: width x height
    // 2. U - Color plane, resolution: width/2 x height/2
    // 3. V - Color plane, resolution: width/2 x height/2

    int out_linesize[4] = {0, 0, 0, 0};
    uint8_t* out_planes[4] = { nullptr, nullptr, nullptr, nullptr };   

    int sts = av_image_alloc(out_planes,    //uint8_t * pointers[4], 
                             out_linesize,  //int linesizes[4], 
                             width,         //int w, 
                             height,        //int h, 
                             out_pix_fmt,   //enum AVPixelFormat pix_fmt, 
                             32);           //int align);   //Aligning to a 32 byte address may result in faster execution time compared to 1 byte alignment.

    if (sts < 0)
    {
        printf("Error: av_image_alloc response = %d\n", sts);
        return -1;
    }
    ////////////////////////////////////////////////////////////////////////////

    
    
    //Get SWS context
    ////////////////////////////////////////////////////////////////////////////
    struct SwsContext* sws_context = nullptr;

    sws_context = sws_getCachedContext(sws_context,         //struct SwsContext *context,
                                       width,               //int srcW,
                                       height,              //int srcH,
                                       AV_PIX_FMT_RGB24,    //enum AVPixelFormat srcFormat,
                                       width,               //int dstW,
                                       height,              //int dstH,
                                       out_pix_fmt,         //enum AVPixelFormat dstFormat,
                                       SWS_BILINEAR,        //int flags,
                                       nullptr,             //SwsFilter *srcFilter,
                                       nullptr,             //SwsFilter *dstFilter,
                                       nullptr);            //const double *param);

    if (sws_context == nullptr)
    {
        printf("Error: sws_getCachedContext returned nullptr\n");
        return -1;
    }
    ////////////////////////////////////////////////////////////////////////////


    //Apply color conversion
    ////////////////////////////////////////////////////////////////////////////
    const int in_linesize[1] = { 3 * width }; // RGB stride
    const uint8_t* in_planes[1] = { rgb_in };

    int response = sws_scale(sws_context,   //struct SwsContext *c, 
                             in_planes,     //const uint8_t *const srcSlice[],
                             in_linesize,   //const int srcStride[], 
                             0,             //int srcSliceY, 
                             height,        //int srcSliceH,
                             out_planes,    //uint8_t *const dst[], 
                             out_linesize); //const int dstStride[]);


    if (response < 0)
    {
        printf("Error: sws_scale response = %d\n", response);
        return -1;
    }
    ////////////////////////////////////////////////////////////////////////////


    //Write YUV420p output image to binary file (for testing)
    //You may execute FFmpeg after conversion for testing the output:
    //ffmpeg -y -f rawvideo -s 192x108 -pixel_format yuv420p -i yuv420p_image.bin rgb.png
    ////////////////////////////////////////////////////////////////////////////
    f = fopen("yuv420p_image.bin", "wb");
    fwrite(out_planes[0], 1, width * height, f);
    fwrite(out_planes[1], 1, width * height / 4, f);
    fwrite(out_planes[2], 1, width * height / 4, f);
    fclose(f);
    ////////////////////////////////////////////////////////////////////////////


    //Free allocated memory
    ////////////////////////////////////////////////////////////////////////////
    av_freep(out_planes);
    sws_freeContext(sws_context);
    delete[] rgb_in;
    ////////////////////////////////////////////////////////////////////////////

    return 0;
}
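
As a quicker sanity check than converting back to a PNG, the raw output can also be viewed directly with ffplay (assuming it is available):

ffplay -f rawvideo -video_size 192x108 -pixel_format yuv420p yuv420p_image.bin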

There seems to be no obvious error in the posted code snippets, which can be verified like this:

#include <iostream>
extern "C" {
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
#include <libavcodec/avcodec.h>
}

AVFrame *frame = nullptr;
AVCodecContext *c = nullptr;
SwsContext *sws_context = nullptr;

/* <copy_of_original_code> */
void ffmpeg_encoder_start(AVCodecID codec_id, int fps, int width, int height) { ... }
static void ffmpeg_encoder_set_frame_yuv_from_rgb(uint8_t *rgb) { ... }
/* </copy_of_original_code> */

int main()
{
    const int width = 640, height = 480;
    static uint8_t rgb_in[width * height * 3] = { 0 }; // static: ~900 KB would be risky on the stack
    ffmpeg_encoder_start(AV_CODEC_ID_RAWVIDEO, 1, width, height);
    ffmpeg_encoder_set_frame_yuv_from_rgb(rgb_in);
    std::cout << "OK" << std::endl;
    /* TODO: deallocate all resources here */
    return 0;
}

and then checked like this:

$ g++ -Wall -pedantic -o prg prg.cpp -lswscale -lavcodec -lavutil
$ ./prg
OK

So it is hard to say where the problem lies. Questions that come to mind:

  • Is the input buffer large enough? Are the DEFAULT_MONITOR.maxResolution.width and DEFAULT_MONITOR.maxResolution.height values being used valid? Is the heap allocation necessary? (A small sketch of such checks follows this list.)
  • Are the global AVFrame, AVCodecContext and sws_context pointer variables initialized before use?
  • When testing on several different machines, does the segfault always occur, or is it hardware/OS dependent? Are the FFmpeg libraries up to date?
  • Where is the exact point of failure inside the library when sws_scale() is called? (This could be checked with a debug build and a debugger, I guess, by stepping into that function to see where the program crashes...)
  • What does Valgrind say?
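
As a way to attack the first two bullets, a few defensive checks could be dropped in just before the conversion. This is only a sketch, and check_conversion_preconditions is a hypothetical helper assuming the same globals as in the question:

#include <cassert>
#include <cstdint>

// Hypothetical helper: call it right before ffmpeg_encoder_set_frame_yuv_from_rgb()
// to catch the most likely causes of the crash early.
static void check_conversion_preconditions(const uint8_t *rgb, int buf_width, int buf_height) {
    assert(c != nullptr && frame != nullptr);                   // encoder was started
    assert(rgb != nullptr);                                     // pixel buffer exists
    assert(buf_width == c->width && buf_height == c->height);   // buffer matches encoder size
}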

The problem does not seem to be directly related to OpenGL. FFmpeg's libswscale library documentation can be found here.

To find a better answer, it would help if the problem could be reproduced by someone else with a similar minimal reproducible example.