Segfault while trying to fill the YUV image for RTSP streaming

I'm capturing a video stream from a window and I want to re-stream it to my RTSP proxy server. However, I can't seem to write the frames correctly, even though I can display them in an SDL window. Here is my code:

int StreamHandler::storeStreamData()
{
// Allocate video frame
pFrame = av_frame_alloc();

// Allocate an AVFrame structure
pFrameRGB = av_frame_alloc();
if (pFrameRGB == NULL)
    throw myExceptions("Error : Can't alloc the frame.");

// Determine required buffer size and allocate buffer
numBytes = avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width,
    pCodecCtx->height);
buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));

// Assign appropriate parts of buffer to image planes in pFrameRGB
avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_YUV420P,
    pCodecCtx->width, pCodecCtx->height);

//InitSdlDrawBack();

// initialize SWS context for software scaling

sws_ctx = sws_getContext(pCodecCtx->width,
    pCodecCtx->height,
    pCodecCtx->pix_fmt,
    pCodecCtx->width,
    pCodecCtx->height,
    pCodecCtx->pix_fmt,
    SWS_LANCZOS,
    NULL,
    NULL,
    NULL
);

SetPixelArray();
FfmpegEncoder enc("rtsp://127.0.0.1:1935/live/myStream");

i = 0;
while (av_read_frame(pFormatCtx, &packet) >= 0) {
    if (packet.stream_index == videoindex) {
        // Decode video frame
        avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
        if (frameFinished) {
            i++;
            //DrawFrame();

            sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
                pFrame->linesize, 0, pCodecCtx->height,
                pFrameRGB->data, pFrameRGB->linesize);
            enc.encodeFrame(pFrameRGB, i);
        }
    }
    // Free the packet that was allocated by av_read_frame
    av_free_packet(&packet);
}
// Free the RGB image
av_free(buffer);
av_frame_free(&pFrameRGB);

// Free the YUV frame
av_frame_free(&pFrame);

// Close the codecs
avcodec_close(pCodecCtx);
avcodec_close(pCodecCtxOrig);

// Close the video file
avformat_close_input(&pFormatCtx);

return 0;
}

void StreamHandler::SetPixelArray()
{
yPlaneSz = pCodecCtx->width * pCodecCtx->height;
uvPlaneSz = pCodecCtx->width * pCodecCtx->height / 4;
yPlane = (Uint8*)malloc(yPlaneSz);
uPlane = (Uint8*)malloc(uvPlaneSz);
vPlane = (Uint8*)malloc(uvPlaneSz);
if (!yPlane || !uPlane || !vPlane)
    throw myExceptions("Error : Can't create pixel array.");

uvPitch = pCodecCtx->width / 2;
}

Here is where I fill the YUV image and write the packet:

void FfmpegEncoder::encodeFrame(AVFrame * frame, int frameCount)
{
AVPacket    pkt = { 0 };
int         got_pkt;

av_init_packet(&pkt);
frame->pts = frameCount;

FillYuvImage(frame, frameCount, this->pCodecCtx->width, this->pCodecCtx->height);

if (avcodec_encode_video2(this->pCodecCtx, &pkt, frame, &got_pkt) < 0)
    throw myExceptions("Error: failed to encode the frame. FfmpegEncoder.cpp l:61\n");

//if the frame is well encoded
if (got_pkt) {
    pkt.stream_index = this->st->index;
    pkt.pts = av_rescale_q_rnd(pkt.pts, this->pCodecCtx->time_base, this->st->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    if (av_write_frame(this->outFormatCtx, &pkt) < 0)
        throw myExceptions("Error: failed to write video frame. FfmpegEncoder.cpp l:68\n");
}
}

void FfmpegEncoder::FillYuvImage(AVFrame * pict, int frame_index, int width, int height)
{
int x, y, i;

i = frame_index;

for (y = 0; y < height; y++)
{
    for (x = 0; x < width / 2; x++)
        pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
}
for (y = 0; y < height; y++)
{
    for (x = 0; x < width / 2; x++)
    {
        pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
        pict->data[2][y * pict->linesize[2] + x] = 64 + y + i * 5; //segfault here
    }
}
}

"FillYuvImage"方法是从一个FFMPEG示例中复制过来的,但它对我不起作用。如果我不调用它,"av_write_frame" 函数将无法工作(也是段错误)。

EDIT: here is my output context and codec initialization.

FfmpegEncoder::FfmpegEncoder(char *url)
{
AVRational      tmp_time_base;
AVDictionary*   options = NULL;

this->pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (this->pCodec == NULL)
    throw myExceptions("Error: Can't initialize the encoder. FfmpegEncoder.cpp l:9\n");

this->pCodecCtx = avcodec_alloc_context3(this->pCodec);

//Alloc output context
if (avformat_alloc_output_context2(&outFormatCtx, NULL, "rtsp", url) < 0)
    throw myExceptions("Error: Can't alloc stream output. FfmpegEncoder.cpp l:17\n");

this->st = avformat_new_stream(this->outFormatCtx, this->pCodec);

if (this->st == NULL)
    throw myExceptions("Error: Can't create stream . FfmpegEncoder.cpp l:22\n");

av_dict_set(&options, "vprofile", "main", 0);
av_dict_set(&options, "tune", "zerolatency", 0);

tmp_time_base.num = 1;
tmp_time_base.den = 60;

//TODO : parse these values
this->pCodecCtx->bit_rate = 3000000;
this->pCodecCtx->width = 1280;
this->pCodecCtx->height = 720;
//This set the fps. 60fps at this point.
this->pCodecCtx->time_base = tmp_time_base;
//Add a intra frame every 12 frames
this->pCodecCtx->gop_size = 12;
this->pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;

//Open Codec, using the context + x264 options
if (avcodec_open2(this->pCodecCtx, this->pCodec, &options) < 0)
    throw myExceptions("Error: Can't open the codec. FfmpegEncoder.cpp l:43\n");

if (avcodec_copy_context(this->st->codec, this->pCodecCtx) != 0) {
    throw myExceptions("Error : Can't copy codec context. FfmpegEncoder.cpp : l.46");
}

av_dump_format(this->outFormatCtx, 0, url, 1);

if (avformat_write_header(this->outFormatCtx, NULL) != 0)
    throw myExceptions("Error: failed to connect to RTSP server. FfmpegEncoder.cpp l:48\n");
}

This:

for (y = 0; y < height; y++)
{
    for (x = 0; x < width / 2; x++)
    {
         pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
         pict->data[2][y * pict->linesize[2] + x] = 64 + y + i * 5; //segfault here
    }
}

should be

for (y = 0; y < height / 2; y++) // divide height by 2: the chroma planes have half as many rows
{
    for (x = 0; x < width / 2; x++)
    {
         pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
         pict->data[2][y * pict->linesize[2] + x] = 64 + y + i * 5;
    }
}

YUV 4:2:0 means there is one U and one V sample for every 2x2 square of pixels, but with your code you are writing one sample for every 1x2 square, which obviously runs past the end of the chroma planes.
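To put numbers on it, here is a minimal standalone sketch using the 1280x720 size from the question's encoder setup (the figures are illustrative, not part of the original code):

#include <stdio.h>

int main(void)
{
    /* 1280x720 taken from the question's FfmpegEncoder constructor. */
    int width = 1280, height = 720;

    /* YUV 4:2:0: each 2x2 pixel block shares one U and one V sample. */
    int chroma_plane_size = (width / 2) * (height / 2); /* 230400 bytes */

    /* The original loops write height rows of width/2 samples per chroma plane. */
    int bytes_touched = height * (width / 2);           /* 460800 bytes */

    printf("chroma plane: %d bytes, loop touches: %d bytes\n",
           chroma_plane_size, bytes_touched);
    /* Twice the allocated plane, so the writes run off the end of the
     * buffer set up by avpicture_fill(), which is what produces the segfault. */
    return 0;
}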