FFMpeg DVB Subtitles memory leak
I am getting a memory leak with avcodec_decode_subtitle2 while decoding the subtitle track from an MPEG-TS UDP multicast stream. The audio and video streams are fine. All three streams have their memory managed manually by pre-allocating all buffers up front.
There is very little information on this, but I believe a patch exists somewhere.
I am currently developing for Android, using ffmpeg 2.0.4 compiled for armv7-a.
Along the way I found that the video streams come in different resolutions, i.e. 720x576 or 576x576; that does not matter for now, since I render the subtitles separately as an overlay on top of the video. My original decoding function (which is being changed to render a separate overlay) is:
void ffProcessSubtitlePacket( AVPacket *pkt )
{
    //LOGI("NATIVE FFMPEG SUBTITLE - Decoding subtitle packet");
    int got = 0;
    avcodec_decode_subtitle2(ffSubtitleContext, &ffSubtitleFrame, &got, pkt);
    if ( got )
    {
        //LOGI("NATIVE FFMPEG SUBTITLE - Got subtitle frame");
        //LOGI("NATIVE FFMPEG SUBTITLE - Format = %d, Start = %d, End = %d, Rects = %d, PTS = %llu, AudioPTS = %llu, PacketPTS = %llu",
        //    ffSubtitleFrame.format, ffSubtitleFrame.start_display_time,
        //    ffSubtitleFrame.end_display_time, ffSubtitleFrame.num_rects,
        //    ffSubtitleFrame.pts, ffAudioGetPTS(), pkt->pts);

        // now add the subtitle data to the list ready
        for ( int s = 0; s < ffSubtitleFrame.num_rects; s++ )
        {
            ffSubtitle *sub = (ffSubtitle*)mmAlloc(sizeof(ffSubtitle)); //new ffSubtitle;
            if ( sub )
            {
                AVSubtitleRect *r = ffSubtitleFrame.rects[s];
                AVPicture *p = &r->pict;

                // set main data
                sub->startPTS  = pkt->pts + (uint64_t)ffSubtitleFrame.start_display_time;
                sub->endPTS    = pkt->pts + (uint64_t)ffSubtitleFrame.end_display_time * (uint64_t)500;
                sub->nb_colors = r->nb_colors;
                sub->xpos      = r->x;
                sub->ypos      = r->y;
                sub->width     = r->w;
                sub->height    = r->h;

                // allocate space for CLUT and image all in one chunk
                sub->data = mmAlloc(r->nb_colors * 4 + r->w * r->h); //new char[r->nb_colors * 4 + r->w * r->h];
                if ( sub->data )
                {
                    // copy the CLUT data
                    memcpy(sub->data, p->data[1], r->nb_colors * 4);
                    // copy the bitmap onto the end
                    memcpy(sub->data + r->nb_colors * 4, p->data[0], r->w * r->h);

                    // check for duplicate subtitles and remove them as this
                    // one replaces it with a new bitmap data
                    int pos = ffSubtitles.size();
                    while ( pos-- )
                    {
                        ffSubtitle *s = ffSubtitles[pos];
                        if ( s->xpos == sub->xpos &&
                             s->ypos == sub->ypos &&
                             s->width == sub->width &&
                             s->height == sub->height )
                        {
                            //delete s;
                            ffSubtitles.erase( ffSubtitles.begin() + pos );
                            //LOGI("NATIVE FFMPEG SUBTITLE - Removed old duplicate subtitle, size %d", ffSubtitles.size());
                        }
                    }

                    // append to subtitles list
                    ffSubtitles.push_back( sub );

                    char *dat; // data pointer used for the CLUT table
                    //LOGI("NATIVE FFMPEG SUBTITLE - Added %d,%d - %d,%d, Queue %d, Length = %d",
                    //    r->x, r->y, r->w, r->h, ffSubtitles.size(), ffSubtitleFrame.end_display_time);

                    // convert the CLUT (RGB) to YUV values
                    dat = sub->data;
                    for ( int c = 0; c < r->nb_colors; c++ )
                    {
                        int r = dat[0];
                        int g = dat[1];
                        int b = dat[2];
                        int y = ( (  65 * r + 128 * g +  24 * b + 128) >> 8) +  16;
                        int u = ( ( -37 * r -  74 * g + 112 * b + 128) >> 8) + 128;
                        int v = ( ( 112 * r -  93 * g -  18 * b + 128) >> 8) + 128;
                        *dat++ = (char)y;
                        *dat++ = (char)u;
                        *dat++ = (char)v;
                        dat++; // skip the alpha channel
                    }
                }
                else
                {
                    //delete sub;
                    sub = 0;
                    LOGI("NATIVE FFMPEG SUBTITLE - Memory allocation error CLUT and BITMAP");
                }
            }
            else
            {
                LOGI("NATIVE FFMPEG SUBTITLE - Memory allocation error ffSubtitle struct");
                mmGarbageCollect();
                ffSubtitles.clear();
            }
        }
    }
}
void ffSubtitleRenderCheck(int bpos)
{
    if ( ffSubtitleID == -1 || !usingSubtitles )
    {
        // empty the list in case of memory leaks
        ffSubtitles.clear();
        mmGarbageCollect();
        return;
    }

    uint64_t audioPTS = ffAudioGetPTS();
    int pos = 0;

    // draw the subtitle list to the YUV frames
    char *yframe = ffVideoBuffers[bpos].yFrame;
    char *uframe = ffVideoBuffers[bpos].uFrame;
    char *vframe = ffVideoBuffers[bpos].vFrame;
    int ywidth  = fv.frameActualWidth; // actual width with padding
    int uvwidth = fv.frameAWidthHalf;  // and for uv frames

    while ( pos < ffSubtitles.size() )
    {
        ffSubtitle *sub = ffSubtitles[pos];
        if ( sub->startPTS >= audioPTS ) // okay to draw this one?
        {
            //LOGI("NATIVE FFMPEG SUBTITLE - Rendering subtitle bitmap %d", pos);
            char *clut = sub->data;                 // colour table
            char *dat  = clut + sub->nb_colors * 4; // start of bitmap data
            int w = sub->width;
            int h = sub->height;
            int x = sub->xpos;
            int y = sub->ypos;
            for ( int xpos = 0; xpos < w; xpos++ )
            {
                for ( int ypos = 0; ypos < h; ypos++ )
                {
                    // get colour for pixel
                    char bcol = dat[ypos * w + xpos];
                    if ( bcol != 0 ) // ignore 0 pixels
                    {
                        char cluty = clut[bcol * 4 + 0]; // get colours from CLUT
                        char clutu = clut[bcol * 4 + 1];
                        char clutv = clut[bcol * 4 + 2];
                        // draw to Y frame
                        int newx = x + xpos;
                        int newy = y + ypos;
                        yframe[newy * ywidth + newx] = cluty;
                        // draw to uv frames if we have a quarter pixel only
                        if ( ( newy & 1 ) && ( newx & 1 ) )
                        {
                            uframe[(newy >> 1) * uvwidth + (newx >> 1)] = clutu;
                            vframe[(newy >> 1) * uvwidth + (newx >> 1)] = clutv;
                        }
                    }
                }
            }
        }
        pos++;
    }

    // Last thing is to erase timed out subtitles
    pos = ffSubtitles.size();
    while ( pos-- )
    {
        ffSubtitle *sub = ffSubtitles[pos];
        if ( sub->endPTS < audioPTS )
        {
            //delete sub;
            ffSubtitles.erase( ffSubtitles.begin() + pos );
            //LOGI("NATIVE FFMPEG SUBTITLE - Removed timed out subtitle");
        }
    }

    if ( ffSubtitles.size() == 0 )
    {
        // garbage collect the custom memory pool
        mmGarbageCollect();
    }
    //LOGI("NATIVE FFMPEG SUBTITLE - Size of subtitle list = %d", ffSubtitles.size());
}
Any information would be greatly appreciated, or do I have to upgrade to a later version of ffmpeg?
After looking through the ffmpeg source code itself, I found the cause of the memory leak.
It turns out that, when decoding subtitle frames, what I was missing after processing the information taken from the decoded frame was:
avsubtitle_free( &ffSubtitleFrame );
Now I can get on with the rest of the project and rewrite the subtitle decoder and renderer.
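For reference, here is a minimal sketch of the corrected decode pattern; the stand-alone function name is hypothetical and it assumes an already-opened subtitle AVCodecContext like the ffSubtitleContext used above:

#include <libavcodec/avcodec.h>

// Hypothetical sketch: decode one subtitle packet, copy out whatever the
// renderer needs, then release the AVSubtitle the decoder filled in.
static void decodeSubtitlePacket(AVCodecContext *subCtx, AVPacket *pkt)
{
    AVSubtitle frame;
    int got = 0;

    if ( avcodec_decode_subtitle2(subCtx, &frame, &got, pkt) < 0 )
        return; // decode error, nothing to free

    if ( got )
    {
        // ... copy the rects, CLUTs and bitmaps into your own buffers here ...

        // The missing call: frees the rects and the bitmap/CLUT memory
        // that the decoder allocated inside the AVSubtitle.
        avsubtitle_free( &frame );
    }
}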