如何正确使用 libvlc (C++) 从 h264 视频流中获取帧?
How correctly use libvlc (C++) to get frames from h264 videostream?
我对使用 libvlc 库从 h264 流获取视频帧的解决方案感兴趣。我已将回调设置为 libvlc_video_set_format_callbacks
并从我的回调函数 (format_callback
) 参数中收到以下信息:色度:"J420"、宽度:1088、高度:1922
当我调用 main
Player p;
p.play("rtsp://path/to/camera?videocodec=h264");
它打印出以下错误
chroma "J420" width: 1088 , height: 1922
[00007fddfc001268] core vout display error: Failed to change zoom
[00007fddfc001268] core vout display error: Failed to set on top
[00007fddfc001268] core vout display error: Failed to change source AR
[h264 @ 0x7fde1c06cea0] error while decoding MB 24 111, bytestream -15
[swscaler @ 0x7fddfc002ca0] bad dst image pointers
[swscaler @ 0x7fddfc002ca0] bad dst image pointers
我的猜测是缓冲区及其大小有问题。在哪里,使用哪种类型和多大的缓冲区来逐帧获取视频?后面打算把帧数据转发给QImage。下面是 Player.h 文件:
const int ResoHeight = 1922;
const int ResoWidth = 1088;
const int BytesPerPixel = 3; // not sure about this
struct ImageData
{
QVector<unsigned char> raw;
QVector<unsigned char> picture;
ImageData()
{
raw.resize(BytesPerPixel * ResoHeight * ResoWidth);
picture.resize(BytesPerPixel * ResoHeight * ResoWidth);
}
};
class Player : public QObject
{
Q_OBJECT
public:
explicit Player(QObject *parent = nullptr);
~Player();
void play(const std::string& path);
signals:
void newImage(const QImage& image);
private:
libvlc_instance_t* vlcInstance;
libvlc_media_player_t* player;
libvlc_media_t* media;
ImageData buffer;
};
Player.cpp 如下:
namespace {
void* lock_frame(void *opaque, void **planes)
{
ImageData* buf = (ImageData*)(opaque);
*planes = buf->raw.data();
return buf->picture.data();
}
void unlock_frame(void *opaque, void *picture, void *const *planes)
{
// will be logic to announce new image
}
unsigned format_callback(void** opaque, char* chroma, unsigned *width, unsigned *height, unsigned *pitches, unsigned *lines)
{
qDebug() << "chroma:" << QString(chroma) << "width:" << *width << ", height:" << *height;
*pitches= (*width) * BytesPerPixel;
*lines= *height;
return 1;
}
} // namespace
Player::Player(QObject* parent)
: QObject(parent)
, vlcInstance(libvlc_new(0, nullptr))
, player(libvlc_media_player_new(vlcInstance))
, media(nullptr)
{
}
Player::~Player()
{
libvlc_media_player_stop(player);
libvlc_media_player_release(player);
libvlc_release(vlcInstance);
}
void Player::play(const std::string& path)
{
media = libvlc_media_new_location(vlcInstance, path.c_str());
libvlc_media_player_set_media(player, media);
libvlc_media_release(media);
libvlc_video_set_callbacks(player, lock_frame, unlock_frame, nullptr, &buffer);
libvlc_video_set_format_callbacks(player, format_callback, nullptr);
libvlc_media_player_play(player);
}
J420 色度是 planar YUV format. Which means you have to provide 3 dimensional pitches
and lines
in format_callback
and 3 different planes
pointers (for each plane) in lock_frame
function. If you just want an RGB(RV24) image, see 。
我对使用 libvlc 库从 h264 流获取视频帧的解决方案感兴趣。我已将回调设置为 libvlc_video_set_format_callbacks
并从我的回调函数 (format_callback
) 参数中收到以下信息:色度:"J420"、宽度:1088、高度:1922
当我调用 main
Player p;
p.play("rtsp://path/to/camera?videocodec=h264");
它打印出以下错误
chroma "J420" width: 1088 , height: 1922
[00007fddfc001268] core vout display error: Failed to change zoom
[00007fddfc001268] core vout display error: Failed to set on top
[00007fddfc001268] core vout display error: Failed to change source AR
[h264 @ 0x7fde1c06cea0] error while decoding MB 24 111, bytestream -15
[swscaler @ 0x7fddfc002ca0] bad dst image pointers
[swscaler @ 0x7fddfc002ca0] bad dst image pointers
我的猜测是缓冲区及其大小有问题。在哪里,使用哪种类型和多大的缓冲区来逐帧获取视频?后面打算把帧数据转发给QImage。下面是 Player.h 文件:
const int ResoHeight = 1922;
const int ResoWidth = 1088;
const int BytesPerPixel = 3; // not sure about this
struct ImageData
{
QVector<unsigned char> raw;
QVector<unsigned char> picture;
ImageData()
{
raw.resize(BytesPerPixel * ResoHeight * ResoWidth);
picture.resize(BytesPerPixel * ResoHeight * ResoWidth);
}
};
class Player : public QObject
{
Q_OBJECT
public:
explicit Player(QObject *parent = nullptr);
~Player();
void play(const std::string& path);
signals:
void newImage(const QImage& image);
private:
libvlc_instance_t* vlcInstance;
libvlc_media_player_t* player;
libvlc_media_t* media;
ImageData buffer;
};
Player.cpp 如下:
namespace {
void* lock_frame(void *opaque, void **planes)
{
ImageData* buf = (ImageData*)(opaque);
*planes = buf->raw.data();
return buf->picture.data();
}
void unlock_frame(void *opaque, void *picture, void *const *planes)
{
// will be logic to announce new image
}
unsigned format_callback(void** opaque, char* chroma, unsigned *width, unsigned *height, unsigned *pitches, unsigned *lines)
{
qDebug() << "chroma:" << QString(chroma) << "width:" << *width << ", height:" << *height;
*pitches= (*width) * BytesPerPixel;
*lines= *height;
return 1;
}
} // namespace
Player::Player(QObject* parent)
: QObject(parent)
, vlcInstance(libvlc_new(0, nullptr))
, player(libvlc_media_player_new(vlcInstance))
, media(nullptr)
{
}
Player::~Player()
{
libvlc_media_player_stop(player);
libvlc_media_player_release(player);
libvlc_release(vlcInstance);
}
void Player::play(const std::string& path)
{
media = libvlc_media_new_location(vlcInstance, path.c_str());
libvlc_media_player_set_media(player, media);
libvlc_media_release(media);
libvlc_video_set_callbacks(player, lock_frame, unlock_frame, nullptr, &buffer);
libvlc_video_set_format_callbacks(player, format_callback, nullptr);
libvlc_media_player_play(player);
}
J420 色度是 planar YUV format. Which means you have to provide 3 dimensional pitches
and lines
in format_callback
and 3 different planes
pointers (for each plane) in lock_frame
function. If you just want an RGB(RV24) image, see