Why is my signal-slot slower than QThreadPool+new+delete?
I was reading Qt's Signals & Slots documentation [1] and noticed that it claims the overhead of signals and slots is much lower than that of any new or delete operation. So I wrote a test:
#include <cmath>
#include <QtCore/QAtomicInt>
#include <QtCore/QCoreApplication>
#include <QtCore/QElapsedTimer>
#include <QtCore/QMetaObject>
#include <QtCore/QMetaMethod>
#include <QtCore/QObject>
#include <QtCore/QRunnable>
#include <QtCore/QTextStream>
#include <QtCore/QThread>
#include <QtCore/QThreadPool>
#include <QtCore/QTimer>
#include <QtCore/QVector>
using std::pow;
constexpr int const maxThreadCount(16);
constexpr int const maxIteration(100000);
constexpr int const maxPiDigit(1000);
void calcPi()
{
double sum(0);
for (int k(0); k < maxPiDigit; ++k) {
double a(4.0 / (k * 8 + 1));
double b(2.0 / (k * 8 + 4));
double c(1.0 / (k * 8 + 5));
double d(1.0 / (k * 8 + 6));
sum += pow(16, -k) * (a - b - c - d);
}
QTextStream out(stdout);
out << sum << endl;
}
class CalcPiWithQObject : public QObject
{
Q_OBJECT
public:
CalcPiWithQObject(QObject *parent = NULL);
public slots:
void start();
signals:
void finished();
}; // CalcPiWithQObject
CalcPiWithQObject::CalcPiWithQObject(QObject *parent):
QObject(parent)
{}
void CalcPiWithQObject::start()
{
calcPi();
emit finished();
}
class CalcPiWithQRunnable : public QRunnable
{
private:
static QAtomicInt count_;
public:
CalcPiWithQRunnable(QThreadPool *parent);
void run() override;
private:
QThreadPool *parent_;
}; // CalcPiWithQRunnable
QAtomicInt CalcPiWithQRunnable::count_(maxThreadCount);
CalcPiWithQRunnable::CalcPiWithQRunnable(QThreadPool *parent):
QRunnable(),
parent_(parent)
{
setAutoDelete(false);
}
void CalcPiWithQRunnable::run()
{
calcPi();
if (count_.fetchAndAddOrdered(1) < maxIteration) {
parent_->start(new CalcPiWithQRunnable(parent_));
}
delete this;
}
class PiTest : public QObject
{
Q_OBJECT
public:
PiTest(QObject *parent = NULL);
public slots:
void start();
void nextQObjectCall();
private:
QVector<QThread *> threads_;
QVector<CalcPiWithQObject *> calc_;
QThreadPool *threadPool_;
QElapsedTimer timer_;
int threadCount_;
int jobCount_;
}; // PiTest
PiTest::PiTest(QObject *parent):
QObject(parent),
threads_(maxThreadCount),
calc_(maxThreadCount),
threadPool_(new QThreadPool(this)),
threadCount_(maxThreadCount),
jobCount_(maxThreadCount)
{
threadPool_->setMaxThreadCount(maxThreadCount);
for (int i(0); i < maxThreadCount; ++i) {
threads_[i] = new QThread();
calc_[i] = new CalcPiWithQObject();
calc_[i]->moveToThread(threads_[i]);
QObject::connect(calc_[i], &CalcPiWithQObject::finished,
this, &PiTest::nextQObjectCall,
Qt::QueuedConnection);
QObject::connect(threads_[i], &QThread::started,
calc_[i], &CalcPiWithQObject::start,
Qt::QueuedConnection);
}
}
void PiTest::start()
{
timer_.start();
for (int i(0); i < maxThreadCount; ++i) {
threadPool_->start(new CalcPiWithQRunnable(threadPool_));
}
threadPool_->waitForDone();
int timePassed(timer_.elapsed());
QTextStream out(stdout);
out << "QThreadPool: " << timePassed << endl;
timer_.restart();
for (int i(0); i < maxThreadCount; ++i) {
threads_[i]->start();
}
}
// Cache the QMetaMethod for CalcPiWithQObject::start(); note that
// indexOfMethod() requires the normalized signature "start()".
static QMetaMethod nextCall(CalcPiWithQObject::staticMetaObject.method(
    CalcPiWithQObject::staticMetaObject.indexOfMethod("start()")));
void PiTest::nextQObjectCall()
{
jobCount_++;
if (jobCount_ < maxIteration) {
// Queue another calcPi() run on the worker object that just finished.
nextCall.invoke(sender(), Qt::QueuedConnection);
return;
}
threadCount_--;
if (threadCount_ == 0) {
for (int i(0); i < maxThreadCount; ++i) {
threads_[i]->quit();
}
int timePassed(timer_.elapsed());
QTextStream out(stdout);
out << "QThread: " << timePassed << endl;
qApp->quit();
}
}
int main(int argc, char *argv[])
{
QCoreApplication app(argc, argv);
PiTest *bench(new PiTest(qApp));
QTimer::singleShot(0, bench, SLOT(start()));
return qApp->exec();
}
#include "main_moc.cpp"
I ran the test on an otherwise idle 20-core machine:
/usr/lib64/qt5/bin/moc -o main_moc.cpp main.cpp
clang++ -std=c++11 -fPIE -O2 -march=native -I/usr/include/qt5/ -L/usr/lib64/qt5 -lQt5Core -o bench main.cpp
./bench > test.out
grep QThread test.out
The results were:
QThreadPool: 4803
QThread: 9285
I tried different parameters, longer pi calculations with fewer jobs and vice versa, but the results were roughly the same: QThread + signals/slots always lagged behind. As the number of jobs grows, QThreadPool + new/delete easily outperforms QThread by up to a factor of 10.
I feel a bit embarrassed about my benchmark code. Am I misunderstanding something here? If signals/slots are faster than new/delete, what is wrong with my benchmark?
Thanks.
Signal performance varies with the connection type. When you make a connection across threads, the connection is queued and uses the event loop to dispatch itself, and the event loop in Qt is not only fairly slow, but the last time I checked it offered no way to increase its update rate.
This makes cross-thread signals very slow, and I have run into cases of fine-grained concurrency where multithreading resulted in a performance loss rather than a gain.
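To illustrate that dispatch path, here is a minimal, self-contained sketch (my own illustration, not part of the original post): with the default Qt::AutoConnection, a signal aimed at a receiver that lives in another thread is not delivered as a plain function call but posted to that thread's event loop.
#include <QtCore/QCoreApplication>
#include <QtCore/QDebug>
#include <QtCore/QObject>
#include <QtCore/QThread>

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);

    QThread worker;
    worker.start();

    QObject sender;                  // lives in the main thread
    QObject receiver;                // context object, moved to the worker thread
    receiver.moveToThread(&worker);

    // The receiver's thread differs from the emitting thread, so the default
    // Qt::AutoConnection resolves to a queued connection: the functor below is
    // posted to the worker's event loop instead of being called at emit time.
    QObject::connect(&sender, &QObject::objectNameChanged, &receiver, []() {
        qDebug() << "slot runs in" << QThread::currentThread();
        QMetaObject::invokeMethod(qApp, "quit", Qt::QueuedConnection);
    });

    qDebug() << "emit happens in" << QThread::currentThread();
    sender.setObjectName("ping");    // emits objectNameChanged in the main thread

    const int ret = app.exec();      // the slot only runs once the event loops dispatch it
    worker.quit();
    worker.wait();
    return ret;
}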
Just to give you an idea of the difference between a direct and a queued connection:
#define COUNT 5000
QElapsedTimer t; // assumed: started just before the first ping(); the original snippet omits its declaration
class Ping : public QObject {
    Q_OBJECT
    Q_SIGNAL void pong(uint);
public slots:
    void ping(uint c) {
        if (c < COUNT) emit pong(++c);
        else qDebug() << t.nsecsElapsed();
    }
};
//...
QObject::connect(&p1, SIGNAL(pong(uint)), &p2, SLOT(ping(uint)), Qt::DirectConnection);
QObject::connect(&p2, SIGNAL(pong(uint)), &p1, SLOT(ping(uint)), Qt::DirectConnection);
//...
p1.ping(0);
Results:
Direct connection (in same thread) - 570504 nsec
Queued connection (in same thread) - 29670333 nsec
Queued connection (different threads) - 53343054 nsec
As you can see, the cross-thread queued connection is about 100 times slower than the direct connection. And I suspect that the documentation you linked to is referring to direct connections.
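For completeness, here is a sketch of how the queued, cross-thread variant of the same ping/pong can be wired up (this is my reconstruction, not the original measurement code; p1, p2 and the timer t are assumed to be the objects from the snippet above, and the worker thread is added here):
QThread worker;
worker.start();
p2.moveToThread(&worker);   // p2's ping() now runs in the worker thread

QObject::connect(&p1, SIGNAL(pong(uint)), &p2, SLOT(ping(uint)), Qt::QueuedConnection);
QObject::connect(&p2, SIGNAL(pong(uint)), &p1, SLOT(ping(uint)), Qt::QueuedConnection);

t.start();
// Kick off the chain through the event loop rather than with a plain call:
QMetaObject::invokeMethod(&p1, "ping", Qt::QueuedConnection, Q_ARG(uint, 0u));
// ... run an event loop in the main thread; the final ping() prints t.nsecsElapsed()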
All in all, I would say your test is a mess. You should really trim it down, keep it simple, and focus on the question you are actually asking.
Finally, a direct connection may well be faster than new/delete, but a queued connection certainly is not; queued connections are much slower, and that is definitely the key factor behind the performance difference. The claim in the documentation you linked has nothing whatsoever to do with the relative performance of QThread + worker versus QRunnable + thread pool. And in the end, in both cases you are using both dynamic memory allocation/deallocation and queued connections.
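If you only want to test the claim from the documentation, a stripped-down sketch along these lines (my suggestion, with arbitrary names and iteration counts; it needs moc exactly like the code above, and the bench_moc.cpp file name is hypothetical) compares a directly connected signal emission against a plain new/delete pair in a single thread:
#include <QtCore/QCoreApplication>
#include <QtCore/QDebug>
#include <QtCore/QElapsedTimer>
#include <QtCore/QObject>

class Counter : public QObject
{
    Q_OBJECT
public:
    int hits = 0;
    void fire() { emit tick(); }
signals:
    void tick();
public slots:
    void onTick() { ++hits; }
};

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);
    constexpr int iterations = 1000000;

    Counter c;
    QObject::connect(&c, &Counter::tick, &c, &Counter::onTick,
                     Qt::DirectConnection);

    QElapsedTimer timer;
    timer.start();
    for (int i = 0; i < iterations; ++i)
        c.fire();                        // direct connection: an ordinary call chain
    qDebug() << "direct signal/slot:" << timer.nsecsElapsed() << "ns, hits:" << c.hits;

    int *volatile keep = 0;              // volatile to discourage the compiler from
                                         // optimising the allocation away entirely
    timer.restart();
    for (int i = 0; i < iterations; ++i) {
        keep = new int(i);               // heap allocation
        delete keep;                     // heap deallocation
    }
    qDebug() << "new/delete:" << timer.nsecsElapsed() << "ns";
    return 0;
}
#include "bench_moc.cpp"                 // hypothetical moc output, as in the build commands above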