Why is a Go channel much faster than Intel TBB concurrent_queue when tested with 8 producers and 1 consumer?

I made a test to compare the performance of a Go channel against the C++ TBB concurrent queue. I set up 8 writers and 1 reader running on different threads. The result shows that the Go version is much faster than the C++ one (both in latency and in overall send/recv throughput). Is that real, or is there some mistake in my code?

Go result (unit: microseconds):

latency max: 1505, avg: 1073; send begin: 1495593677683232, recv end: 1495593677901854, time: 218622

package main

import (
    "flag"
    "time"
    "fmt"
    "sync"
    "runtime"
)

var (
    producer = flag.Int("producer", 8, "producer")
    consumer = flag.Int("consumer", 1, "consumer")
    start_signal sync.WaitGroup
)

const (
    TEST_NUM = 1000000
)

type Item struct {
    id       int
    sendtime int64
    recvtime int64
}

var g_vec [TEST_NUM]Item

func sender(out chan int, begin int, end int) {
    start_signal.Wait()
    runtime.LockOSThread()
    println("i am in sender", begin, end)
    for i := begin; i < end; i++ {
        item := &g_vec[i]
        item.id = i
        item.sendtime = time.Now().UnixNano() / 1000 // microseconds
        out <- i
    }
    println("sender finish")
}

func reader(out chan int, total int) {
    //runtime.LockOSThread()
    start_signal.Done()
    for i := 0; i < total; i++ {
        tmp := <-out
        item := &g_vec[tmp]
        item.recvtime = time.Now().UnixNano() / 1000 // microseconds
    }
    var lsum int64 = 0
    var lavg int64 = 0
    var lmax int64 = 0
    var lstart int64 = 0
    var lend int64 = 0
    for _, item := range g_vec {
        if lstart > item.sendtime || lstart == 0 {
           lstart = item.sendtime
        }
        if lend < item.recvtime {
            lend = item.recvtime
        }

        ltmp := item.recvtime - item.sendtime
        lsum += ltmp
        if  ltmp > lmax {
            lmax = ltmp
        }
    }
    lavg = lsum / TEST_NUM
    fmt.Printf("latency max:%v,avg:%v\n", lmax, lavg)
    fmt.Printf("send begin:%v,recv end:%v, time:%v", lstart, lend, lend-lstart)
}

func main() {
    flag.Parse() // without this the -producer/-consumer flags are never read
    runtime.GOMAXPROCS(10)
    out := make(chan int, 5000)

    start_signal.Add(1)
    for i := 0; i < *producer; i++ {
        go sender(out, i*TEST_NUM/(*producer), (i+1)*TEST_NUM/(*producer))
    }
    reader(out, TEST_NUM)
}

C++ result (unit: microseconds): max: 558301, min: 3, avg: 403741; start: 1495594232068580, end: 1495594233497618, length: 1429038

C++ code (only the main part):

concurrent_bounded_queue<int> g_queue;

static void sender(int start, int end)
{   
    for (int i=start; i < end; i++)
    {
        using namespace std::chrono;
        auto now = system_clock::now();
        auto now_ms = time_point_cast<microseconds>(now);
        auto value = now_ms.time_since_epoch();
        int64_t duration = value.count();

        Item &item = g_pvec->at(i);
        item.id = i;
        item.sendTime = duration;
        //std::cout << "sending " << i << "\n";
        g_queue.push(i);
    }
}


static void reader(int num)
{
    barrier.set_value();    // release the senders waiting on the shared future
    for (int i=0;i<num;i++)
    {
        int v;
        g_queue.pop(v);
        Item &el = g_pvec->at(v);

        using namespace std::chrono;
        auto now = system_clock::now();
        auto now_ms = time_point_cast<microseconds>(now);
        auto value = now_ms.time_since_epoch();
        int64_t duration = value.count();

        el.recvTime = duration;
        //std::cout << "recv " << item.id << ":" << duration << "\n";
    }
    // calculate the result.
    int64_t lmax = 0;
    int64_t lmin = 100000000;
    int64_t lavg = 0;
    int64_t lsum = 0;
    int64_t lbegin = 0;
    int64_t lend = 0;
    for (auto &item : *g_pvec)
    {
        if (item.sendTime<lbegin || lbegin==0)
        {
            lbegin = item.sendTime;
        }
        if (item.recvTime>lend )
        {
            lend = item.recvTime;
        }

        lsum += item.recvTime - item.sendTime;
        lmax = max(item.recvTime - item.sendTime, lmax);
        lmin = min(item.recvTime - item.sendTime, lmin);
    }
    lavg = lsum / num;
    std::cout << "max:" << lmax << ",min:" << lmin << ",avg:" << lavg << "\n";
    std::cout << "start:" << lbegin << ",end:" << lend << ",length:" << lend-lbegin << "\n";
}

DEFINE_CODE_TEST(plain_queue_test)
{
    g_pvec = new std::vector<Item>();
    g_pvec->resize(TEST_NUM);    

    auto sf = barrier.get_future().share();

    std::vector<std::thread> vt;
    for (int i = 0; i < SENDER_NUM; i++)
    {
        vt.emplace_back([sf, i]{    
            sf.wait();
            sender(i*TEST_NUM / SENDER_NUM, (i + 1)*TEST_NUM / SENDER_NUM);
        });
    }


    std::cout << "create reader\n";
    std::thread rt(bind(reader, TEST_NUM));
    for (auto& t : vt)
    {
        t.join();
    }    
    rt.join();
}

From the VTune CPU graph (red means CPU spin/overhead, green means idle), I feel that the Go channel has a more efficient mutex (for example, does it need a syscall to put a goroutine to sleep, the way a C++ mutex does?).
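
As a rough illustration of that hypothesis, here is a minimal sketch (not part of the original benchmark) that parks a large number of goroutines on a channel receive and compares the goroutine count with the number of OS threads the Go runtime has created. A blocked channel receive parks the goroutine inside the Go runtime's scheduler, so it does not need one sleeping OS thread per blocked receiver the way a thread blocked on a C++ mutex/condition variable does. (The runtime can still fall into an OS-level wait when a whole thread runs out of work, so this does not mean channels avoid syscalls entirely.)

package main

import (
    "fmt"
    "runtime"
    "runtime/pprof"
    "time"
)

func main() {
    block := make(chan struct{}) // nothing is ever sent, so every receiver stays parked

    for i := 0; i < 10000; i++ {
        go func() { <-block }() // each goroutine is queued on the channel's wait list
    }

    time.Sleep(100 * time.Millisecond) // give the goroutines time to block

    // Ten thousand blocked goroutines, but only a handful of OS threads.
    fmt.Println("blocked goroutines:", runtime.NumGoroutine())
    fmt.Println("OS threads created:", pprof.Lookup("threadcreate").Count())
}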

From the VTune traces I can conclude that the TBB queue does not sleep and spends a lot of time spinning, while the Go version has light green regions indicating threads sleeping on OS synchronization. Why is that better? Usually, it indicates that your machine is oversubscribed, so communicating through the OS pays off. So, do you oversubscribe it? If yes, I would say this is rather expected behavior, which fits the philosophy of the corresponding libraries. TBB is designed for compute parallelism, and it does not handle IO tasks well while fighting oversubscription. Go is designed exactly for IO tasks, hence the built-in concurrency with the scheduler's FIFO policy, which is unfriendly to parallel number crunching. Oversubscription is rather recommended for IO tasks, while it hurts or even kills computational parallelism.
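
To make the oversubscription question concrete, here is a small check (a sketch, not taken from the answer): with runtime.LockOSThread in every sender, the Go test needs roughly producer+1 hardware threads (8 senders plus the reader), and the C++ test likewise runs SENDER_NUM+1 threads. If the machine cannot give each of those threads its own core, the spinning TBB threads time-slice against each other, while the sleeping Go threads get out of the way.

package main

import (
    "fmt"
    "runtime"
)

func main() {
    producers := 8          // matches the -producer default in the benchmark above
    needed := producers + 1 // sender threads plus one reader thread

    cpus := runtime.NumCPU()
    fmt.Printf("hardware threads: %d, benchmark threads: %d\n", cpus, needed)
    if needed > cpus {
        fmt.Println("oversubscribed: spinning threads will compete for the same cores")
    } else {
        fmt.Println("not oversubscribed: spinning is less harmful in this case")
    }
}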