使用 tbb 从数组中进行并行保序选择

Parallel order-preserving selection from an array using tbb

我有一个范围图像(range image),想把它转换成 libpointmatcher 点云。点云是一个 Eigen::Matrix,有 4 行 (x,y,z,1),每个可见点对应一列。范围图像是一个包含范围值 (z) 的 unsigned short* 数组,以及一个包含像素可见性信息的 unsigned char* 数组。

在串行中,我的代码如下所示:

//container to hold the visible points, in image (row-major) order
std::vector<Eigen::Vector4d> vec;
// NOTE(review): reserve uses this->Height/Width while the loops below use
// range_image.Height/Width — presumably the same values; confirm.
vec.reserve(this->Height*this->Width);

//contains information about pixel visibility (non-zero == visible)
unsigned char* mask_data = (unsigned char*)range_image.mask.ToPointer();
//contains the actual pixel (range / z) data
unsigned short* pixel_data = (unsigned short*)range_image.pixel.ToPointer();

//row-major scan keeps the output points in correct order
for (int y = 0; y < range_image.Height; y++)
{
    for (int x = 0; x < range_image.Width; x++)
    {
        int index = x + y*range_image.Width;
        if (mask_data[index] != 0)
        {
            // BUG FIX: the original dereferenced *(data+index), but the
            // pixel buffer was declared as pixel_data — 'data' does not
            // exist in this scope.
            vec.push_back(Eigen::Vector4d(x, y, (double)pixel_data[index], 1));
        }
    }
}
// libpointmatcher point cloud sized to the number of visible pixels
PM::Matrix features(4, vec.size());
PM::DataPoints::Labels featureLabels;
featureLabels.resize(4);
featureLabels[0] = PM::DataPoints::Label::Label("x");
featureLabels[1] = PM::DataPoints::Label::Label("y");
featureLabels[2] = PM::DataPoints::Label::Label("z");
featureLabels[3] = PM::DataPoints::Label::Label("pad");

//fill the feature matrix column by column
//(size_t avoids the signed/unsigned comparison of the original int loop)
for (size_t i = 0; i < vec.size(); i++)
{
    features.col(i) = vec[i];
}

由于图像较大,此循环需要 500 毫秒来处理 840000 个点,这太慢了。现在我的想法是将上面的代码集成到一个并行函数中。问题是 Eigen::Matrix 不提供 push_back 功能,我事先不知道可见点的数量,我需要按正确顺序处理点云的点。

所以我需要一个并行算法来从我的范围图像中提取可见的 3D 点,并以正确的顺序将它们插入 Eigen::Matrix。我正在使用 Microsoft Visual Studio 2012,可以使用 OpenMP 2.0 和 TBB。感谢您的帮助:)

更新

根据 Arch D. Robison 的建议,我尝试了 tbb::parallel_scan。我传递了掩码数组和一个 double 数组来保存 3D 坐标。输出数组的大小是输入数组的四倍,用于存储同质 3D 数据 (x,y,z,1)。然后我将 output 数组映射到 Eigen::Matrix。行数是固定的,列数来自 parallel_scan。

// total number of pixels in the range image
size_t vec_size = width*height;
// homogeneous output buffer: 4 doubles (x,y,z,1) per pixel, sized for the
// worst case (every pixel visible).
// NOTE(review): allocated with new[] and never delete[]'d in this snippet;
// Map does not take ownership — free it once 'features' is no longer used.
double* out = new double[vec_size * 4];
// m1 = number of visible pixels written by the parallel scan
size_t m1 = Compress(mask, pixel, out, height, width,
 [](unsigned char x)  {return x != 0; });
// wrap the first m1 columns as a 4 x m1 Eigen matrix without copying
Map<MatrixXd> features(out, 4, m1);

这是来自 operator() 的代码:
// Scan body for tbb::parallel_scan over a 2D range (rows = y, cols = x).
// In general it is invoked twice per sub-range: once with a pre-scan Tag
// (count matches only) and once with a final-scan Tag (count and write
// output). 'sum' holds the running count of matches before this sub-range.
// NOTE(review): the enclosing template<typename Tag> header and the member
// declarations (in, in2, out, pred, width, sum) are outside this fragment.
void operator()(const tbb::blocked_range2d<size_t, size_t>& r, Tag) {
    // Use local variables instead of member fields inside the loop,
    // to improve odds that values will be kept in registers.
    size_t j = sum;                     // running output slot index
    const unsigned char* m = in;        // visibility mask
    const unsigned short* p = in2;      // raw range (z) values
    T* values = out;                    // packed homogeneous output
    size_t yend = r.rows().end();
    for (size_t y = r.rows().begin(); y != yend; ++y)
    {
        size_t xend = r.cols().end();
        for (size_t x = r.cols().begin(); x != xend; ++x)
        {
            // linear pixel index into the row-major image buffers
            size_t index = x + y*width;
            if (pred(m[index]))
            {
                // Only the final scan writes; the pre-scan just counts.
                if (Tag::is_final_scan())
                {
                    // 4 doubles per output point: (x, y, z, 1)
                    size_t idx = j*4;
                    values[idx] = (double)x;
                    values[idx + 1] = (double)y;
                    values[idx + 2] = p[index];
                    values[idx + 3] = 1.0;
                }
                ++j;
            }
        }
    }
    // publish the count so TBB can propagate it to the next sub-range
    sum = j;
}

我现在比串行版本快 4 倍。您如何看待这种方法?我是否遗漏了什么?还有可以改进的地方吗?谢谢

为什么不在执行 m_features(0,index) = x; 之前先检查条件 *(m_maskData+index)==0 呢?

这里是一个如何用 tbb::parallel_scan 实现类似 std::copy_if 的例子。关键方法是 operator(),通常每个子范围调用两次,一次用于预扫描,一次用于最终扫描。(但请注意,TBB 在不需要时会省略预扫描。)这里预扫描只进行计数,最终扫描完成最后的工作(包括重放计数)。请参阅 https://software.intel.com/sites/default/files/bc/2b/parallel_scan.pdf 了解这些方法的更多细节。另一个很好的参考是 https://www.cs.cmu.edu/~guyb/papers/Ble93.pdf ,其中展示了使用并行扫描(又称 prefix-sum)可以做的很多事情。

```

#include "tbb/parallel_scan.h"
#include "tbb/blocked_range.h"

#include <cstddef>
#include <vector>

// parallel_scan body implementing an order-preserving copy_if:
// the pre-scan pass counts matching elements, the final-scan pass
// writes them to their already-known output slots.
template<typename T, typename Pred>
class Body {
    const T* const src;   // input sequence
    T* const dst;         // compacted output
    Pred keep;            // selection predicate
    size_t count;         // matches seen up to / within this body's range
public:
    Body( T* in_, T* out_, Pred pred_) :
        src(in_), dst(out_), keep(pred_), count(0)
    {}
    // Total number of elements copied (valid after the scan completes).
    size_t getSum() const {return count;}
    // Invoked once or twice per sub-range; Tag distinguishes the
    // counting pre-scan from the writing final scan.
    template<typename Tag>
    void operator()( const tbb::blocked_range<size_t>& r, Tag ) {
        // Work on locals rather than member fields inside the loop so
        // the compiler can keep them in registers.
        size_t pos = count;
        const T* from = src;
        T* to = dst;
        for( size_t i=r.begin(); i<r.end(); ++i ) {
            if( keep(from[i]) ) {
                if( Tag::is_final_scan() )
                    to[pos] = from[i];
                ++pos;
            }
        }
        count = pos;
    }
    // Splitting constructor used for a parallel fork. It deliberately
    // starts from count(0), not b.count, because this instance computes
    // a partial count; reverse_join stitches the partials together.
    Body( Body& b, tbb::split ) :
        src(b.src), dst(b.dst), keep(b.keep), count(0)
    {}
    // Combine partial counts of two bodies. "this" corresponds to the
    // split-off (right-hand) part and "a" to the original (left-hand)
    // part — hence the name "reverse" join.
    void reverse_join( Body& a ) {
        count += a.count;
    }
    // Adopt the final count from the body that completed the scan.
    void assign( Body& b ) {count = b.count;}
};

// Copy to out each element of in that satisfies pred.
// Return number of elements copied.
template<typename T, typename Pred>
size_t Compress( T* in, T* out, size_t n, Pred pred ) {
    Body<T,Pred> b(in,out,pred);
    tbb::parallel_scan(tbb::blocked_range<size_t>(0,n), b);
    return b.getSum();
}

#include <cmath>
#include <algorithm>
#include <cassert>

// Self-test: compare the parallel Compress against serial std::copy_if.
int main() {
    const size_t n = 10000000;
    // FIX: the original allocated a/b/c with new[] and never freed them
    // (three leaks); std::vector handles the lifetime. Note requires
    // <vector> (added to the include block at the top of the file).
    std::vector<float> a(n);
    std::vector<float> b(n);
    std::vector<float> c(n);
    for( size_t i=0; i<n; ++i )
        a[i] = std::cos(float(i));
    // parallel, order-preserving selection of the negative values
    size_t m1 = Compress(a.data(), b.data(), n, [](float x) {return x<0;});
    // serial reference result
    size_t m2 = std::copy_if(a.begin(), a.end(), c.begin(),
                             [](float x) {return x<0;}) - c.begin();
    assert(m1==m2);
    // FIX: only the first m1 slots were written; the original compared
    // all n slots, reading indeterminate values past m1.
    for( size_t i=0; i<m1; ++i )
        assert(b[i]==c[i]);
}

```