OpenMP reduction with Eigen::VectorXd

I am trying to parallelize the following loop with an OpenMP reduction:

#define EIGEN_DONT_PARALLELIZE
#include <iostream>
#include <cmath>
#include <string>
#include <eigen3/Eigen/Dense>
#include <eigen3/Eigen/Eigenvalues>
#include <omp.h>

using namespace Eigen;
using namespace std;

VectorXd integrand(double E)
{
    VectorXd answer(500000);
    double f = 5.*E + 32.*E*E*E*E;
    for (int j = 0; j !=50; j++)
        answer[j] =j*f;
    return answer;
}

int main()
{
    omp_set_num_threads(4);
    double start = 0.;
    double end = 1.;
    int n = 100;
    double h = (end - start)/(2.*n);

    VectorXd result(500000);
    result.fill(0.);
    double E = start;
    result = integrand(E);
    #pragma omp parallel
    { 
    #pragma omp for nowait
    for (int j = 1; j <= n; j++){
        E = start + (2*j - 1.)*h;
        result = result + 4.*integrand(E);
        if (j != n){
            E = start + 2*j*h;
            result = result + 2.*integrand(E);
        }
    }
    }
    for (int i=0; i <50 ; ++i)
        cout<< i+1 << " , "<< result[i] << endl;

    return 0;
}

This is definitely faster than without parallelization, but with 4 threads the results vary wildly from run to run. When the number of threads is set to 1, the output is correct. I'd be very grateful if someone could help me with this...

I am using the clang compiler with the compile flags:

clang++-3.8 energy_integration.cpp -fopenmp=libiomp5

If this is a bust, then I'll have to learn to implement Boost::thread or std::thread...

Your code does not define a custom reduction for OpenMP to reduce Eigen objects. I'm not sure whether clang supports user-defined reductions (see the OpenMP 4 spec, page 180). If it does, you can declare the reduction and add reduction(+:result) to the #pragma omp for line. If not, you can do it yourself by changing your code as follows:

VectorXd result(500000); // This is the final result, not used by the threads
result.fill(0.);
double E = start;
result = integrand(E);
#pragma omp parallel
{
    // This is a private copy per thread. This resolves race conditions between threads
    VectorXd resultPrivate(500000);
    resultPrivate.fill(0.);
#pragma omp for nowait // reduction(+:result) // assuming user-defined reductions aren't available
    for (int j = 1; j <= n; j++) {
        // E is declared locally so each thread works with its own copy;
        // writing to the shared E from main would be another race condition
        double E = start + (2 * j - 1.)*h;
        resultPrivate = resultPrivate + 4.*integrand(E);
        if (j != n) {
            E = start + 2 * j*h;
            resultPrivate = resultPrivate + 2.*integrand(E);
        }
    }
#pragma omp critical
    {
        // Here we sum the results of each thread one at a time
        result += resultPrivate;
    }
}
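
A quick way to sanity-check the parallel result is to repeat the accumulation serially afterwards and compare, for example as below. Only the first 50 entries are ever filled by integrand, so compare just those; any difference should be at the level of floating-point rounding, since the parallel version only changes the order of the additions.

VectorXd serial = integrand(start);   // same starting term as result above
for (int j = 1; j <= n; j++) {
    double E = start + (2 * j - 1.)*h;
    serial = serial + 4.*integrand(E);
    if (j != n) {
        E = start + 2 * j*h;
        serial = serial + 2.*integrand(E);
    }
}
// Compare only the first 50 entries; the rest are never initialized by integrand
cout << "max abs difference: " << (result - serial).head(50).cwiseAbs().maxCoeff() << endl;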

The error you are getting () appears to be due to a size mismatch. While your code itself doesn't have a trivial problem per se, don't forget that when OpenMP starts each thread, it has to initialize a private VectorXd for that thread. If none is supplied, the default is VectorXd() (of size zero), and the size mismatch occurs as soon as that object is used. The "correct" usage of omp declare reduction therefore includes the initializer part:

#pragma omp declare reduction (+: VectorXd: omp_out=omp_out+omp_in)\
     initializer(omp_priv=VectorXd::Zero(omp_orig.size()))

omp_priv is the name of the private variable. It is initialized with VectorXd::Zero(...), with the size specified via omp_orig. The standard (page 182, lines 25-27) defines it as:

The special identifier omp_orig can also appear in the initializer-clause and it will refer to the storage of the original variable to be reduced.

In our case (see the full example below) that is result, so result.size() is 500000 and each private variable is initialized to the correct size.
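
To make the roles of these special identifiers concrete, here is a minimal, self-contained sketch of the same declare-reduction pattern applied to a std::vector<double> (the names vsum and v are purely illustrative):

#include <algorithm>   // std::transform
#include <cstdio>
#include <functional>  // std::plus
#include <vector>

// Combiner: merge one partial result (omp_in) into another (omp_out), element-wise.
// Initializer: give each thread a private copy (omp_priv) that is zero-filled and
// has the same size as the original reduction variable (omp_orig).
#pragma omp declare reduction(vsum : std::vector<double> : \
        std::transform(omp_out.begin(), omp_out.end(), omp_in.begin(), \
                       omp_out.begin(), std::plus<double>())) \
    initializer(omp_priv = std::vector<double>(omp_orig.size(), 0.0))

int main()
{
    std::vector<double> v(5, 0.0);   // the original variable; its size seeds omp_orig.size()
#pragma omp parallel for reduction(vsum : v)
    for (int i = 0; i < 100; ++i)
        v[i % 5] += 1.0;             // each thread updates its own correctly-sized copy
    for (double x : v)
        std::printf("%g\n", x);      // prints 20 five times
    return 0;
}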

#include <iostream>
#include <string>
#include <Eigen/Core>
#include <omp.h>

using namespace Eigen;
using namespace std;

VectorXd integrand(double E)
{
    VectorXd answer(500000);
    double f = 5.*E + 32.*E*E*E*E;
    for (int j = 0; j != 50; j++)   answer[j] = j*f;
    return answer;
}

#pragma omp declare reduction (+: Eigen::VectorXd: omp_out=omp_out+omp_in)\
     initializer(omp_priv=VectorXd::Zero(omp_orig.size()))

int main()
{
    omp_set_num_threads(4);
    double start = 0.;
    double end = 1.;
    int n = 100;
    double h = (end - start) / (2.*n);

    VectorXd result(500000);
    result.fill(0.);
    double E = start;
    result = integrand(E);

#pragma omp parallel for reduction(+:result)
    for (int j = 1; j <= n; j++) {
        // E is local to the loop body so that each thread uses its own copy
        double E = start + (2 * j - 1.)*h;
        result += (4.*integrand(E)).eval();
        if (j != n) {
            E = start + 2 * j*h;
            result += (2.*integrand(E)).eval();
        }
    }
    for (int i = 0; i < 50; ++i)
        cout << i + 1 << " , " << result[i] << endl;

    return 0;
}
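
For reference, assuming Eigen's headers live under /usr/include/eigen3 (adjust the include path to your installation) and that your clang build supports OpenMP 4.0 user-defined reductions, this should build with something along the lines of:

clang++-3.8 energy_integration.cpp -I/usr/include/eigen3 -fopenmp=libiomp5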