使用组并行从 OpenACC 调用 CUDA 函数

Calling CUDA function from OpenACC with gang parallelism

我正在尝试从 openacc 调用 Zhang 的三对角求解器代码。 我执行以下操作:

我将他的代码放在一个单独的文件中并编译调用 pcr.cu

#include <cstdio>
#include <cuda_runtime.h>

// Parallel Cyclic Reduction (PCR) solver for a tridiagonal system, one thread
// per row.
//
//   alist / blist / clist : sub-, main- and super-diagonal (modified in place)
//   dlist                 : right-hand side (modified in place)
//   xlist                 : solution vector (output)
//   iter_max              : number of reduction sweeps (~log2(DMax))
//   DMax                  : number of rows in the system
//
// Expects to run as a SINGLE thread block with one thread per row
// (idx_row < DMax): __syncthreads() only synchronizes within a block, so a
// multi-block launch would race on the shared diagonal arrays.
// NOTE(review): there is no bounds check on idx_row; launching more threads
// than DMax rows reads/writes out of bounds -- confirm the launch config.
__device__ void Solve_Kernel_PCR(float * alist, float * blist, float * clist, float * dlist, float * xlist, int iter_max, int DMax)
{

    int idx_row = blockIdx.x*blockDim.x + threadIdx.x;  // row owned by this thread
    int row_max = DMax - 1;

    int stride = 1;             // current distance between coupled rows
    int next_stride = stride;

    float a1, b1, c1, d1;       // updated coefficients for this row
    float k01, k21, c01, a21, d01, d21;

    bool next_or_ot = true;     // keep reducing while >= 3 coupled rows remain
    int accum = 0;              // rows still coupled to this one (init guards iter_max <= 0)


    for (int iter = 0; iter < iter_max; iter++)
    {
        if ( next_or_ot ) {

            next_stride = stride<<1;

            // 1    eliminate 'a' using the row `stride` above
            if ((idx_row - stride)<0) {
            // 1.1  this is the 'first' row of its subsystem -- nothing above
                a1 = 0.0f;
                k01 = 0.0f;
                c01 = 0.0f;
                d01 = 0.0f;
            } else if ((idx_row - next_stride)<0) {
            // 1.2  the row above exists, but has no row `stride` above itself
                a1 = 0.0f;
                k01 = alist[idx_row]/blist[idx_row - stride];
                c01 = clist[idx_row - stride]*k01;
                d01 = dlist[idx_row - stride]*k01;
            } else {
            // 1.3  general interior row
                k01 = alist[idx_row]/blist[idx_row - stride];
                a1 = -alist[idx_row - stride]*k01;
                c01 = clist[idx_row - stride]*k01;
                d01 = dlist[idx_row - stride]*k01;
            }

            // 2    eliminate 'c' using the row `stride` below (mirror of step 1)
            if ((idx_row + stride)>row_max) {
            // 2.1  this is the 'last' row of its subsystem -- nothing below
                c1 = 0.0f;
                k21 = 0.0f;
                a21 = 0.0f;
                d21 = 0.0f;
            } else if ((idx_row + next_stride)>row_max) {
            // 2.2  the row below exists, but has no row `stride` below itself
                c1 = 0.0f;
                k21 = clist[idx_row]/blist[idx_row + stride];
                a21 = alist[idx_row + stride]*k21;
                d21 = dlist[idx_row + stride]*k21;
            } else {
            // 2.3  general interior row
                k21 = clist[idx_row]/blist[idx_row + stride];
                c1 = -clist[idx_row + stride]*k21;
                a21 = alist[idx_row + stride]*k21;
                d21 = dlist[idx_row + stride]*k21;
            }
            // 3   updated main diagonal
            b1 = blist[idx_row] - c01 - a21;
            // 4   updated right-hand side
            d1 = dlist[idx_row] - d01 - d21;

            stride = next_stride;

            // Count how many of the rows {idx_row-2s, -s, 0, +s, +2s} are
            // still inside the system; fewer than 3 means this row's
            // subsystem has shrunk to at most 2 unknowns.
            int pos = idx_row-2*stride;
            accum = 0;
            for ( size_t probe = 0; probe<5; probe++ ) {  // renamed: no longer shadows outer `iter`
                if (pos >=0 && pos < DMax) accum++;
                pos+=stride;
            }
            if (accum < 3) {
                next_or_ot = false; // turn off further reduction for this row, for good
            }

        }

        // Wait until every thread has finished READING the old coefficients
        // before anyone overwrites them.  Reached by all threads: the
        // divergent `next_or_ot` branch closes above this barrier.
        __syncthreads();

        alist[idx_row] = a1;
        blist[idx_row] = b1;
        clist[idx_row] = c1;
        dlist[idx_row] = d1;

        // BUG FIX: also wait until every thread has finished WRITING before
        // the next sweep (or the back-substitution below) reads neighbor rows
        // at +/-stride again.  Without this barrier those reads race with the
        // writes above once more than one warp is active, which is exactly
        // the "wrong results for sizes > 32" symptom.
        __syncthreads();

    }

    // Back substitution: every remaining subsystem has 1 or 2 unknowns.
    if ( accum==1 ) {
        // fully decoupled 1x1 system
        xlist[idx_row] = dlist[idx_row] / blist[idx_row];
    } else if ( (idx_row-stride)<0 ) {
        // 2x2 system; this thread's row is the upper one
        int i = idx_row; int k = idx_row+stride;
        float f = clist[i]/blist[k];
        xlist[i] = (dlist[i]-dlist[k]*f)/(blist[i]-alist[k]*f);
    } else {
        // 2x2 system; this thread's row is the lower one
        int i = idx_row - stride; int k = idx_row;
        float f = alist[k]/blist[i];
        xlist[k] = (dlist[k]-dlist[i]*f)/(blist[k]-clist[i]*f);
    }

}

和 main.cpp 文件:

 // Host-side prototype for the CUDA device routine compiled separately in
 // pcr.cu.  `bind` gives the C++-mangled symbol name so the OpenACC runtime
 // can link the call; `gang` declares it is invoked at gang parallelism.
 #pragma acc routine gang bind("_Z16Solve_Kernel_PCRPfS_S_S_S_ii")
__device__ void Solve_Kernel_PCR(float * alist, float * blist, float * clist, float * dlist, float * xlist, int iter_max, int DMax); 

// Driver: discretizes -u'' = f on [0,1] with f(x) = -pi^2 sin(OMEGA*pi*x),
// solves the resulting tridiagonal system with the CUDA PCR routine invoked
// from an OpenACC parallel region, and reports the max (L-infinity) error.
// Relies on OMEGA and count_iter() being defined elsewhere in this file.
// NOTE(review): requires <cstdio>, <cstdlib>, <cmath> -- confirm they are
// included at the top of main.cpp.
int main(int argc, char *argsv[]) {

  // Validate the command line before touching argsv[1].
  if (argc < 2) {
    fprintf(stderr, "usage: %s diagonal_size\n", argsv[0]);
    return 1;
  }
  size_t diagonal_size = (size_t)atoi(argsv[1]);
  if (diagonal_size < 2) {   // delx below divides by diagonal_size - 1
    fprintf(stderr, "diagonal_size must be >= 2\n");
    return 1;
  }

  float *alist = (float *)malloc(sizeof(float) * diagonal_size);
  float *blist = (float *)malloc(sizeof(float) * diagonal_size);
  float *clist = (float *)malloc(sizeof(float) * diagonal_size);
  float *dlist = (float *)malloc(sizeof(float) * diagonal_size);
  float *xlist = (float *)malloc(sizeof(float) * diagonal_size);

  float delx = 1.0f / (diagonal_size - 1);   // grid spacing (float literal, no double promotion)

  // Constant-coefficient second-difference stencil: [1 -2 1].
  for (size_t i = 0; i < diagonal_size; i++) {
    alist[i] = 1.0f;
    blist[i] = -2.0f;
    clist[i] = 1.0f;
    xlist[i] = 0.0f;
  }

  float pi = atan(1.0) * 4.0;

  // Right-hand side: f scaled by delx^2 to match the stencil.
  for (size_t i = 0; i < diagonal_size; i++) {
    dlist[i] = -pi * pi * sin(OMEGA * i * delx * pi) * delx * delx;
  }
  alist[0] = 0.0;                     // no sub-diagonal on the first row
  clist[diagonal_size - 1] = 0.0;     // no super-diagonal on the last row
  int DMax = diagonal_size;
  int iter_max = count_iter(DMax);    // number of PCR sweeps (~log2(DMax))

  printf("iter_max= %d\n", iter_max);

  int ngang = 1;   // single gang: the routine relies on block-level __syncthreads()

 #pragma acc data copy(alist[0:diagonal_size],blist[0:diagonal_size],clist[0:diagonal_size],dlist[0:diagonal_size] ,xlist[0:diagonal_size]  )
    {    
    #pragma acc parallel num_gangs(ngang) 
   Solve_Kernel_PCR(alist, blist, clist,  dlist, xlist, iter_max, DMax);
    }

  for (size_t it = 0; it < diagonal_size; it++) {
    printf("%f \n", dlist[it]);
  }

  // NOTE(review): the solver writes the solution into xlist; comparing dlist
  // (the PCR-modified right-hand side) against the analytic solution looks
  // like leftover debugging -- confirm whether xlist was intended here.
  float err0 = 0.0;
  float err1 = 0.0;
  for (size_t i = 1; i < diagonal_size - 1; i++) {
    err1 = fabs(dlist[i] - sin(OMEGA * i * delx * pi));
    if (err0 < err1) {
      err0 = err1;
    }
  }

  printf(" l infinity of Error = %lf \n", err0);

  // Release the host buffers (the original leaked all five).
  free(alist);
  free(blist);
  free(clist);
  free(dlist);
  free(xlist);
  return 0;
}

这是一个简单的测试用例,因为我想从 openacc gang 例程中调用此函数。

编译cuda代码 "nvcc -rdc=true -arch=sm_60" 并与 pgc++ 链接,链接正常。

但是输入值 > 32 的结果是错误的

我假设这是因为默认向量长度为 32。 我需要能够处理高达 1023 的网格。这个问题有什么解决方案吗?

尝试添加标志 -ta=tesla:gvmode

"gvmode"(gang-vector 模式)是一个未记录的标志,它将在调用设备例程时禁用仅使用长度为 32 的向量的限制。默认情况下,PGI 将 OpenACC 例程的向量长度限制为 32。这是为了支持归约(reduction)例程,以及限制会损害性能的线程同步量。

虽然您的设备例程是用 CUDA 编写的,但禁用它应该没问题。此外,除了 -ta 之外,请务必使用 -Mcuda 进行编译,以告诉编译器您正在链接 CUDA 代码。

[根据评论收集的答案并添加为后代的社区 wiki 条目]