Cuda - 2D Multiple double sums in each Matrix element

The same problem as in this post (): how do I perform a 2D block-stride in both the x and y directions, with different summation limits in each? The 2D algorithm can be seen in the CPU version and in the monolithic kernel. I added OpenMP to the CPU code to get a fairer speedup figure. If there is a way to increase the speed of the CPU function, I'd be happy to find out.

This version of the code takes the 2D arrays and flattens them into 1D arrays. I still use 2D dim3 thread indexing so that I can index the double sums more intuitively.
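
For reference, the flattening convention at work (my own illustration, not part of the posted code): element (i, j) of a row-major n x m array lives at offset i * m + j, so consecutive j values are adjacent in memory. Since N == M here, writing i * n + j, as the code below does, comes to the same thing.

// illustrative only: row-major 2D -> 1D mapping, assuming square N == M
__host__ __device__ inline int flat(int i, int j, int rowlen) {
   return i * rowlen + j;   // a[flat(i, j, N)] plays the role of A[i][j]
}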

(p.s. credit for the 1D stride code goes to user Robert Crovella.) The code so far is:

#include <stdio.h>
#include <iostream>
#include <cuda.h>
#include <sys/time.h>
typedef double df;
#define USECPSEC 1000000ULL
#define BSX 1<<5
#define BSY 1<<5
#define N 100
#define M 100

const bool sync = true;
const bool nosync = false;
unsigned long long dtime_usec(unsigned long long start, bool use_sync = nosync){
  if (use_sync == sync) cudaDeviceSynchronize();
  timeval tv;
  gettimeofday(&tv, 0);
  return ((tv.tv_sec*USECPSEC)+tv.tv_usec)-start;
}

int divUp(int a, int b) {return (a + b - 1) / b;}

float cpu_sum(int n, int m, df *a, df *b, df *c) {
   df q, r;
   #pragma omp parallel for collapse(2)
   for (int x = 0; x < n; x++) {
      for (int y = 0; y < m; y++) {
         q = 0.0f;
         for (int i = 0; i <= x; i++) {
            r = 0.0f;
            for (int j = 0; j <= y; j++) {
               r += a[i * n + j] * b[(x - i) * n + y - j];
            }
            for (int j = 1; j < m - y; j++) {
               r += a[i * n + j] * b[(x - i) * n + y + j]
                    + a[i * n + y + j] * b[(x - i) * n + j];
            }
            q += r;
         }
         for (int i = 1; i < n-x; i++) {
            r = 0.0f;
            for (int j = 0; j <= y; j++) {
               r += a[i * n + j] * b[(x + i) * n + y - j]
                    + a[(x + i) * n + j] * b[ i * n + y - j];
            }
            for (int j = 1; j < m - y; j++) {
               r += a[i * n + j] * b[(x + i) * n + y + j]
                    + a[(x + i) * n + y + j] * b[(x + i) * n + j]
                    + a[(x + i) * n + j] * b[i * n + y + j]
                    + a[(x + i) * n + y + j] * b[i * n + j];
            }
            q += r;
         }
         c[x * N + y] = 0.25f*q;
      }
   }
   return 0;
}

const int P2  = 5;
const int TPB = 1<<P2;
const unsigned row_mask = ~((0xFFFFFFFFU>>P2)<<P2);
__global__ void chebyprod_imp(int n, int m, df *a, df *b, df *c){
   __shared__ df sdata[TPB*TPB];
   int x = blockIdx.x;
   int y = blockIdx.y;
   int row_width_x = (((x)>(n-x))?(x):(n-x))+1;
   int row_width_y = (((y)>(m-y))?(y):(m-y))+1;
   int strides_x = (row_width_x>>P2) + ((row_width_x&row_mask)?1:0);
   int strides_y = (row_width_y>>P2) + ((row_width_y&row_mask)?1:0);
   int i = threadIdx.x;
   df tmp_a;
   df sum = 0.0f;
   for (int s=0; s < strides_x; s++) { // block-stride x loop
      int j = threadIdx.y;  
      for (int u=0; u < strides_y; u++) { // block-stride y loop
         if (i < n && j < m) {tmp_a = a[i * n + j];}
         if (i <= x) {
            if (j <= y) {sum += tmp_a * b[(x - i) * n + y - j];}
            if ((j > 0) && (j < (m-y))) {sum += tmp_a * b[(x - i) * n + y + j] 
                                          + a[i * n + y + j] * b[(x - i) * n + j];}
         }
         if ((i > 0) && (i < (n-x))) {

            if (j <= y) {sum += tmp_a * b[(x + i) * n + y - j]
                          + a[(x + i) * n + j] * b[ i * n + y - j];}
            if ((j > 0) && (j < (m-y))) {sum += tmp_a * b[(x + i) * n + y + j] 
                                          + a[(x + i) * n + y + j] * b[(x + i) * n + j]
                                          + a[(x + i) * n + j] * b[i * n + y + j] 
                                          + a[(x + i) * n + y + j] * b[i * n + j];}
         }
         j += TPB;     
      } 
      i += TPB;
   }
   sdata[threadIdx.x * TPB + threadIdx.y] = sum;
   for (int s = TPB>>1; s > 0; s>>=1) { // sweep reduction in x
   for (int u = TPB>>1; u > 0; u>>=1) { // sweep reduction in y
      __syncthreads();
         if (threadIdx.x < s && threadIdx.y < u) {
            sdata[threadIdx.x * TPB + threadIdx.y] += sdata[(threadIdx.x + s) * TPB + threadIdx.y + u];
         }
      }
   }
   if (!threadIdx.x && !threadIdx.y) c[x * n + y] = 0.25f*sdata[0];
}

__global__ void chebyprod(int n, int m, df *a, df *b, df *c){
   int x = blockIdx.x * blockDim.x + threadIdx.x;
   int y = blockIdx.y * blockDim.y + threadIdx.y;
   df q, r;
   if (x < n && y < m) {
      q = 0.0f;
      for (int i = 0; i <= x; i++) {
         r = 0.0f;
         for (int j = 0; j <= y; j++) {
            r += a[i * n + j] * b[(x - i) * n + y - j];
         }
         for (int j = 1; j < m - y; j++) {
            r += a[i * n + j] * b[(x - i) * n + y + j]
                 + a[i * n + y + j] * b[(x - i) * n + j];
         }
         q += r;
      }
      for (int i = 1; i < n-x; i++) {
         r = 0.0f;
         for (int j = 0; j <= y; j++) {
            r += a[i * n + j] * b[(x + i) * n + y - j]
                 + a[(x + i) * n + j] * b[ i * n + y - j];
         }
         for (int j = 1; j < m - y; j++) {
            r += a[i * n + j] * b[(x + i) * n + y + j]
                 + a[(x + i) * n + y + j] * b[(x + i) * n + j]
                 + a[(x + i) * n + j] * b[i * n + y + j]
                 + a[(x + i) * n + y + j] * b[i * n + j];
         }
         q += r;
      }
      c[x * N + y] = 0.25f*q;
   }
}

int main(void){
  int size = N*M*sizeof(df);
  df *a, *b, *c, *cc, *ci, *d_a, *d_b, *d_c, *d_ci;
  a  = (df*)malloc(size);
  b  = (df*)malloc(size);
  c  = (df*)malloc(size);
  cc = (df*)malloc(size);
  ci = (df*)malloc(size);

  cudaMalloc(&d_a, size); 
  cudaMalloc(&d_b, size);
  cudaMalloc(&d_c, size);
  cudaMalloc(&d_ci, size);
  #pragma omp parallel for collapse (2)
  for (int i = 0; i < N; i++) {
     for (int j = 0; j < M; j++) {
        a[i * M + j] = 0.1f;
        b[i * M + j] = 0.2f;
     }
  }

  unsigned long long  dt = dtime_usec(0);
  // Perform chebyprod on N elements
  cpu_sum(N, M, a, b, cc);
  dt = dtime_usec(dt,sync);
  printf("Time taken 2D CPU: %fs\n", dt/(float)USECPSEC);
  df dtc = dt/(float)USECPSEC;

  std::cout << "Vector cc: [ ";
  for (int k = 0; k < 10; ++k)
    std::cout << cc[k] << " ";
  std::cout <<"]\n";

  cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

  dim3 dimBlock(BSX, BSY);
  dim3 dimGrid(divUp(N, BSX), divUp(M, BSY)); 

  //std::cout << "dimBlock: " << dimBlock << "\n dimGrid: " << dimGrid << "\n";
  dt = dtime_usec(0);
  // Perform chebyprod on N elements
  chebyprod<<< dimBlock, dimGrid >>>(N, M, d_a, d_b, d_c);
  dt = dtime_usec(dt,sync);
  printf("Time taken 2D monolithic kernel: %fs\n", dt/(float)USECPSEC);
  printf("Speedup: %fs\n", dtc/(dt/(float)USECPSEC));

  cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

  std::cout << "Vector c: [ ";
  for (int k = 0; k < 10; ++k)
    std::cout << c[k] << " ";
  std::cout <<"]\n";

  dt = dtime_usec(0);
  // Perform chebyprod on N elements
  chebyprod_imp<<< dimBlock, dimGrid >>>(N, M, d_a, d_b, d_ci);
  dt = dtime_usec(dt,sync);
  printf("Time taken 2D stride kernel: %fs\n", dt/(float)USECPSEC);

  cudaMemcpy(ci, d_ci, size, cudaMemcpyDeviceToHost);

  std::cout << "Vector ci: [ ";
  for (int k = 0; k < 10; ++k)
    std::cout << ci[k] << " ";
  std::cout <<"]\n";

  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c); 
  cudaFree(d_ci);
  free(a);
  free(b);
  free(c);
  free(cc);
  free(ci);
}
  1. For me, anyway, the results from the CPU code don't match between compiling with and without OpenMP support if I omit -O3. If I also specify -O3, I seem to get correct results from the OpenMP build. I'm not sure why that should matter for correctness, although it clearly matters for CPU code performance. (A likely explanation is sketched just after this list.)

  2. Your grid and block sizes appear to be backwards:

    chebyprod<<< dimBlock, dimGrid >>>(....
    

     The first kernel configuration argument is the grid dimension, not the block dimension. I'm not sure how that happened, since you appear to have done it correctly in your previous question.

  3. As in the previous question, we need to pick a thread strategy and implement it correctly. You seemed to be confused about striding, so hopefully the code below will clarify things. The thread strategy I'll use here is one warp per output point. A warp is a group of threads that is 32 (threads) wide in the x direction and 1 in the y direction; the loop strides will therefore increase by 32 in the x direction, but only by 1 in the y direction, to cover the whole space. The choice of thread strategy also affects grid sizing.

  4. You seem to have scrambled what I believe should be the relationship between the two dimensions. The x direction, N, and n should all be connected; likewise, the y direction, M, and m should all be connected (e.g. M is the dimension in the y direction).

  5. When it comes to 2D thread blocks, we want to arrange the indexing for coalescing on the GPU such that an index that includes threadIdx.x is not multiplied by anything. (A simplified statement of coalescing is that we want adjacent threads in the warp to access adjacent elements in memory. Since threadIdx.x increases by 1 as we go from thread to thread across the warp, we want to use that characteristic to generate adjacent memory indices; if we multiply threadIdx.x by anything other than 1, we break the pattern.) You have this reversed: the index containing threadIdx.x is typically multiplied by the row dimension (N, or n). That is not actually incorrect, but it interferes with good coalesced access. To fix it, we want to transpose the indexing and also transpose the data storage for a and b (and therefore c). In the code below, I have transposed the indexing for the data setup of a and b, and the relevant indexing has also been transposed in the strided kernel (only). In your non-strided kernel, as well as in your CPU version, I have not transposed the indexing; I leave that as an exercise for you, if needed. Numerically it makes no difference to the results, because your entire a matrix has the same value at every location, and a similar statement can be made about your b matrix; so for this example code, transposing (or not) has no effect on the results. But it matters for performance (of the strided kernel, at least). Note also that I believe performing the indexing "transpose" on the "monolithic" kernel should improve its performance as well; I don't know whether it would affect the performance of the CPU version. (A short illustration of the coalescing pattern follows this list.)
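
A note on item 1 (my reading, not something established in the original discussion): q and r are declared outside the parallel loops, so OpenMP treats them as shared across threads, which is a data race; -O3 may mask the problem by keeping them in per-thread registers. Marking them private removes the ambiguity. A minimal sketch, showing only the first of the four inner sums:

void cpu_sum_private(int n, int m, df *a, df *b, df *c) {
   // private(q, r) gives each OpenMP thread its own copy of the
   // accumulators; equivalently, declare them inside the y loop body.
   df q, r;
   #pragma omp parallel for collapse(2) private(q, r)
   for (int x = 0; x < n; x++) {
      for (int y = 0; y < m; y++) {
         q = 0.0f;
         for (int i = 0; i <= x; i++) {
            r = 0.0f;
            for (int j = 0; j <= y; j++)
               r += a[i * n + j] * b[(x - i) * n + y - j];
            q += r;   // the remaining three inner sums are elided here;
         }            // they are unchanged from cpu_sum above
         c[x * n + y] = 0.25f * q;
      }
   }
}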
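
And to illustrate item 5 (again my illustration, not code from the answer): assume x is derived from threadIdx.x, so that across a warp x takes consecutive values.

// schematic only: the same element read two ways
__device__ df load_coalesced(const df *a, int x, int y, int n) {
   return a[y * n + x];   // adjacent threads read adjacent addresses
}
__device__ df load_uncoalesced(const df *a, int x, int y, int n) {
   return a[x * n + y];   // adjacent threads are n elements apart
}

This is why the strided kernel below keeps the threadIdx.x-derived term un-multiplied, e.g. a[j * n + i] for the loads and c[y * m + x] for the store.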

I've also added the const ... __restrict__ usage from my previous answer. According to my testing, on "smaller" GPUs this provides a significant performance benefit; it is not necessary for correctness, however. Here is a worked example with the above changes, which gives numerically matching results for all 3 test cases:

$ cat t1498.cu
#include <stdio.h>
#include <iostream>
#include <cuda.h>
#include <time.h>
#include <sys/time.h>
typedef double df;
#define USECPSEC 1000000ULL
#define BSX 1<<5
#define BSY 1<<5
#define N 100
#define M 100

const bool sync = true;
const bool nosync = false;
unsigned long long dtime_usec(unsigned long long start, bool use_sync = nosync){
  if (use_sync == sync) cudaDeviceSynchronize();
  timeval tv;
  gettimeofday(&tv, 0);
  return ((tv.tv_sec*USECPSEC)+tv.tv_usec)-start;
}

int divUp(int a, int b) {return (a + b - 1) / b;}

void cpu_sum(int n, int m, df *a, df *b, df *c) {
   df q, r;
   #pragma omp parallel for collapse(2)
   for (int x = 0; x < n; x++) {
     for (int y = 0; y < m; y++) {
       q = 0.0f;
       for (int i = 0; i <= x; i++) {
         r = 0.0f;
         for (int j = 0; j <= y; j++) {
           r += a[i * n + j] * b[(x - i) * n + y - j];
           }
         for (int j = 1; j < m - y; j++) {
           r += a[i * n + j] * b[(x - i) * n + y + j]
                + a[i * n + y + j] * b[(x - i) * n + j];
           }
         q += r;
         }
       for (int i = 1; i < n-x; i++) {
         r = 0.0f;
         for (int j = 0; j <= y; j++) {
           r += a[i * n + j] * b[(x + i) * n + y - j]
                + a[(x + i) * n + j] * b[ i * n + y - j];
           }
         for (int j = 1; j < m - y; j++) {
           r += a[i * n + j] * b[(x + i) * n + y + j]
                + a[(x + i) * n + y + j] * b[(x + i) * n + j]

                +a[(x + i) * n + j] * b[i * n + y + j]
                + a[(x + i) * n + y + j] * b[i * n + j];
           }
         q += r;
         }
       c[x * N + y] = 0.25f*q;
       }
     }
}

// choose one warp per output point
const int P2  = 5;  // assumes warp size is 32
const unsigned row_mask = ~((0xFFFFFFFFU>>P2)<<P2);
__global__ void chebyprod_imp(int n, int m, const df * __restrict__ a, const df * __restrict__ b, df * __restrict__ c){
   int x = blockIdx.x;
   int y = threadIdx.y+blockDim.y*blockIdx.y;
   int width_x = (((x)>(n-x))?(x):(n-x))+1;
   int height_y = (((y)>(m-y))?(y):(m-y))+1;
   int strides_x = (width_x>>P2) + ((width_x&row_mask)?1:0);
   int strides_y = height_y;
   int i = threadIdx.x;
   df tmp_a;
   df sum = 0.0f;
   if ((x < n) && (y < m)){
   for (int s=0; s < strides_x; s++) { // warp-stride x loop
     for (int j=0; j < strides_y; j++) { // y loop
       if (i < n && j < m) {tmp_a = a[j * n + i];}
       if (i <= x) {
         if (j <= y) {sum += tmp_a * b[(y - j) * n + x - i];}
         if ((j > 0) && (j < (m-y))) {sum += tmp_a * b[(y+j) * n + x - i] + a[(y+j)* n + i] * b[j*n+(x - i)];}
         }
       if ((i > 0) && (i < (n-x))) {

         if (j <= y) {sum += tmp_a * b[(y-j) * n + x+i] + a[j*n + (x + i)] * b[(y - j)*n + i];}
         if ((j > 0) && (j < (m-y)))
           {sum += tmp_a * b[(y+j) * n + x+i]
                +  a[(y+j) * n + x + i] * b[j*n+(x + i)]
                +  a[j*n + (x + i)] * b[(y+j)*n + i]
                +  a[(y+j)*n + x + i] * b[j*n+i];}
         }
      }
      i += 32;
   }
  // warp-shuffle reduction
    for (int offset = warpSize>>1; offset > 0; offset >>= 1)
      sum += __shfl_down_sync(0xFFFFFFFFU, sum, offset);
    if (!threadIdx.x) c[y*m+x] = 0.25f*sum;}
}

__global__ void chebyprod(int n, int m, df *a, df *b, df *c){
   int x = blockIdx.x * blockDim.x + threadIdx.x;
   int y = blockIdx.y * blockDim.y + threadIdx.y;
   df q, r;
   if (x < n && y < m) {
      q = 0.0f;
      for (int i = 0; i <= x; i++) {
     r = 0.0f;
     for (int j = 0; j <= y; j++) {
        r += a[i * n + j] * b[(x - i) * n + y - j];
     }
     for (int j = 1; j < m - y; j++) {
        r += a[i * n + j] * b[(x - i) * n + y + j]
             + a[i * n + y + j] * b[(x - i) * n + j];
     }
     q += r;
      }
      for (int i = 1; i < n-x; i++) {
     r = 0.0f;
     for (int j = 0; j <= y; j++) {
        r += a[i * n + j] * b[(x + i) * n + y - j]
             + a[(x + i) * n + j] * b[ i * n + y - j];
     }
     for (int j = 1; j < m - y; j++) {
        r += a[i * n + j] * b[(x + i) * n + y + j]
             + a[(x + i) * n + y + j] * b[(x + i) * n + j]

             +a[(x + i) * n + j] * b[i * n + y + j]
             + a[(x + i) * n + y + j] * b[i * n + j];
     }
     q += r;
      }
   c[x * N + y] = 0.25f*q;
   }
}

int main(void){
  int size = N*M*sizeof(df);
  df *a, *b, *c, *cc, *ci, *d_a, *d_b, *d_c, *d_ci;
  a  = (df*)malloc(size);
  b  = (df*)malloc(size);
  c  = (df*)malloc(size);
  cc = (df*)malloc(size);
  ci = (df*)malloc(size);

  cudaMalloc(&d_a, size);
  cudaMalloc(&d_b, size);
  cudaMalloc(&d_c, size);
  cudaMalloc(&d_ci, size);
  #pragma omp parallel for collapse (2)
  for (int j = 0; j < M; j++) {
    for (int i = 0; i < N; i++) {
      a[j * N + i] = 0.1f;
      b[j * N + i] = 0.2f;
      }
    }

  unsigned long long  dt = dtime_usec(0);
  // Perform chebyprod on N elements
  cpu_sum(N, M, a, b, cc);
  dt = dtime_usec(dt,sync);
  printf("Time taken 2D CPU: %fs\n", dt/(float)USECPSEC);
  df dtc = dt/(float)USECPSEC;

  std::cout << "Vector cc: [ ";
  for (int k = 0; k < 10; ++k)
    std::cout << cc[k] << " ";
  std::cout <<"]\n";

  cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

  dim3 dimBlock(BSX, BSY);
  dim3 dimGrid(divUp(N, BSX), divUp(M, BSY));

  //std::cout << "dimBlock: " << dimBlock << "\n dimGrid: " << dimGrid << "\n";
  dt = dtime_usec(0);
  // Perform chebyprod on N elements
  chebyprod<<< dimGrid, dimBlock >>>(N, M, d_a, d_b, d_c);
  dt = dtime_usec(dt,sync);
  printf("Time taken 2D monolithic kernel: %fs\n", dt/(float)USECPSEC);
  printf("Speedup: %fs\n", dtc/(dt/(float)USECPSEC));

  cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

  std::cout << "Vector c: [ ";
  for (int k = 0; k < 10; ++k)
    std::cout << c[k] << " ";
  std::cout <<"]\n";

  dt = dtime_usec(0);
  // Perform chebyprod on N elements
  dim3 dimGrid2(N, (M+dimBlock.y-1)/dimBlock.y);
  chebyprod_imp<<< dimGrid2, dimBlock >>>(N, M, d_a, d_b, d_ci);
  dt = dtime_usec(dt,sync);
  printf("Time taken 2D stride kernel: %fs\n", dt/(float)USECPSEC);
  printf("Speedup: %fs\n", dtc/(dt/(float)USECPSEC));

  cudaMemcpy(ci, d_ci, size, cudaMemcpyDeviceToHost);

  std::cout << "Vector ci: [ ";
  for (int k = 0; k < 10; ++k)
    std::cout << ci[k] << " ";
  std::cout <<"]\n";
  df max_error = 0;
  for (int k = 0; k < N*M; k++)
    max_error = fmax(max_error, fabs(c[k] - ci[k]));
  std::cout << "Max diff = " << max_error << std::endl;

  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);
  cudaFree(d_ci);
  free(a);
  free(b);
  free(c);
  free(cc);
  free(ci);
}
$ nvcc -O3 -Xcompiler -fopenmp -arch=sm_52 -o t1498 t1498.cu
$ ./t1498
Time taken 2D CPU: 0.034830s
Vector cc: [ 198.005 197.01 196.015 195.02 194.025 193.03 192.035 191.04 190.045 189.05 ]
Time taken 2D monolithic kernel: 0.033687s
Speedup: 1.033930s
Vector c: [ 198.005 197.01 196.015 195.02 194.025 193.03 192.035 191.04 190.045 189.05 ]
Time taken 2D stride kernel: 0.013526s
Speedup: 2.575041s
Vector ci: [ 198.005 197.01 196.015 195.02 194.025 193.03 192.035 191.04 190.045 189.05 ]
Max diff = 8.52651e-13
$

CUDA 10.1.105、Fedora 29、GTX 960

Note that when we run the same test on a Tesla V100, which can take fuller advantage of the "extra" threads exposed by the strided-kernel case, the benefit is more pronounced:

$ OMP_NUM_THREADS=32 ./t1498
Time taken 2D CPU: 0.031610s
Vector cc: [ 198.005 197.01 196.015 195.02 194.025 193.03 192.035 191.04 190.045 189.05 ]
Time taken 2D monolithic kernel: 0.018228s
Speedup: 1.734145s
Vector c: [ 198.005 197.01 196.015 195.02 194.025 193.03 192.035 191.04 190.045 189.05 ]
Time taken 2D stride kernel: 0.000731s
Speedup: 43.242137s
Vector ci: [ 198.005 197.01 196.015 195.02 194.025 193.03 192.035 191.04 190.045 189.05 ]
Max diff = 8.52651e-13

If you perform the indexing "transpose" on the monolithic kernel, similar to what I did in the strided kernel, I think you'll end up with a performance picture roughly like the one at the end of the previous question: little or no performance benefit for the strided kernel over the monolithic kernel on a "small" GPU, and a ~5x improvement on a "large" GPU. A sketch of that transpose follows.
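
In case it helps, here is what that "transpose" looks like when applied to the monolithic kernel: each index swaps which term is multiplied by the row length, mirroring the strided kernel above. A sketch (mine, untested; only the first inner sum is written out, and the remaining three transpose the same way):

__global__ void chebyprod_t(int n, int m, const df * __restrict__ a,
                            const df * __restrict__ b, df * __restrict__ c){
   int x = blockIdx.x * blockDim.x + threadIdx.x;
   int y = blockIdx.y * blockDim.y + threadIdx.y;
   if (x < n && y < m) {
      df q = 0.0f;
      for (int i = 0; i <= x; i++) {
         df r = 0.0f;
         for (int j = 0; j <= y; j++)
            r += a[j * n + i] * b[(y - j) * n + x - i]; // was a[i*n+j]*b[(x-i)*n+y-j]
         // ...remaining three inner sums, transposed the same way...
         q += r;
      }
      c[y * m + x] = 0.25f * q;   // was c[x * N + y]
   }
}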