CUDA translation of AVX permute and shuffle in registers
I am trying to convert some AVX routines to CUDA, and most of the work is quite straightforward. However, there are two parts of the translation that I cannot figure out, for lack of simple examples.
How do I perform an arbitrary permutation of a register float variable (always of length 32)? I have seen suggestions that __shfl_sync will do this, but no example showing it. A numpy version of a simple case of what I want to do, using a length-8 array:
"""
a == some float32 array of length 8;
specific patterns will always cycle mod 4
"""
b = a[[3,2,1,0,7,6,5,4]]
How do I merge two register floats into a single register float? A simple example in numpy would be:
"""
a == some float32 array of length 8
b == some other float32 array of length 8
specific patterns will always cycle mod 4
"""
c = numpy.array([a[0],a[1], b[0],b[1],
a[4],a[5], b[4],b[5]])
For anyone who knows the AVX intrinsics: question 1 concerns the translation of _mm256_permute_ps, and question 2 concerns the translation of _mm256_shuffle_ps.
For question 1, one way to think about a 32-item float array is to have the array "spread" across the warp, one element per lane. __shfl_sync can then apply an arbitrary permutation, each lane fetching its new value from the source lane given by the pattern:
$ cat t1486.cu
#include <stdio.h>

// one element per warp lane; pattern[lane] names the source lane to read from
__global__ void k(int *pattern){
  float my_val = (float)threadIdx.x + 0.1f;   // lane index + 0.1, so each value is identifiable
  my_val = __shfl_sync(0xFFFFFFFF, my_val, pattern[threadIdx.x]);
  printf("warp lane: %d, val: %f\n", threadIdx.x&31, my_val);
}
int main(){
  int pattern[32] = {3,2,1,0,7,6,5,4};        // the mod-4 reversal from the question
  for (int i = 8; i<32; i++) pattern[i] = i;  // identity for the remaining lanes
  int *d_pattern;
  cudaMalloc(&d_pattern, sizeof(pattern));
  cudaMemcpy(d_pattern, pattern, sizeof(pattern), cudaMemcpyHostToDevice);
  k<<<1,32>>>(d_pattern);
  cudaDeviceSynchronize();
}
$ nvcc -o t1486 t1486.cu
$ cuda-memcheck ./t1486
========= CUDA-MEMCHECK
warp lane: 0, val: 3.100000
warp lane: 1, val: 2.100000
warp lane: 2, val: 1.100000
warp lane: 3, val: 0.100000
warp lane: 4, val: 7.100000
warp lane: 5, val: 6.100000
warp lane: 6, val: 5.100000
warp lane: 7, val: 4.100000
warp lane: 8, val: 8.100000
warp lane: 9, val: 9.100000
warp lane: 10, val: 10.100000
warp lane: 11, val: 11.100000
warp lane: 12, val: 12.100000
warp lane: 13, val: 13.100000
warp lane: 14, val: 14.100000
warp lane: 15, val: 15.100000
warp lane: 16, val: 16.100000
warp lane: 17, val: 17.100000
warp lane: 18, val: 18.100000
warp lane: 19, val: 19.100000
warp lane: 20, val: 20.100000
warp lane: 21, val: 21.100000
warp lane: 22, val: 22.100000
warp lane: 23, val: 23.100000
warp lane: 24, val: 24.100000
warp lane: 25, val: 25.100000
warp lane: 26, val: 26.100000
warp lane: 27, val: 27.100000
warp lane: 28, val: 28.100000
warp lane: 29, val: 29.100000
warp lane: 30, val: 30.100000
warp lane: 31, val: 31.100000
========= ERROR SUMMARY: 0 errors
$
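Since the question states that the patterns always cycle mod 4, the pattern array could presumably be dropped in favor of computing the source lane arithmetically, using the width parameter of __shfl_sync to confine the shuffle to 4-lane groups. A minimal sketch (the kernel name k2 is mine, and unlike the table-driven version above it reverses every 4-lane group in the warp, not just the first two):

#include <stdio.h>
// with width=4, the warp is split into 4-lane groups and the source
// lane index is taken relative to the group; 3 - (lane & 3) reverses it
__global__ void k2(){
  float my_val = (float)threadIdx.x + 0.1f;
  my_val = __shfl_sync(0xFFFFFFFF, my_val, 3 - (threadIdx.x & 3), 4);
  printf("warp lane: %d, val: %f\n", threadIdx.x & 31, my_val);
}
int main(){
  k2<<<1,32>>>();
  cudaDeviceSynchronize();
}

Lanes 0-7 should print the same values as above (3.1, 2.1, 1.1, 0.1, 7.1, 6.1, 5.1, 4.1), with the pattern repeating in every subsequent group of 4.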
For question 2, the only approach I can come up with seems almost trivial. As I suggested in my answer to question 1, one way to think about a 32-item float array is to have the array "spread" across the warp, one element per lane. I believe this matches AVX-style processing most closely.
If we follow that idea, then the code for question 2 could be as simple as:
$ cat t1487.cu
#include <stdio.h>

// each lane holds its element of a (my_vals[0]) and of b (my_vals[1]);
// pattern[lane] selects which of the two that lane keeps
__global__ void k(int *pattern){
  float my_vals[2] = {1.1f, 2.2f};
  float my_val = my_vals[pattern[threadIdx.x]];
  printf("warp lane: %d, val: %f\n", threadIdx.x&31, my_val);
}
int main(){
  int pattern[32] = {0,0,1,1,0,0,1,1};        // the a/b interleave from the question
  for (int i = 8; i<32; i++) pattern[i] = 0;  // take a for the remaining lanes
  int *d_pattern;
  cudaMalloc(&d_pattern, sizeof(pattern));
  cudaMemcpy(d_pattern, pattern, sizeof(pattern), cudaMemcpyHostToDevice);
  k<<<1,32>>>(d_pattern);
  cudaDeviceSynchronize();
}
$ nvcc -o t1487 t1487.cu
$ cuda-memcheck ./t1487
========= CUDA-MEMCHECK
warp lane: 0, val: 1.100000
warp lane: 1, val: 1.100000
warp lane: 2, val: 2.200000
warp lane: 3, val: 2.200000
warp lane: 4, val: 1.100000
warp lane: 5, val: 1.100000
warp lane: 6, val: 2.200000
warp lane: 7, val: 2.200000
warp lane: 8, val: 1.100000
warp lane: 9, val: 1.100000
warp lane: 10, val: 1.100000
warp lane: 11, val: 1.100000
warp lane: 12, val: 1.100000
warp lane: 13, val: 1.100000
warp lane: 14, val: 1.100000
warp lane: 15, val: 1.100000
warp lane: 16, val: 1.100000
warp lane: 17, val: 1.100000
warp lane: 18, val: 1.100000
warp lane: 19, val: 1.100000
warp lane: 20, val: 1.100000
warp lane: 21, val: 1.100000
warp lane: 22, val: 1.100000
warp lane: 23, val: 1.100000
warp lane: 24, val: 1.100000
warp lane: 25, val: 1.100000
warp lane: 26, val: 1.100000
warp lane: 27, val: 1.100000
warp lane: 28, val: 1.100000
warp lane: 29, val: 1.100000
warp lane: 30, val: 1.100000
warp lane: 31, val: 1.100000
========= ERROR SUMMARY: 0 errors
$
If this is a learning exercise, that's great. If your interest is a robust implementation of batched 4x4 matrix inversion, I would encourage you to use CUBLAS.
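For reference, a minimal sketch of that CUBLAS route, assuming cuBLAS is available (build with nvcc -lcublas). cublasSmatinvBatched handles n up to 32, so batched 4x4 inversion qualifies; the batch size and the omitted fill-in below are illustrative placeholders, not part of the original answer:

#include <cublas_v2.h>
#include <cuda_runtime.h>

int main(){
  const int n = 4, batch = 2;                 // illustrative batch size
  // column-major 4x4 matrices, stored contiguously per batch entry
  float *dA, *dAinv;
  cudaMalloc(&dA,    batch * n * n * sizeof(float));
  cudaMalloc(&dAinv, batch * n * n * sizeof(float));
  // ... fill dA with the batch of matrices ...

  // cublasSmatinvBatched takes device arrays of device pointers
  float *hA[batch], *hAinv[batch];
  for (int i = 0; i < batch; i++){
    hA[i]    = dA    + i * n * n;
    hAinv[i] = dAinv + i * n * n;
  }
  float **dAarr, **dAinvarr;
  cudaMalloc(&dAarr,    batch * sizeof(float*));
  cudaMalloc(&dAinvarr, batch * sizeof(float*));
  cudaMemcpy(dAarr,    hA,    batch * sizeof(float*), cudaMemcpyHostToDevice);
  cudaMemcpy(dAinvarr, hAinv, batch * sizeof(float*), cudaMemcpyHostToDevice);

  int *d_info;                                // d_info[i] != 0 flags matrix i as singular
  cudaMalloc(&d_info, batch * sizeof(int));
  cublasHandle_t h;
  cublasCreate(&h);
  cublasSmatinvBatched(h, n, (const float * const *)dAarr, n,
                       dAinvarr, n, d_info, batch);
  cudaDeviceSynchronize();
  cublasDestroy(h);
}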
I have a second solution for question 2, one that I worked out before Robert posted his answer. I will have to study the accepted answer a bit more, but at this point I am happy to have multiple options to choose from.
$ cat t1486.cu
#include <stdio.h>

__device__ unsigned pat[4];                     // per-(lane mod 4) select mask
const unsigned hpat[4] = {1, 1, 0, 0};          // 1 = keep a, 0 = take shifted b

__global__ void k(){
  float my_val  = (float)threadIdx.x +  0.0f;   // stands in for a
  float my_val1 = (float)threadIdx.x + 32.0f;   // stands in for b
  // shift b up by 2 within each 4-lane group, then blend with the mask:
  // lanes 0,1 (mod 4) keep a, lanes 2,3 (mod 4) get b from lanes 0,1
  float out_val = my_val*pat[threadIdx.x%4];
  out_val += __shfl_up_sync(0xFFFFFFFF, my_val1, 2, 4)*(1-pat[threadIdx.x%4]);
  printf("warp lane: %d, val: %f\n", threadIdx.x&31, out_val);
}
int main(){
  cudaMemcpyToSymbol(pat, hpat, 4*sizeof(unsigned));
  k<<<1,32>>>();
  cudaDeviceSynchronize();
}
$ nvcc -o t1486 t1486.cu
$ ./t1486
warp lane: 0, val: 0.000000
warp lane: 1, val: 1.000000
warp lane: 2, val: 32.000000
warp lane: 3, val: 33.000000
warp lane: 4, val: 4.000000
warp lane: 5, val: 5.000000
warp lane: 6, val: 36.000000
warp lane: 7, val: 37.000000
warp lane: 8, val: 8.000000
warp lane: 9, val: 9.000000
warp lane: 10, val: 40.000000
warp lane: 11, val: 41.000000
warp lane: 12, val: 12.000000
warp lane: 13, val: 13.000000
warp lane: 14, val: 44.000000
warp lane: 15, val: 45.000000
warp lane: 16, val: 16.000000
warp lane: 17, val: 17.000000
warp lane: 18, val: 48.000000
warp lane: 19, val: 49.000000
warp lane: 20, val: 20.000000
warp lane: 21, val: 21.000000
warp lane: 22, val: 52.000000
warp lane: 23, val: 53.000000
warp lane: 24, val: 24.000000
warp lane: 25, val: 25.000000
warp lane: 26, val: 56.000000
warp lane: 27, val: 57.000000
warp lane: 28, val: 28.000000
warp lane: 29, val: 29.000000
warp lane: 30, val: 60.000000
warp lane: 31, val: 61.000000
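A side note on the design: the pat[] table plays the role of the immediate operand of _mm256_shuffle_ps. For a fixed mod-4 pattern like this one, the table lookup could presumably be replaced by lane-bit arithmetic, avoiding the __device__ array entirely. A hypothetical variant (the kernel name k_alt is mine) that should print the same values:

#include <stdio.h>
__global__ void k_alt(){
  float a = (float)threadIdx.x +  0.0f;
  float b = (float)threadIdx.x + 32.0f;
  // shift b up by 2 within each 4-lane group, as above
  float b_shifted = __shfl_up_sync(0xFFFFFFFF, b, 2, 4);
  // bit 1 of the lane index is set exactly for lanes 2,3 (mod 4)
  float out_val = (threadIdx.x & 2) ? b_shifted : a;
  printf("warp lane: %d, val: %f\n", threadIdx.x&31, out_val);
}
int main(){
  k_alt<<<1,32>>>();
  cudaDeviceSynchronize();
}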