cuBLAS matrix inverse much slower than MATLAB
In my current project, I am attempting to compute the inverse of large (n > 2000) matrices with cuBLAS. The inverse is computed correctly, but for some reason the computation takes significantly longer than the same computation done in MATLAB.
I have attached a sample calculation performed on random matrices using my implementation in either language, as well as the performance results.
Any help or suggestions on what may be causing this slowdown would be greatly appreciated.
Thank you in advance.
Comparison
cuBLAS vs. MATLAB
N = 500 : cuBLAS ~ 0.130 sec, MATLAB ~ 0.066 sec -> ~1.97x slower
N = 1000 : cuBLAS ~ 0.898 sec, MATLAB ~ 0.311 sec -> ~2.89x slower
N = 2000 : cuBLAS ~ 6.667 sec, MATLAB ~ 0.659 sec -> ~10.12x slower
N = 4000 : cuBLAS ~ 51.860 sec, MATLAB ~ 4.296 sec -> ~12.07x slower
C++ code
#include <cstdio>
#include <cstdlib>
#include <string>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <conio.h>
#define CUDA_CALL(res, str) { if (res != cudaSuccess) { printf("CUDA Error : %s : %s %d : ERR %s\n", str, __FILE__, __LINE__, cudaGetErrorName(res)); } }
#define CUBLAS_CALL(res, str) { if (res != CUBLAS_STATUS_SUCCESS) { printf("CUBLAS Error : %s : %s %d : ERR %d\n", str, __FILE__, __LINE__, int(res)); } }
static cudaEvent_t cu_TimerStart;
static cudaEvent_t cu_TimerStop;
void d_CUDATimerStart(void)
{
    CUDA_CALL(cudaEventCreate(&cu_TimerStart), "Failed to create start event!");
    CUDA_CALL(cudaEventCreate(&cu_TimerStop), "Failed to create stop event!");
    CUDA_CALL(cudaEventRecord(cu_TimerStart), "Failed to record start event!");
}
float d_CUDATimerStop(void)
{
    CUDA_CALL(cudaEventRecord(cu_TimerStop), "Failed to record stop event!");
    CUDA_CALL(cudaEventSynchronize(cu_TimerStop), "Failed to synch stop event!");
    float ms;
    CUDA_CALL(cudaEventElapsedTime(&ms, cu_TimerStart, cu_TimerStop), "Failed to elapse events!");
    CUDA_CALL(cudaEventDestroy(cu_TimerStart), "Failed to destroy start event!");
    CUDA_CALL(cudaEventDestroy(cu_TimerStop), "Failed to destroy stop event!");
    return ms;
}
float* d_GetInv(float* L, int n)
{
    cublasHandle_t cu_cublasHandle;
    CUBLAS_CALL(cublasCreate(&cu_cublasHandle), "Failed to initialize cuBLAS!");
    // Single-element device arrays of matrix pointers, as required by the batched cuBLAS API.
    float** adL;
    float** adC;
    float* dL;
    float* dC;
    int* dLUPivots;
    int* dLUInfo;
    size_t szA = n * n * sizeof(float);
    CUDA_CALL(cudaMalloc(&adL, sizeof(float*)), "Failed to allocate adL!");
    CUDA_CALL(cudaMalloc(&adC, sizeof(float*)), "Failed to allocate adC!");
    CUDA_CALL(cudaMalloc(&dL, szA), "Failed to allocate dL!");
    CUDA_CALL(cudaMalloc(&dC, szA), "Failed to allocate dC!");
    CUDA_CALL(cudaMalloc(&dLUPivots, n * sizeof(int)), "Failed to allocate dLUPivots!");
    CUDA_CALL(cudaMalloc(&dLUInfo, sizeof(int)), "Failed to allocate dLUInfo!");
    CUDA_CALL(cudaMemcpy(dL, L, szA, cudaMemcpyHostToDevice), "Failed to copy to dL!");
    CUDA_CALL(cudaMemcpy(adL, &dL, sizeof(float*), cudaMemcpyHostToDevice), "Failed to copy to adL!");
    CUDA_CALL(cudaMemcpy(adC, &dC, sizeof(float*), cudaMemcpyHostToDevice), "Failed to copy to adC!");
    // Time only the LU factorization and the inverse, both done via the batched API with batch count 1.
    d_CUDATimerStart();
    CUBLAS_CALL(cublasSgetrfBatched(cu_cublasHandle, n, adL, n, dLUPivots, dLUInfo, 1), "Failed to perform LU decomp operation!");
    CUDA_CALL(cudaDeviceSynchronize(), "Failed to synchronize after kernel call!");
    CUBLAS_CALL(cublasSgetriBatched(cu_cublasHandle, n, (const float **)adL, n, dLUPivots, adC, n, dLUInfo, 1), "Failed to perform Inverse operation!");
    CUDA_CALL(cudaDeviceSynchronize(), "Failed to synchronize after kernel call!");
    float timed = d_CUDATimerStop();
    printf("cublas inverse in: %.5f ms.\n", timed);
    float* res = (float*)malloc(szA);
    CUDA_CALL(cudaMemcpy(res, dC, szA, cudaMemcpyDeviceToHost), "Failed to copy to res!");
    CUDA_CALL(cudaFree(adL), "Failed to free adL!");
    CUDA_CALL(cudaFree(adC), "Failed to free adC!");
    CUDA_CALL(cudaFree(dL), "Failed to free dL!");
    CUDA_CALL(cudaFree(dC), "Failed to free dC!");
    CUDA_CALL(cudaFree(dLUPivots), "Failed to free dLUPivots!");
    CUDA_CALL(cudaFree(dLUInfo), "Failed to free dLUInfo!");
    CUBLAS_CALL(cublasDestroy(cu_cublasHandle), "Failed to destroy cuBLAS!");
    return res;
}
int main()
{
    int n = 1000;
    float* L = (float*)malloc(n * n * sizeof(float));
    for(int i = 0; i < n * n; i++)
        L[i] = ((float)rand()/(float)(RAND_MAX));
    float* inv = d_GetInv(L, n);
    printf("done.");
    _getch();
    return 0;
}
MATLAB code
A = rand(1000);
tic
X = inv(A);
toc
System info:
GPU: GTX 780 3 GB
CPU: i7-4790S @ 3.20 GHz
As @RobertCrovella said, you should not use the batched small-matrix APIs for a single large matrix inversion.
Basically you can use the same method as in your code, but with the non-batched versions of getrf() and getri() to maximize performance for large matrices.
For getrf(), you can find it here:
http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-getrf
For getri(), although the CUDA toolkit does not provide a getri() to solve AX=I, where A has been LU-factorized by getrf(), it does provide a getrs() to solve AX=B. All you need to do is set B=I before calling getrs().
http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-getrs
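Below is a minimal sketch of that approach using the dense cuSOLVER API (cusolverDnSgetrf / cusolverDnSgetrs). It is not code from the answer: the helper name d_GetInvCuSolver, the host-built identity used for B, and the stripped-down error handling are assumptions made here to keep the example short, so treat it as an outline of the non-batched path rather than a drop-in replacement.
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cusolverDn.h>
// Sketch only: error checking omitted; cuSOLVER, like cuBLAS, expects column-major storage.
float* d_GetInvCuSolver(const float* A_host, int n)
{
    cusolverDnHandle_t handle;
    cusolverDnCreate(&handle);
    size_t szA = (size_t)n * n * sizeof(float);
    float *dA, *dB, *dWork;
    int *dIpiv, *dInfo;
    cudaMalloc(&dA, szA);
    cudaMalloc(&dB, szA);
    cudaMalloc(&dIpiv, n * sizeof(int));
    cudaMalloc(&dInfo, sizeof(int));
    cudaMemcpy(dA, A_host, szA, cudaMemcpyHostToDevice);
    // Workspace query for the non-batched LU factorization.
    int lwork = 0;
    cusolverDnSgetrf_bufferSize(handle, n, n, dA, n, &lwork);
    cudaMalloc(&dWork, lwork * sizeof(float));
    // In-place LU factorization of A with partial pivoting.
    cusolverDnSgetrf(handle, n, n, dA, n, dWork, dIpiv, dInfo);
    // Set B = I (built on the host here for brevity).
    std::vector<float> identity((size_t)n * n, 0.0f);
    for (int i = 0; i < n; ++i) identity[(size_t)i * n + i] = 1.0f;
    cudaMemcpy(dB, identity.data(), szA, cudaMemcpyHostToDevice);
    // Solve A * X = I; on return dB holds inv(A).
    cusolverDnSgetrs(handle, CUBLAS_OP_N, n, n, dA, n, dIpiv, dB, n, dInfo);
    float* res = (float*)malloc(szA);
    cudaMemcpy(res, dB, szA, cudaMemcpyDeviceToHost);
    cudaFree(dA); cudaFree(dB); cudaFree(dWork);
    cudaFree(dIpiv); cudaFree(dInfo);
    cusolverDnDestroy(handle);
    return res;
}
Compile with nvcc and link against cuSOLVER (-lcusolver). The single-matrix getrf/getrs path is the one intended for large n, whereas the batched calls in the question are optimized for many small matrices.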