LU factorization returns different results between LAPACK and cuBLAS/cuSOLVER

I am testing some scenarios in which the function dgetrf returns different results when called through cuBLAS/cuSOLVER than when written against LAPACK. For example, I am looking at the LU factorization of the following matrix:

     2.0  4.0  1.0 -3.0  0.0
    -1.0 -2.0  2.0  4.0  0.0
     4.0  2.0 -3.0  5.0  0.0
     5.0 -4.0 -3.0  1.0  0.0
     0.0  0.0  0.0  0.0  0.0

I first tried calling dgetrf from cuBLAS/cuSOLVER as shown below (warning, ugly test code ahead!):

    #include <cblas.h>
    #include <time.h>
    #include <stdio.h>
    #include <string.h>
    #include <cuda_runtime.h>
    #include <cublas_v2.h>
    #include <cusolverDn.h>

    int main(int argc, char** argv){

        const int matrixSize = 5;

        int i, j;
        double arrA[matrixSize][matrixSize] = {
            {2.0, 4.0, 1.0, -3.0, 0.0},
            {-1.0, -2.0, 2.0, 4.0, 0.0},
            {4.0, 2.0, -3.0, 5.0, 0.0},
            {5.0, -4.0, -3.0, 1.0, 0.0},
            {0.0, 0.0, 0.0, 0.0, 0.0}
        };

        double *arrADev, *workArray;
        double **matrixArray;
        int *pivotArray;
        int *infoArray;
        double flat[matrixSize*matrixSize] = {0};
        cublasHandle_t cublasHandle;
        cublasStatus_t cublasStatus;
        cudaError_t error;

        cudaError cudaStatus;
        cusolverStatus_t cusolverStatus;
        cusolverDnHandle_t cusolverHandle;

        double *matrices[2];


        error = cudaMalloc(&arrADev,  sizeof(double) * matrixSize*matrixSize);
        if (error != cudaSuccess) fprintf(stderr,"\nError: %s\n",cudaGetErrorString(error));

        error = cudaMalloc(&matrixArray,  sizeof(double*) * 2);
        if (error != cudaSuccess) fprintf(stderr,"\nError: %s\n",cudaGetErrorString(error));

        error = cudaMalloc(&pivotArray,  sizeof(int) * matrixSize*matrixSize);
        if (error != cudaSuccess) fprintf(stderr,"\nError: %s\n",cudaGetErrorString(error));

        error = cudaMalloc(&infoArray,  sizeof(int) * matrixSize*matrixSize);
        if (error != cudaSuccess) fprintf(stderr,"\nError: %s\n",cudaGetErrorString(error));

        cublasStatus = cublasCreate(&cublasHandle);
        if (cublasStatus != CUBLAS_STATUS_SUCCESS) fprintf(stderr,"error %i\n",cublasStatus);

        //maps matrix to flat vector
        for(i=0; i<matrixSize; i++){
            for(j=0; j<matrixSize; j++){
                flat[i+j*matrixSize] = arrA[i][j];
            }
        }

        //copy matrix A to device
        cublasStatus = cublasSetMatrix(matrixSize, matrixSize, sizeof(double), flat, matrixSize, arrADev, matrixSize);
        if (cublasStatus != CUBLAS_STATUS_SUCCESS) fprintf(stderr,"error %i\n",cublasStatus);

        //save matrix address
        matrices[0] = arrADev;

        //copy matrices references to device
        error = cudaMemcpy(matrixArray, matrices, sizeof(double*)*1, cudaMemcpyHostToDevice);
        if (error != cudaSuccess) fprintf(stderr,"\nError: %s\n",cudaGetErrorString(error));

        int Lwork;
        // calculate buffer size for cuSOLVER LU factorization
        cusolverStatus = cusolverDnDgetrf_bufferSize(cusolverHandle, matrixSize, matrixSize, arrADev, matrixSize, &Lwork);
        cudaStatus = cudaMalloc((void**)&workArray, Lwork*sizeof(double));

        // cuBLAS LU factorization
        cublasStatus = cublasDgetrfBatched(cublasHandle, matrixSize, matrixArray, matrixSize, pivotArray, infoArray, 1);
        if (cublasStatus == CUBLAS_STATUS_SUCCESS)
            printf("cuBLAS DGETRF SUCCESSFUL! \n");
        else
            printf("cuBLAS DGETRF UNSUCCESSFUL! \n");

        // cuSOLVER LU factorization
        cusolverStatus = cusolverDnCreate(&cusolverHandle);
        cusolverStatus = cusolverDnDgetrf(cusolverHandle, matrixSize, matrixSize, arrADev, matrixSize, workArray, pivotArray, infoArray);
        if (cusolverStatus == CUSOLVER_STATUS_SUCCESS)
            printf("cuSOLVER DGETRF SUCCESSFUL! \n");
        else
            printf("cuSOLVER DGETRF UNSUCCESSFUL! \n");

        return 0;
    }

The output of the above code is

    cuBLAS DGETRF SUCCESSFUL!
    cuSOLVER DGETRF SUCCESSFUL!

When I try to do the same thing with LAPACK (warning: even uglier code!):

    #include <iostream>
    #include <vector>

    using namespace std;

    extern "C" void dgetrf_(int* dim1, int* dim2, double* a, int* lda, int* ipiv, int* info);
    extern "C" void dgetrs_(char *TRANS, int *N, int *NRHS, double *A, int *LDA, int *IPIV, double *B, int *LDB, int *INFO );

    int main()
    {
       char trans = 'N';
       int dim = 5;
       int LDA = dim;
       int info;

       vector<double> a,b;

       a.push_back(2.0); a.push_back(4.0); a.push_back(1.0); a.push_back(-3.0); a.push_back(0.0);
       a.push_back(-1.0); a.push_back(-2.0); a.push_back(2.0); a.push_back(4.0); a.push_back(0.0);
       a.push_back(4.0); a.push_back(2.0); a.push_back(-3.0); a.push_back(5.0); a.push_back(0.0);
       a.push_back(5.0); a.push_back(-4.0); a.push_back(-3.0); a.push_back(1.0); a.push_back(0.0);
       a.push_back(0.0); a.push_back(0.0); a.push_back(0.0); a.push_back(0.0); a.push_back(0.0);

       int ipiv[5];
       dgetrf_(&dim, &dim, &*a.begin(), &LDA, ipiv, &info);
       if (info == 0)
           printf("dgetrf successful\n");
       else
           printf("dgetrf unsuccessful\n");

       return 0;
    }

the output I get is

    dgetrf unsuccessful

I know they are different libraries, but is this behavior expected?

When I compile your CUDA code, I get a warning that the cusolver handle is being used before its value is set. You should not ignore warnings like that, because your usage in the buffer-sizing call is not correct. However, that is not the problem here.
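
For reference, the fix is simply to create the cusolver handle before using it for the workspace-size query, which is what the corrected listing further down does; a minimal sketch of that ordering:

    // create the cusolver handle first ...
    cusolverStatus = cusolverDnCreate(&cusolverHandle);
    // ... and only then use it for the workspace-size query and the workspace allocation
    cusolverStatus = cusolverDnDgetrf_bufferSize(cusolverHandle, matrixSize, matrixSize, arrADev, matrixSize, &Lwork);
    cudaStatus = cudaMalloc((void**)&workArray, Lwork*sizeof(double));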

I don't think there is any difference between your two test cases. You appear to be misinterpreting the results.

Looking at the netlib documentation, we see that an info value of 5 means that U(5,5) is zero, which will be problematic for future use. It does not mean that the dgetrf factorization was successful or unsuccessful, as you are printing; it is an indication about your input data. The factorization was in fact completed, as the documentation clearly states.
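
In other words, the usual host-side interpretation of the dgetrf info value looks roughly like this (a sketch following the netlib convention, using the variables from your LAPACK test, not a change to your code):

    dgetrf_(&dim, &dim, &*a.begin(), &LDA, ipiv, &info);
    if (info == 0)
        printf("dgetrf successful, U is nonsingular\n");
    else if (info > 0)
        // the factorization is complete, but U(info,info) is exactly zero,
        // so the factors cannot be used to solve a linear system
        printf("dgetrf completed, but U(%d,%d) is exactly zero\n", info, info);
    else
        printf("argument %d to dgetrf had an illegal value\n", -info);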

Likewise, we cannot learn anything about that condition just from the return value of the cusolver (or cublas) function. To discover information similar to what LAPACK reports, it is necessary to look at the infoArray values.
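
Concretely, that means copying infoArray back to the host after the factorization calls and inspecting it, which is what the modified listing below adds (a minimal sketch; hinfoArray is just a host-side buffer):

    // copy the device-side info values back to the host and inspect them;
    // for this single-matrix batch, the first entry plays the role of LAPACK's info
    int *hinfoArray = (int *)malloc(matrixSize*matrixSize*sizeof(int));
    cudaMemcpy(hinfoArray, infoArray, matrixSize*matrixSize*sizeof(int), cudaMemcpyDeviceToHost);
    printf("info = %d\n", hinfoArray[0]);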

With those changes, your codes report the same thing (an info value of 5):

    $ cat t1556.cu
    #include <time.h>
    #include <stdio.h>
    #include <string.h>
    #include <cuda_runtime.h>
    #include <cublas_v2.h>
    #include <cusolverDn.h>

    int main(int argc, char** argv){

        const int matrixSize = 5;

        int i, j;
        double arrA[matrixSize][matrixSize] = {
            {2.0, 4.0, 1.0, -3.0, 0.0},
            {-1.0, -2.0, 2.0, 4.0, 0.0},
            {4.0, 2.0, -3.0, 5.0, 0.0},
            {5.0, -4.0, -3.0, 1.0, 0.0},
            {0.0, 0.0, 0.0, 0.0, 0.0}
        };

        double *arrADev, *workArray;
        double **matrixArray;
        int *pivotArray;
        int *infoArray;
        double flat[matrixSize*matrixSize] = {0};
        cublasHandle_t cublasHandle;
        cublasStatus_t cublasStatus;
        cudaError_t error;

        cudaError cudaStatus;
        cusolverStatus_t cusolverStatus;
        cusolverDnHandle_t cusolverHandle;

        double *matrices[2];


        error = cudaMalloc(&arrADev,  sizeof(double) * matrixSize*matrixSize);
        if (error != cudaSuccess) fprintf(stderr,"\nError: %s\n",cudaGetErrorString(error));

        error = cudaMalloc(&matrixArray,  sizeof(double*) * 2);
        if (error != cudaSuccess) fprintf(stderr,"\nError: %s\n",cudaGetErrorString(error));

        error = cudaMalloc(&pivotArray,  sizeof(int) * matrixSize*matrixSize);
        if (error != cudaSuccess) fprintf(stderr,"\nError: %s\n",cudaGetErrorString(error));

        error = cudaMalloc(&infoArray,  sizeof(int) * matrixSize*matrixSize);
        if (error != cudaSuccess) fprintf(stderr,"\nError: %s\n",cudaGetErrorString(error));

        cublasStatus = cublasCreate(&cublasHandle);
        if (cublasStatus != CUBLAS_STATUS_SUCCESS) fprintf(stderr,"error %i\n",cublasStatus);

        //maps matrix to flat vector
        for(i=0; i<matrixSize; i++){
            for(j=0; j<matrixSize; j++){
                flat[i+j*matrixSize] = arrA[i][j];
            }
        }

        //copy matrix A to device
        cublasStatus = cublasSetMatrix(matrixSize, matrixSize, sizeof(double), flat, matrixSize, arrADev, matrixSize);
        if (cublasStatus != CUBLAS_STATUS_SUCCESS) fprintf(stderr,"error %i\n",cublasStatus);

        //save matrix address
        matrices[0] = arrADev;

        //copy matrices references to device
        error = cudaMemcpy(matrixArray, matrices, sizeof(double*)*1, cudaMemcpyHostToDevice);
        if (error != cudaSuccess) fprintf(stderr,"\nError: %s\n",cudaGetErrorString(error));

        int Lwork;
        // calculate buffer size for cuSOLVER LU factorization
        cusolverStatus = cusolverDnCreate(&cusolverHandle);
        cusolverStatus = cusolverDnDgetrf_bufferSize(cusolverHandle, matrixSize, matrixSize, arrADev, matrixSize, &Lwork);
        cudaStatus = cudaMalloc((void**)&workArray, Lwork*sizeof(double));

        // cuBLAS LU factorization
        cublasStatus = cublasDgetrfBatched(cublasHandle, matrixSize, matrixArray, matrixSize, pivotArray, infoArray, 1);
        if (cublasStatus == CUBLAS_STATUS_SUCCESS)
            printf("cuBLAS DGETRF SUCCESSFUL! \n");
        else
            printf("cuBLAS DGETRF UNSUCCESSFUL! \n");

        // cuSOLVER LU factorization
        cusolverStatus = cusolverDnDgetrf(cusolverHandle, matrixSize, matrixSize, arrADev, matrixSize, workArray, pivotArray, infoArray);
        if (cusolverStatus == CUSOLVER_STATUS_SUCCESS)
            printf("cuSOLVER DGETRF SUCCESSFUL! \n");
        else
            printf("cuSOLVER DGETRF UNSUCCESSFUL! \n");
        int *hinfoArray = (int *)malloc(matrixSize*matrixSize*sizeof(int));
        cudaMemcpy(hinfoArray, infoArray, matrixSize*matrixSize*sizeof(int), cudaMemcpyDeviceToHost);
        for (int i = 0; i < matrixSize*matrixSize; i++) printf("%d,", hinfoArray[i]);
        printf("\n");
        return 0;
    }
    $ nvcc -o t1556 t1556.cu -lcublas -lcusolver
    t1556.cu(30): warning: variable "cudaStatus" was set but never used

    $ ./t1556
    cuBLAS DGETRF SUCCESSFUL!
    cuSOLVER DGETRF SUCCESSFUL!
    5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    $ cat t1557.cpp
    #include <iostream>
    #include <vector>
    #include <lapacke/lapacke.h>
    using namespace std;

//    extern "C" void dgetrf_(int* dim1, int* dim2, double* a, int* lda, int* ipiv, int* info);
//    extern "C" void dgetrs_(char *TRANS, int *N, int *NRHS, double *A, int *LDA, int *IPIV, double *B, int *LDB, int *INFO );

    int main()
    {
       char trans = 'N';
       int dim = 5;
       int LDA = dim;
       int info;

       vector<double> a,b;

       a.push_back(2.0); a.push_back(4.0); a.push_back(1.0); a.push_back(-3.0); a.push_back(0.0);
       a.push_back(-1.0); a.push_back(-2.0); a.push_back(2.0); a.push_back(4.0); a.push_back(0.0);
       a.push_back(4.0); a.push_back(2.0); a.push_back(-3.0); a.push_back(5.0); a.push_back(0.0);
       a.push_back(5.0); a.push_back(-4.0); a.push_back(-3.0); a.push_back(1.0); a.push_back(0.0);
       a.push_back(0.0); a.push_back(0.0); a.push_back(0.0); a.push_back(0.0); a.push_back(0.0);

       int ipiv[5];
       LAPACK_dgetrf(&dim, &dim, &*a.begin(), &LDA, ipiv, &info);
       printf("info = %d\n", info);
       if (info == 0)
           printf("dgetrf successful\n");
       else
           printf("dgetrf unsuccessful\n");

       return 0;
    }
    $ g++ t1557.cpp -o t1557 -llapack
    $ ./t1557
    info = 5
    dgetrf unsuccessful
    $

I am using the LAPACK installed from CentOS.

CentOS 7, CUDA 10.1.243, Tesla V100.