Why is cuMemAddressReserve() failing with CUDA_ERROR_INVALID_VALUE?

Consider the following program (written in C syntax):

#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

int main() {
    CUresult result;
    unsigned int init_flags = 0;
    result = cuInit(init_flags);
    if (result != CUDA_SUCCESS) { exit(EXIT_FAILURE); }
    CUcontext ctx;
    unsigned int ctx_create_flags = 0;
    CUdevice device_id = 0;
    result = cuCtxCreate(&ctx, ctx_create_flags, device_id);
    // Note: The created context is also made the current context,
    // so we are _in_ a context from now on.
    if (result != CUDA_SUCCESS) { exit(EXIT_FAILURE); }
    CUdeviceptr requested = 0;
    CUdeviceptr reserved;
    size_t size = 0x20000;
    size_t alignment = 0; // default
    unsigned long long reserve_flags = 0;

    // -----------------------------------
    // ==>> FAILURE on next statement <<==
    // -----------------------------------

    result = cuMemAddressReserve(&reserved, size, alignment, requested, reserve_flags);
    if (result != CUDA_SUCCESS) {
        const char* error_string;
        cuGetErrorString(result, &error_string);
        fprintf(stderr, "cuMemAddressReserve() failed: %s\n", error_string);
        exit(EXIT_FAILURE);
    }
    return 0;
}

It fails when attempting the reservation:

cuMemAddressReserve() failed: invalid argument

What's wrong with my arguments? Is it the size? The alignment? The requested address of 0? And if it's the latter - how can I know which address to request, when I don't actually care where the reservation ends up?

If I remember correctly, the sizes used with the virtual memory management functions must be multiples of the CUDA allocation granularity. See cuMemGetAllocationGranularity and this blog post: https://developer.nvidia.com/blog/introducing-low-level-gpu-virtual-memory-management/

The following works on my machine.

#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

int main() {
    CUresult result;
    unsigned int init_flags = 0;
    result = cuInit(init_flags);
    if (result != CUDA_SUCCESS) { exit(EXIT_FAILURE); }
    CUcontext ctx;
    unsigned int ctx_create_flags = 0;
    CUdevice device_id = 0;
    result = cuCtxCreate(&ctx, ctx_create_flags, device_id);
    // Note: The created context is also made the current context,
    // so we are _in_ a context from now on.
    if (result != CUDA_SUCCESS) { exit(EXIT_FAILURE); }
    CUdeviceptr requested = 0;
    CUdeviceptr reserved;
    size_t size = 0x20000;
    size_t alignment = 0; // default
    unsigned long long reserve_flags = 0;

    // Query the minimum allocation granularity for this device; all sizes
    // passed to the virtual memory management APIs must be multiples of it.
    size_t granularity;
    CUmemAllocationProp prop = {0}; // zero out the fields we don't set explicitly
    prop.type = CU_MEM_ALLOCATION_TYPE_PINNED;
    prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
    prop.location.id = (int)device_id;
    prop.win32HandleMetaData = NULL;
    result = cuMemGetAllocationGranularity(&granularity, &prop, CU_MEM_ALLOC_GRANULARITY_MINIMUM);
    if (result != CUDA_SUCCESS) { exit(EXIT_FAILURE); }
    printf("minimum granularity %zu\n", granularity);

    // Round the requested size up to the nearest multiple of the granularity
    size_t padded_size = ((size + granularity - 1) / granularity) * granularity;
    result = cuMemAddressReserve(&reserved, padded_size, alignment, requested, reserve_flags);
    if (result != CUDA_SUCCESS) {
        const char* error_string;
        cuGetErrorString(result, &error_string);
        fprintf(stderr, "cuMemAddressReserve() failed: %s\n", error_string);
        exit(EXIT_FAILURE);
    }
    return 0;
}
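
For context, here is roughly how the reservation fits into the rest of the virtual memory management workflow described in the blog post linked above. This is only a sketch continuing from the code above (it reuses prop, padded_size, reserved and device_id), but it shows why the granularity matters beyond the reservation itself: cuMemCreate() and cuMemMap() also expect sizes that are multiples of it.

    // Continuing after the successful reservation (sketch, not a complete program):
    // back the reserved range with physical memory and map it into the range.
    CUmemGenericAllocationHandle alloc_handle;
    result = cuMemCreate(&alloc_handle, padded_size, &prop, 0);
    if (result != CUDA_SUCCESS) { exit(EXIT_FAILURE); }
    result = cuMemMap(reserved, padded_size, 0, alloc_handle, 0);
    if (result != CUDA_SUCCESS) { exit(EXIT_FAILURE); }

    // Mapping alone is not enough - access must be granted explicitly.
    CUmemAccessDesc access_desc = {0};
    access_desc.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
    access_desc.location.id = (int)device_id;
    access_desc.flags = CU_MEM_ACCESS_FLAGS_PROT_READWRITE;
    result = cuMemSetAccess(reserved, padded_size, &access_desc, 1);
    if (result != CUDA_SUCCESS) { exit(EXIT_FAILURE); }

    // ... use the memory at 'reserved' ...

    // Teardown, in reverse order.
    cuMemUnmap(reserved, padded_size);
    cuMemRelease(alloc_handle);
    cuMemAddressFree(reserved, padded_size);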

tl;dr: Your reserved region size is not a multiple of (some device's) allocation granularity.

As @AbatorAbetor suggested, cuMemAddressReserve() implicitly requires the size of the memory region to be a multiple of some granularity value. And despite 0x20000 looking like a generous enough value (that's 2^17 bytes, while system memory pages are typically 4 KiB = 2^12 bytes), NVIDIA GPUs are more demanding here.

For example, a Pascal-based GTX 1050 Ti GPU with ~4 GB of memory has a granularity of 0x200000, i.e. 2 MiB - 16 times the size you were trying to reserve.

Now, what happens if we have two devices with different granularity values? Would we need to use the least common multiple? Who knows.
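
For what it's worth, one conservative way to handle that case (a sketch of my own, not something the documentation promises; max_min_granularity is just a made-up name): query the minimum granularity of every device that may map memory into the reservation and round your sizes up to the largest one. The granularities I have seen are powers of two, so the maximum is also a common multiple; if that assumption ever breaks, you would compute a proper least common multiple instead.

#include <cuda.h>
#include <stddef.h>

// Sketch: largest minimum-granularity value across all devices.
// Assumes cuInit() has already been called, and that the reported
// granularities are powers of two (so the maximum is also a common multiple).
static size_t max_min_granularity(void) {
    int device_count = 0;
    if (cuDeviceGetCount(&device_count) != CUDA_SUCCESS) { return 0; }
    size_t max_granularity = 0;
    for (int i = 0; i < device_count; i++) {
        CUmemAllocationProp prop = {0};
        prop.type = CU_MEM_ALLOCATION_TYPE_PINNED;
        prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
        prop.location.id = i; // device ordinal
        size_t granularity = 0;
        if (cuMemGetAllocationGranularity(&granularity, &prop,
                                          CU_MEM_ALLOC_GRANULARITY_MINIMUM) != CUDA_SUCCESS) {
            continue;
        }
        if (granularity > max_granularity) { max_granularity = granularity; }
    }
    return max_granularity;
}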

Anyway, bottom line: always check the granularity before allocating and reserving.

I have filed this with NVIDIA as a documentation bug (bug 3486420; but you may not be able to follow the link, since NVIDIA hides its bugs from its users).