Is it not possible to change the device in a CUDA Graph node?
I am trying to change the current device inside a CUDA graph by creating this host node:
cudaGraph_t graph;

// Node #1: Create the 1st setDevice
cudaHostNodeParams hostNodeParams = {0};
memset(&hostNodeParams, 0, sizeof(hostNodeParams));
hostNodeParams.fn = [](void *data) {
    int passed_device_ordinal = *(int *)(data);
    cout << "CUDA-Graph: in the host node: changing the device to: "
         << passed_device_ordinal << endl;
    CUDA_CHECK(cudaSetDevice(passed_device_ordinal));
};
hostNodeParams.userData = (void *)&device_1;

// Node #1: Add the 1st setDevice
CUDA_CHECK(cudaGraphAddHostNode(&setDevice_1, graph, &copy_0to1, 1,
                                &hostNodeParams));
When I run the code, I get this output:
CUDA-Graph: in the host node: changing the device to: 1
Error operation not permitted at line 68 in file src/MultiGPU.cu
Is it possible to change the device within a CUDA graph?
During graph execution, the current device cannot be changed through a host callback, because callbacks are not allowed to make CUDA API calls.
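As a minimal illustration of this restriction (hostFn and its counter payload are hypothetical names, not from the question):

void hostFn(void* userData){
    int* counter = static_cast<int*>(userData);
    ++(*counter);          // plain host-side work: allowed in a host node
    // cudaSetDevice(1);   // any CUDA API call here is forbidden and fails,
    //                     // as in the "operation not permitted" error above
}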
There are two ways to specify the device on which the kernels in a graph will execute:

1. Use stream capture to create a multi-GPU graph.
2. When constructing the graph manually, nodes are assigned to the device that is current at the time the node is added. Call cudaSetDevice before adding your kernel node (see the sketch right after this list).
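For instance, a minimal sketch of the manual approach, assuming a kernel symbol kernel and a buffer devPtr1 already allocated on device 1 (both names are assumptions here):

cudaGraph_t g;
cudaGraphCreate(&g, 0);

cudaSetDevice(1);                         // make device 1 current ...
cudaKernelNodeParams params{};
params.func = (void*)kernel;
params.gridDim = dim3(1, 1, 1);
params.blockDim = dim3(1, 1, 1);
void* args[] = { (void*)&devPtr1 };       // devPtr1: buffer residing on device 1
params.kernelParams = args;
cudaGraphNode_t node;
cudaGraphAddKernelNode(&node, g, nullptr, 0, &params);  // ... so this node is assigned to device 1

cudaSetDevice(0);                         // restore the default device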
The following code demonstrates both approaches with a simple pipeline (kernel, device-to-host memcpy, host callback) executed on each GPU.
#include <thread>
#include <future>
#include <chrono>
#include <array>
#include <vector>
#include <cassert>

// Writes 42 into the device buffer.
__global__
void kernel(int* data){
    *data = 42;
}

struct CallbackData{
    int* pinnedBuffer;
    std::vector<int>* vec;
};

// Host callback: appends the value from the pinned buffer to the vector.
void callback(void* args){
    CallbackData* data = static_cast<CallbackData*>(args);
    data->vec->push_back(*data->pinnedBuffer);
}

int main(){
    constexpr int numDevices = 2;
    std::array<int, numDevices> deviceIds{0, 1};
    constexpr int numIterations = 100;

    std::array<cudaStream_t, numDevices> streams{};
    std::array<cudaEvent_t, numDevices> events{};
    std::array<int*, numDevices> deviceBuffers{};
    std::array<int*, numDevices> pinnedBuffers{};
    std::array<std::vector<int>, numDevices> vectors{};
    std::array<CallbackData, numDevices> callbackArgs{};

    // Per-device setup: stream, event, device buffer, pinned host buffer.
    for(int i = 0; i < numDevices; i++){
        cudaSetDevice(deviceIds[i]);
        cudaStreamCreate(&streams[i]);
        cudaEventCreateWithFlags(&events[i], cudaEventDisableTiming);
        cudaMalloc(&deviceBuffers[i], sizeof(int));
        cudaMallocHost(&pinnedBuffers[i], sizeof(int));
        vectors[i].reserve(numIterations);
        callbackArgs[i].pinnedBuffer = pinnedBuffers[i];
        callbackArgs[i].vec = &vectors[i];
    }

    cudaSetDevice(deviceIds[0]);
    cudaStream_t mainstream;
    cudaStreamCreate(&mainstream);
    cudaEvent_t mainevent;
    cudaEventCreateWithFlags(&mainevent, cudaEventDisableTiming);

    // Fork from mainstream into a per-device stream, run the pipeline
    // (kernel -> D2H memcpy -> host callback) numIterations times on each
    // device, then join the per-device streams back into mainstream.
    auto launch = [&](){
        cudaEventRecord(mainevent, mainstream);
        for(int i = 0; i < numDevices; i++){
            cudaSetDevice(deviceIds[i]);
            auto& stream = streams[i];
            cudaStreamWaitEvent(stream, mainevent);
            for(int k = 0; k < numIterations; k++){
                kernel<<<1, 1, 0, stream>>>(deviceBuffers[i]);
                cudaMemcpyAsync(pinnedBuffers[i], deviceBuffers[i], sizeof(int),
                                cudaMemcpyDeviceToHost, stream);
                cudaLaunchHostFunc(stream, callback, (void*)&callbackArgs[i]);
            }
            cudaEventRecord(events[i], stream);
            cudaStreamWaitEvent(mainstream, events[i]);
        }
        cudaSetDevice(deviceIds[0]);
    };

    // Variant 1: plain stream execution, no graph.
    launch();
    cudaStreamSynchronize(mainstream);
    for(int i = 0; i < numDevices; i++){
        assert(vectors[i].size() == numIterations);
        for(auto x : vectors[i]){
            assert(x == 42);
        }
        vectors[i].clear();
    }

    // Variant 2: multi-GPU graph created via stream capture.
    {
        cudaStreamBeginCapture(mainstream, cudaStreamCaptureModeRelaxed);
        launch();
        cudaGraph_t graph;
        cudaStreamEndCapture(mainstream, &graph);

        cudaGraphExec_t execGraph;
        cudaGraphNode_t errorNode;
        cudaError_t status = cudaGraphInstantiate(&execGraph, graph, &errorNode, nullptr, 0);
        assert(status == cudaSuccess);
        cudaGraphDestroy(graph);

        cudaGraphLaunch(execGraph, mainstream);
        cudaStreamSynchronize(mainstream);
        for(int i = 0; i < numDevices; i++){
            assert(vectors[i].size() == numIterations);
            for(auto x : vectors[i]){
                assert(x == 42);
            }
            vectors[i].clear();
        }
        cudaGraphExecDestroy(execGraph);
    }

    // Variant 3: graph constructed manually, node by node.
    {
        cudaGraph_t graph;
        cudaGraphCreate(&graph, 0);

        for(int i = 0; i < numDevices; i++){
            // Nodes added while this device is current are assigned to it.
            cudaSetDevice(deviceIds[i]);

            cudaGraphNode_t* prev = nullptr;
            cudaGraphNode_t kernelNode;
            cudaGraphNode_t memcpyNode;
            cudaGraphNode_t hostNode;

            cudaKernelNodeParams kernelNodeParams{};
            kernelNodeParams.func = (void*)kernel;
            kernelNodeParams.gridDim = dim3(1, 1, 1);
            kernelNodeParams.blockDim = dim3(1, 1, 1);
            kernelNodeParams.sharedMemBytes = 0;
            void* kernelArgs[1] = {(void*)&deviceBuffers[i]};
            kernelNodeParams.kernelParams = kernelArgs;
            kernelNodeParams.extra = NULL;

            cudaHostNodeParams hostNodeParams{};
            hostNodeParams.fn = callback;
            hostNodeParams.userData = &callbackArgs[i];

            // Chain kernel -> memcpy -> host callback, numIterations times.
            for(int k = 0; k < numIterations; k++){
                cudaGraphAddKernelNode(&kernelNode, graph, prev,
                                       (prev == nullptr ? 0 : 1), &kernelNodeParams);
                cudaGraphAddMemcpyNode1D(&memcpyNode, graph, &kernelNode, 1,
                                         pinnedBuffers[i], deviceBuffers[i], sizeof(int),
                                         cudaMemcpyDeviceToHost);
                cudaGraphAddHostNode(&hostNode, graph, &memcpyNode, 1, &hostNodeParams);
                prev = &hostNode;
            }
            cudaSetDevice(deviceIds[0]);
        }

        cudaGraphExec_t execGraph;
        cudaGraphNode_t errorNode;
        cudaError_t status = cudaGraphInstantiate(&execGraph, graph, &errorNode, nullptr, 0);
        assert(status == cudaSuccess);
        cudaGraphDestroy(graph);

        cudaGraphLaunch(execGraph, mainstream);
        cudaStreamSynchronize(mainstream);
        for(int i = 0; i < numDevices; i++){
            assert(vectors[i].size() == numIterations);
            for(auto x : vectors[i]){
                assert(x == 42);
            }
            vectors[i].clear();
        }
        cudaGraphExecDestroy(execGraph);
    }

    cudaEventDestroy(mainevent);
    cudaStreamDestroy(mainstream);
    for(int i = 0; i < numDevices; i++){
        cudaSetDevice(deviceIds[i]);
        cudaStreamDestroy(streams[i]);
        cudaEventDestroy(events[i]);
        cudaFree(deviceBuffers[i]);
        cudaFreeHost(pinnedBuffers[i]);
    }
}
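All three variants end with the same assertions, so the program itself verifies that each GPU's callback ran numIterations times and observed the value 42. To try it, a build line along the lines of nvcc -std=c++14 multigpu.cu -o multigpu should work (the file name here is illustrative); at runtime the program assumes at least two visible CUDA devices.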