NCCL 计划的 NVProf
NVProf for NCCL program
当我尝试使用 NVProf 并附带 --metrics all 来分析我的 NCCL 程序时,分析结果总是返回类似如下的内容:
==2781== NVPROF is profiling process 2781, command: ./nccl_example 2 16
==2781== Profiling application: ./nccl_example 2 16
==2781== Profiling result:
没有任何 events/metrics 被分析到。
我的简单 NCCL 程序如下:
#include <stdio.h>
#include "cuda_runtime.h"
#include "nccl.h"
// Single-process, multi-GPU NCCL all-reduce example: one host thread drives
// nDev GPUs, runs one ncclAllReduce across them, then tears everything down.
// NOTE(review): relies on CUDACHECK/NCCLCHECK error-checking macros that are
// not defined in this snippet — they must be supplied elsewhere (as in the
// official NCCL examples); confirm before compiling.
int main(int argc, char* argv[])
{
    // Number of devices managed by this process. The original code declared
    // comms[4]/devs[4] while using nDev = 3; sizes now agree with nDev.
    const int nDev = 3;
    ncclComm_t comms[nDev];            // one communicator per device
    const int size = 32 * 1024 * 1024; // element count per buffer (floats)
    int devs[nDev] = {0, 1, 2};        // CUDA device ordinals to use

    // Allocate per-device send/recv buffers and one stream per device.
    float** sendbuff = (float**)malloc(nDev * sizeof(float*));
    float** recvbuff = (float**)malloc(nDev * sizeof(float*));
    cudaStream_t* s = (cudaStream_t*)malloc(nDev * sizeof(cudaStream_t));
    for (int i = 0; i < nDev; ++i) {
        CUDACHECK(cudaSetDevice(i));
        CUDACHECK(cudaMalloc(sendbuff + i, size * sizeof(float)));
        CUDACHECK(cudaMalloc(recvbuff + i, size * sizeof(float)));
        // cudaMemset is byte-wise: value 1 fills every byte, producing
        // 0x01010101-patterned floats, not 1.0f. Kept as in the original;
        // the example only checks completion, not numeric results.
        CUDACHECK(cudaMemset(sendbuff[i], 1, size * sizeof(float)));
        CUDACHECK(cudaMemset(recvbuff[i], 0, size * sizeof(float)));
        CUDACHECK(cudaStreamCreate(s + i));
    }

    // Initialize NCCL: one communicator per listed device.
    NCCLCHECK(ncclCommInitAll(comms, nDev, devs));

    // Issue the collective. The Group API is required when a single thread
    // drives multiple devices, so the per-device calls are batched.
    NCCLCHECK(ncclGroupStart());
    for (int i = 0; i < nDev; ++i)
        NCCLCHECK(ncclAllReduce((const void*)sendbuff[i], (void*)recvbuff[i],
                                size, ncclFloat, ncclSum, comms[i], s[i]));
    NCCLCHECK(ncclGroupEnd());

    // Synchronize each stream to wait for the NCCL operation to complete.
    for (int i = 0; i < nDev; ++i) {
        CUDACHECK(cudaSetDevice(i));
        CUDACHECK(cudaStreamSynchronize(s[i]));
    }

    // Release device buffers and streams (the original leaked the streams).
    for (int i = 0; i < nDev; ++i) {
        CUDACHECK(cudaSetDevice(i));
        CUDACHECK(cudaFree(sendbuff[i]));
        CUDACHECK(cudaFree(recvbuff[i]));
        CUDACHECK(cudaStreamDestroy(s[i]));
    }

    // Finalize NCCL communicators.
    for (int i = 0; i < nDev; ++i)
        ncclCommDestroy(comms[i]);

    // Free host-side bookkeeping arrays (leaked in the original).
    free(sendbuff);
    free(recvbuff);
    free(s);

    printf("Success \n");
    return 0;
}
因为我需要知道 NCCL API 的详细指标,这样我才能更深入地了解它的性能。
该行为是预期的。
默认情况下收集的 events 和 metrics 都属于 CUDA 设备代码 activity。要查看可能具有指导意义的内容,请尝试使用 --print-gpu-trace
开关进行分析(并去掉 --metrics all
)。
文档中记录的“指标”(metrics)并不适用于 NCCL 正在进行的操作(数据复制),它们只适用于 CUDA 内核(即 CUDA 设备代码 activity)。
nvprof
似乎有可以针对 NVLink activity 收集的指标。要查看这些指标,请在适用的系统(例如具有 NVLink 的系统)上运行如下命令,例如:
nvprof --query-metrics
或
nvprof --query-metrics |grep -i nvlink
当我尝试使用 NVProf 并附带 --metrics all 来分析我的 NCCL 程序时,分析结果总是返回类似如下的内容:
==2781== NVPROF is profiling process 2781, command: ./nccl_example 2 16
==2781== Profiling application: ./nccl_example 2 16
==2781== Profiling result:
没有任何 events/metrics 被分析到。我的简单 NCCL 程序如下:
#include <stdio.h>
#include "cuda_runtime.h"
#include "nccl.h"
// Single-process, multi-GPU NCCL all-reduce example: one host thread drives
// nDev GPUs, runs one ncclAllReduce across them, then cleans up.
// NOTE(review): CUDACHECK/NCCLCHECK error-checking macros are used but not
// defined in this snippet — presumably defined elsewhere; confirm.
int main(int argc, char* argv[])
{
ncclComm_t comms[4];
// managing 4 devices
// NOTE(review): comms/devs are sized for 4 devices but nDev is 3, so only
// the first 3 entries are used — looks like a leftover inconsistency.
int nDev = 3;
int size = 32*1024*1024;
int devs[4] = {0, 1, 2};
//allocating and initializing device buffers
// NOTE(review): these three malloc'd host arrays are never freed below.
float** sendbuff = (float**)malloc(nDev * sizeof(float*));
float** recvbuff = (float**)malloc(nDev * sizeof(float*));
cudaStream_t* s = (cudaStream_t*)malloc(sizeof(cudaStream_t)*nDev);
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(i));
CUDACHECK(cudaMalloc(sendbuff + i, size * sizeof(float)));
CUDACHECK(cudaMalloc(recvbuff + i, size * sizeof(float)));
// cudaMemset is byte-wise: value 1 fills every byte (0x01010101 pattern),
// not the float value 1.0f.
CUDACHECK(cudaMemset(sendbuff[i], 1, size * sizeof(float)));
CUDACHECK(cudaMemset(recvbuff[i], 0, size * sizeof(float)));
CUDACHECK(cudaStreamCreate(s+i));
}
//initializing NCCL
NCCLCHECK(ncclCommInitAll(comms, nDev, devs));
//calling NCCL communication API. Group API is required when using
//multiple devices per thread
NCCLCHECK(ncclGroupStart());
for (int i = 0; i < nDev; ++i)
NCCLCHECK(ncclAllReduce((const void*)sendbuff[i], (void*)recvbuff[i],
size, ncclFloat, ncclSum, comms[i], s[i]));
NCCLCHECK(ncclGroupEnd());
//synchronizing on CUDA streams to wait for completion of NCCL operation
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(i));
CUDACHECK(cudaStreamSynchronize(s[i]));
}
//free device buffers
// NOTE(review): the streams created above are never destroyed here.
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(i));
CUDACHECK(cudaFree(sendbuff[i]));
CUDACHECK(cudaFree(recvbuff[i]));
}
//finalizing NCCL
for(int i = 0; i < nDev; ++i)
ncclCommDestroy(comms[i]);
printf("Success \n");
return 0;
}
因为我需要知道 NCCL API 的详细指标,这样我才能更深入地了解它的性能。
该行为是预期的。
默认情况下收集的 events 和 metrics 都属于 CUDA 设备代码 activity。要查看可能具有指导意义的内容,请尝试使用 --print-gpu-trace
开关进行分析(并去掉 --metrics all
)。
文档中记录的“指标”(metrics)并不适用于 NCCL 正在进行的操作(数据复制),它们只适用于 CUDA 内核(即 CUDA 设备代码 activity)。
nvprof
似乎有可以针对 NVLink activity 收集的指标。要查看这些指标,请在适用的系统(例如具有 NVLink 的系统)上运行如下命令,例如:
nvprof --query-metrics
或
nvprof --query-metrics |grep -i nvlink