CUDA 中的递归 returns 非法内存访问
Recursion in CUDA returns illegal memory access
我正在编写一个数值积分程序,它实现了具有自适应步长的梯形法则。无需过多赘述,该算法使用递归来计算具有指定相对容差的给定区间内编码数学函数的积分。
我简化了发布代码,但保留了所有要点,因此某些部分可能看起来不必要或过于复杂。这是:
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cmath>
#include <iostream>
#include <iomanip>
class Integral {
public:
double value; // the value of the integral
__device__ __host__ Integral() : value(0) {};
__device__ __host__ Integral& operator+=(Integral &I);
};
__device__ Integral trapezoid(double a, double b, double tolerance, double fa, double fb);
__device__ __host__ double f(double x); // the integrand function
const int BLOCKS = 1;
const int THREADS = 1;
__global__ void controller(Integral *blockIntegrals, double a, double b, double tolerance) {
extern __shared__ Integral threadIntegrals[]; // an array of thread-local integrals
double fa = f(a), fb = f(b);
threadIntegrals[threadIdx.x] += trapezoid(a, b, tolerance, fa, fb);
blockIntegrals[blockIdx.x] += threadIntegrals[0];
}
int main() {
// *************** Input parameters ***************
double a = 1, b = 800; // integration bounds
double tolerance = 1e-7;
// ************************************************
cudaError cudaStatus;
Integral blockIntegrals[BLOCKS]; // an array of total integrals computed by each block
Integral *devBlockIntegrals;
cudaStatus = cudaMalloc((void**)&devBlockIntegrals, BLOCKS * sizeof(Integral));
if (cudaStatus != cudaSuccess)
std::cout << "cudaMalloc failed!\n";
double estimate = 0; // a rough 10-point estimate of the whole integral
double h = (b - a) / 10;
for (int i = 0; i < 10; i++)
estimate += f(a + i*h);
estimate *= h;
tolerance *= estimate; // compute relative tolerance
controller<<<BLOCKS, THREADS, THREADS*sizeof(Integral)>>>(devBlockIntegrals, a, b, tolerance);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
std::cout << "addKernel launch failed: " << cudaGetErrorString(cudaStatus) << "\n";
cudaStatus = cudaMemcpy(blockIntegrals, devBlockIntegrals, BLOCKS * sizeof(Integral), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
std::cout << "cudaMemcpy failed: " << cudaGetErrorString(cudaStatus) << "\n";
Integral result; // final result
for (int i = 0; i < BLOCKS; i++) // final reduction that sums the results of all blocks
result += blockIntegrals[i];
std::cout << "Integral = " << std::setprecision(15) << result.value;
cudaFree(devBlockIntegrals);
getchar();
return 0;
}
__device__ double f(double x) {
return log(x);
}
__device__ Integral trapezoid(double a, double b, double tolerance, double fa, double fb) {
double h = b - a; // compute the new step
double I1 = h*(fa + fb) / 2; // compute the first integral
double m = (a + b) / 2; // find the middle point
double fm = f(m); // function value at the middle point
h = h / 2; // make step two times smaller
double I2 = h*(0.5*fa + fm + 0.5*fb); // compute the second integral
Integral I;
if (abs(I2 - I1) <= tolerance) { // if tolerance is satisfied
I.value = I2;
}
else { // if tolerance is not satisfied
if (tolerance > 1e-15) // check that we are not requiring too high precision
tolerance /= 2; // request higher precision in every half
I += trapezoid(a, m, tolerance, fa, fm); // integrate the first half [a m]
I += trapezoid(m, b, tolerance, fm, fb); // integrate the second half [m b]
}
return I;
}
__device__ Integral& Integral::operator+=(Integral &I) {
this->value += I.value;
return *this;
}
为简单起见,我在这里只使用了一个线程。
现在,如果我 运行 这段代码,我会收到一条消息 "cudaMemcpy failed: an illegal memory access was encountered"。当我 运行 "cuda-memcheck" 我得到这个错误:
========= Invalid __local__ write of size 4
========= at 0x00000b18 in C:/Users/User/Desktop/Integrator Stack/Integrator_GPU/kernel.cu:73:controller(Integral*, double, double, double)
========= by thread (0,0,0) in block (0,0,0)
========= Address 0x00fff8ac is out of bounds
它说问题出在第 73 行
double m = (a + b) / 2;
难道我现在运行内存不足了?
如果我通过在 main
中将右边界从 b = 800
更改为 b = 700
来缩小积分区间,程序运行良好,并给出正确的结果。
为什么只是创建一个新变量时会出现非法内存访问错误?
此外,我有一个 相同 CPU 版本的程序,它运行完美,所以计算算法很可能是正确的。
Could it be that at this point I am running out of memory?
不完全是。我猜你 运行 超出调用堆栈 space 随着递归深度的增加。运行时为每个线程调用堆栈分配预设默认值,通常约为 1kb(尽管它可能因硬件和 CUDA 版本而异)。如果该函数的递归深度超过 16,我认为它不会花费很长时间。
您可以使用 cudaDeviceGetLimit
查询每个线程的确切堆栈大小并使用 cudaDeviceSetLimit
更改它,这可能会让您的代码在较大的递归深度下正确工作。一般来说,CUDA 中的高度递归代码不是一个好主意,编译器和硬件使用约定循环会比深度递归代码做得更好。
我正在编写一个数值积分程序,它实现了具有自适应步长的梯形法则。无需过多赘述,该算法使用递归来计算具有指定相对容差的给定区间内编码数学函数的积分。 我简化了发布代码,但保留了所有要点,因此某些部分可能看起来不必要或过于复杂。这是:
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cmath>
#include <iostream>
#include <iomanip>
class Integral {
public:
double value; // the value of the integral
__device__ __host__ Integral() : value(0) {};
__device__ __host__ Integral& operator+=(Integral &I);
};
__device__ Integral trapezoid(double a, double b, double tolerance, double fa, double fb);
__device__ __host__ double f(double x); // the integrand function
const int BLOCKS = 1;
const int THREADS = 1;
__global__ void controller(Integral *blockIntegrals, double a, double b, double tolerance) {
extern __shared__ Integral threadIntegrals[]; // an array of thread-local integrals
double fa = f(a), fb = f(b);
threadIntegrals[threadIdx.x] += trapezoid(a, b, tolerance, fa, fb);
blockIntegrals[blockIdx.x] += threadIntegrals[0];
}
int main() {
// *************** Input parameters ***************
double a = 1, b = 800; // integration bounds
double tolerance = 1e-7;
// ************************************************
cudaError cudaStatus;
Integral blockIntegrals[BLOCKS]; // an array of total integrals computed by each block
Integral *devBlockIntegrals;
cudaStatus = cudaMalloc((void**)&devBlockIntegrals, BLOCKS * sizeof(Integral));
if (cudaStatus != cudaSuccess)
std::cout << "cudaMalloc failed!\n";
double estimate = 0; // a rough 10-point estimate of the whole integral
double h = (b - a) / 10;
for (int i = 0; i < 10; i++)
estimate += f(a + i*h);
estimate *= h;
tolerance *= estimate; // compute relative tolerance
controller<<<BLOCKS, THREADS, THREADS*sizeof(Integral)>>>(devBlockIntegrals, a, b, tolerance);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
std::cout << "addKernel launch failed: " << cudaGetErrorString(cudaStatus) << "\n";
cudaStatus = cudaMemcpy(blockIntegrals, devBlockIntegrals, BLOCKS * sizeof(Integral), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
std::cout << "cudaMemcpy failed: " << cudaGetErrorString(cudaStatus) << "\n";
Integral result; // final result
for (int i = 0; i < BLOCKS; i++) // final reduction that sums the results of all blocks
result += blockIntegrals[i];
std::cout << "Integral = " << std::setprecision(15) << result.value;
cudaFree(devBlockIntegrals);
getchar();
return 0;
}
__device__ double f(double x) {
return log(x);
}
__device__ Integral trapezoid(double a, double b, double tolerance, double fa, double fb) {
double h = b - a; // compute the new step
double I1 = h*(fa + fb) / 2; // compute the first integral
double m = (a + b) / 2; // find the middle point
double fm = f(m); // function value at the middle point
h = h / 2; // make step two times smaller
double I2 = h*(0.5*fa + fm + 0.5*fb); // compute the second integral
Integral I;
if (abs(I2 - I1) <= tolerance) { // if tolerance is satisfied
I.value = I2;
}
else { // if tolerance is not satisfied
if (tolerance > 1e-15) // check that we are not requiring too high precision
tolerance /= 2; // request higher precision in every half
I += trapezoid(a, m, tolerance, fa, fm); // integrate the first half [a m]
I += trapezoid(m, b, tolerance, fm, fb); // integrate the second half [m b]
}
return I;
}
__device__ Integral& Integral::operator+=(Integral &I) {
this->value += I.value;
return *this;
}
为简单起见,我在这里只使用了一个线程。 现在,如果我 运行 这段代码,我会收到一条消息 "cudaMemcpy failed: an illegal memory access was encountered"。当我 运行 "cuda-memcheck" 我得到这个错误:
========= Invalid __local__ write of size 4
========= at 0x00000b18 in C:/Users/User/Desktop/Integrator Stack/Integrator_GPU/kernel.cu:73:controller(Integral*, double, double, double)
========= by thread (0,0,0) in block (0,0,0)
========= Address 0x00fff8ac is out of bounds
它说问题出在第 73 行
double m = (a + b) / 2;
难道我现在运行内存不足了?
如果我通过在 main
中将右边界从 b = 800
更改为 b = 700
来缩小积分区间,程序运行良好,并给出正确的结果。
为什么只是创建一个新变量时会出现非法内存访问错误?
此外,我有一个 相同 CPU 版本的程序,它运行完美,所以计算算法很可能是正确的。
Could it be that at this point I am running out of memory?
不完全是。我猜你 运行 超出调用堆栈 space 随着递归深度的增加。运行时为每个线程调用堆栈分配预设默认值,通常约为 1kb(尽管它可能因硬件和 CUDA 版本而异)。如果该函数的递归深度超过 16,我认为它不会花费很长时间。
您可以使用 cudaDeviceGetLimit
查询每个线程的确切堆栈大小并使用 cudaDeviceSetLimit
更改它,这可能会让您的代码在较大的递归深度下正确工作。一般来说,CUDA 中的高度递归代码不是一个好主意,编译器和硬件使用约定循环会比深度递归代码做得更好。