OpenCL kernel doesn't get executed
There is a set of rules, each of which corresponds to a certain cellular automaton. I need to check every rule for the bijectivity property. Since there are so many of them (2^32, to be precise), I decided to use my GPU for the task. But after roughly a week I am still struggling with a bug.
In short, when the kernel is enqueued and its execution is supposedly taking place on the GPU, GPU usage looks as if the device were idle. Moreover, after I added a few statements to the kernel code to see whether the kernel was running, I found no trace of them, so the kernel itself is not being executed either. On top of that, every error code equals CL_SUCCESS. I may well be doing something wrong, since I am new to OpenCL programming, and any help is greatly appreciated.
Here is the host-side code, with some parts abbreviated:
#define CL_USE_DEPRECATED_OPENCL_2_0_APIS
//some includes here
#define GLOBAL_SIZE 4096
#define LOCAL_SIZE 256
#define GLOBAL_SCOPE 0xffffffff
int main()
{
    //we assume that global_size divides evenly into global_scope
    //and therefore there is no need to process a remainder
    long rules_per_thread = GLOBAL_SCOPE / GLOBAL_SIZE;
    int * starts = new int[GLOBAL_SIZE];
    int * stops = new int[GLOBAL_SIZE];
    int count = 0;
    for (int i = 0; i < GLOBAL_SIZE; i++) {
        starts[i] = count;
        count += rules_per_thread;
        stops[i] = count;
        count++;
    }
    ...
    /*obtaining platform, device, building program*/
    ...
    /*====CREATING BUFFERS====*/
    //buffer for storing approved automata
    const int bufSize = 10000; //buffer size picked as a rough guess; might need more space later
    uint32_t* bijective_aut = new uint32_t[bufSize];
    std::fill(&bijective_aut[0], &bijective_aut[bufSize - 1], 0);
    //first value in the array serves as a global iterator over the array
    //and is initially set to the base offset
    bijective_aut[0] = 3;
    //second value serves as an indicator of the array length
    bijective_aut[1] = bufSize;
    cl::Buffer buf(context, CL_MEM_READ_WRITE, sizeof(uint32_t) * bufSize);
    cl::Buffer starts_buf(context, CL_MEM_READ_ONLY, sizeof(int) * GLOBAL_SIZE);
    cl::Buffer stops_buf(context, CL_MEM_READ_ONLY, sizeof(int) * GLOBAL_SIZE);
    /*====SETTING UP COMMAND QUEUE====*/
    cl::CommandQueue queue(context, device);
    err = queue.enqueueWriteBuffer(buf, CL_FALSE, 0, sizeof(uint32_t) * bufSize, bijective_aut);
    err = queue.enqueueWriteBuffer(starts_buf, CL_FALSE, 0, sizeof(int) * GLOBAL_SIZE, starts);
    err = queue.enqueueWriteBuffer(stops_buf, CL_FALSE, 0, sizeof(int) * GLOBAL_SIZE, stops);
    /*====CREATING KERNEL, SETTING ITS VARIABLES====*/
    cl::Kernel bc_kernel(program, "bijection_check", &err);
    err = bc_kernel.setArg(0, buf);
    err = bc_kernel.setArg(1, starts_buf);
    err = bc_kernel.setArg(2, stops_buf);
    /*====EXECUTING KERNEL====*/
    cl::Event event;
    err = queue.enqueueNDRangeKernel(bc_kernel, cl::NullRange, cl::NDRange(GLOBAL_SIZE), cl::NDRange(LOCAL_SIZE), nullptr, &event);
    event.wait();
    err = queue.enqueueReadBuffer(buf, CL_FALSE, 0, sizeof(uint32_t) * bufSize, bijective_aut);
    cl::finish();
}
And here is the kernel code:
__kernel void bijection_check (
    __global uint * bijective_rules, //stores approved bijective rules
    __global const uint * starts,
    __global const uint * stops
)
{
    __private int idx = get_global_id(0);
    int iterator = bijective_rules[0]; //indicates the next free cell to write into
    int start = starts[idx];
    int stop = stops[idx];
    bool check = true;
    //some variables required for the test go here
    //iterating over rules between *start* and *stop*
    for (uint rule = start; rule < stop; rule++)
    {
        ...
        /*then goes the test of the rule for bijectivity*/
        ...
        //writing the current rule to the general list if it turned out to be bijective
        if ((check == true) && (iterator < 10000))
        {
            bijective_rules[iterator] = rule;
            bijective_rules[0]++;
        }
        else
        {
            bijective_rules[2]++;
        }
    }
    bijective_rules[3]++;
}
Judging by the array read back from the buffer after execution, neither of the last two statements was executed even once. That is, after the kernel run the bijective_rules array is in exactly the same state it was given on the host side beforehand.
You have a race condition: you read bijective_rules[0], but at the same time other threads may be executing bijective_rules[0]++, i.e. both reading from and writing to that memory location. A race condition occurs whenever two threads write different data to the same memory address; which of the two threads ends up determining the result is random. So your results will be random and not reproducible.
If several threads need to increment a value at the same memory location, use the atomic function atomic_inc. An atomic function locks the memory location while one thread is working on it, and all other threads have to wait their turn.
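For illustration, here is a minimal sketch of that suggestion (it keeps the buffer layout from the question but omits the bijectivity test itself): each work-item asks atomic_inc for the next free slot and only then writes its rule into it.
__kernel void bijection_check (
    __global uint * bijective_rules,
    __global const uint * starts,
    __global const uint * stops
)
{
    int idx = get_global_id(0);
    uint start = starts[idx];
    uint stop = stops[idx];
    for (uint rule = start; rule < stop; rule++)
    {
        bool check = true;
        /*bijectivity test of *rule* goes here*/
        if (check)
        {
            //atomic_inc returns the previous value, so *slot* is unique to this write
            uint slot = atomic_inc(&bijective_rules[0]);
            if (slot < 10000)
                bijective_rules[slot] = rule;
        }
        else
        {
            atomic_inc(&bijective_rules[2]); //rejected-rule counter
        }
    }
    atomic_inc(&bijective_rules[3]); //finished work-item counter
}
The atomics serialize those updates, so work-items queue up on the counter, but the counter and the stored rules stay consistent with each other.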
To get rid of the race condition, read from one copy of the buffer (or from one particular memory address) and write to a second copy (or address). That way you never write to memory that other concurrently running threads are reading from.
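A sketch of what that could look like for this kernel (the buffer names rules_in and rules_out are hypothetical, not from the original code): the kernel only reads from one buffer and only writes to the other, with each work-item confined to its own slice of the output.
__kernel void bijection_check (
    __global const uint * rules_in,  //previous state, only ever read
    __global uint * rules_out,       //results, only ever written
    __global const uint * starts,
    __global const uint * stops
)
{
    int idx = get_global_id(0);
    //work-item idx writes only to rules_out[starts[idx]] .. rules_out[stops[idx] - 1],
    //so no work-item ever writes to memory that another one is reading
    ...
}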