GPU slows way down after a few loops
I have been trying to implement the split-step method to numerically integrate the Gross-Pitaevskii equation. My code performs as expected in plain python, but to gain performance I have been adapting it with PyOpenCL so that it can run on a GPU. I seem to have gotten it working, since it agrees with the results from my python code (running on the CPU), but it takes much more time than I expected. A working example can be found below:
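For reference, one step of the split-step scheme implemented in the code below applies the nonlinear/potential factor in position space and the kinetic factor in Fourier space (this is just a restatement of the loop body further down):

U \leftarrow \mathcal{F}^{-1}\!\left[ e^{-i\,\Delta t\,\tfrac{1}{2}(k_x^2 + k_y^2)}\, \mathcal{F}\!\left[ e^{-i\,\Delta t\,\left(V + |U|^2\right)}\, U \right] \right]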
###########################################################################
# IMPORTS NECESSARY TO RUN
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import scipy.fftpack as sp
import time as time
import pyopencl as cl
import pyopencl.array as cl_array
from reikna.cluda import ocl_api
from reikna.core import Computation, Parameter, Annotation
from reikna.fft import FFT, FFTShift
################################################################################
# DEFINE SYSTEM PARAMETERS
Lx = 1000 # length of system in x-direction
Ly = 500 # length of system in y-direction
dx = 0.4 # space step
dt = 0.1*(dx**2) # calculated timestep
Nx = int(Lx/dx) # number of points in x-direction
Ny = int(Ly/dx) # number of points in y-direction
# create frequency space coordinate grid
kx = np.array([-Nx*np.pi/Lx+2.0*np.pi*i/Lx for i in range(Nx)]) #x-wavenumbers
# ^ used when we Fourier transform
ky = np.array([-Ny*np.pi/Ly+2.0*np.pi*i/Ly for i in range(Ny)]) #y-wavenumbers
kxx, kyy = np.meshgrid(kx, ky, sparse=True) # wavenumber grid
#################################################################################
# GENERATE TRAP POTENTIAL AND INITIAL VALUES
# define the trap potential matrix (constant)
# it has a value of 100 at the edge, and zero in the bulk
Vmat = 100.0*np.ones((Ny, Nx))
Vmat[4:-4,4:-4] = np.zeros((Ny - 8, Nx - 8))
# the initial wavefunction is zero at the edge (where the potential is nonzero)
# and 1 in the bulk (where the potential is zero).
U0 = np.zeros((Ny, Nx))
U0[4:-4,4:-4] = np.ones((Ny - 8, Nx - 8))
U = U0
###################################################################################
# PASS ARRAYS TO DEVICE
# define the PyOpenCL queue
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
# define the Reikna thread
api = ocl_api()
thr = api.Thread(queue)
# make sure all of the data types are correct
U = U.astype(np.complex128)
Vmat = Vmat.astype(np.float64)
L_op = np.exp(-1j*dt*0.5*( kxx**2+kyy**2 )).astype(np.complex128)
idtmat = np.array(1j*dt).astype(np.complex128) # we will use the 1j below
# pass the arrays to the device, all using pyopencl, can later use w/ reikna
op_dev = cl_array.to_device(queue, U)
Vmat_dev = cl_array.to_device(queue, Vmat)
L_op_dev = cl_array.to_device(queue, L_op)
idt_dev = cl_array.to_device(queue, idtmat)
###################################################################################
# PYOPENCL KERNEL DEFINITIONS
gpe = cl.Program(ctx, """
#pragma OPENCL EXTENSION cl_khr_fp64: enable
#define PYOPENCL_DEFINE_CDOUBLE
#include <pyopencl-complex.h>
__kernel void nonlinear_operator(__global const cdouble_t *U, __global const double *Vmat, __global const cdouble_t *idt, __global cdouble_t *U_aft)
{
// get thread id numbers
int gid0 = get_global_id(0);
int gid1 = get_global_id(1);
// get the global size of the blocks
int num0 = get_global_size(0);
// the real value that gets exponentiated
__local double mag;
mag = cdouble_abs(U[gid0 + num0 * gid1]) * cdouble_abs(U[gid0 + num0 * gid1]) + Vmat[gid0 + num0 * gid1];
// exponentiate and multiply real exponent by complex wavefct
U_aft[gid0 + num0 * gid1] = cdouble_mul( cdouble_exp( cdouble_mulr(idt[0], -mag) ), U[gid0 + num0 * gid1]);
}
__kernel void laplacian_operator(__global const cdouble_t *C, __global const cdouble_t *L_op, __global cdouble_t *C_aft)
{
// get thread id numbers
int gid0 = get_global_id(0);
int gid1 = get_global_id(1);
// get the global sizes of the various blocks
int num0 = get_global_size(0);
// exponentiate and multiply real exponent by complex wavefct
C_aft[gid0 + num0 * gid1] = cdouble_mul( L_op[gid0 + num0 * gid1], C[gid0 + num0 * gid1]);
}
""").build()
###################################################################################
# REIKNA KERNEL DEFINITIONS
fft = FFT(U)
fftc = fft.compile(thr)
shift = FFTShift(U)
shiftc = shift.compile(thr)
##############################################################################
# RUNNING THE KERNELS, TIMING INCLUDED
t0 = time.time()
t_loop = []
for i in range(100):
    t0_loop = time.time()
    # apply the nonlinear operator
    gpe.nonlinear_operator(queue, op_dev.shape, None, op_dev.data, Vmat_dev.data, idt_dev.data, op_dev.data)
    # transform to frequency space
    fftc(op_dev, op_dev)
    # apply the shift operator to get zero frequency components to center
    shiftc(op_dev, op_dev)
    # apply the Laplacian operator in frequency space
    gpe.laplacian_operator(queue, op_dev.shape, None, op_dev.data, L_op_dev.data, op_dev.data)
    # shift back
    shiftc(op_dev, op_dev)
    # transform back to position space
    fftc(op_dev, op_dev, inverse=True)
    t_loop.append(time.time() - t0_loop)
Delta_t = time.time()-t0
# Copy the array back from the device
t_copy = time.time()
final_array = op_dev.get()
Delta_tcopy = time.time()-t_copy
##################################################################################
# COMPARE TO CALCULATION DONE ON CPU
t1 = time.time()
cpu_U = U
for i in range(100):
    cpu_U=np.exp( -1j*dt*( Vmat + cpu_U*np.conjugate(cpu_U) ))*cpu_U #advance w/ N op
    cpu_C=sp.fftshift(sp.fft2(cpu_U)) # transform to fourier space
    cpu_C=np.exp( -1j*dt*0.5*( kxx**2+kyy**2 ) )*cpu_C # advance w/ L op
    cpu_U=sp.ifft2(sp.fftshift(cpu_C)) # transform back
Delta_t1 = time.time() - t1
test = np.amax(abs(final_array-cpu_U))
if test <= 10**(-6.0):
    print('Correct!')
    print('GPU takes '+str(Delta_t)+' sec.')
    print('Copying takes '+str(Delta_tcopy)+' sec.')
    print('CPU Python takes '+str(Delta_t1)+' sec.')
else:
    print('Not correct!')
    print(test)
################################################################################
# WRITE OUT THE INDIVIDUAL LOOP TIMES INTO A FILE
target = open('loop_times.txt','w')
for i in range(len(t_loop)):
    target.write('Loop number '+str(i)+' takes '+str(t_loop[i])+' seconds to complete.')
    target.write('\n')
target.close()
If this code is run, it shows that the CPU and GPU results agree. However, running it on a Tesla K40c shows that the GPU only runs about 10 times faster than the CPU. Checking the loop_times.txt file, which contains the timing information for each loop on the GPU, shows that the loops are initially very fast. Despite this initial speed, after about 20 loops they become 200 times slower than before and stay at that speed for the rest of the computation. Does anyone have an idea why this might happen? My best guess is that something is wrong with the memory. The PyOpenCL docs here state that "...code based on pyopencl.array.Array can easily run into issue[s] because a fresh memory area is allocated for each intermediate result." I'm not sure this is the problem though, since I don't declare any intermediate arrays.
I have included the loop_times.txt file from a run on the Tesla K40c below, in case it helps diagnose the problem. Thanks in advance!
Loop number 0 takes 0.00145196914673 seconds to complete.
Loop number 1 takes 0.000530004501343 seconds to complete.
Loop number 2 takes 0.000539064407349 seconds to complete.
Loop number 3 takes 0.000540018081665 seconds to complete.
Loop number 4 takes 0.000539064407349 seconds to complete.
Loop number 5 takes 0.00052809715271 seconds to complete.
Loop number 6 takes 0.000566959381104 seconds to complete.
Loop number 7 takes 0.000523090362549 seconds to complete.
Loop number 8 takes 0.000649929046631 seconds to complete.
Loop number 9 takes 0.000531196594238 seconds to complete.
Loop number 10 takes 0.000524997711182 seconds to complete.
Loop number 11 takes 0.000524997711182 seconds to complete.
Loop number 12 takes 0.000520944595337 seconds to complete.
Loop number 13 takes 0.000530004501343 seconds to complete.
Loop number 14 takes 0.000522136688232 seconds to complete.
Loop number 15 takes 0.000525951385498 seconds to complete.
Loop number 16 takes 0.000523090362549 seconds to complete.
Loop number 17 takes 0.0888669490814 seconds to complete.
Loop number 18 takes 0.102005958557 seconds to complete.
Loop number 19 takes 0.102015972137 seconds to complete.
Loop number 20 takes 0.102032899857 seconds to complete.
Loop number 21 takes 0.101969957352 seconds to complete.
Loop number 22 takes 0.102008104324 seconds to complete.
Loop number 23 takes 0.102007865906 seconds to complete.
Loop number 24 takes 0.10200715065 seconds to complete.
Loop number 25 takes 0.102005004883 seconds to complete.
Loop number 26 takes 0.102000951767 seconds to complete.
Loop number 27 takes 0.102005004883 seconds to complete.
Loop number 28 takes 0.102003097534 seconds to complete.
Loop number 29 takes 0.101999998093 seconds to complete.
Loop number 30 takes 0.101995944977 seconds to complete.
Loop number 31 takes 0.101994037628 seconds to complete.
Loop number 32 takes 0.10199713707 seconds to complete.
Loop number 33 takes 0.101987838745 seconds to complete.
Loop number 34 takes 0.102010011673 seconds to complete.
Loop number 35 takes 0.102000951767 seconds to complete.
Loop number 36 takes 0.102009057999 seconds to complete.
Loop number 37 takes 0.102005004883 seconds to complete.
Loop number 38 takes 0.102013111115 seconds to complete.
Loop number 39 takes 0.102020025253 seconds to complete.
Loop number 40 takes 0.102008104324 seconds to complete.
Loop number 41 takes 0.102012872696 seconds to complete.
Loop number 42 takes 0.102003097534 seconds to complete.
Loop number 43 takes 0.102008104324 seconds to complete.
Loop number 44 takes 0.101997852325 seconds to complete.
Loop number 45 takes 0.102009773254 seconds to complete.
Loop number 46 takes 0.102011919022 seconds to complete.
Loop number 47 takes 0.101995944977 seconds to complete.
Loop number 48 takes 0.102001905441 seconds to complete.
Loop number 49 takes 0.102009057999 seconds to complete.
Loop number 50 takes 0.101994037628 seconds to complete.
Loop number 51 takes 0.102015018463 seconds to complete.
Loop number 52 takes 0.10200715065 seconds to complete.
Loop number 53 takes 0.102021932602 seconds to complete.
Loop number 54 takes 0.102017879486 seconds to complete.
Loop number 55 takes 0.102023124695 seconds to complete.
Loop number 56 takes 0.102003097534 seconds to complete.
Loop number 57 takes 0.102006912231 seconds to complete.
Loop number 58 takes 0.10199713707 seconds to complete.
Loop number 59 takes 0.102031946182 seconds to complete.
Loop number 60 takes 0.102022171021 seconds to complete.
Loop number 61 takes 0.102020025253 seconds to complete.
Loop number 62 takes 0.102014064789 seconds to complete.
Loop number 63 takes 0.102007865906 seconds to complete.
Loop number 64 takes 0.101998090744 seconds to complete.
Loop number 65 takes 0.102015018463 seconds to complete.
Loop number 66 takes 0.102014064789 seconds to complete.
Loop number 67 takes 0.102025032043 seconds to complete.
Loop number 68 takes 0.102019071579 seconds to complete.
Loop number 69 takes 0.102022886276 seconds to complete.
Loop number 70 takes 0.102005958557 seconds to complete.
Loop number 71 takes 0.102015972137 seconds to complete.
Loop number 72 takes 0.102024078369 seconds to complete.
Loop number 73 takes 0.101996898651 seconds to complete.
Loop number 74 takes 0.102014064789 seconds to complete.
Loop number 75 takes 0.10201215744 seconds to complete.
Loop number 76 takes 0.102012872696 seconds to complete.
Loop number 77 takes 0.101979017258 seconds to complete.
Loop number 78 takes 0.101991176605 seconds to complete.
Loop number 79 takes 0.102010011673 seconds to complete.
Loop number 80 takes 0.102005958557 seconds to complete.
Loop number 81 takes 0.102019071579 seconds to complete.
Loop number 82 takes 0.102010965347 seconds to complete.
Loop number 83 takes 0.102006912231 seconds to complete.
Loop number 84 takes 0.101999044418 seconds to complete.
Loop number 85 takes 0.102009057999 seconds to complete.
Loop number 86 takes 0.102022886276 seconds to complete.
Loop number 87 takes 0.10201382637 seconds to complete.
Loop number 88 takes 0.101995944977 seconds to complete.
Loop number 89 takes 0.102017879486 seconds to complete.
Loop number 90 takes 0.102014064789 seconds to complete.
Loop number 91 takes 0.10200214386 seconds to complete.
Loop number 92 takes 0.101999998093 seconds to complete.
Loop number 93 takes 0.102025032043 seconds to complete.
Loop number 94 takes 0.102019071579 seconds to complete.
Loop number 95 takes 0.101996898651 seconds to complete.
Loop number 96 takes 0.102020025253 seconds to complete.
Loop number 97 takes 0.101989984512 seconds to complete.
Loop number 98 takes 0.102004051208 seconds to complete.
Loop number 99 takes 0.102003097534 seconds to complete.
Since you don't synchronize the queue at the end of each iteration, what you are essentially measuring is enqueueing time. It seems that by the 17th iteration the queue buffer is full, so every new kernel call has to wait until another kernel finishes and is removed from the queue; from that moment on this gives you roughly the correct times. When I run it on my machine (an iMac with a GF755M), the queue actually accepted all 100 iterations' worth of kernels, and I then had to wait a minute for all of them to finish, giving me results like
GPU takes 0.055264949798583984 sec.
Copying takes 52.194445848464966 sec.
If you want to measure the actual iteration time, you need to synchronize the queue at the end of each iteration:
queue.finish()
or, equivalently, via the Reikna API:
thr.synchronize()
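As a concrete sketch, the timing loop from the question could be changed like this (everything reuses the objects defined in the question's script; only the synchronization call is new):
# Sketch: per-iteration timing with the queue drained each pass,
# so t_loop records execution time rather than enqueue time.
t_loop = []
for i in range(100):
    t0_loop = time.time()
    gpe.nonlinear_operator(queue, op_dev.shape, None, op_dev.data, Vmat_dev.data, idt_dev.data, op_dev.data)
    fftc(op_dev, op_dev)
    shiftc(op_dev, op_dev)
    gpe.laplacian_operator(queue, op_dev.shape, None, op_dev.data, L_op_dev.data, op_dev.data)
    shiftc(op_dev, op_dev)
    fftc(op_dev, op_dev, inverse=True)
    queue.finish()  # or thr.synchronize(); block until the GPU has finished this iteration
    t_loop.append(time.time() - t0_loop)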
A few other things worth noting.
I got some strange address boundary errors when running your code. This seems to be caused by the fact that PyOpenCL's complex-value handling functions such as cdouble_mul are actually macros, and the errors appear when you give them bits of AST that access memory, e.g.
cdouble_mul(L_op[gid0 + num0 * gid1], C[gid0 + num0 * gid1])
The errors disappear if you preload the values before passing them to the macro:
cdouble_t L_op_val = L_op[gid0 + num0 * gid1];
cdouble_t C_val = C[gid0 + num0 * gid1];
cdouble_mul(L_op_val, C_val);
You don't need to create a 1-element array out of 1j*dt. You can pass it to the kernel as a scalar.
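A minimal, hypothetical sketch of what that could look like (it assumes the kernel parameter is declared as const cdouble_t idt, that the kernel body uses idt instead of idt[0], and that PyOpenCL will pack a numpy complex128 scalar into that argument):
# Hypothetical sketch: pass 1j*dt as a plain numpy scalar instead of a 1-element device array
# (assumes the kernel signature takes `const cdouble_t idt` by value).
idt_scalar = np.complex128(1j*dt)
gpe.nonlinear_operator(queue, op_dev.shape, None,
                       op_dev.data, Vmat_dev.data, idt_scalar, op_dev.data)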
Perhaps you are already aware of this, but just in case: you pass the array shape as the global size to your kernels, so inside them you are effectively addressing an NxM array as an MxN array. As long as the array is contiguous in memory and all your operations are elementwise it does not matter, but it is worth keeping in mind; see the sketch below.
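If you ever do want gid0/gid1 to follow numpy's (row, column) order, one hypothetical option is to pass the shape reversed; this is not needed for the elementwise kernels here.
# Sketch: with global size (Nx, Ny), gid0 runs over x and gid1 over y,
# so gid0 + get_global_size(0)*gid1 equals the row-major flat index y*Nx + x.
gpe.nonlinear_operator(queue, op_dev.shape[::-1], None,
                       op_dev.data, Vmat_dev.data, idt_dev.data, op_dev.data)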
If I write the nonlinear_operator kernel using Reikna as
import reikna.algorithms
import reikna.cluda.functions

gpe2 = reikna.algorithms.PureParallel([
    Parameter('U', Annotation(U, 'i')),
    Parameter('Vmat', Annotation(Vmat, 'i')),
    Parameter('idt', Annotation(idtmat.reshape(1)[0], 's')),
    Parameter('U_aft', Annotation(U, 'o'))],
    """
    ${U.ctype} U = ${U.load_same};
    ${Vmat.ctype} U_squared = ${norm}(U);
    ${Vmat.ctype} mag = U_squared + ${Vmat.load_same};
    ${U_aft.store_same}(
        ${mul_cc}(
            ${exp}(${mul_cr}(${idt}, -mag)),
            U));
    """,
    render_kwds=dict(
        norm=reikna.cluda.functions.norm(U.dtype),
        mul_cr=reikna.cluda.functions.mul(U.dtype, Vmat.dtype),
        mul_cc=reikna.cluda.functions.mul(U.dtype, U.dtype),
        exp=reikna.cluda.functions.exp(U.dtype)))
gpe2c = gpe2.compile(thr)
# in the loop (idtmat is a 0-d array, so index it the same way as in the Parameter above)
gpe2c(op_dev, Vmat_dev, idtmat.reshape(1)[0], op_dev)
it runs about twice as fast as your original kernel. Not that it matters much, of course, since most of the time is spent in the FFTs.
Again, you may already be aware of this, but Reikna's FFT uses Bluestein's algorithm for problem sizes that are not powers of 2, which essentially doubles the amount of work. Also, instead of doing two fftshifts on the GPU in every iteration, it would be faster to fftshift L_op once before the loop (you can also generate it with numpy.fft.fftfreq, which gives you the correct frequency order right away).
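A minimal sketch of that change, reusing the setup from the question (the two shiftc calls are then dropped from the loop):
# Sketch: build L_op directly in unshifted FFT frequency order via fftfreq,
# so no fftshift is needed on the GPU inside the loop.
kx_f = 2.0*np.pi*np.fft.fftfreq(Nx, d=dx)
ky_f = 2.0*np.pi*np.fft.fftfreq(Ny, d=dx)
kxx_f, kyy_f = np.meshgrid(kx_f, ky_f, sparse=True)
L_op = np.exp(-1j*dt*0.5*(kxx_f**2 + kyy_f**2)).astype(np.complex128)
L_op_dev = cl_array.to_device(queue, L_op)
# per iteration: nonlinear_operator -> fftc -> laplacian_operator -> fftc(..., inverse=True)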