How to make numba @jit use all cpu cores (parallelize numba @jit)
I am using numba's @jit decorator to add two numpy arrays in Python. The performance is much higher with @jit than with plain Python.
However, it is not utilizing all CPU cores even if I pass in @numba.jit(nopython = True, parallel = True, nogil = True).
Is there any way to make use of all CPU cores with numba @jit?
Here is my code:
import time
import numpy as np
import numba

SIZE = 2147483648 * 6

a = np.full(SIZE, 1, dtype = np.int32)
b = np.full(SIZE, 1, dtype = np.int32)
c = np.ndarray(SIZE, dtype = np.int32)

@numba.jit(nopython = True, parallel = True, nogil = True)
def add(a, b, c):
    for i in range(SIZE):
        c[i] = a[i] + b[i]

start = time.time()
add(a, b, c)
end = time.time()
print(end - start)
You can pass parallel=True to any numba jitted function, but that doesn't mean it's always utilizing all cores. You have to understand that numba uses some heuristics to make the code execute in parallel, and sometimes these heuristics simply don't find anything to parallelize in the code. There's currently a pull request so that it gives a warning if it wasn't possible to make it "parallel". So it's more like a "please make it execute in parallel if possible" parameter, not "enforce parallel execution".
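If you want to see what (if anything) numba managed to parallelize, sufficiently recent numba versions expose a parallel_diagnostics() method on the compiled function. For the question's plain range loop it will typically report that no parallel transformation took place, which is exactly the heuristics failing to find anything (a minimal sketch, assuming a numba version that has this method):

import numpy as np
import numba

@numba.jit(nopython=True, parallel=True, nogil=True)
def add(a, b, c):
    for i in range(len(a)):
        c[i] = a[i] + b[i]

a = np.ones(1000, dtype=np.int32)
b = np.ones(1000, dtype=np.int32)
c = np.empty(1000, dtype=np.int32)
add(a, b, c)  # trigger compilation first
# Prints a report of which loops/array expressions were parallelized or fused
add.parallel_diagnostics(level=1)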
However, if you really know you can parallelize your code, you can always use threads or processes manually. Just adapting the example of using multi-threading from the numba docs:
#!/usr/bin/env python
from __future__ import print_function, division, absolute_import

import math
import threading
from timeit import repeat

import numpy as np
from numba import jit

nthreads = 4
size = 10**7  # CHANGED

# CHANGED
def func_np(a, b):
    """
    Control function using Numpy.
    """
    return a + b

# CHANGED
@jit('void(double[:], double[:], double[:])', nopython=True, nogil=True)
def inner_func_nb(result, a, b):
    """
    Function under test.
    """
    for i in range(len(result)):
        result[i] = a[i] + b[i]

def timefunc(correct, s, func, *args, **kwargs):
    """
    Benchmark *func* and print out its runtime.
    """
    print(s.ljust(20), end=" ")
    # Make sure the function is compiled before we start the benchmark
    res = func(*args, **kwargs)
    if correct is not None:
        assert np.allclose(res, correct), (res, correct)
    # time it
    print('{:>5.0f} ms'.format(min(repeat(lambda: func(*args, **kwargs),
                                          number=5, repeat=2)) * 1000))
    return res

def make_singlethread(inner_func):
    """
    Run the given function inside a single thread.
    """
    def func(*args):
        length = len(args[0])
        result = np.empty(length, dtype=np.float64)
        inner_func(result, *args)
        return result
    return func

def make_multithread(inner_func, numthreads):
    """
    Run the given function inside *numthreads* threads, splitting its
    arguments into equal-sized chunks.
    """
    def func_mt(*args):
        length = len(args[0])
        result = np.empty(length, dtype=np.float64)
        args = (result,) + args
        chunklen = (length + numthreads - 1) // numthreads
        # Create argument tuples for each input chunk
        chunks = [[arg[i * chunklen:(i + 1) * chunklen] for arg in args]
                  for i in range(numthreads)]
        # Spawn one thread per chunk
        threads = [threading.Thread(target=inner_func, args=chunk)
                   for chunk in chunks]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        return result
    return func_mt

func_nb = make_singlethread(inner_func_nb)
func_nb_mt = make_multithread(inner_func_nb, nthreads)

a = np.random.rand(size)
b = np.random.rand(size)

correct = timefunc(None, "numpy (1 thread)", func_np, a, b)
timefunc(correct, "numba (1 thread)", func_nb, a, b)
timefunc(correct, "numba (%d threads)" % nthreads, func_nb_mt, a, b)
I highlighted the parts that I changed (the # CHANGED comments), everything else was copied verbatim from the example. This utilizes all cores on my machine (4 core machine, therefore 4 threads) but doesn't show a significant speedup:
numpy (1 thread) 539 ms
numba (1 thread) 536 ms
numba (4 threads) 442 ms
The lack of (much) speedup with multithreading in this case is that the addition is a bandwidth-limited operation. That means it takes much more time to load the elements from the arrays and place the result in the result array than to do the actual addition.
In these cases you could even see slowdowns because of parallel execution!
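A quick back-of-envelope calculation makes the bandwidth argument concrete (the 20 GB/s sustained-bandwidth figure below is an assumed, machine-dependent number, not a measurement):

size = 10**7
# Each iteration loads a[i] and b[i] and stores result[i]: three float64 accesses
bytes_moved = 3 * 8 * size
print(bytes_moved / 1e6, "MB per pass")            # 240.0 MB
# At an assumed ~20 GB/s of sustained memory bandwidth, the memory traffic
# alone costs ~12 ms per pass, no matter how many cores do the adding.
print(bytes_moved / 20e9 * 1000, "ms at 20 GB/s")  # 12.0 ms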
Only if the functions are more complex and the actual operation takes significant time compared to loading and storing array elements will you see a big improvement with parallel execution. The example in the numba documentation is one like that:
def func_np(a, b):
    """
    Control function using Numpy.
    """
    return np.exp(2.1 * a + 3.2 * b)

@jit('void(double[:], double[:], double[:])', nopython=True, nogil=True)
def inner_func_nb(result, a, b):
    """
    Function under test.
    """
    for i in range(len(result)):
        result[i] = math.exp(2.1 * a[i] + 3.2 * b[i])
This actually scales (almost) with the number of threads because two multiplications, one addition and one call to math.exp are much slower than loading and storing results:
func_nb = make_singlethread(inner_func_nb)
func_nb_mt2 = make_multithread(inner_func_nb, 2)
func_nb_mt3 = make_multithread(inner_func_nb, 3)
func_nb_mt4 = make_multithread(inner_func_nb, 4)
a = np.random.rand(size)
b = np.random.rand(size)
correct = timefunc(None, "numpy (1 thread)", func_np, a, b)
timefunc(correct, "numba (1 thread)", func_nb, a, b)
timefunc(correct, "numba (2 threads)", func_nb_mt2, a, b)
timefunc(correct, "numba (3 threads)", func_nb_mt3, a, b)
timefunc(correct, "numba (4 threads)", func_nb_mt4, a, b)
Result:
numpy (1 thread) 3422 ms
numba (1 thread) 2959 ms
numba (2 threads) 1555 ms
numba (3 threads) 1080 ms
numba (4 threads) 797 ms
For completeness: in 2018 (numba v0.39) you can just do

from numba import prange

and replace range with prange in your original function definition, that's it.
That immediately makes CPU usage 100% and, in my case, improved the runtime from 2.9 to 1.7 seconds (for SIZE = 2147483648 * 1, on a machine with 16 cores and 32 threads).
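Applied to the question's function, the change is literally one word (a minimal sketch; len(c) replaces the SIZE global so the snippet is self-contained):

import numba
from numba import prange

@numba.jit(nopython=True, parallel=True, nogil=True)
def add(a, b, c):
    # prange tells numba to split this loop across all available threads
    for i in prange(len(c)):
        c[i] = a[i] + b[i]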
More complex kernels can often be sped up even more by passing in fastmath=True.
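For example, the exp kernel from above could combine all three options (a sketch; note that fastmath=True relaxes strict IEEE 754 semantics, so results may differ in the last bits):

import math
import numba
from numba import prange

@numba.jit(nopython=True, parallel=True, nogil=True, fastmath=True)
def inner_func_nb(result, a, b):
    # fastmath lets LLVM reorder and vectorize the floating-point math
    for i in prange(len(result)):
        result[i] = math.exp(2.1 * a[i] + 3.2 * b[i])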