优化 python 中的嵌套迭代器

Optimizing nested iterators in python

比如说,fig1 和 fig2 是形状为 (n1, 2) 和 (n2, 2) 的 numpy 二维数组,它们基本上是坐标列表。我要做的是计算两个以点集表示的图形之间的平均 Hausdorff 距离。这是我想出的:

@jit(nopython=True, fastmath=True)
def dist(fig1, fig2):
    """Average Hausdorff distance between two 2-D point sets.

    Parameters
    ----------
    fig1, fig2 : float arrays of shape (n1, 2) and (n2, 2)
        Point coordinates of the two figures.

    Returns
    -------
    float
        max(mean nearest-neighbour distance fig1->fig2,
            mean nearest-neighbour distance fig2->fig1).
    """
    n1 = fig1.shape[0]
    n2 = fig2.shape[0]
    # Running per-point minima only. The full (n1, n2) distance matrix of the
    # original was written but never read back, so it is not materialised
    # here — saves O(n1 * n2) memory.
    min1 = np.full(n1, np.inf)
    min2 = np.full(n2, np.inf)

    for i in range(n1):
        for j in range(n2):
            dx = fig1[i, 0] - fig2[j, 0]
            dy = fig1[i, 1] - fig2[j, 1]
            d = np.sqrt(dx * dx + dy * dy)
            # Plain scalar branches compile to cheap compares under nopython
            # mode, instead of a np.minimum call per element.
            if d < min1[i]:
                min1[i] = d
            if d < min2[j]:
                min2[j] = d

    d1 = np.mean(min1)
    d2 = np.mean(min2)
    return np.maximum(d1, d2)

效果很好,但我想避免这些嵌套的for,希望通过向量化。有人可以建议任何可能的变体吗?

如果需要我可以提供一些数字生成器。

两种方法

  1. 使用 KDTree(scikit-learn)
  2. 在已发布方法上使用 Numba

代码

# Method 1--KDTree
import numpy as np
from sklearn.neighbors import KDTree

def dist_kdtree(fig1, fig2):
    """Average Hausdorff distance between two point sets using KDTree queries.

    Builds a KDTree per figure so each directed nearest-neighbour search
    runs in O(n log n) instead of the brute-force O(n1 * n2).
    """
    tree1 = KDTree(fig1)
    tree2 = KDTree(fig2)

    # Distance from every point of one figure to its nearest neighbour in
    # the other figure; the returned neighbour indices are not needed.
    d_1_to_2, _ = tree2.query(fig1, k=1)
    d_2_to_1, _ = tree1.query(fig2, k=1)

    # Average Hausdorff distance: the larger of the two directed means.
    return np.maximum(np.mean(d_1_to_2), np.mean(d_2_to_1))

# Method 2--Numba on posted approach
import numpy as np
from numba import jit

@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def dist_numba(fig1, fig2):
    """Average Hausdorff distance between two 2-D point sets (numba-compiled).

    Parameters
    ----------
    fig1, fig2 : float arrays of shape (n1, 2) and (n2, 2)
        Point coordinates of the two figures.

    Returns
    -------
    float
        max of the two directed mean nearest-neighbour distances.
    """
    n1 = fig1.shape[0]
    n2 = fig2.shape[0]
    # The original allocated a full (n1, n2) distance matrix that was never
    # read after the loop; only the per-point running minima are needed.
    min1 = np.full(n1, np.inf)
    min2 = np.full(n2, np.inf)

    for i in range(n1):
        for j in range(n2):
            dx = fig1[i, 0] - fig2[j, 0]
            dy = fig1[i, 1] - fig2[j, 1]
            d = np.sqrt(dx * dx + dy * dy)
            # Scalar comparisons instead of per-element np.minimum calls.
            if d < min1[i]:
                min1[i] = d
            if d < min2[j]:
                min2[j] = d

    d1 = np.mean(min1)
    d2 = np.mean(min2)
    return np.maximum(d1, d2)

# Baseline--Original version
import numpy as np

def dist(fig1, fig2):
    """Average Hausdorff distance between two 2-D point sets, vectorised.

    Replaces the original nested Python loops with numpy broadcasting,
    which is exactly the vectorisation the question asks for.

    Parameters
    ----------
    fig1, fig2 : float arrays of shape (n1, 2) and (n2, 2)
        Point coordinates; both sets are assumed non-empty.

    Returns
    -------
    float
        max(mean nearest-neighbour distance fig1->fig2,
            mean nearest-neighbour distance fig2->fig1).
    """
    # Pairwise differences via broadcasting: (n1, 1, 2) - (1, n2, 2) -> (n1, n2, 2).
    diff = fig1[:, None, :] - fig2[None, :, :]
    # (n1, n2) matrix of Euclidean distances.
    dist_matrix = np.sqrt((diff ** 2).sum(axis=2))

    # Directed mean nearest-neighbour distance in each direction.
    d1 = dist_matrix.min(axis=1).mean()
    d2 = dist_matrix.min(axis=0).mean()
    return np.maximum(d1, d2)

性能测试

# Create two random 2D arrays
np.random.seed(0)
fig1 = np.random.random((100, 2))
fig2 = np.random.random((500, 2))

# Time original
%timeit dist(fig1, fig2)
Out: 815 ms ± 49.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)

# Time KDTree
%timeit dist_kdtree(fig1, fig2)
Out: 1.66 ms ± 88.5 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)

# Time Numba
%timeit dist_numba(fig1, fig2)
Out: 770 µs ± 11.2 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)

总结

  • KDTree 提供了比原来快 490 倍的速度(1.6 毫秒对 815 毫秒)
  • Numba 提供了比原来快约 1000 倍的速度(770 微秒对 815 毫秒)