从 label_image 绘制轮廓
Drawing contours from label_image
我有一个 label_image 数组,我正在从中导出对象的轮廓(outlines/boundaries)。目前,我通过获取所有唯一标签、遍历它们、然后找到每个对象的轮廓来做到这一点。如下面的循环所示,我用标签作为键、轮廓作为值来填充一个 dict。
import cv2
import pandas as pd
import numpy as np
def extract_borders(label_image):
    """Return a DataFrame mapping each non-zero label to its outer contour.

    For every unique positive label in *label_image* a binary mask is built
    and ``cv2.findContours`` traces its external boundary.  The result has
    one row per label with columns ``['label', 'coords']``, where ``coords``
    is the contour as a plain nested list of [x, y] points.
    """
    borders = {}
    for lbl in np.unique(label_image[label_image > 0]):
        # Binary mask of this object, scaled to 0/255 uint8 for OpenCV.
        mask = ((label_image == lbl) * 255).astype('uint8')
        found, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
        # Drop the singleton dimensions OpenCV adds around the point list.
        borders[lbl] = np.squeeze(found).tolist()
    frame = pd.DataFrame([borders]).T.reset_index()
    frame.columns = ['label', 'coords']
    return frame
if __name__ == "__main__":
    # Demo: four labelled objects (1-4) embedded in a zero background.
    label_img = np.array([
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    ])
    print(extract_borders(label_img))
当 labels
为数千时,这可能是一个真正的瓶颈。请问有更有效的方法吗?也许有一个我不知道的功能......我希望能够将标签分配给相应的轮廓。
上面的代码打印:
label coords
0 1 [[5, 6], [5, 9], [9, 9], [9, 6]]
1 2 [[3, 3], [3, 12], [11, 12], [11, 10], [5, 10],...
2 3 [[12, 5], [11, 6], [10, 6], [10, 9], [11, 9], ...
3 4 [[12, 3], [12, 4], [14, 4], [15, 5], [15, 10],...
我尝试了如下多处理并使用 6 个 CPU 获得了 2.5 倍的加速:
#!/usr/bin/env python3
from multiprocessing import Pool, cpu_count, freeze_support
from functools import partial
import pandas as pd
import numpy as np
import cv2
import os
def worker(labels, label_image):
    """One worker started per CPU.

    Receives the full label image once plus the subset of labels this
    process should trace, and returns a dict mapping each label to its
    external contour (as a nested list of [x, y] points).
    """
    print(f'Worker pid: {os.getpid()}, processing {len(labels)} labels')
    traced = {}
    for lbl in labels:
        # 0/255 uint8 mask of this object, as cv2.findContours expects.
        mask = ((label_image == lbl) * 255).astype('uint8')
        found, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
        traced[lbl] = np.squeeze(found).tolist()
    return traced
if __name__ == '__main__':
    freeze_support()
    # Synthesize a label image: 2500 labels laid out as a 50x50 grid, then
    # upscaled with nearest-neighbour interpolation to a realistic size.
    label_image = cv2.resize(
        np.arange(2500, dtype=np.uint16).reshape((50, 50)),
        (4000, 4000), interpolation=cv2.INTER_NEAREST)

    processes = cpu_count()
    # processes = 1  # uncomment to force a single-CPU run
    print(f'Using {processes} processes')

    labels = np.unique(label_image[label_image > 0])
    print(f'Unique labels detected: {labels}')

    # Split the labels into one chunk per worker process and fan them out;
    # each worker gets the whole image plus its share of the labels.
    chunks = np.array_split(labels, processes)
    with Pool(processes=processes) as pool:
        result = pool.map(partial(worker, label_image=label_image), chunks)
    # print(result)
12 核的示例输出
Using 12 processes
Unique labels detected: [ 1 2 3 ... 2497 2498 2499]
Worker pid: 76884, processing 209 labels
Worker pid: 76886, processing 209 labels
Worker pid: 76885, processing 209 labels
Worker pid: 76888, processing 208 labels
Worker pid: 76887, processing 208 labels
Worker pid: 76889, processing 208 labels
Worker pid: 76890, processing 208 labels
Worker pid: 76893, processing 208 labels
Worker pid: 76891, processing 208 labels
Worker pid: 76892, processing 208 labels
Worker pid: 76895, processing 208 labels
Worker pid: 76894, processing 208 labels
关键字:Python,图像处理,多处理,并行,块。
DIPlib库有一个函数可以为图像中的每个对象提取链码。然而,它确实要求每个对象都是连接的(具有相同标签的像素必须形成一个连接的组件)。使用 Mark 的大型示例图像,计算时间从 154.8 秒减少到 0.781 秒,快了 200 倍。我认为大部分时间都致力于将链码转换为多边形,转换为 numpy 数组,转换为列表,最后转换为 pandas table。大量转化...
需要注意的一件事:dip.GetImageChainCodes
返回的链代码如您所料:它们跟踪每个对象的外部像素。然而,converting these to a polygon does something different: the polygon doesn't link the outer pixels, but goes around them, following the "crack" between the pixels. And it cuts pixel corners doing so. This leads to a polygon that much better describes the actual object, its area is exactly half a pixel smaller than the number of pixels in the object, and its length is much closer to the perimeter of the underlying object (before discretizing it into a set of pixels). This idea comes from Steve Eddins at the MathWorks.
import pandas as pd
import numpy as np
import diplib as dip
import cv2
import time
def extract_borders(label_image):
    """Return a DataFrame mapping each non-zero label to its outer contour.

    Builds a 0/255 uint8 mask per unique positive label and traces its
    external boundary with ``cv2.findContours``.  Output columns are
    ``['label', 'coords']`` with ``coords`` a nested list of [x, y] points.
    """
    per_label = {}
    for lbl in np.unique(label_image[label_image > 0]):
        mask = ((label_image == lbl) * 255).astype('uint8')
        found, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
        # Squeeze away OpenCV's singleton dimensions before list conversion.
        per_label[lbl] = np.squeeze(found).tolist()
    out = pd.DataFrame([per_label]).T.reset_index()
    out.columns = ['label', 'coords']
    return out
def extract_borders_dip(label_image):
    """Extract per-object outlines using DIPlib chain codes.

    ``dip.GetImageChainCodes`` requires an unsigned-integer label image in
    which every label forms a single connected component.  Each chain code
    is converted to a polygon (which follows the "crack" between pixels)
    and returned as a DataFrame with columns ``['label', 'coords']``.

    Bug fixed: the original read the module-level variable ``label_img``
    instead of the ``label_image`` parameter, so the function silently
    ignored its argument and only worked inside this script's __main__.
    """
    cc = dip.GetImageChainCodes(label_image)  # input must be an unsigned integer type
    d = {}
    for c in cc:
        d[c.objectID] = np.array(c.Polygon()).tolist()
    df = pd.DataFrame([d]).T
    df = df.reset_index()
    df.columns = ['label', 'coords']
    return df
if __name__ == "__main__":
    # Benchmark fixture: 2500 labels on a 50x50 grid, upscaled to 4000x4000
    # with nearest-neighbour so labels stay exact.
    label_img = cv2.resize(
        np.arange(2500, dtype=np.uint16).reshape((50, 50)),
        (4000, 4000), interpolation=cv2.INTER_NEAREST)

    # Time the original per-label OpenCV loop.
    start = time.process_time()
    res = extract_borders(label_img)
    print('OP code:', time.process_time() - start)
    print(res)

    # Time the DIPlib chain-code version for comparison.
    start = time.process_time()
    res = extract_borders_dip(label_img)
    print('DIPlib code: ', time.process_time() - start)
    print(res)
我有一个 label_image 数组,我正在从中导出对象的轮廓(outlines/boundaries)。目前,我通过获取所有唯一标签、遍历它们、然后找到每个对象的轮廓来做到这一点。如下面的循环所示,我用标签作为键、轮廓作为值来填充一个 dict。
import cv2
import pandas as pd
import numpy as np
def extract_borders(label_image):
    """Return a DataFrame mapping each non-zero label to its outer contour.

    For every unique positive label in *label_image* a binary mask is built
    and ``cv2.findContours`` traces its external boundary.  The result has
    one row per label with columns ``['label', 'coords']``, where ``coords``
    is the contour as a plain nested list of [x, y] points.
    """
    borders = {}
    for lbl in np.unique(label_image[label_image > 0]):
        # Binary mask of this object, scaled to 0/255 uint8 for OpenCV.
        mask = ((label_image == lbl) * 255).astype('uint8')
        found, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
        # Drop the singleton dimensions OpenCV adds around the point list.
        borders[lbl] = np.squeeze(found).tolist()
    frame = pd.DataFrame([borders]).T.reset_index()
    frame.columns = ['label', 'coords']
    return frame
if __name__ == "__main__":
    # Demo: four labelled objects (1-4) embedded in a zero background.
    label_img = np.array([
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    ])
    print(extract_borders(label_img))
当 labels
为数千时,这可能是一个真正的瓶颈。请问有更有效的方法吗?也许有一个我不知道的功能......我希望能够将标签分配给相应的轮廓。
上面的代码打印:
label coords
0 1 [[5, 6], [5, 9], [9, 9], [9, 6]]
1 2 [[3, 3], [3, 12], [11, 12], [11, 10], [5, 10],...
2 3 [[12, 5], [11, 6], [10, 6], [10, 9], [11, 9], ...
3 4 [[12, 3], [12, 4], [14, 4], [15, 5], [15, 10],...
我尝试了如下多处理并使用 6 个 CPU 获得了 2.5 倍的加速:
#!/usr/bin/env python3
from multiprocessing import Pool, cpu_count, freeze_support
from functools import partial
import pandas as pd
import numpy as np
import cv2
import os
def worker(labels, label_image):
    """One worker started per CPU.

    Receives the full label image once plus the subset of labels this
    process should trace, and returns a dict mapping each label to its
    external contour (as a nested list of [x, y] points).
    """
    print(f'Worker pid: {os.getpid()}, processing {len(labels)} labels')
    traced = {}
    for lbl in labels:
        # 0/255 uint8 mask of this object, as cv2.findContours expects.
        mask = ((label_image == lbl) * 255).astype('uint8')
        found, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
        traced[lbl] = np.squeeze(found).tolist()
    return traced
if __name__ == '__main__':
    freeze_support()
    # Synthesize a label image: 2500 labels laid out as a 50x50 grid, then
    # upscaled with nearest-neighbour interpolation to a realistic size.
    label_image = cv2.resize(
        np.arange(2500, dtype=np.uint16).reshape((50, 50)),
        (4000, 4000), interpolation=cv2.INTER_NEAREST)

    processes = cpu_count()
    # processes = 1  # uncomment to force a single-CPU run
    print(f'Using {processes} processes')

    labels = np.unique(label_image[label_image > 0])
    print(f'Unique labels detected: {labels}')

    # Split the labels into one chunk per worker process and fan them out;
    # each worker gets the whole image plus its share of the labels.
    chunks = np.array_split(labels, processes)
    with Pool(processes=processes) as pool:
        result = pool.map(partial(worker, label_image=label_image), chunks)
    # print(result)
12 核的示例输出
Using 12 processes
Unique labels detected: [ 1 2 3 ... 2497 2498 2499]
Worker pid: 76884, processing 209 labels
Worker pid: 76886, processing 209 labels
Worker pid: 76885, processing 209 labels
Worker pid: 76888, processing 208 labels
Worker pid: 76887, processing 208 labels
Worker pid: 76889, processing 208 labels
Worker pid: 76890, processing 208 labels
Worker pid: 76893, processing 208 labels
Worker pid: 76891, processing 208 labels
Worker pid: 76892, processing 208 labels
Worker pid: 76895, processing 208 labels
Worker pid: 76894, processing 208 labels
关键字:Python,图像处理,多处理,并行,块。
DIPlib库有一个函数可以为图像中的每个对象提取链码。然而,它确实要求每个对象都是连接的(具有相同标签的像素必须形成一个连接的组件)。使用 Mark 的大型示例图像,计算时间从 154.8 秒减少到 0.781 秒,快了 200 倍。我认为大部分时间都致力于将链码转换为多边形,转换为 numpy 数组,转换为列表,最后转换为 pandas table。大量转化...
需要注意的一件事:dip.GetImageChainCodes
返回的链代码如您所料:它们跟踪每个对象的外部像素。然而,converting these to a polygon does something different: the polygon doesn't link the outer pixels, but goes around them, following the "crack" between the pixels. And it cuts pixel corners doing so. This leads to a polygon that much better describes the actual object, its area is exactly half a pixel smaller than the number of pixels in the object, and its length is much closer to the perimeter of the underlying object (before discretizing it into a set of pixels). This idea comes from Steve Eddins at the MathWorks.
import pandas as pd
import numpy as np
import diplib as dip
import cv2
import time
def extract_borders(label_image):
    """Return a DataFrame mapping each non-zero label to its outer contour.

    Builds a 0/255 uint8 mask per unique positive label and traces its
    external boundary with ``cv2.findContours``.  Output columns are
    ``['label', 'coords']`` with ``coords`` a nested list of [x, y] points.
    """
    per_label = {}
    for lbl in np.unique(label_image[label_image > 0]):
        mask = ((label_image == lbl) * 255).astype('uint8')
        found, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
        # Squeeze away OpenCV's singleton dimensions before list conversion.
        per_label[lbl] = np.squeeze(found).tolist()
    out = pd.DataFrame([per_label]).T.reset_index()
    out.columns = ['label', 'coords']
    return out
def extract_borders_dip(label_image):
    """Extract per-object outlines using DIPlib chain codes.

    ``dip.GetImageChainCodes`` requires an unsigned-integer label image in
    which every label forms a single connected component.  Each chain code
    is converted to a polygon (which follows the "crack" between pixels)
    and returned as a DataFrame with columns ``['label', 'coords']``.

    Bug fixed: the original read the module-level variable ``label_img``
    instead of the ``label_image`` parameter, so the function silently
    ignored its argument and only worked inside this script's __main__.
    """
    cc = dip.GetImageChainCodes(label_image)  # input must be an unsigned integer type
    d = {}
    for c in cc:
        d[c.objectID] = np.array(c.Polygon()).tolist()
    df = pd.DataFrame([d]).T
    df = df.reset_index()
    df.columns = ['label', 'coords']
    return df
if __name__ == "__main__":
    # Benchmark fixture: 2500 labels on a 50x50 grid, upscaled to 4000x4000
    # with nearest-neighbour so labels stay exact.
    label_img = cv2.resize(
        np.arange(2500, dtype=np.uint16).reshape((50, 50)),
        (4000, 4000), interpolation=cv2.INTER_NEAREST)

    # Time the original per-label OpenCV loop.
    start = time.process_time()
    res = extract_borders(label_img)
    print('OP code:', time.process_time() - start)
    print(res)

    # Time the DIPlib chain-code version for comparison.
    start = time.process_time()
    res = extract_borders_dip(label_img)
    print('DIPlib code: ', time.process_time() - start)
    print(res)