PyTorch's grid_sample conversion to CoreML (via coremltools)

torch.nn.functional.grid_sample (source here, click on docs for documentation) is currently an unsupported operation in CoreML (and its conversion utilities library, coremltools).

What I'm looking for is a way to export the layer shown below from PyTorch's TorchScript (docs here) to CoreML, either via a custom op or via an efficient pure-PyTorch rewrite of grid_sample.

For details and hints to get started, see the Hints section.

Minimal verifiable example

import coremltools as ct
import torch


class GridSample(torch.nn.Module):
    def forward(self, inputs, grid):
        # Rest could be the default behaviour, e.g. bilinear
        return torch.nn.functional.grid_sample(inputs, grid, align_corners=True)


# Image could also have more in_channels, different dimension etc.,
# for example (2, 32, 64, 64)
image = torch.randn(2, 3, 32, 32)  # (batch, in_channels, height, width)
grid = torch.randint(low=-1, high=2, size=(2, 64, 64, 2)).float()

layer = GridSample()
# You could use `torch.jit.script` if preferable
scripted = torch.jit.trace(layer, (image, grid))

# Sanity check
print(scripted(image, grid).shape)


# Error during conversion
coreml_layer = ct.converters.convert(
    scripted,
    source="pytorch",
    inputs=[
        ct.TensorType(name="image", shape=image.shape),
        ct.TensorType(name="grid", shape=grid.shape),
    ],
)

This raises the following error:

Traceback (most recent call last):
  File "/home/REDACTED/Downloads/sample.py", line 23, in <module>
    coreml_layer = ct.converters.convert(
  File "/home/REDACTED/.conda/envs/REDACTED/lib/python3.9/site-packages/coremltools/converters/_converters_entry.py", line 175, in convert
    mlmodel = mil_convert(
  File "/home/REDACTED/.conda/envs/REDACTED/lib/python3.9/site-packages/coremltools/converters/mil/converter.py", line 128, in mil_convert
    proto = mil_convert_to_proto(, convert_from, convert_to,
  File "/home/REDACTED/.conda/envs/REDACTED/lib/python3.9/site-packages/coremltools/converters/mil/converter.py", line 171, in mil_convert_to_proto
    prog = frontend_converter(, **kwargs)
  File "/home/REDACTED/.conda/envs/REDACTED/lib/python3.9/site-packages/coremltools/converters/mil/converter.py", line 85, in __call__
    return load(*args, **kwargs)
  File "/home/REDACTED/.conda/envs/REDACTED/lib/python3.9/site-packages/coremltools/converters/mil/frontend/torch/load.py", line 81, in load
    raise e
  File "/home/REDACTED/.conda/envs/REDACTED/lib/python3.9/site-packages/coremltools/converters/mil/frontend/torch/load.py", line 73, in load
    prog = converter.convert()
  File "/home/REDACTED/.conda/envs/REDACTED/lib/python3.9/site-packages/coremltools/converters/mil/frontend/torch/converter.py", line 227, in convert
    convert_nodes(self.context, self.graph)
  File "/home/REDACTED/.conda/envs/REDACTED/lib/python3.9/site-packages/coremltools/converters/mil/frontend/torch/ops.py", line 54, in convert_nodes
    raise RuntimeError(
RuntimeError: PyTorch convert function for op 'grid_sampler' not implemented.

Dependencies

Python (conda):

You could also use the nightly/master builds (at least as of the time of writing: 2021-03-20).

Hints

As I see it, there are currently two possible solutions:

PyTorch only

Rewrite torch.nn.functional.grid_sample from scratch (a sketch of one possible starting point follows after the Pros/Cons below).

Pros:

Cons:
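
Something along the following lines might be a starting point. It's a minimal, untested sketch assuming the defaults from the example above (mode="bilinear", padding_mode="zeros") plus align_corners=True, and it deliberately sticks to gather/clamp/elementwise ops that coremltools already knows how to convert; the function name is mine:

import torch


def grid_sample_bilinear(image, grid):
    # Illustrative rewrite of F.grid_sample for mode="bilinear",
    # padding_mode="zeros", align_corners=True.
    N, C, H, W = image.shape
    _, H_out, W_out, _ = grid.shape

    # Un-normalize coordinates from [-1, 1] to pixel space (align_corners=True).
    x = (grid[..., 0] + 1) * (W - 1) / 2
    y = (grid[..., 1] + 1) * (H - 1) / 2

    # The four corner pixels surrounding each sampling point.
    x0, y0 = torch.floor(x), torch.floor(y)
    x1, y1 = x0 + 1, y0 + 1

    # Bilinear interpolation weights for those corners.
    wa = (x1 - x) * (y1 - y)
    wb = (x1 - x) * (y - y0)
    wc = (x - x0) * (y1 - y)
    wd = (x - x0) * (y - y0)

    # padding_mode="zeros": out-of-bounds corners contribute nothing.
    def mask(xi, yi):
        return ((xi >= 0) & (xi <= W - 1) & (yi >= 0) & (yi <= H - 1)).to(image.dtype)

    flat = image.reshape(N, C, H * W)

    def gather(xi, yi):
        # Clamp so indices stay valid; the mask above zeroes the clamped ones.
        idx = (yi.clamp(0, H - 1) * W + xi.clamp(0, W - 1)).long()
        idx = idx.reshape(N, 1, H_out * W_out).expand(-1, C, -1)
        return torch.gather(flat, 2, idx).reshape(N, C, H_out, W_out)

    return (gather(x0, y0) * (wa * mask(x0, y0)).unsqueeze(1)
            + gather(x0, y1) * (wb * mask(x0, y1)).unsqueeze(1)
            + gather(x1, y0) * (wc * mask(x1, y0)).unsqueeze(1)
            + gather(x1, y1) * (wd * mask(x1, y1)).unsqueeze(1))

Speed and exact numerical agreement with the native op would of course have to be verified before relying on something like this.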

Swift & CoreML

Register a custom layer responsible for running grid_sample. A CPU-only implementation would be fine (although GPU acceleration using Apple's Metal would be great).

As I'm not fond of Swift, I've gathered a few resources which might help you:

Pros:

Cons:

Well, it's not an exact answer, rather some research. grid_sample is essentially a sparse matrix operation, and the idea is to try to make it dense. The code below demonstrates how this can be done. It may be slow, and it requires the grid to be static so that grid_sample can be eliminated from the model being converted, but it kind of works.

The goal is to express the transform in linear form. Here, to obtain the dense matrix, we feed an identity (diagonal) matrix into grid_sample; the result is the matrix holding the transform we are looking for. To apply that transform, multiply the flattened image by this matrix. As you can see, batch=1 here; the conversion has to be done independently for each grid.

Your code:

import torch

in_sz  = 2;    out_sz = 4;    batch  = 1;    ch     = 3

class GridSample(torch.nn.Module):
    def forward(self, inputs, grid):
        # Rest could be the default behaviour, e.g. bilinear
        return torch.nn.functional.grid_sample(inputs, grid, align_corners=True)

image = torch.randn( batch, ch, in_sz, in_sz)  # (batch, in_channels, height, width)
grid = torch.randint(low=-1, high=2, size=( batch, out_sz, out_sz, 2)).float()

layer = GridSample()
scripted = torch.jit.trace(layer, (image, grid))
print(scripted(image, grid))

Output:

tensor([[[[-0.8226, -0.4457, -0.3382, -0.0795],
          [-0.4457, -0.0052, -0.8226, -0.6341],
          [-0.4457, -0.8226, -0.4457, -0.6341],
          [-0.4510, -0.3382, -0.4457, -0.0424]],

         [[-1.0090, -1.6029, -1.3813, -0.1212],
          [-1.6029, -2.7920, -1.0090, -1.3060],
          [-1.6029, -1.0090, -1.6029, -1.3060],
          [-0.5651, -1.3813, -1.6029, -1.4566]],

         [[ 0.1482,  0.7313,  0.8916,  1.8723],
          [ 0.7313,  0.8144,  0.1482,  0.4398],
          [ 0.7313,  0.1482,  0.7313,  0.4398],
          [ 1.0103,  0.8916,  0.7313,  1.3434]]]])

Conversion:

# Push an identity matrix through grid_sample to extract the dense transform
# matrix `denser`, then apply it to the flattened image via `linear`.
oness  = torch.ones( in_sz*in_sz )
diagg  = torch.diag( oness ).reshape( 1, in_sz*in_sz, in_sz, in_sz )
denser = torch.nn.functional.grid_sample( diagg, grid, align_corners=True).reshape( in_sz*in_sz, out_sz*out_sz ).transpose(0,1)
print (denser.shape)
print (image.shape)
image_flat = image.reshape( batch, ch, in_sz*in_sz )
print (image_flat.shape)
print( torch.nn.functional.linear( image_flat, denser ).reshape( batch, ch, out_sz, out_sz ) )

Output:

torch.Size([16, 4])
torch.Size([1, 3, 2, 2])
torch.Size([1, 3, 4])
tensor([[[[-0.8226, -0.4457, -0.3382, -0.0795],
          [-0.4457, -0.0052, -0.8226, -0.6341],
          [-0.4457, -0.8226, -0.4457, -0.6341],
          [-0.4510, -0.3382, -0.4457, -0.0424]],

         [[-1.0090, -1.6029, -1.3813, -0.1212],
          [-1.6029, -2.7920, -1.0090, -1.3060],
          [-1.6029, -1.0090, -1.6029, -1.3060],
          [-0.5651, -1.3813, -1.6029, -1.4566]],

         [[ 0.1482,  0.7313,  0.8916,  1.8723],
          [ 0.7313,  0.8144,  0.1482,  0.4398],
          [ 0.7313,  0.1482,  0.7313,  0.4398],
          [ 1.0103,  0.8916,  0.7313,  1.3434]]]])
         

Well, it's probably not very efficient, but I hope it's at least amusing.
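
To make the trick a bit more concrete, here is a sketch (the module name and structure are mine, and I haven't pushed it through coremltools) of how the dense matrix could be precomputed once for a fixed grid and baked into a module that only uses reshape + linear, so no grid_sampler op ends up in the trace:

class DenseGridSample(torch.nn.Module):
    # Bakes a fixed grid into a dense matrix; forward() uses only
    # reshape + linear, so the traced graph contains no grid_sampler op.
    def __init__(self, grid, in_sz, out_sz):
        super().__init__()
        # Identity "image" -> grid_sample -> dense transform matrix (done once, offline).
        eye = torch.eye(in_sz * in_sz).reshape(1, in_sz * in_sz, in_sz, in_sz)
        dense = torch.nn.functional.grid_sample(eye, grid, align_corners=True)
        dense = dense.reshape(in_sz * in_sz, out_sz * out_sz).transpose(0, 1)
        self.register_buffer("dense", dense)
        self.in_sz, self.out_sz = in_sz, out_sz

    def forward(self, image):
        batch, ch = image.shape[:2]
        flat = image.reshape(batch, ch, self.in_sz * self.in_sz)
        out = torch.nn.functional.linear(flat, self.dense)
        return out.reshape(batch, ch, self.out_sz, self.out_sz)


dense_layer = DenseGridSample(grid, in_sz, out_sz)
traced_dense = torch.jit.trace(dense_layer, image)
# Should print True: same result as the grid_sample output above.
print(torch.allclose(traced_dense(image), scripted(image, grid)))

Again, this only helps when the grid is known and fixed at conversion time.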

Apparently, some kind soul saw our struggle and provided a custom op using MIL (CoreML's intermediate representation language).

Blog post where I found the solution and gist with grid sample

I'm not sure why the OP didn't post it here, but please do respond to the comments if you want to grab some SO points for your solution!

Below is the full op conversion code:

from coremltools.converters.mil import register_torch_op, register_op
from coremltools.converters.mil import Builder as mb
from coremltools.converters.mil.frontend.torch.ops import _get_inputs
from coremltools.converters.mil.mil.ops.defs._op_reqs import *

# Custom operator for `torch.nn.functional.grid_sample`
@register_op(doc_str="Custom Grid Sampler", is_custom_op=True)
class custom_grid_sample(Operation):
    input_spec = InputSpec(
        x = TensorInputType(),
        grid = TensorInputType(),
        mode = StringInputType(const=True, optional=True),
        padding_mode = StringInputType(const=True, optional=True),
        align_corners = BoolInputType(const=True, optional=True)
    )

    bindings = {
        "class_name": "CustomGridSampler",
        "input_order": ["x", "grid"],
        "parameters": ["mode", "padding_mode", "align_corners"],
        "description": "Custom Grid Sampler"
    }

    def __init__(self, **kwargs):
        super(custom_grid_sample, self).__init__(**kwargs)

    def type_inference(self):
        x_type = self.x.dtype
        x_shape = self.x.shape

        grid_type = self.grid.dtype
        grid_shape = self.grid.shape

        assert len(x_shape) == len(grid_shape) == 4
        assert grid_shape[-1] == 2

        shape = list(x_shape)
        shape[-2] = grid_shape[1]
        shape[-1] = grid_shape[2]
        return types.tensor(x_type, tuple(shape))


@register_torch_op
def grid_sampler(context, node):
    inputs = _get_inputs(context, node)
    x = inputs[0]
    grid = inputs[1]
    mode = node.attr.get("mode", "bilinear")
    padding_mode = node.attr.get("padding_mode", "zeros")
    align_corners = node.attr.get("align_corners", False)
    x = mb.custom_grid_sample(
        x=x,
        grid=grid,
        mode=mode,
        padding_mode=padding_mode,
        align_corners=align_corners,
        name=node.name
    )
    context.add(x)
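
With both registrations executed before the conversion call, the minimal example from the question should now go through; the resulting model contains a custom layer bound to the CustomGridSampler class name, which still has to be implemented on the Swift side. Roughly:

# Same conversion call as in the question; the registered `grid_sampler`
# handler now maps the op to `custom_grid_sample` instead of raising
# "PyTorch convert function for op 'grid_sampler' not implemented".
coreml_model = ct.converters.convert(
    scripted,
    source="pytorch",
    inputs=[
        ct.TensorType(name="image", shape=image.shape),
        ct.TensorType(name="grid", shape=grid.shape),
    ],
)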