How do I pass a keyword argument to the forward used by a pre-forward hook?
Given a torch nn.Module with a pre-forward hook, e.g.
import torch
import torch.nn as nn

class NeoEmbeddings(nn.Embedding):
    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx=-1):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.register_forward_pre_hook(self.neo_genesis)

    @staticmethod
    def neo_genesis(self, input, higgs_bosson=0):
        if higgs_bosson:
            input = input + higgs_bosson
        return input
the input tensor can go through some manipulation before reaching the actual forward() function, e.g.
>>> x = NeoEmbeddings(10, 5, 1)
>>> x.forward(torch.tensor([0,2,5,8]))
tensor([[-1.6449,  0.5832, -0.0165, -1.3329,  0.6878],
        [-0.3262,  0.5844,  0.6917,  0.1268,  2.1363],
        [ 1.0772,  0.1748, -0.7131,  0.7405,  1.5733],
        [ 0.7651,  0.4619,  0.4388, -0.2752, -0.3018]],
       grad_fn=<EmbeddingBackward>)
>>> print(x._forward_pre_hooks)
OrderedDict([(25, <function NeoEmbeddings.neo_genesis at 0x1208d10d0>)])
How do we pass the arguments (*args or **kwargs) that the pre-forward hook needs, but that are not accepted by the default forward() function?

Without modifying/overriding the forward() function, this is not possible:
>>> x = NeoEmbeddings(10, 5, 1)
>>> x.forward(torch.tensor([0,2,5,8]), higgs_bosson=2)
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-102-8705a40a3cc2> in <module>
      1 x = NeoEmbeddings(10, 5, 1)
----> 2 x.forward(torch.tensor([0,2,5,8]), higgs_bosson=2)

TypeError: forward() got an unexpected keyword argument 'higgs_bosson'
Since the forward pre-hook is, by definition, called with the tensor only, a keyword argument doesn't make much sense here. What would make more sense is to use an instance attribute, for example:
def neo_genesis(self, input):
    if self.higgs_bosson:
        input = input + self.higgs_bosson
    return input
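Putting that together, a minimal runnable sketch of the attribute-based approach could look as follows. Note that a pre-hook actually receives the positional arguments as a tuple, so the sketch unpacks it; the attribute name higgs_bosson simply mirrors the question:

import torch
import torch.nn as nn

class NeoEmbeddings(nn.Embedding):
    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx=-1):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.higgs_bosson = 0  # toggled from outside instead of a keyword argument
        self.register_forward_pre_hook(self.neo_genesis)

    @staticmethod
    def neo_genesis(module, inputs):
        (tensor,) = inputs  # pre-hooks get the positional arguments as a tuple
        if module.higgs_bosson:
            tensor = tensor + module.higgs_bosson
        return tensor  # a single return value is wrapped back into a tuple

x = NeoEmbeddings(10, 5, 1)
x.higgs_bosson = 1  # shift the indices by 1 for this call
print(x(torch.tensor([0, 2, 5, 8])))
x.higgs_bosson = 0  # back to plain lookups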
Then you can toggle that attribute as needed. You could also use a context manager for that:
from contextlib import contextmanager

@contextmanager
def HiggsBosson(module):
    module.higgs_bosson = 1
    yield
    module.higgs_bosson = 0

with HiggsBosson(x):
    x(...)  # call the module itself, not .forward(), so the hook actually runs
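One caveat: the context manager above will not reset the attribute if the forward pass raises. A slightly more defensive variant (a sketch, using the same assumed attribute) wraps the yield in try/finally:

from contextlib import contextmanager

@contextmanager
def HiggsBosson(module, value=1):
    module.higgs_bosson = value
    try:
        yield module
    finally:
        module.higgs_bosson = 0  # reset even if the body raises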
If you already have that function and you really need to change that parameter, you can still replace the function's __defaults__ attribute:
x.neo_genesis.__defaults__ = (1,)  # this corresponds to the `higgs_bosson` parameter
x(...)                             # again, go through __call__ so the hook fires
x.neo_genesis.__defaults__ = (0,)  # reset to default
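__defaults__ is a plain tuple holding the defaults of the rightmost parameters in order, so this trick only works as long as higgs_bosson keeps a default value; you can inspect it directly, e.g.:

print(x.neo_genesis.__defaults__)  # (0,) -- the current default for higgs_bosson
x.neo_genesis.__defaults__ = (1,)
print(x.neo_genesis.__defaults__)  # (1,)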
Torchscript incompatible (as of 1.2.0)
First of all, your example torch.nn.Module has some minor mistakes (probably by accident).

Secondly, you can pass anything to forward, and register_forward_pre_hook will just get the arguments that are passed to your torch.nn.Module (be it a layer, a model, or anything else). You indeed cannot do it without modifying the forward call, but why would you want to avoid that? You could simply forward the arguments to the base function, as can be seen below:
import torch

class NeoEmbeddings(torch.nn.Embedding):
    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx=-1):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.register_forward_pre_hook(NeoEmbeddings.neo_genesis)

    # First argument should be named something like module, as that's what
    # you are registering this hook to
    @staticmethod
    def neo_genesis(module, inputs):  # No need for self as first argument
        net_input, higgs_bosson = inputs  # Simply unpack the tuple here
        # A tuple return value replaces the positional arguments handed to
        # forward(), so both have to be passed along
        return net_input, higgs_bosson

    def forward(self, inputs, higgs_bosson):
        # Do whatever you want here with both arguments; you can ignore
        # higgs_bosson if it's only needed in the hook, as done here
        return super().forward(inputs)

if __name__ == "__main__":
    x = NeoEmbeddings(10, 5, 1)
    # Call the instance instead of forward() so the hooks actually fire
    print(x(torch.tensor([0, 2, 5, 8]), 1))
There is no way to do it more succinctly, but the limitation is the base class's forward method, not the hook itself (and I wouldn't want it to be more succinct, as it would become unreadable IMO).
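If the hook itself should consume higgs_bosson, which was the original goal, it can transform the unpacked input and hand the (possibly modified) arguments back; a sketch of such a hook body under the same class layout:

@staticmethod
def neo_genesis(module, inputs):
    net_input, higgs_bosson = inputs
    if higgs_bosson:
        net_input = net_input + higgs_bosson
    # The returned tuple replaces the arguments that forward() receives
    return net_input, higgs_bosson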
Torchscript compatible
If you want to use torchscript (tested on 1.2.0), you could use composition instead of inheritance. All you have to change is merely two lines, and your code may look something like this:
import torch

# Inherit from Module and register the embedding as a submodule
class NeoEmbeddings(torch.nn.Module):
    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx=-1):
        super().__init__()
        # Just use it as a container inside your own class
        self._embedding = torch.nn.Embedding(num_embeddings, embedding_dim, padding_idx)
        self.register_forward_pre_hook(NeoEmbeddings.neo_genesis)

    @staticmethod
    def neo_genesis(module, inputs):
        net_input, higgs_bosson = inputs
        # Return the full tuple so forward() still receives both arguments
        return net_input, higgs_bosson

    def forward(self, inputs: torch.Tensor, higgs_bosson: torch.Tensor):
        return self._embedding(inputs)

if __name__ == "__main__":
    x = torch.jit.script(NeoEmbeddings(10, 5, 1))
    # All arguments must be tensors in torchscript
    print(x(torch.tensor([0, 2, 5, 8]), torch.tensor([1])))
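As a usage note, one benefit of the scripted module is that it can be serialized and reloaded without the Python class definition being importable (a minimal sketch; the file name is arbitrary):

import torch

x = torch.jit.script(NeoEmbeddings(10, 5, 1))
torch.jit.save(x, "neo_embeddings.pt")        # self-contained archive
loaded = torch.jit.load("neo_embeddings.pt")  # no Python source needed
print(loaded(torch.tensor([0, 2, 5, 8]), torch.tensor([1])))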