PyTorch 中可学习的标量权重

Learnable scalar weight in PyTorch

我有两个并行的神经网络 运行。每个都给出一个相同大小的特征图,比如 Nx1。现在我想要像这样 w1 * embed1 + w2 * embed2 的这些嵌入的加权平均值。我试过这些 1 2。但是权重没有更新。任何帮助,将不胜感激。这是我正在尝试的方法:

class LinearWeightedAvg(nn.Module):
      def __init__(self, n_inputs):
        super(LinearWeightedAvg, self).__init__()
        self.weight1 = Variable(torch.randn(1), requires_grad=True).cuda()
        self.weight2 = Variable(torch.randn(1), requires_grad=True).cuda()

    def forward(self, inp_embed):
        return self.weight1 * inp_embed[0] + self.weight2 * inp_embed[1]

class EmbedBranch(nn.Module):
    def __init__(self, feat_dim, embedding_dim):
        super(EmbedBranch, self).__init__()
        fc_layer1 = fc_layer
    def forward(self, x):
        x = self.fc_layer1(x)
        return x

class EmbeddingNetwork(nn.Module):
    def __init__(self, args, N):
        super(EmbeddingNetwork, self).__init__()
        embedding_dim = N

        self.embed1 = EmbedBranch(N, N)
        self.embed2 = EmbedBranch(N, N)
        self.comb_branch = LinearWeightedAvg(metric_dim)
        
        self.args = args
        if args.cuda:
            self.cuda()

    def forward(self, emb1, emb2):
        embeds1 = self.text_branch(emb1)
        embeds2 = self.image_branch(emb2)
        combined = self.comb_branch([embeds1, embeds2])
        return combined

    def train_forward(self, embed1, embed2):

        combined = self(embed1, embed2)

embeds = model.train_forward(embed1, embed2)
loss = loss_func(embeds, labels)
running_loss.update(loss.data.item())
optimizer.zero_grad()
loss.backward()

另外我希望权重在0-1范围内。

谢谢,

您应该使用 self.weightx = torch.nn.Parameter(your_inital_tensor) 将张量注册为模型的可学习参数。