How can I concatenate additional input data with the output of the last dropout layer in AlexNet, using a PyTorch implementation?

Here is the architecture I am working with:

import torch
import torch.nn as nn

class AlexNet(nn.Module):
    def __init__(self, num_classes=10):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            # 1
            nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=0),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            # 2
            nn.Conv2d(96, 256, kernel_size=5, stride=1, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            # 3
            nn.Conv2d(256, 384, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            # 4
            nn.Conv2d(384, 384, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            # 5
            nn.Conv2d(384, 256, kernel_size=5, stride=1, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.avgpool = nn.AvgPool2d(6)
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256*6*6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            # my (non-working) attempt to concatenate the extra features here:
            torch.cat((nn.Dropout(), PIs_features)),
            nn.Linear(4096, num_classes))

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), 256*6*6)
        x = self.classifier(x)
        return x

So I want to concatenate some input data, say 'y', with the output of the last dropout layer nn.Dropout() in self.classifier.

Thanks in advance.

You can do this in the forward definition: just call torch.cat((x, y), 1) to concatenate the two feature vectors.
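For example, concatenating along dimension 1 appends the extra features to each sample in the batch. This is a minimal sketch with made-up sizes (4096 matches the dropout output below; the width 20 for the extra input is hypothetical):

import torch

a = torch.randn(4, 4096)  # e.g. output of the last dropout layer
b = torch.randn(4, 20)    # e.g. the additional input y (hypothetical width)
print(torch.cat((a, b), 1).shape)  # torch.Size([4, 4116])

Applied to your model, that looks like this: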

class AlexNet(nn.Module):
    def __init__(self, num_classes=10):
        super().__init__()
        self.features = nn.Sequential(
            # 1
            nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=0),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            # 2
            nn.Conv2d(96, 256, kernel_size=5, stride=1, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            # 3
            nn.Conv2d(256, 384, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            # 4
            nn.Conv2d(384, 384, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            # 5
            nn.Conv2d(384, 256, kernel_size=5, stride=1, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2))

        self.avgpool = nn.AvgPool2d(6)
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.LazyLinear(4096),
            nn.ReLU(inplace=True),
            nn.Dropout())

        # final layer kept outside the Sequential so the concatenated
        # vector can be fed into it
        self.fc = nn.LazyLinear(num_classes)

    def forward(self, x, y):
        x = self.features(x)
        x = self.avgpool(x)
        x = x.flatten(1)
        x = self.classifier(x)        # ends with the last dropout layer
        x = torch.cat((x, y), 1)      # concatenate the additional input y
        x = self.fc(x)
        return x

Also, I have replaced the fully connected nn.Linear layers with nn.LazyLinear, which infers its input size on the first forward pass. But you can replace them with fixed sizes if you prefer.
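For completeness, here is a usage sketch. The 227x227 input size (which yields a 6x6 feature map before pooling) and the 20-dimensional extra input are assumptions; the first forward pass is what materializes the LazyLinear weights:

model = AlexNet(num_classes=10)
images = torch.randn(4, 3, 227, 227)  # image batch (assumed input size)
extra = torch.randn(4, 20)            # additional input y (hypothetical width)
out = model(images, extra)            # lazy layers infer their shapes here
print(out.shape)                      # torch.Size([4, 10])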