Pytorch: RuntimeError: Sizes of tensors must match except in dimension 2

I'm running into a problem when passing images into my UNet. I get the following error:

    Traceback (most recent call last):
      File "path\Main.py", line 101, in <module>
        outputs = model(inputs[None,...].float())
      File "C:\Users\...\AppData\Local\Programs\Python\Python39\lib\site-packages\torch\nn\modules\module.py", line 727, in _call_impl
        result = self.forward(*input, **kwargs)
      File "path\UNets.py", line 53, in forward
        upconv2 = self.upconv2(torch.cat([upconv1,conv4]),1)
    RuntimeError: Sizes of tensors must match except in dimension 2. Got 15 and 10 (The offending index is 0)
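
For context, torch.cat requires every dimension except the one being concatenated along to match across all of its inputs; the error above is torch.cat complaining that this is not the case. A minimal, self-contained sketch of the same class of error (the shapes below are illustrative only, not taken from the actual model):

    import torch

    # Two feature maps whose spatial sizes differ, as can happen when U-Net
    # skip connections are not aligned (shapes are illustrative only).
    a = torch.randn(1, 8, 15, 15)
    b = torch.randn(1, 8, 10, 10)

    # Concatenating along the channel dimension (dim=1) fails here, because the
    # remaining dimensions (15 vs 10) do not match.
    try:
        torch.cat([a, b], dim=1)
    except RuntimeError as e:
        print(e)  # "Sizes of tensors must match except in dimension 1 ..."

    # It works once all non-concatenated dimensions agree:
    c = torch.randn(1, 4, 15, 15)
    print(torch.cat([a, c], dim=1).shape)  # torch.Size([1, 12, 15, 15])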

The images I use come in different sizes, so I resize them all to 256x256. They are grayscale images.

My data loading:

    def getImageAndTransform(self, item):
        data = Image.open(self.datalist[item])
        label = Image.open(self.labellist[item])

        p = torchvision.transforms.Compose([torchvision.transforms.Scale((256, 256))])

        data = p(data)
        label = p(label)

        data = torch.from_numpy(np.array(data))
        label = torch.from_numpy(np.array(label))

        return data, label
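
A side note on the transform: torchvision.transforms.Scale is the legacy name of this operation, and newer torchvision releases have removed it in favour of transforms.Resize. A minimal sketch of an equivalent pipeline, assuming PIL inputs as above:

    import torchvision

    # Equivalent to the Compose above on newer torchvision versions, where
    # transforms.Scale has been removed; Resize takes the same (H, W) tuple.
    p = torchvision.transforms.Compose([
        torchvision.transforms.Resize((256, 256)),
    ])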

My network:

    class Unet(SegmentationNetwork):
        def __init__(self, config):
            super(StandardUnet, self).__init__(config=config)

            # down
            self.downconv1 = self.contract_block(self.in_channels, self.channels[0], self.kernel[0], self.padding[0])
            self.downconv2 = self.contract_block(self.channels[0], self.channels[1], self.kernel[1], self.padding[1])
            self.downconv3 = self.contract_block(self.channels[1], self.channels[2], self.kernel[2], self.padding[2])
            self.downconv4 = self.contract_block(self.channels[2], self.channels[3], self.kernel[3], self.padding[3])
            self.downconv5 = self.contract_block(self.channels[3], self.channels[4], self.kernel[4], self.padding[4])

            # up
            self.upconv1 = self.expand_block(self.channels[4], self.channels[3], self.kernel[4], self.padding[4])
            self.upconv2 = self.expand_block(self.channels[3], self.channels[2], self.kernel[3], self.padding[3])
            self.upconv3 = self.expand_block(self.channels[2], self.channels[1], self.kernel[2], self.padding[2])
            self.upconv4 = self.expand_block(self.channels[1], self.channels[0], self.kernel[1], self.padding[1])
            self.upconv5 = self.expand_block(self.channels[0], self.out_channels, self.kernel[0], self.padding[0])

        def forward(self, x):
            # down
            conv1 = self.downconv1(x)
            conv2 = self.downconv2(conv1)
            conv3 = self.downconv3(conv2)
            conv4 = self.downconv4(conv3)
            conv5 = self.downconv5(conv4)

            # up
            upconv1 = self.upconv1(conv5)
            upconv2 = self.upconv2(torch.cat([upconv1,conv4]),1)
            upconv3 = self.upconv3(torch.cat([upconv2,conv3]),1)
            upconv4 = self.upconv4(torch.cat([upconv3,conv2]),1)
            upconv5 = self.upconv5(torch.cat([upconv4,conv1]),1)

            self.out = upconv5

        def contract_block(self, in_channels, out_channels, kernel_size, padding):
            contract = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=1, padding=padding),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
                nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size, stride=1, padding=padding),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
            return contract

        def expand_block(self, in_channels, out_channels, kernel_size, padding):
            expand = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=padding),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
                nn.Conv2d(out_channels, out_channels, kernel_size, stride=1, padding=padding),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
                nn.ConvTranspose2d(out_channels, out_channels, kernel_size=3, stride=2, padding=1, output_padding=1)
            )
            return expand
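
When a size mismatch like this appears, one way to locate it (not part of the original code) is to attach forward hooks that print the output shape of every layer and run a dummy input through the model. A minimal sketch of such a shape tracer, demonstrated on a hypothetical stand-in block rather than the real Unet(config):

    import torch
    import torch.nn as nn

    def trace_shapes(model: nn.Module):
        """Register a forward hook on every leaf module that prints its output shape."""
        handles = []
        for name, module in model.named_modules():
            if len(list(module.children())) == 0:  # leaf modules only
                def hook(mod, inp, out, name=name):
                    print(f"{name:20s} -> {tuple(out.shape)}")
                handles.append(module.register_forward_hook(hook))
        return handles  # call handle.remove() on each one when done

    # Hypothetical usage on a stand-in block; the real model would be Unet(config).
    block = nn.Sequential(
        nn.Conv2d(1, 8, kernel_size=3, stride=1, padding=1),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
    )
    handles = trace_shapes(block)
    _ = block(torch.randn(1, 1, 256, 256))  # prints each layer's output shape
    for h in handles:
        h.remove()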

My implementation:

    for i, data in enumerate(dataloader_train, 0):  # input data as list of [inputs, labels]
        data[0].size()
        data[1].size()
        inputs, labels = data[0].to(device), data[1].to(device)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = model(inputs[None,...].float())
        loss = loss_function(outputs, labels)
        loss.backward()
        optimizer.step()
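
For reference, inputs[None, ...] only adds a leading batch dimension and is equivalent to inputs.unsqueeze(0). Conv2d layers operate on (N, C, H, W) batches, so a 2-D (256, 256) grayscale tensor needs both a batch and a channel dimension before it reaches the first convolution. A minimal sketch with illustrative shapes:

    import torch

    img = torch.randn(256, 256)      # a 2-D grayscale image tensor (H, W)

    batched = img[None, ...]         # shape (1, 256, 256), same as img.unsqueeze(0)
    nchw = img[None, None, ...]      # shape (1, 1, 256, 256), ready for Conv2d
    print(batched.shape, nchw.shape)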

Can anyone tell me what I should do to fix this? It may be a silly question, but since I'm new to torch and deep learning I would appreciate any help. Thanks.

I found my mistake: a bracket was in the wrong place in the up-convolution step. The correct line is upconv2 = self.upconv2(torch.cat([upconv1,conv4],1))
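
To spell out what the misplaced bracket changed: in the broken line the ", 1" was passed to the upconv block instead of to torch.cat, so the concatenation ran with its default dim=0 (the batch dimension) and, because upconv1 and conv4 did not match in every remaining dimension, torch.cat raised the RuntimeError above. With the bracket moved, the skip connection is concatenated along the channel dimension as intended. A minimal sketch with illustrative shapes:

    import torch

    upconv1 = torch.randn(1, 64, 32, 32)     # illustrative shapes only
    conv4 = torch.randn(1, 64, 32, 32)

    # Broken version: ", 1" sat outside torch.cat, so cat used its default
    # dim=0 (batch) and the 1 became an extra argument to the upconv block.
    stacked = torch.cat([upconv1, conv4])    # shape (2, 64, 32, 32)

    # Fixed version: concatenate the skip connection along the channel dimension.
    merged = torch.cat([upconv1, conv4], 1)  # shape (1, 128, 32, 32)
    print(stacked.shape, merged.shape)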