Convert CNN model code from Keras to PyTorch

I am trying to convert CNN model code from Keras to PyTorch.

Here is the Keras Sequential model:

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Flatten, Dense

model = Sequential()
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation="relu", input_shape=(28, 28, 1)))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation="relu"))

model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation="relu"))
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation="relu"))

model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Conv2D(filters=256, kernel_size=(3, 3), activation="relu"))

model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(BatchNormalization())
model.add(Dense(512, activation="relu"))

model.add(Dense(10, activation="softmax"))

model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])

How do I initialize the PyTorch model and write its forward pass, in particular for the Flatten and Dense layers?

Any advice would be appreciated.
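
For the Flatten and Dense part of the question: with "valid" convolutions and 2x2 pooling, the 28x28 input shrinks to a 1x1x256 feature map before Flatten, so Dense(512) corresponds to nn.Linear(256, 512) and Dense(10) to nn.Linear(512, 10). Here is a minimal sketch of just that tail (the random tensor stands in for the real feature map after the last pooling layer):

import torch
import torch.nn as nn

# feature map after the last MaxPooling2D: (batch, 256, 1, 1) for a 28x28x1 input
x = torch.randn(1, 256, 1, 1)

flatten = nn.Flatten()            # Keras Flatten()
fc1 = nn.Linear(256, 512)         # Keras Dense(512, activation="relu")
fc2 = nn.Linear(512, 10)          # Keras Dense(10, activation="softmax")

x = flatten(x)                    # -> (1, 256)
x = torch.relu(fc1(x))            # -> (1, 512)
x = torch.softmax(fc2(x), dim=1)  # -> (1, 10)
print(x.shape)                    # torch.Size([1, 10])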

I tried to implement it in PyTorch; please check the number of parameters to make sure it matches your Keras implementation. I tried to keep it simple and easy to follow, which is why all the activation functions are written out explicitly. Hope it helps.

import torch
import torch.nn as nn


class Net(nn.Module):
    def __init__(self, num_classes=10):
        super(Net, self).__init__()

        # note: the shape check below uses a 3-channel input; for the Keras model's 28x28x1 input, set in_channels=1
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=(3, 3), padding=(1, 1))
        self.relu1 = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), padding=(1, 1))
        self.relu2 = nn.ReLU(inplace=True)

        self.pool1 = nn.MaxPool2d(kernel_size=(2, 2))
        self.norm1 = nn.BatchNorm2d(num_features=64)

        self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), padding=(1, 1))
        self.relu3 = nn.ReLU(inplace=True)

        self.conv4 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3, 3), padding=(1, 1))
        self.relu4 = nn.ReLU(inplace=True)

        self.pool2 = nn.MaxPool2d(kernel_size=(2, 2))
        self.norm2 = nn.BatchNorm2d(num_features=128)

        self.conv5 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(3, 3), padding=(1, 1))
        self.relu5 = nn.ReLU(inplace=True)

        self.pool3 = nn.MaxPool2d(kernel_size=(2, 2))
        self.norm3 = nn.BatchNorm2d(num_features=256)

        # global average pooling in forward() leaves 256 features per sample
        self.fc1 = nn.Linear(in_features=256, out_features=512)
        self.relu6 = nn.ReLU(inplace=True)

        self.fc2 = nn.Linear(in_features=512, out_features=num_classes)
        # Softmax mirrors the Keras output activation; drop it if training with nn.CrossEntropyLoss
        self.act = nn.Softmax(dim=1)

    def forward(self, x):
        x = self.relu1(self.conv1(x))
        x = self.relu2(self.conv2(x))

        x = self.norm1(self.pool1(x))

        x = self.relu3(self.conv3(x))
        x = self.relu4(self.conv4(x))

        x = self.norm2(self.pool2(x))

        x = self.relu5(self.conv5(x))

        x = self.norm3(self.pool3(x))

        # global average pooling over the spatial dimensions, then flatten to (N, 256),
        # which plays the role of Keras' Flatten()
        x = x.mean((2, 3), keepdim=True)
        x = torch.flatten(x, 1)

        x = self.relu6(self.fc1(x))
        x = self.act(self.fc2(x))

        return x


if __name__ == '__main__':
    model = Net(num_classes=10)

    a = torch.randn(1, 3, 224, 224)  # random 3-channel 224x224 input for a quick shape/parameter check

    print("Output: ", model(a).shape)
    print("Num. params: ", sum(p.numel() for p in model.parameters() if p.requires_grad))

Output:

Output:  torch.Size([1, 10])
Num. params:  692938
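
As a follow-up, the Keras model.compile line has no counterpart inside the class; in PyTorch the loss and optimizer are set up separately. Below is a minimal training-step sketch that continues the script above. Adam and the learning rate are assumptions (the Keras snippet does not show which optimizer it uses), and the inputs/labels are random placeholders. Note that nn.CrossEntropyLoss expects raw logits and integer class labels, so for training you would normally drop the final nn.Softmax.

import torch.optim as optim

model = Net(num_classes=10)
criterion = nn.CrossEntropyLoss()                     # rough counterpart of "categorical_crossentropy"
optimizer = optim.Adam(model.parameters(), lr=1e-3)   # assumed optimizer; not specified in the Keras snippet

# one illustrative training step with random data
inputs = torch.randn(8, 3, 224, 224)
labels = torch.randint(0, 10, (8,))                   # integer class indices, not one-hot vectors

optimizer.zero_grad()
outputs = model(inputs)                               # Net returns softmax probabilities as written;
loss = criterion(outputs, labels)                     # for CrossEntropyLoss, returning raw logits is preferable
loss.backward()
optimizer.step()
print("loss:", loss.item())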