Changing Learning Rate According to Layer Width in PyTorch

I am trying to train a network where the learning rate for each layer is proportional to 1/(layer width). Is there a way to do this in PyTorch? I tried changing the learning rate in the optimizer and including it in my training loop, but that didn't work. I have seen this discussed for Adam, but I am training with SGD. Here is the block where I define the model and the training, in case it helps.

class ConvNet2(nn.Module):
    def __init__(self):
        super(ConvNet2, self).__init__()
        self.network = nn.Sequential(
            nn.Conv2d(3, 8, 3),
            nn.ReLU(),
            nn.Conv2d(8, 32, 3),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),

            nn.Conv2d(32, 32, 3),
            nn.ReLU(),
            nn.Conv2d(32, 32, 3),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),

            nn.Flatten(),

            nn.Linear(800, 10),
        )

    def forward(self, x):
        return self.network(x)

net2 = ConvNet2().to(device)


def train(network, number_of_epochs):
    # learning_rate, trainloader and device are defined elsewhere
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(network.parameters(), lr=learning_rate)
    for epoch in range(number_of_epochs):  # loop over the dataset multiple times
        running_loss = 0.0
        for i, (inputs, labels) in enumerate(trainloader):
            # get the inputs
            inputs = inputs.to(device)
            labels = labels.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = network(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

You can do this by passing each group of parameters to the optimizer together with its own learning rate:

optimizer = optim.SGD(
    [
        {"params": net2.network[0].parameters(), "lr": 1e-1},
        {"params": net2.network[2].parameters(), "lr": 1e-2},
        ...
    ],
    lr=1e-3,
)
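Any parameters that are not given an explicit "lr" fall back to the optimizer-level default (1e-3 above). To check which rate each group actually ended up with, you can inspect optimizer.param_groups; a minimal sketch against the optimizer defined above:

# Print the learning rate assigned to each parameter group.
for i, group in enumerate(optimizer.param_groups):
    print(f"group {i}: lr={group['lr']}")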

In the documentation you can see that you can specify "per-parameter options". Assuming you only want to set the learning rate of the Conv2d layers (this is easy to customize in the code below), you can do something like this:

import torch
from torch import nn
from torch import optim

class ConvNet2(nn.Module):
    def __init__(self):
        super(ConvNet2, self).__init__()
        self.network = nn.Sequential(
            nn.Conv2d(3, 8, 3),
            nn.ReLU(),
            nn.Conv2d(8, 32, 3),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),

            nn.Conv2d(32, 32, 3),
            nn.ReLU(),
            nn.Conv2d(32, 32, 3),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),

            nn.Flatten(),

            nn.Linear(800, 10),
        )

    def forward(self, x):
        return self.network(x)

net2 = ConvNet2()

def getParameters(model):
    parameters = []
    for layer in model.children():
        paramdict = {'params': layer.parameters()}
        if isinstance(layer, nn.Conv2d):
            # lr proportional to 1/(layer width), with width = out_channels;
            # customize the Conv2d learning rate here
            paramdict['lr'] = 0.1 / layer.out_channels
        parameters.append(paramdict)
    return parameters

optimizer = optim.SGD(getParameters(net2.network), lr=0.05)
print(optimizer)
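The comment above marks where to customize the rate, and the same pattern extends to other layer types. Below is a hedged sketch of a hypothetical helper that also scales the Linear layer, treating out_features as its width; that choice of "width" for Linear layers is my assumption, not part of the original answer:

def getParametersAllLayers(model, base_lr=0.1):
    # Hypothetical extension of getParameters: lr proportional to
    # 1/(output width) for every layer that has one; parameter-free
    # layers (ReLU, MaxPool2d, Flatten) contribute empty groups and
    # effectively keep the optimizer-level default.
    parameters = []
    for layer in model.children():
        paramdict = {'params': layer.parameters()}
        if isinstance(layer, nn.Conv2d):
            paramdict['lr'] = base_lr / layer.out_channels
        elif isinstance(layer, nn.Linear):
            paramdict['lr'] = base_lr / layer.out_features
        parameters.append(paramdict)
    return parameters

optimizer = optim.SGD(getParametersAllLayers(net2.network), lr=0.05)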