PyTorch does not save the loaded pre-trained model weights that are part of the final model
I am building on a model pre-trained on CIFAR-10 and fine-tuning it on my own data: I removed the model's final fc layer and attached my own fc layer and softmax. There are seven networks, each identical to the pre-trained part, and they are combined through the attached fc layer. Here is the code of the pre-trained network:
import math
import torch
import torch.nn as nn
import torch.nn.functional as F

class Bottleneck(nn.Module):
    def __init__(self, inplanes, expansion=4, growthRate=12, dropRate=0):
        super(Bottleneck, self).__init__()
        planes = expansion * growthRate
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, growthRate, kernel_size=3,
                               padding=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.dropRate = dropRate

    def forward(self, x):
        out = self.bn1(x)
        out = self.relu(out)
        out = self.conv1(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv2(out)
        if self.dropRate > 0:
            out = F.dropout(out, p=self.dropRate, training=self.training)
        out = torch.cat((x, out), 1)
        return out

class BasicBlock(nn.Module):
    def __init__(self, inplanes, expansion=1, growthRate=12, dropRate=0):
        super(BasicBlock, self).__init__()
        planes = expansion * growthRate
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv1 = nn.Conv2d(inplanes, growthRate, kernel_size=3,
                               padding=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.dropRate = dropRate

    def forward(self, x):
        out = self.bn1(x)
        out = self.relu(out)
        out = self.conv1(out)
        if self.dropRate > 0:
            out = F.dropout(out, p=self.dropRate, training=self.training)
        out = torch.cat((x, out), 1)
        return out

class Transition(nn.Module):
    def __init__(self, inplanes, outplanes):
        super(Transition, self).__init__()
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv1 = nn.Conv2d(inplanes, outplanes, kernel_size=1,
                               bias=False)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.bn1(x)
        out = self.relu(out)
        out = self.conv1(out)
        out = F.avg_pool2d(out, 2)
        return out

class DenseNet(nn.Module):
    def __init__(self, depth=22, block=Bottleneck,
                 dropRate=0, num_classes=10, growthRate=12, compressionRate=2):
        super(DenseNet, self).__init__()
        assert (depth - 4) % 3 == 0, 'depth should be 3n+4'
        n = (depth - 4) // 3 if block == BasicBlock else (depth - 4) // 6
        self.growthRate = growthRate
        self.dropRate = dropRate
        # self.inplanes is a running channel count shared by the helper functions below
        self.inplanes = growthRate * 2
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, padding=1,
                               bias=False)
        self.dense1 = self._make_denseblock(block, n)
        self.trans1 = self._make_transition(compressionRate)
        self.dense2 = self._make_denseblock(block, n)
        self.trans2 = self._make_transition(compressionRate)
        self.dense3 = self._make_denseblock(block, n)
        self.bn = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(8)
        # self.fc = nn.Linear(self.inplanes, num_classes)

        # Weight initialization
        # for m in self.modules():
        #     if isinstance(m, nn.Conv2d):
        #         n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        #         m.weight.data.normal_(0, math.sqrt(2. / n))
        #     elif isinstance(m, nn.BatchNorm2d):
        #         m.weight.data.fill_(1)
        #         m.bias.data.zero_()

    def _make_denseblock(self, block, blocks):
        layers = []
        for i in range(blocks):
            # Currently we fix the expansion ratio as the default value
            layers.append(block(self.inplanes, growthRate=self.growthRate, dropRate=self.dropRate))
            self.inplanes += self.growthRate
        return nn.Sequential(*layers)

    def _make_transition(self, compressionRate):
        inplanes = self.inplanes
        outplanes = int(math.floor(self.inplanes // compressionRate))
        self.inplanes = outplanes
        return Transition(inplanes, outplanes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.trans1(self.dense1(x))
        x = self.trans2(self.dense2(x))
        x = self.dense3(x)
        x = self.bn(x)
        x = self.relu(x)
        x = self.avgpool(x)
        # x = x.view(x.size(0), -1)
        # x = self.fc(x)
        return x

    def getParams(self, paramName):
        if paramName == 'inplanes':
            return self.inplanes
        elif paramName == 'growthRate':
            return self.growthRate
        elif paramName == 'dropRate':
            return self.dropRate

def densenet(**kwargs):
    """
    Constructs a DenseNet model.
    """
    return DenseNet(**kwargs)
Here is my own code:
import numpy as np
import torch
import torch.nn as nn

class Network(nn.Module):
    def __init__(self, pretrained_dict, num_classes=6, num_channels=7,
                 expansion=4, depth=100, growthRate=12, dropRate=0):
        super(Network, self).__init__()
        self.num_channels = num_channels
        # creating 7 channel networks
        self.channels_dnsnets = []
        for ch in range(self.num_channels):
            d = densenet(depth=depth)
            d_dict = d.state_dict()
            # 1. filter out unnecessary keys
            pretrained_dict2 = {k[7:]: v for k, v in pretrained_dict.items() if k[7:] in d_dict}
            # 2. overwrite entries in the existing state dict
            d_dict.update(pretrained_dict2)
            # 3. load the new state dict
            d.load_state_dict(pretrained_dict2)
            # freeze the layers of densenet
            for param in d.parameters():
                param.requires_grad = False
            self.channels_dnsnets.append(d)
        self.inplanes = self.channels_dnsnets[0].getParams(paramName='inplanes')
        self.fc = nn.Linear(self.inplanes * self.num_channels, num_classes)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        batch_size, channels, ht, wd, in_channels = x.shape
        x = np.reshape(x, (batch_size, channels, in_channels, ht, wd))
        out = []
        for num in range(self.num_channels):
            temp_out = self.channels_dnsnets[0](x[:, num, :])
            temp_out = temp_out.view(temp_out.size(0), -1)
            out.append(temp_out)
        out = torch.stack(out, dim=1)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        out = self.softmax(out)
        return out
I set up the optimizer as:
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr,
                             betas=(0.9, 0.999), eps=1e-08, weight_decay=wd, amsgrad=False)
However, whenever I save the model, the list of densenets and their weights is not saved; only the fc and softmax layer weights are. What is wrong with the code? I am new to PyTorch.
The problem is that self.channels_dnsnets is just a plain Python list, so it is not part of the state_dict. Only self.fc and self.softmax get registered on the Module. The simplest change is to define it like this:
self.channels_dnsnets = nn.ModuleList()
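A minimal sketch of how the constructor loop could look after this change (the pretrained-weight filtering, loading and freezing stay exactly as in the question); appending each densenet to the nn.ModuleList registers it as a submodule, so its parameters and buffers appear in the model's state_dict() and get saved:

self.channels_dnsnets = nn.ModuleList()
for ch in range(self.num_channels):
    d = densenet(depth=depth)
    # ... filter and load the pretrained weights, then freeze them, as before ...
    for param in d.parameters():
        param.requires_grad = False
    # ModuleList.append() registers d as a submodule; a plain list.append() does not
    self.channels_dnsnets.append(d)

# sanity check: the densenet weights now show up in the state dict
# print([k for k in self.state_dict() if k.startswith('channels_dnsnets')])

With the sub-networks registered, saving and restoring works the usual way, e.g. torch.save(model.state_dict(), 'checkpoint.pth') followed by model.load_state_dict(torch.load('checkpoint.pth')). As a side benefit, calls such as model.to(device), model.cuda() and model.eval() now also reach the seven densenets, which they do not when the networks live in a plain Python list.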