Sequential network with the VGG layers
I want a sequential network with the layers of a VGG network (I want to pass my network to another function that does not support VGG objects but does support nn.Sequential).
I added a getSequentialVersion method to the VGG class to get a sequential network ending in a linear layer. However, apparently, there is a size mismatch in the network.
'''VGG for CIFAR10. FC layers are removed.
(c) YANG, Wei
'''
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math

__all__ = [
    'VGG', 'vgg16_bn',
]

model_urls = {
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
}

class VGG(nn.Module):
    def __init__(self, features, num_classes=1000, cfg_type=None, batch_norm=False, **kwargs):
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Linear(512, num_classes)
        self._initialize_weights()
        self.cfg_type = cfg_type
        self.batch_norm = batch_norm

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

    def getSequentialVersion(self):
        return make_layers(cfg[self.cfg_type], batch_norm=self.batch_norm, flag=True)

def make_layers(cfg, batch_norm=False, flag=False):
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1, bias=False)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    if flag:
        # for CIFAR10
        layers += [nn.Linear(512, 10)]
    return nn.Sequential(*layers)

cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}

def vgg16_bn(**kwargs):
    """VGG 16-layer model (configuration "D") with batch normalization"""
    print("VGG16-bn")
    model = VGG(make_layers(cfg['D'], batch_norm=True), cfg_type='D', batch_norm=True, **kwargs)
    return model
When I call summary(net, (3, 32, 32)) (for CIFAR10), I get a mismatch error. In other words, the main problem appears when I add the line layers += [nn.Linear(512, 10)].
Can anyone help me? Thanks a lot.
Error message:
File "./main.py", line 284, in <module>
summary(net, ( 3, 32, 32))
File "./anaconda3/envs/my_env/lib/python3.8/site-packages/torchsummary/torchsummary.py", line 72, in summary
model(*x)
File ".anaconda3/envs/my_env/lib/python3.8/site-packages/torch/nn/modules/module.py", line 889, in _call_impl
result = self.forward(*input, **kwargs)
File "./anaconda3/envs/my_env/lib/python3.8/site-packages/torch/nn/modules/container.py", line 119, in forward
input = module(input)
File "./anaconda3/envs/my_env/lib/python3.8/site-packages/torch/nn/modules/module.py", line 889, in _call_impl
result = self.forward(*input, **kwargs)
File "./anaconda3/envs/my_env/lib/python3.8/site-packages/torch/nn/modules/linear.py", line 94, in forward
return F.linear(input, self.weight, self.bias)
File "./envs/my_env/lib/python3.8/site-packages/torch/nn/functional.py", line 1753, in linear
return torch._C._nn.linear(input, weight, bias)
RuntimeError: mat1 dim 1 must match mat2 dim 0
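For reference, the shape arithmetic behind this error: with configuration 'D', the five MaxPool2d layers shrink a 32×32 input down to 1×1, so the last pooling layer emits a tensor of shape (N, 512, 1, 1), and nn.Linear matches its weights against the last dimension (1), not the channel dimension (512). A minimal sketch that reproduces the failure in isolation (shapes assumed from cfg 'D' with a 32×32 input):

import torch
import torch.nn as nn

x = torch.randn(2, 512, 1, 1)  # shape after the final MaxPool2d for a 32x32 input
fc = nn.Linear(512, 10)
fc(x)  # raises the RuntimeError above: Linear sees 1, not 512, as the feature size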
Additional information:
This is how I initialize and use my network:
net = vgg16_bn(depth=args.depth,
               num_classes=num_classes,
               growthRate=args.growthRate,
               compressionRate=args.compressionRate,
               widen_factor=args.widen_factor,
               dropRate=args.dropRate,
               base_width=args.base_width,
               cardinality=args.cardinality).getSequentialVersion()
net = net.to(args.device)

module_names = ''
if hasattr(net, 'features'):
    module_names = 'features'
elif hasattr(net, 'children'):
    module_names = 'children'
else:
    print('unknown net modules...')

summary(net, (3, 32, 32))
The problem is simple. When flag=True (as in getSequentialVersion()), the Flatten operation is missing. So, to solve the issue, you need to add this operation, like so:
if flag:
    # for CIFAR10
    layers += [nn.Flatten(), nn.Linear(512, 10)]  # <<< add Flatten before Linear
In the forward call, you can see this flattening in its view form:
def forward(self, x):
    x = self.features(x)
    x = x.view(x.size(0), -1)  # here, equivalent to torch.flatten(x, 1)
    x = self.classifier(x)
    return x
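A quick check of that equivalence (a sketch):

import torch

x = torch.randn(2, 512, 1, 1)
# Both collapse every dimension after the batch axis into one.
assert torch.equal(x.view(x.size(0), -1), torch.flatten(x, 1))
print(x.view(x.size(0), -1).shape)  # torch.Size([2, 512])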
This is what was missing when you converted the layers to a Sequential.
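To sanity-check the fix, a minimal sketch (assuming the patched make_layers above is in scope):

import torch

net = vgg16_bn(num_classes=10).getSequentialVersion()  # now includes nn.Flatten()
out = net(torch.randn(2, 3, 32, 32))  # a dummy CIFAR10-sized batch
print(out.shape)  # torch.Size([2, 10])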