PyTorch: "ValueError: can't optimize a non-leaf Tensor" after changing pretrained model from 3 RGB Channels to 4 Channels
PyTorch: "ValueError: can't optimize a non-leaf Tensor" after changing pretrained model from 3 RGB Channels to 4 Channels
我一直在尝试将预训练的 PyTorch Densenet 的第一个 conv 层从 3 个通道更改为 4 个通道,同时保持其原始 RGB 通道的预训练权重。我已经完成了以下代码,但是优化器部分抛出了这个错误:"ValueError: can't optimize a non-leaf Tensor"
.
# Replace DenseNet-169's first conv layer (3 input channels) with a 4-channel
# one, copying the pretrained RGB kernels into the first three input channels.
import torch
import torchvision.models as models
import torch.nn as nn

backbone = models.__dict__['densenet169'](pretrained=True)
# Clone the pretrained 3-channel weights before swapping the layer out.
weight1 = backbone.features.conv0.weight.data.clone()
new_first_layer = nn.Conv2d(4, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
with torch.no_grad():
    # In-place copy under no_grad records no autograd history, so the
    # Parameter remains a leaf tensor and the optimizer will accept it.
    new_first_layer.weight[:, :3] = weight1
backbone.features.conv0 = new_first_layer
optimizer = torch.optim.SGD(backbone.parameters(), 0.001,
                            weight_decay=0.1)  # Changing this optimizer from SGD to ADAM
我也尝试过删除 with torch.no_grad(): 这部分代码,
但问题仍然存在:
ValueError Traceback (most recent call last)
<ipython-input-343-5fc87352da04> in <module>()
11 backbone.features.conv0 = new_first_layer
12 optimizer = torch.optim.SGD(res.parameters(), 0.001,
---> 13 weight_decay=0.1) # Changing this optimizer from SGD to ADAM
~/anaconda3/envs/detectron2/lib/python3.6/site-packages/torch/optim/sgd.py in __init__(self, params, lr, momentum, dampening, weight_decay, nesterov)
66 if nesterov and (momentum <= 0 or dampening != 0):
67 raise ValueError("Nesterov momentum requires a momentum and zero dampening")
---> 68 super(SGD, self).__init__(params, defaults)
69
70 def __setstate__(self, state):
~/anaconda3/envs/detectron2/lib/python3.6/site-packages/torch/optim/optimizer.py in __init__(self, params, defaults)
50
51 for param_group in param_groups:
---> 52 self.add_param_group(param_group)
53
54 def __getstate__(self):
~/anaconda3/envs/detectron2/lib/python3.6/site-packages/torch/optim/optimizer.py in add_param_group(self, param_group)
231 "but one of the params is " + torch.typename(param))
232 if not param.is_leaf:
--> 233 raise ValueError("can't optimize a non-leaf Tensor")
234
235 for name, default in self.defaults.items():
ValueError: can't optimize a non-leaf Tensor
我的 PyTorch 版本是:1.7.0.
你们能帮忙吗?非常感谢!
此致。
我想我已经解决了这个问题!:
# Working fix: copy the pretrained RGB weights into a new 4-channel conv via
# .data so no autograd history is recorded — the Parameter stays a leaf tensor
# and torch.optim accepts it. torch.autograd.Variable is deprecated since
# PyTorch 0.4 and is not needed here.
import torch
import torchvision.models as models
import torch.nn as nn

backbone = models.__dict__['densenet169'](pretrained=True)
weight1 = backbone.features.conv0.weight.clone()
new_first_layer = nn.Conv2d(4, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
# Write through .data (equivalently: do the assignment under torch.no_grad())
# so the slice-copy does not turn the weight into a non-leaf tensor.
new_first_layer.weight.data[:, :3] = weight1.data
backbone.features.conv0 = new_first_layer
# Was: res.parameters() — `res` is never defined (NameError); the model
# variable in this snippet is `backbone`.
optimizer = torch.optim.SGD(backbone.parameters(), 0.001,
                            weight_decay=0.1)
我一直在尝试将预训练的 PyTorch Densenet 的第一个 conv 层从 3 个通道更改为 4 个通道,同时保持其原始 RGB 通道的预训练权重。我已经完成了以下代码,但是优化器部分抛出了这个错误:"ValueError: can't optimize a non-leaf Tensor"
.
# Replace DenseNet-169's first conv layer (3 input channels) with a 4-channel
# one, copying the pretrained RGB kernels into the first three input channels.
import torch
import torchvision.models as models
import torch.nn as nn

backbone = models.__dict__['densenet169'](pretrained=True)
# Clone the pretrained 3-channel weights before swapping the layer out.
weight1 = backbone.features.conv0.weight.data.clone()
new_first_layer = nn.Conv2d(4, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
with torch.no_grad():
    # In-place copy under no_grad records no autograd history, so the
    # Parameter remains a leaf tensor and the optimizer will accept it.
    new_first_layer.weight[:, :3] = weight1
backbone.features.conv0 = new_first_layer
optimizer = torch.optim.SGD(backbone.parameters(), 0.001,
                            weight_decay=0.1)  # Changing this optimizer from SGD to ADAM
我也尝试过删除 with torch.no_grad(): 这部分代码,
但问题仍然存在:
ValueError Traceback (most recent call last)
<ipython-input-343-5fc87352da04> in <module>()
11 backbone.features.conv0 = new_first_layer
12 optimizer = torch.optim.SGD(res.parameters(), 0.001,
---> 13 weight_decay=0.1) # Changing this optimizer from SGD to ADAM
~/anaconda3/envs/detectron2/lib/python3.6/site-packages/torch/optim/sgd.py in __init__(self, params, lr, momentum, dampening, weight_decay, nesterov)
66 if nesterov and (momentum <= 0 or dampening != 0):
67 raise ValueError("Nesterov momentum requires a momentum and zero dampening")
---> 68 super(SGD, self).__init__(params, defaults)
69
70 def __setstate__(self, state):
~/anaconda3/envs/detectron2/lib/python3.6/site-packages/torch/optim/optimizer.py in __init__(self, params, defaults)
50
51 for param_group in param_groups:
---> 52 self.add_param_group(param_group)
53
54 def __getstate__(self):
~/anaconda3/envs/detectron2/lib/python3.6/site-packages/torch/optim/optimizer.py in add_param_group(self, param_group)
231 "but one of the params is " + torch.typename(param))
232 if not param.is_leaf:
--> 233 raise ValueError("can't optimize a non-leaf Tensor")
234
235 for name, default in self.defaults.items():
ValueError: can't optimize a non-leaf Tensor
我的 PyTorch 版本是:1.7.0.
你们能帮忙吗?非常感谢!
此致。
我想我已经解决了这个问题!:
# Working fix: copy the pretrained RGB weights into a new 4-channel conv via
# .data so no autograd history is recorded — the Parameter stays a leaf tensor
# and torch.optim accepts it. torch.autograd.Variable is deprecated since
# PyTorch 0.4 and is not needed here.
import torch
import torchvision.models as models
import torch.nn as nn

backbone = models.__dict__['densenet169'](pretrained=True)
weight1 = backbone.features.conv0.weight.clone()
new_first_layer = nn.Conv2d(4, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
# Write through .data (equivalently: do the assignment under torch.no_grad())
# so the slice-copy does not turn the weight into a non-leaf tensor.
new_first_layer.weight.data[:, :3] = weight1.data
backbone.features.conv0 = new_first_layer
# Was: res.parameters() — `res` is never defined (NameError); the model
# variable in this snippet is `backbone`.
optimizer = torch.optim.SGD(backbone.parameters(), 0.001,
                            weight_decay=0.1)