AttributeError: 'list' object has no attribute 'view' while training network
AttributeError: 'list' object has no attribute 'view' while training network
我有一个 PyTorch 模型,我正在尝试对其进行训练,但出现此错误 AttributeError: 'list' object has no attribute 'view'
。不知道为什么我会收到这个。
sample data
# Synthetic sample data: 400 volumes of shape (46, 55, 46).
data = np.random.rand(400, 46, 55, 46)
# np.random.rand yields float64; cast to float32 so batches match the
# default dtype of nn.Linear weights (avoids a runtime dtype mismatch).
ds = TensorDataset(torch.from_numpy(data).float())
# 350 training samples, 50 validation samples.
train_ds, valid_ds = random_split(ds, (350, 50))
train_dl, valid_dl = DataLoader(train_ds), DataLoader(valid_ds)
model
class AutoEncoder(pl.LightningModule):
    """Dense autoencoder over flattened 46*55*46 volumes.

    Encoder: Linear(46*55*46 -> 400) + Tanh.
    Decoder: Linear(400 -> 46*55*46) + Sigmoid.
    """

    def __init__(self):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Linear(46 * 55 * 46, 400),
            nn.Tanh())
        self.decoder = nn.Sequential(
            nn.Linear(400, 46 * 55 * 46),
            nn.Sigmoid())

    def forward(self, x):
        """Encode then decode an already-flattened batch ``x``."""
        x = self.encoder(x)
        x = self.decoder(x)
        return x

    def configure_optimizers(self):
        """Adam over all parameters with lr=1e-3."""
        return torch.optim.Adam(self.parameters(), lr=1e-3)

    def _shared_step(self, batch):
        """Unpack a TensorDataset batch, flatten it, and return the MSE
        reconstruction loss.

        TensorDataset yields tuples, so the DataLoader delivers a *list*
        containing the tensor; indexing it out is the fix for the
        "'list' object has no attribute 'view'" error.
        """
        x = batch[0]
        x = x.view(x.size(0), -1)
        z = self.encoder(x)
        x_hat = self.decoder(z)
        return F.mse_loss(x_hat, x)

    def training_step(self, train_batch, batch_idx):
        loss = self._shared_step(train_batch)
        self.log('train_loss', loss)
        return loss

    def validation_step(self, val_batch, batch_idx):
        loss = self._shared_step(val_batch)
        self.log('val_loss', loss)
        # Bug fix: the original forgot to return the validation loss.
        return loss


model = AutoEncoder()
Error
AttributeError Traceback (most recent call last)
<ipython-input-18-11e725b78922> in <module>()
1 trainer = pl.Trainer()
----> 2 trainer.fit(model, train_dl, valid_dl)
16 frames
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloaders, val_dataloaders, datamodule, train_dataloader, ckpt_path)
739 train_dataloaders = train_dataloader
740 self._call_and_handle_interrupt(
--> 741 self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path
742 )
743
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _call_and_handle_interrupt(self, trainer_fn, *args, **kwargs)
683 """
684 try:
--> 685 return trainer_fn(*args, **kwargs)
686 # TODO: treat KeyboardInterrupt as BaseException (delete the code below) in v1.7
687 except KeyboardInterrupt as exception:
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _fit_impl(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
775 # TODO: ckpt_path only in v1.7
776 ckpt_path = ckpt_path or self.resume_from_checkpoint
--> 777 self._run(model, ckpt_path=ckpt_path)
778
779 assert self.state.stopped
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _run(self, model, ckpt_path)
1197
1198 # dispatch `start_training` or `start_evaluating` or `start_predicting`
-> 1199 self._dispatch()
1200
1201 # plugin will finalized fitting (e.g. ddp_spawn will load trained model)
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _dispatch(self)
1277 self.training_type_plugin.start_predicting(self)
1278 else:
-> 1279 self.training_type_plugin.start_training(self)
1280
1281 def run_stage(self):
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py in start_training(self, trainer)
200 def start_training(self, trainer: "pl.Trainer") -> None:
201 # double dispatch to initiate the training loop
--> 202 self._results = trainer.run_stage()
203
204 def start_evaluating(self, trainer: "pl.Trainer") -> None:
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in run_stage(self)
1287 if self.predicting:
1288 return self._run_predict()
-> 1289 return self._run_train()
1290
1291 def _pre_training_routine(self):
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _run_train(self)
1309 self.progress_bar_callback.disable()
1310
-> 1311 self._run_sanity_check(self.lightning_module)
1312
1313 # enable train mode
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _run_sanity_check(self, ref_model)
1373 # run eval step
1374 with torch.no_grad():
-> 1375 self._evaluation_loop.run()
1376
1377 self.call_hook("on_sanity_check_end")
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/base.py in run(self, *args, **kwargs)
143 try:
144 self.on_advance_start(*args, **kwargs)
--> 145 self.advance(*args, **kwargs)
146 self.on_advance_end()
147 self.restarting = False
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/dataloader/evaluation_loop.py in advance(self, *args, **kwargs)
108 dl_max_batches = self._max_batches[dataloader_idx]
109
--> 110 dl_outputs = self.epoch_loop.run(dataloader, dataloader_idx, dl_max_batches, self.num_dataloaders)
111
112 # store batch level output per dataloader
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/base.py in run(self, *args, **kwargs)
143 try:
144 self.on_advance_start(*args, **kwargs)
--> 145 self.advance(*args, **kwargs)
146 self.on_advance_end()
147 self.restarting = False
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/epoch/evaluation_epoch_loop.py in advance(self, data_fetcher, dataloader_idx, dl_max_batches, num_dataloaders)
120 # lightning module methods
121 with self.trainer.profiler.profile("evaluation_step_and_end"):
--> 122 output = self._evaluation_step(batch, batch_idx, dataloader_idx)
123 output = self._evaluation_step_end(output)
124
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/epoch/evaluation_epoch_loop.py in _evaluation_step(self, batch, batch_idx, dataloader_idx)
215 self.trainer.lightning_module._current_fx_name = "validation_step"
216 with self.trainer.profiler.profile("validation_step"):
--> 217 output = self.trainer.accelerator.validation_step(step_kwargs)
218
219 return output
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/accelerators/accelerator.py in validation_step(self, step_kwargs)
237 """
238 with self.precision_plugin.val_step_context():
--> 239 return self.training_type_plugin.validation_step(*step_kwargs.values())
240
241 def test_step(self, step_kwargs: Dict[str, Union[Any, int]]) -> Optional[STEP_OUTPUT]:
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py in validation_step(self, *args, **kwargs)
217
218 def validation_step(self, *args, **kwargs):
--> 219 return self.model.validation_step(*args, **kwargs)
220
221 def test_step(self, *args, **kwargs):
<ipython-input-12-16d602e3e66b> in validation_step(self, val_batch, batch_idx)
29 def validation_step(self, val_batch, batch_idx):
30 x = val_batch
---> 31 x = x.view(x.size(0), -1)
32 z = self.encoder(x)
33 x_hat = self.decoder(z)
AttributeError: 'list' object has no attribute 'view'
如错误日志所示,在这一行:
29 def validation_step(self, val_batch, batch_idx):
30 x = val_batch
31 x = x.view(x.size(0), -1) # here is your problem
x
或 val_batch
是一个 list
对象,列表没有属性 view()
因为它不是张量。TensorDataset 产生的每个样本都是一个元组,所以 DataLoader 返回的 batch 是一个包含张量的列表;正确的做法是取出列表中的张量:
x = val_batch[0]
或者,您可以在加载和处理数据的过程中,提前在代码中将 val_batch
转换为张量。
我有一个 PyTorch 模型,我正在尝试对其进行训练,但出现此错误 AttributeError: 'list' object has no attribute 'view'
。不知道为什么我会收到这个。
sample data
# Synthetic sample data: 400 volumes of shape (46, 55, 46).
data = np.random.rand(400, 46, 55, 46)
# np.random.rand yields float64; cast to float32 so batches match the
# default dtype of nn.Linear weights (avoids a runtime dtype mismatch).
ds = TensorDataset(torch.from_numpy(data).float())
# 350 training samples, 50 validation samples.
train_ds, valid_ds = random_split(ds, (350, 50))
train_dl, valid_dl = DataLoader(train_ds), DataLoader(valid_ds)
model
class AutoEncoder(pl.LightningModule):
    """Dense autoencoder over flattened 46*55*46 volumes.

    Encoder: Linear(46*55*46 -> 400) + Tanh.
    Decoder: Linear(400 -> 46*55*46) + Sigmoid.
    """

    def __init__(self):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Linear(46 * 55 * 46, 400),
            nn.Tanh())
        self.decoder = nn.Sequential(
            nn.Linear(400, 46 * 55 * 46),
            nn.Sigmoid())

    def forward(self, x):
        """Encode then decode an already-flattened batch ``x``."""
        x = self.encoder(x)
        x = self.decoder(x)
        return x

    def configure_optimizers(self):
        """Adam over all parameters with lr=1e-3."""
        return torch.optim.Adam(self.parameters(), lr=1e-3)

    def _shared_step(self, batch):
        """Unpack a TensorDataset batch, flatten it, and return the MSE
        reconstruction loss.

        TensorDataset yields tuples, so the DataLoader delivers a *list*
        containing the tensor; indexing it out is the fix for the
        "'list' object has no attribute 'view'" error.
        """
        x = batch[0]
        x = x.view(x.size(0), -1)
        z = self.encoder(x)
        x_hat = self.decoder(z)
        return F.mse_loss(x_hat, x)

    def training_step(self, train_batch, batch_idx):
        loss = self._shared_step(train_batch)
        self.log('train_loss', loss)
        return loss

    def validation_step(self, val_batch, batch_idx):
        loss = self._shared_step(val_batch)
        self.log('val_loss', loss)
        # Bug fix: the original forgot to return the validation loss.
        return loss


model = AutoEncoder()
Error
AttributeError Traceback (most recent call last)
<ipython-input-18-11e725b78922> in <module>()
1 trainer = pl.Trainer()
----> 2 trainer.fit(model, train_dl, valid_dl)
16 frames
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloaders, val_dataloaders, datamodule, train_dataloader, ckpt_path)
739 train_dataloaders = train_dataloader
740 self._call_and_handle_interrupt(
--> 741 self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path
742 )
743
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _call_and_handle_interrupt(self, trainer_fn, *args, **kwargs)
683 """
684 try:
--> 685 return trainer_fn(*args, **kwargs)
686 # TODO: treat KeyboardInterrupt as BaseException (delete the code below) in v1.7
687 except KeyboardInterrupt as exception:
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _fit_impl(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
775 # TODO: ckpt_path only in v1.7
776 ckpt_path = ckpt_path or self.resume_from_checkpoint
--> 777 self._run(model, ckpt_path=ckpt_path)
778
779 assert self.state.stopped
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _run(self, model, ckpt_path)
1197
1198 # dispatch `start_training` or `start_evaluating` or `start_predicting`
-> 1199 self._dispatch()
1200
1201 # plugin will finalized fitting (e.g. ddp_spawn will load trained model)
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _dispatch(self)
1277 self.training_type_plugin.start_predicting(self)
1278 else:
-> 1279 self.training_type_plugin.start_training(self)
1280
1281 def run_stage(self):
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py in start_training(self, trainer)
200 def start_training(self, trainer: "pl.Trainer") -> None:
201 # double dispatch to initiate the training loop
--> 202 self._results = trainer.run_stage()
203
204 def start_evaluating(self, trainer: "pl.Trainer") -> None:
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in run_stage(self)
1287 if self.predicting:
1288 return self._run_predict()
-> 1289 return self._run_train()
1290
1291 def _pre_training_routine(self):
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _run_train(self)
1309 self.progress_bar_callback.disable()
1310
-> 1311 self._run_sanity_check(self.lightning_module)
1312
1313 # enable train mode
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _run_sanity_check(self, ref_model)
1373 # run eval step
1374 with torch.no_grad():
-> 1375 self._evaluation_loop.run()
1376
1377 self.call_hook("on_sanity_check_end")
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/base.py in run(self, *args, **kwargs)
143 try:
144 self.on_advance_start(*args, **kwargs)
--> 145 self.advance(*args, **kwargs)
146 self.on_advance_end()
147 self.restarting = False
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/dataloader/evaluation_loop.py in advance(self, *args, **kwargs)
108 dl_max_batches = self._max_batches[dataloader_idx]
109
--> 110 dl_outputs = self.epoch_loop.run(dataloader, dataloader_idx, dl_max_batches, self.num_dataloaders)
111
112 # store batch level output per dataloader
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/base.py in run(self, *args, **kwargs)
143 try:
144 self.on_advance_start(*args, **kwargs)
--> 145 self.advance(*args, **kwargs)
146 self.on_advance_end()
147 self.restarting = False
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/epoch/evaluation_epoch_loop.py in advance(self, data_fetcher, dataloader_idx, dl_max_batches, num_dataloaders)
120 # lightning module methods
121 with self.trainer.profiler.profile("evaluation_step_and_end"):
--> 122 output = self._evaluation_step(batch, batch_idx, dataloader_idx)
123 output = self._evaluation_step_end(output)
124
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/epoch/evaluation_epoch_loop.py in _evaluation_step(self, batch, batch_idx, dataloader_idx)
215 self.trainer.lightning_module._current_fx_name = "validation_step"
216 with self.trainer.profiler.profile("validation_step"):
--> 217 output = self.trainer.accelerator.validation_step(step_kwargs)
218
219 return output
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/accelerators/accelerator.py in validation_step(self, step_kwargs)
237 """
238 with self.precision_plugin.val_step_context():
--> 239 return self.training_type_plugin.validation_step(*step_kwargs.values())
240
241 def test_step(self, step_kwargs: Dict[str, Union[Any, int]]) -> Optional[STEP_OUTPUT]:
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py in validation_step(self, *args, **kwargs)
217
218 def validation_step(self, *args, **kwargs):
--> 219 return self.model.validation_step(*args, **kwargs)
220
221 def test_step(self, *args, **kwargs):
<ipython-input-12-16d602e3e66b> in validation_step(self, val_batch, batch_idx)
29 def validation_step(self, val_batch, batch_idx):
30 x = val_batch
---> 31 x = x.view(x.size(0), -1)
32 z = self.encoder(x)
33 x_hat = self.decoder(z)
AttributeError: 'list' object has no attribute 'view'
如错误日志所示,在这一行:
29 def validation_step(self, val_batch, batch_idx):
30 x = val_batch
31 x = x.view(x.size(0), -1) # here is your problem
x
或 val_batch
是一个 list
对象,列表没有属性 view()
因为它不是张量。TensorDataset 产生的每个样本都是一个元组,所以 DataLoader 返回的 batch 是一个包含张量的列表;正确的做法是取出列表中的张量:
x = val_batch[0]
或者,您可以在加载和处理数据的过程中,提前在代码中将 val_batch
转换为张量。