AttributeError: 'tuple' object has no attribute 'train_dataloader'

I have 3 files. In the datamodule file, I create the data following PyTorch Lightning's basic format. In linear_model I built a linear regression model based on this page. Finally, I have a train file where I instantiate the model and try to fit the data. But I get this error:

GPU available: False, used: False
TPU available: False, using: 0 TPU cores
Traceback (most recent call last):
  File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "/usr/lib/python3.8/runpy.py", line 87, in _run_code
    exec(code, run_globals)
  File "/home/mostafiz/Dropbox/MSc/Thesis/regreesion_EC/src/test_train.py", line 10, in <module>
    train_dataloader=datamodule.DataModuleClass().setup().train_dataloader(),
AttributeError: 'tuple' object has no attribute 'train_dataloader'

Sample datamodule file

class DataModuleClass(pl.LightningDataModule):
    def __init__(self):
        super().__init__()
        self.sigma = 5
        self.batch_size = 10
        self.prepare_data()

    def prepare_data(self):
        x = np.random.uniform(0, 10, 10)
        e = np.random.normal(0, self.sigma, len(x))

        y = x + e

        X = np.transpose(np.array([x, e]))

        self.x_train_tensor = torch.from_numpy(X).float().to(device)
        self.y_train_tensor = torch.from_numpy(y).float().to(device)

        training_dataset = TensorDataset(self.x_train_tensor, self.y_train_tensor)
        self.training_dataset = training_dataset

    def setup(self):
        data = self.training_dataset
        self.train_data, self.val_data = random_split(data, [8, 2])
        return self.train_data, self.val_data

    def train_dataloader(self):
        return DataLoader(self.train_data)

    def val_dataloader(self):
        return DataLoader(self.val_data)

Sample train file

from . import datamodule, linear_model

model = linear_model.LinearRegression(input_dim=2, l1_strength=1, l2_strength=1)

trainer = pl.Trainer()
trainer.fit(model, 
            train_dataloader=datamodule.DataModuleClass().setup().train_dataloader(),
            val_dataloaders=datamodule.DataModuleClass().setup().val_dataloaders())

Please let me know if you need more code or explanation.

Update (based on comments)

Now, after removing self.prepare_data() from the __init__() of DataModuleClass(), removing return self.train_data, self.val_data from setup(), and changing the test file to

data_module = datamodule.DataModuleClass()

trainer = pl.Trainer()
trainer.fit(model,data_module)

I get the following error:

GPU available: False, used: False
TPU available: False, using: 0 TPU cores
Traceback (most recent call last):
  File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "/usr/lib/python3.8/runpy.py", line 87, in _run_code
    exec(code, run_globals)
  File "/home/mostafiz/Dropbox/MSc/Thesis/regreesion_EC/src/test_train.py", line 10, in <module>
    train_dataloader=datamodule.DataModuleClass().train_dataloader(),
  File "/home/mostafiz/Dropbox/MSc/Thesis/regreesion_EC/src/datamodule.py", line 54, in train_dataloader
    return DataLoader(self.train_data)
AttributeError: 'DataModuleClass' object has no attribute 'train_data'

Most of it is correct, except for the following points:

def prepare_data(self):

This function is fine, except that it should not return anything.

The other thing is

def setup(self, stage=None):

The stage argument is required; it can be given a default value of None if we don't want to switch between separate training and testing stages.
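As an illustration only (not part of the original code), setup can branch on the stage value when you do want different behaviour for fitting and testing; this sketch assumes the same training_dataset attribute created in prepare_data, and the hypothetical test split is just a stand-in:

def setup(self, stage=None):
    # The Trainer calls this before fitting/testing; stage is typically 'fit', 'test', or None.
    if stage == 'fit' or stage is None:
        self.train_data, self.val_data = random_split(self.training_dataset, [8, 2])
    if stage == 'test' or stage is None:
        # Hypothetical: reuse the full dataset as a test split for illustration.
        self.test_data = self.training_dataset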

Putting everything together, here is the code:

from argparse import ArgumentParser
import numpy as np
import pytorch_lightning as pl
import torch
from torch import nn
from torch.nn import functional as F
from torch.optim import Adam
from torch.optim.optimizer import Optimizer
from torch.utils.data import random_split, DataLoader, TensorDataset


class LinearRegression(pl.LightningModule):
    def __init__(
        self,
        input_dim: int = 2,
        output_dim: int = 1,
        bias: bool = True,
        learning_rate: float = 1e-4,
        optimizer: Optimizer = Adam,
        l1_strength: float = 0.0,
        l2_strength: float = 0.0
    ):
        super().__init__()
        self.save_hyperparameters()
        self.optimizer = optimizer

        self.linear = nn.Linear(in_features=self.hparams.input_dim, out_features=self.hparams.output_dim, bias=bias)

    def forward(self, x):
        y_hat = self.linear(x)
        return y_hat

    def training_step(self, batch, batch_idx):
        x, y = batch

        # flatten any input
        x = x.view(x.size(0), -1)

        y_hat = self(x)

        loss = F.mse_loss(y_hat, y, reduction='sum')

        # L1 regularizer
        if self.hparams.l1_strength > 0:
            l1_reg = sum(param.abs().sum() for param in self.parameters())
            loss += self.hparams.l1_strength * l1_reg

        # L2 regularizer
        if self.hparams.l2_strength > 0:
            l2_reg = sum(param.pow(2).sum() for param in self.parameters())
            loss += self.hparams.l2_strength * l2_reg

        loss /= x.size(0)

        tensorboard_logs = {'train_mse_loss': loss}
        progress_bar_metrics = tensorboard_logs
        return {'loss': loss, 'log': tensorboard_logs, 'progress_bar': progress_bar_metrics}

    def validation_step(self, batch, batch_idx):
        x, y = batch
        x = x.view(x.size(0), -1)
        y_hat = self(x)
        return {'val_loss': F.mse_loss(y_hat, y)}

    def validation_epoch_end(self, outputs):
        val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        tensorboard_logs = {'val_mse_loss': val_loss}
        progress_bar_metrics = tensorboard_logs
        return {'val_loss': val_loss, 'log': tensorboard_logs, 'progress_bar': progress_bar_metrics}

    def configure_optimizers(self):
        return self.optimizer(self.parameters(), lr=self.hparams.learning_rate)




np.random.seed(42)

device = 'cuda' if torch.cuda.is_available() else 'cpu'

class DataModuleClass(pl.LightningDataModule):
    def __init__(self):
        super().__init__()
        self.sigma = 5
        self.batch_size = 10

    def prepare_data(self):
        x = np.random.uniform(0, 10, 10)
        e = np.random.normal(0, self.sigma, len(x))

        y = x + e

        X = np.transpose(np.array([x, e]))

        self.x_train_tensor = torch.from_numpy(X).float().to(device)
        self.y_train_tensor = torch.from_numpy(y).float().to(device)

        training_dataset = TensorDataset(self.x_train_tensor, self.y_train_tensor)
        self.training_dataset = training_dataset

    def setup(self, stage=None):
        data = self.training_dataset
        self.train_data, self.val_data = random_split(data, [8, 2])

    def train_dataloader(self):
        return DataLoader(self.train_data)

    def val_dataloader(self):
        return DataLoader(self.val_data)

model = LinearRegression(input_dim=2, l1_strength=1, l2_strength=1)
trainer = pl.Trainer()
dummy = DataModuleClass()
trainer.fit(model, dummy)
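
If you want to sanity-check the DataModule on its own, you can call the hooks manually (the Trainer normally does this for you when you pass the datamodule to fit); this is just an illustrative snippet, not part of the original answer:

dm = DataModuleClass()
dm.prepare_data()          # builds dm.training_dataset
dm.setup()                 # creates dm.train_data and dm.val_data
xb, yb = next(iter(dm.train_dataloader()))
print(xb.shape, yb.shape)  # with the default batch_size of 1: torch.Size([1, 2]) torch.Size([1])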