RuntimeError: all elements of input should be between 0 and 1

I want to build an RNN with a BiLSTM layer on top of protein embeddings, using PyTorch. It works with a linear layer, but when I use the BiLSTM I get a runtime error. Sorry if anything is unclear, this is my first post; I would be grateful if anyone can help me.

from collections import Counter, OrderedDict
from typing import Optional
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn.functional as F  # noqa
from deepchain import log
from sklearn.model_selection import train_test_split
from sklearn.utils.class_weight import compute_class_weight
from torch import Tensor, nn
from torch.utils.data import DataLoader, TensorDataset

num_layers = 2
hidden_size = 256

def classification_dataloader_from_numpy(
    x: np.ndarray, y: np.array, batch_size: int = 32
) -> DataLoader:
    """Build a dataloader from numpy for classification problem

    This dataloader is use only for classification. It detects automatically the class of
    the problem (binary or multiclass classification)
    Args:
        x (np.ndarray): [description]
        y (np.array): [description]
        batch_size (int, optional): [description]. Defaults to None.

    Returns:
        DataLoader: [description]
    """
    n_class: int = len(np.unique(y))
    if n_class > 2:
        log.info("This is a classification problem with %s classes", n_class)
    else:
        log.info("This is a binary classification problem")

    # y is float for binary classification, int for multiclass
    y_tensor = torch.tensor(y).long() if n_class > 2 else torch.tensor(y).float()
    tensor_set = TensorDataset(torch.tensor(x).float(), y_tensor)
    loader = DataLoader(tensor_set, batch_size=batch_size)
    return loader
class RNN(pl.LightningModule):
    """A `pytorch`-based deep learning model."""

    def __init__(self, input_shape: int, n_class: int, num_layers: int, n_neurons: int = 128, lr: float = 1e-3):
        super(RNN, self).__init__()
        self.lr = lr
        self.n_neurons = n_neurons
        self.num_layers = num_layers
        self.input_shape = input_shape
        self.output_shape = 1 if n_class <= 2 else n_class
        self.activation = nn.Sigmoid() if n_class <= 2 else nn.Softmax(dim=-1)
        self.lstm = nn.LSTM(self.input_shape, self.n_neurons, num_layers, batch_first=True, bidirectional=True)
        self.fc = nn.Linear(self.n_neurons, self.output_shape)

    def forward(self, x):
        h0=torch.zeros(self.num_layers, x_size(0), self.n_neurons).to(device)
        c0=torch.zeros(self.num_layers, x_size(0), self.n_neurons).to(device)
        out, _=self.lstm(x,(h0, c0))
        out=self.fc(out[:, -1, :])
        return self.fc(x)

    def training_step(self, batch, batch_idx):
        """training_step defined the train loop. It is independent of forward"""
        x, y = batch
        y_hat = self.fc(x).squeeze()
        y = y.squeeze()
        if self.output_shape > 1:
            y_hat = torch.log(y_hat)
        loss = self.loss(y_hat, y)
        self.log("train_loss", loss, on_epoch=True, on_step=False)
        return {"loss": loss}
    def validation_step(self, batch, batch_idx):
        """training_step defined the train loop. It is independent of forward"""
        x, y = batch
        y_hat = self.fc(x).squeeze()
        y = y.squeeze()
        if self.output_shape > 1:
            y_hat = torch.log(y_hat)
        loss = self.loss(y_hat, y)
        self.log("val_loss", loss, on_epoch=True, on_step=False)
        return {"val_loss": loss}
    def configure_optimizers(self):
        """(Optional) Configure training optimizers."""
        return torch.optim.Adam(self.parameters(),lr=self.lr)
    def compute_class_weight(self, y: np.array, n_class: int):
        """Compute class weights for binary/multiclass classification.
        If n_class == 2, only compute the weight of the positive class.
        If n_class > 2, compute weights for all classes.
        Args:
            y (np.array): vector of ints representing the classes
            n_class (int): number of classes
        """
        if n_class == 2:
            class_count: Counter = Counter(y)
            cond_binary = (0 in class_count) and (1 in class_count)
            assert cond_binary, "Must have 0 and 1 classes for binary classification"
            weight = class_count[0] / class_count[1]
        else:
            weight = compute_class_weight(class_weight="balanced", classes=np.unique(y), y=y)
        return torch.tensor(weight).float()
    def fit(
        self,
        x: np.ndarray,
        y: np.array,
        epochs: int = 10,
        batch_size: int = 32,
        class_weight: Optional[str] = None,
        validation_data: bool = True, 
        **kwargs
    ):
        assert isinstance(x, np.ndarray), "X should be a numpy array"
        assert isinstance(y, np.ndarray), "y should be a numpy array"
        assert class_weight in (
            None,
            "balanced",
        ), "the only choice available for class_weight is 'balanced'"
        n_class = len(np.unique(y))
        weight = None
        self.input_shape = x.shape[1]
        self.output_shape = 1 if n_class <= 2 else n_class
        self.activation = nn.Sigmoid() if n_class <= 2 else nn.Softmax(dim=-1)
        if class_weight == "balanced":
            weight = self.compute_class_weight(y, n_class)
        self.loss = nn.NLLLoss(weight) if self.output_shape > 1 else nn.BCELoss(weight)
        if validation_data:
            x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.2)
            train_loader = classification_dataloader_from_numpy(
                x_train, y_train, batch_size=batch_size
            )
            val_loader = classification_dataloader_from_numpy(x_val, y_val, batch_size=batch_size)
        else:
            train_loader = classification_dataloader_from_numpy(x, y, batch_size=batch_size)
            val_loader = None
        self.trainer = pl.Trainer(max_epochs=epochs, **kwargs)
        self.trainer.fit(self, train_loader, val_loader)
    def predict(self, x):
        """Run inference on data."""
        if self.output_shape is None:
            log.warning("Model is not fitted. Can't do predict")
            return
        return self.forward(x).detach().numpy()
    def save(self, path: str):
        """Save the state dict model with torch"""
        torch.save(self.fc.state_dict(), path)
        log.info("Save state_dict parameters in model.pt")
    def load_state_dict(self, state_dict: "OrderedDict[str, Tensor]", strict: bool = False):
        """Load state_dict saved parameters
        Args:
            state_dict (OrderedDict[str, Tensor]): state_dict tensor
            strict (bool, optional): [description]. Defaults to False.
        """
        self.fc.load_state_dict(state_dict, strict=strict)
        self.fc.eval()

# init the model, train it on the data, then save it
mlp = RNN(input_shape=1024, n_neurons=1024, num_layers=2, n_class=2)
mlp.fit(embeddings_train, np.array(y_train), validation_data=(embeddings_test, np.array(y_test)), epochs=30)
mlp.save("model.pt")

These are the errors that occur. I really need help, and I am happy to provide more information at any time.

Error 1

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-154-e5fde11a675c> in <module>
      1 # init MLP model, train it on the data, then save model
      2 mlp = RNN(input_shape=1024, n_neurons=1024, num_layers=2, n_class=2)
----> 3 mlp.fit(embeddings_train, np.array(y_train),validation_data=(embeddings_test, np.array(y_test)), epochs=30)
      4 mlp.save("model.pt")

<ipython-input-153-a8d51af53bb5> in fit(self, x, y, epochs, batch_size, class_weight, validation_data, **kwargs)
    134             val_loader = None
    135         self.trainer = pl.Trainer(max_epochs=epochs, **kwargs)
--> 136         self.trainer.fit(self, train_loader, val_loader)
    137     def predict(self, x):
    138         """Run inference on data."""

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
    456         )
    457 
--> 458         self._run(model)
    459 
    460         assert self.state.stopped

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in _run(self, model)
    754 
    755         # dispatch `start_training` or `start_evaluating` or `start_predicting`
--> 756         self.dispatch()
    757 
    758         # plugin will finalized fitting (e.g. ddp_spawn will load trained model)

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in dispatch(self)
    795             self.accelerator.start_predicting(self)
    796         else:
--> 797             self.accelerator.start_training(self)
    798 
    799     def run_stage(self):

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in start_training(self, trainer)
     94 
     95     def start_training(self, trainer: 'pl.Trainer') -> None:
---> 96         self.training_type_plugin.start_training(trainer)
     97 
     98     def start_evaluating(self, trainer: 'pl.Trainer') -> None:

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py in start_training(self, trainer)
    142     def start_training(self, trainer: 'pl.Trainer') -> None:
    143         # double dispatch to initiate the training loop
--> 144         self._results = trainer.run_stage()
    145 
    146     def start_evaluating(self, trainer: 'pl.Trainer') -> None:

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_stage(self)
    805         if self.predicting:
    806             return self.run_predict()
--> 807         return self.run_train()
    808 
    809     def _pre_training_routine(self):

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_train(self)
    840             self.progress_bar_callback.disable()
    841 
--> 842         self.run_sanity_check(self.lightning_module)
    843 
    844         self.checkpoint_connector.has_trained = False

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
   1105 
   1106             # run eval step
-> 1107             self.run_evaluation()
   1108 
   1109             self.on_sanity_check_end()

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, on_epoch)
    960                 # lightning module methods
    961                 with self.profiler.profile("evaluation_step_and_end"):
--> 962                     output = self.evaluation_loop.evaluation_step(batch, batch_idx, dataloader_idx)
    963                     output = self.evaluation_loop.evaluation_step_end(output)
    964 

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, batch, batch_idx, dataloader_idx)
    172             model_ref._current_fx_name = "validation_step"
    173             with self.trainer.profiler.profile("validation_step"):
--> 174                 output = self.trainer.accelerator.validation_step(args)
    175 
    176         # capture any logged information

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in validation_step(self, args)
    224 
    225         with self.precision_plugin.val_step_context(), self.training_type_plugin.val_step_context():
--> 226             return self.training_type_plugin.validation_step(*args)
    227 
    228     def test_step(self, args: List[Union[Any, int]]) -> Optional[STEP_OUTPUT]:

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py in validation_step(self, *args, **kwargs)
    159 
    160     def validation_step(self, *args, **kwargs):
--> 161         return self.lightning_module.validation_step(*args, **kwargs)
    162 
    163     def test_step(self, *args, **kwargs):

<ipython-input-153-a8d51af53bb5> in validation_step(self, batch, batch_idx)
     78         if self.output_shape > 1:
     79             y_hat = torch.log(y_hat)
---> 80         loss = self.loss(y_hat, y)
     81         self.log("val_loss", loss, on_epoch=True, on_step=False)
     82         return {"val_loss": loss}

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    887             result = self._slow_forward(*input, **kwargs)
    888         else:
--> 889             result = self.forward(*input, **kwargs)
    890         for hook in itertools.chain(
    891                 _global_forward_hooks.values(),

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/torch/nn/modules/loss.py in forward(self, input, target)
    611     def forward(self, input: Tensor, target: Tensor) -> Tensor:
    612         assert self.weight is None or isinstance(self.weight, Tensor)
--> 613         return F.binary_cross_entropy(input, target, weight=self.weight, reduction=self.reduction)
    614 
    615 

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/torch/nn/functional.py in binary_cross_entropy(input, target, weight, size_average, reduce, reduction)
   2760         weight = weight.expand(new_size)
   2761 
-> 2762     return torch._C._nn.binary_cross_entropy(input, target, weight, reduction_enum)
   2763 
   2764 

RuntimeError: all elements of input should be between 0 and 1

Error 2


---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-139-b7e8b13763ef> in <module>
      1 # Model evaluation
----> 2 y_pred = mlp(embeddings_val).squeeze().detach().numpy()
      3 model_evaluation_accuracy(np.array(y_val), y_pred)

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    887             result = self._slow_forward(*input, **kwargs)
    888         else:
--> 889             result = self.forward(*input, **kwargs)
    890         for hook in itertools.chain(
    891                 _global_forward_hooks.values(),

<ipython-input-136-e2fc535640ab> in forward(self, x)
     55         self.fc= nn.Linear(self.hidden_size, self.output_shape)
     56     def forward(self, x):
---> 57         h0=torch.zeros(self.num_layers, x_size(0), self.hidden_size).to(device)
     58         c0=torch.zeros(self.num_layers, x_size(0), self.hidden_size).to(device)
     59         out, _=self.lstm(x,(h0, c0))

NameError: name 'x_size' is not defined

I am adding this as an answer because it is hard to say all of this in a comment.

The main problem you have is with the BCE loss. IIRC, BCE loss expects p(y=1), so your output should be between 0 and 1. If you want to work with logits instead (which is also more numerically stable), you should use BCEWithLogitsLoss.
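For illustration, here is a minimal sketch of the two equivalent options (the tensor values are made up):

    import torch
    from torch import nn

    logits = torch.tensor([2.0, -1.0, 0.5])   # raw, unbounded model outputs
    targets = torch.tensor([1.0, 0.0, 1.0])

    # Option 1: squash the logits with a sigmoid, then use BCELoss
    # (its inputs must lie in [0, 1], or you get the RuntimeError above)
    loss_bce = nn.BCELoss()(torch.sigmoid(logits), targets)

    # Option 2: feed the raw logits to BCEWithLogitsLoss, which fuses
    # the sigmoid into the loss and is more numerically stable
    loss_logits = nn.BCEWithLogitsLoss()(logits, targets)

    print(loss_bce.item(), loss_logits.item())  # the two values agree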

As you mentioned in one of the comments, you are using a sigmoid activation function, but I don't like your forward function. The main issue is that the last line of your forward function is

        return self.fc(x)

which does not apply the sigmoid activation. Moreover, only the input x is used to produce the output, so the LSTM output is simply discarded. I think it is a good idea to add some print statements or breakpoints to make sure the intermediate outputs are what you expect.
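Here is a sketch of what a corrected constructor and forward could look like, assuming the embeddings are fed in as (batch, seq_len, input_shape). Note the doubled sizes for the bidirectional LSTM, the x.size(0) call (your code has x_size(0), which is the NameError in Error 2), and the activation applied to the final output:

    import torch
    from torch import nn
    import pytorch_lightning as pl

    class RNN(pl.LightningModule):
        def __init__(self, input_shape: int, n_class: int, num_layers: int,
                     n_neurons: int = 128, lr: float = 1e-3):
            super().__init__()
            self.lr = lr
            self.n_neurons = n_neurons
            self.num_layers = num_layers
            self.output_shape = 1 if n_class <= 2 else n_class
            self.activation = nn.Sigmoid() if n_class <= 2 else nn.Softmax(dim=-1)
            self.lstm = nn.LSTM(input_shape, n_neurons, num_layers,
                                batch_first=True, bidirectional=True)
            # a bidirectional LSTM concatenates forward and backward states,
            # so the classifier input is 2 * n_neurons, not n_neurons
            self.fc = nn.Linear(2 * n_neurons, self.output_shape)

        def forward(self, x):
            # 2 * num_layers initial states for a bidirectional LSTM;
            # x.size(0) is the batch size, and device=x.device avoids
            # relying on a global `device` variable
            h0 = torch.zeros(2 * self.num_layers, x.size(0), self.n_neurons,
                             device=x.device)
            c0 = torch.zeros(2 * self.num_layers, x.size(0), self.n_neurons,
                             device=x.device)
            out, _ = self.lstm(x, (h0, c0))   # (batch, seq_len, 2 * n_neurons)
            out = self.fc(out[:, -1, :])      # keep the last time step only
            return self.activation(out)       # in [0, 1], safe for BCELoss

Note also that training_step and validation_step call self.fc(x) directly, so even a fixed forward would never run during training; they should go through self(x) instead.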

I was getting the error RuntimeError: all elements of input should be between 0 and 1 because my x data had NaN entries.
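If you hit the same error, a quick sanity check on the inputs is cheap. A small sketch, where the array below stands in for your real embeddings:

    import numpy as np

    # stand-in for the real embeddings; one row contains a NaN
    embeddings_train = np.array([[0.1, 0.2], [np.nan, 0.4], [0.5, 0.6]])

    print("NaNs:", np.isnan(embeddings_train).sum())   # -> 1
    print("infs:", np.isinf(embeddings_train).sum())   # -> 0

    # simplest remedy: drop the rows that contain NaNs
    # (the labels must be filtered with the same mask)
    mask = ~np.isnan(embeddings_train).any(axis=1)
    embeddings_clean = embeddings_train[mask]          # shape (2, 2)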