Pytorch RuntimeError: expected scalar type Double but found Float

I have just started learning PyTorch and built my first LSTM. The dataset is time-series data; the training code is below. Calling .double() does not fix the error. I am running this on Windows 11.

import torch
import torch.nn as nn
from torch.optim import SGD
import math
import numpy as np

class Predictor(nn.Module):
    def __init__(self, inputDim, hiddenDim, outputDim):
        super(Predictor, self).__init__()

        self.rnn = nn.LSTM(input_size = inputDim,
                            hidden_size = hiddenDim,
                            batch_first = True)
        self.output_layer = nn.Linear(hiddenDim, outputDim)
    
    def forward(self, inputs, hidden0=None):
        output, (hidden, cell) = self.rnn(inputs, hidden0)
        output = self.output_layer(output[:, -1, :])

        return output

def mkDataSet(train_x, train_y=None):

    t_train_x = []
    t_train_y = []

    sequence_length = 50
    data_length = train_x.shape[0]

    # Build sliding windows of length sequence_length.
    for offset in range(data_length - sequence_length):
        t_train_x.append([train_x.iloc[offset + i] for i in range(sequence_length)])
        if train_y is not None:
            t_train_y.append([train_y.iloc[offset + i] for i in range(sequence_length)])

    return t_train_x, t_train_y

def mkRandomBatch(train_x, train_t, batch_size=10):
    batch_x = []
    batch_t = []

    for _ in range(batch_size):
        # randint's upper bound is exclusive, so use len(train_x) directly;
        # len(train_x) - 1 would never sample the last window.
        idx = np.random.randint(0, len(train_x))
        batch_x.append(train_x[idx])
        if train_t:
            batch_t.append(train_t[idx])

    return torch.tensor(batch_x), torch.tensor(batch_t)

def main(train_x, train_y, test_x):
    training_size = 10000
    test_size = 1000
    epochs_num = 1000
    hidden_size = 5
    batch_size = 100

    train_x_origin, train_y_origin, test_x_origin = train_x.copy(), train_y.copy(), test_x.copy()

    train_x, train_t = mkDataSet(train_x, train_y)
    test_x, _ = mkDataSet(test_x)  # no labels for the test set

    model = Predictor(train_x_origin.shape[1], hidden_size, 1)
    criterion = nn.MSELoss()
    optimizer = SGD(model.parameters(), lr=0.01)

    for epoch in range(epochs_num):
        # training
        running_loss = 0.0
        training_accuracy = 0.0
        for i in range(int(training_size / batch_size)):
            optimizer.zero_grad()

            data, label = mkRandomBatch(train_x, train_t, batch_size)

            output = model(data)

            loss = criterion(output, label)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()  # loss.data[0] is pre-0.4 PyTorch syntax and no longer works
            training_accuracy += np.sum(np.abs((output.data - label.data).numpy()) < 0.1)

        #test
        test_accuracy = 0.0
        for i in range(int(test_size / batch_size)):
            offset = i * batch_size
            # The test set has no labels, so only the inputs are batched.
            data = torch.tensor(test_x[offset:offset + batch_size])
            output = model(data, None)
        
        training_accuracy /= training_size

        print('%d loss: %.3f, training_accuracy: %.5f, test_accuracy: %.5f' % (
            epoch + 1, running_loss, training_accuracy, test_accuracy))


if __name__ == '__main__':
    main(train_x, train_y, test_x)

Then I get this error:

RuntimeError                              Traceback (most recent call last)
.ipynb Cell 26' in <cell line: 113>()
    109         print('%d loss: %.3f, training_accuracy: %.5f, test_accuracy: %.5f' % (
    110             epoch + 1, running_loss, training_accuracy))
    113 if __name__ == '__main__':
--> 114     main(train_x, train_y, test_x)

.ipynb Cell 26' in main(train_x, train_y, test_x)
     87 optimizer.zero_grad()
     89 data, label = mkRandomBatch(train_x, train_t, batch_size)
---> 91 output = model(data)
     93 loss = criterion(output, label)
     94 loss.backward()

File ~\Documents\lib\site-packages\torch\nn\modules\module.py:1110, in Module._call_impl(self, *input, **kwargs)
   1106 # If we don't have any hooks, we want to skip the rest of the logic in
   1107 # this function, and just call forward.
   1108 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1109         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110     return forward_call(*input, **kwargs)
   1111 # Do not call functions when jit is used
   1112 full_backward_hooks, non_full_backward_hooks = [], []

.ipynb Cell 26' in Predictor.forward(self, inputs, hidden0)
     16 def forward(self, inputs, hidden0=None):
---> 17     output, (hidden, cell) = self.rnn(inputs, hidden0)
     18     output = self.output_layer(output[:, -1, :])
     20     return output

File ~\Documents\lib\site-packages\torch\nn\modules\module.py:1110, in Module._call_impl(self, *input, **kwargs)
   1106 # If we don't have any hooks, we want to skip the rest of the logic in
   1107 # this function, and just call forward.
   1108 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1109         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110     return forward_call(*input, **kwargs)
   1111 # Do not call functions when jit is used
   1112 full_backward_hooks, non_full_backward_hooks = [], []

File ~\Documents\lib\site-packages\torch\nn\modules\rnn.py:761, in LSTM.forward(self, input, hx)
    759 self.check_forward_args(input, hx, batch_sizes)
    760 if batch_sizes is None:
--> 761     result = _VF.lstm(input, hx, self._flat_weights, self.bias, self.num_layers,
    762                       self.dropout, self.training, self.bidirectional, self.batch_first)
    763 else:
    764     result = _VF.lstm(input, batch_sizes, hx, self._flat_weights, self.bias,
    765                       self.num_layers, self.dropout, self.training, self.bidirectional)

RuntimeError: expected scalar type Double but found Float

I think something is wrong with 'data', so I printed its type with print(type(data)):

torch.Tensor

It seems to be a float rather than a double. Do you know what is going wrong? Thanks for your help!

The data you are feeding into the model is a Double tensor, while the model expects a Float tensor. Convert it in the last line of the mkRandomBatch() function:

 return torch.tensor(batch_x).float(), torch.tensor(batch_t)
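
To confirm the mismatch yourself, check the tensor's .dtype attribute rather than type(): type(data) only reports the class torch.Tensor, while .dtype shows the scalar type the error message refers to. With the unmodified mkRandomBatch() (and assuming your features come from a pandas DataFrame, whose columns are float64 by default), a quick check looks like this:

 data, label = mkRandomBatch(train_x, train_t, batch_size)
 print(data.dtype)                      # torch.float64, i.e. "Double", before the fix
 print(next(model.parameters()).dtype)  # torch.float32, i.e. "Float", the PyTorch default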

A similar error may or may not show up for the label tensor during the loss computation; if it does, convert the label tensor to float as well.
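
For reference, a corrected mkRandomBatch() could look like the sketch below (assuming train_x and train_t are the nested lists your mkDataSet() produces). Going through a single float32 NumPy array both fixes the dtype and is much faster than letting torch.tensor() consume a list of lists:

 import numpy as np
 import torch

 def mkRandomBatch(train_x, train_t, batch_size=10):
     # Draw batch_size random window indices.
     idx = np.random.randint(0, len(train_x), size=batch_size)
     # float32 matches the default dtype of the model's parameters.
     batch_x = np.asarray([train_x[i] for i in idx], dtype=np.float32)
     batch_t = np.asarray([train_t[i] for i in idx], dtype=np.float32)
     return torch.from_numpy(batch_x), torch.from_numpy(batch_t)

The opposite direction also works: call model.double() and feed double tensors throughout. float32 is the more common choice, though, since it is the default parameter dtype and faster. Either way, every tensor that reaches the model or the loss (inputs, hidden state, targets) must end up with the same scalar type; calling .double() on only part of the pipeline, as you tried, leaves the mismatch in place.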