RuntimeError: Expected object of scalar type Long but got scalar type Float for argument #2 'target'
RuntimeError: Expected object of scalar type Long but got scalar type Float for argument #2 'target'
我 运行 在计算我的神经网络的损失时遇到了问题。我不确定为什么程序需要一个长对象,因为我所有的张量都是浮点形式。我查看了具有类似错误的线程,解决方案是将张量转换为浮点数而不是长整数,但这在我的情况下不起作用,因为我的所有数据在传递到网络时都已经是浮点形式了。
这是我的代码:
# Dataloader
from torch.utils.data import Dataset, DataLoader
class LoadInfo(Dataset):
def __init__(self, prediction, indicator):
self.prediction = prediction
self.indicator = indicator
def __len__(self):
return len(self.prediction)
def __getitem__(self, idx):
data = torch.tensor(self.indicator.iloc[idx, :],dtype=torch.float)
data = torch.unsqueeze(data, 0)
label = torch.tensor(self.prediction.iloc[idx, :],dtype=torch.float)
sample = {'data': data, 'label': label}
return sample
# Trainloader
test_train = LoadInfo(train_label, train_indicators)
trainloader = DataLoader(test_train, batch_size=64,shuffle=True, num_workers=1,pin_memory=True)
# The Network
class NetDense2(nn.Module):
def __init__(self):
super(NetDense2, self).__init__()
self.rnn1 = nn.RNN(11, 100, 3)
self.rnn2 = nn.RNN(100, 500, 3)
self.fc1 = nn.Linear(500, 100)
self.fc2 = nn.Linear(100, 20)
self.fc3 = nn.Linear(20, 3)
def forward(self, x):
x1, h1 = self.rnn1(x)
x2, h2 = self.rnn2(x1)
x = F.relu(self.fc1(x2))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
# Allocate / Transfer to GPU
dense2 = NetDense2()
dense2.cuda()
# Optimizer
import torch.optim as optim
criterion = nn.CrossEntropyLoss() # specify the loss function
optimizer = optim.SGD(dense2.parameters(), lr=0.001, momentum=0.9,weight_decay=0.001)
# Training
dense2.train()
loss_memory = []
for epoch in range(50): # loop over the dataset multiple times
running_loss = 0.0
for i, samp in enumerate(trainloader):
# get the inputs
ins = samp['data']
targets = samp['label']
tmp = []
tmp = torch.squeeze(targets.float())
ins, targets = ins.cuda(), tmp.cuda()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = dense2(ins)
loss = criterion(outputs, targets) # The loss
loss.backward()
optimizer.step()
# keep track of loss
running_loss += loss.data.item()
我在“loss = criterion(outputs, targets)”行中收到上面的错误
根据 pytorch webpage 的文档和官方示例,传递给 nn.CrossEntropyLoss()
的目标应采用 torch.long 格式
# official example
import torch
import torch.nn as nn
loss = nn.CrossEntropyLoss()
input = torch.randn(3, 5, requires_grad=True)
target = torch.empty(3, dtype=torch.long).random_(5)
# if you will replace the dtype=torch.float, you will get error
output = loss(input, target)
output.backward()
将代码中的这一行更新为
label = torch.tensor(self.prediction.iloc[idx, :],dtype=torch.long) #updated torch.float to torch.long
您的代码的小变通方法如下:
for epoch in range(50): # loop over the dataset multiple times
running_loss = 0.0
for i, samp in enumerate(trainloader):
# get the inputs
ins = samp['data']
targets = samp['label'].long() # HERE IS THE CHANGE <<---------------
tmp = []
tmp = torch.squeeze(targets.float())
ins, targets = ins.cuda(), tmp.cuda()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = dense2(ins)
loss = criterion(outputs, targets) # The loss
loss.backward()
optimizer.step()
# keep track of loss
running_loss += loss.data.item()
一个简单的修复,通过替换
对我有用
loss = criterion(outputs, targets)
和
loss = criterion(outputs, targets.long())
我 运行 在计算我的神经网络的损失时遇到了问题。我不确定为什么程序需要一个长对象,因为我所有的张量都是浮点形式。我查看了具有类似错误的线程,解决方案是将张量转换为浮点数而不是长整数,但这在我的情况下不起作用,因为我的所有数据在传递到网络时都已经是浮点形式了。
这是我的代码:
# Dataloader
from torch.utils.data import Dataset, DataLoader
class LoadInfo(Dataset):
def __init__(self, prediction, indicator):
self.prediction = prediction
self.indicator = indicator
def __len__(self):
return len(self.prediction)
def __getitem__(self, idx):
data = torch.tensor(self.indicator.iloc[idx, :],dtype=torch.float)
data = torch.unsqueeze(data, 0)
label = torch.tensor(self.prediction.iloc[idx, :],dtype=torch.float)
sample = {'data': data, 'label': label}
return sample
# Trainloader
test_train = LoadInfo(train_label, train_indicators)
trainloader = DataLoader(test_train, batch_size=64,shuffle=True, num_workers=1,pin_memory=True)
# The Network
class NetDense2(nn.Module):
def __init__(self):
super(NetDense2, self).__init__()
self.rnn1 = nn.RNN(11, 100, 3)
self.rnn2 = nn.RNN(100, 500, 3)
self.fc1 = nn.Linear(500, 100)
self.fc2 = nn.Linear(100, 20)
self.fc3 = nn.Linear(20, 3)
def forward(self, x):
x1, h1 = self.rnn1(x)
x2, h2 = self.rnn2(x1)
x = F.relu(self.fc1(x2))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
# Allocate / Transfer to GPU
dense2 = NetDense2()
dense2.cuda()
# Optimizer
import torch.optim as optim
criterion = nn.CrossEntropyLoss() # specify the loss function
optimizer = optim.SGD(dense2.parameters(), lr=0.001, momentum=0.9,weight_decay=0.001)
# Training
dense2.train()
loss_memory = []
for epoch in range(50): # loop over the dataset multiple times
running_loss = 0.0
for i, samp in enumerate(trainloader):
# get the inputs
ins = samp['data']
targets = samp['label']
tmp = []
tmp = torch.squeeze(targets.float())
ins, targets = ins.cuda(), tmp.cuda()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = dense2(ins)
loss = criterion(outputs, targets) # The loss
loss.backward()
optimizer.step()
# keep track of loss
running_loss += loss.data.item()
我在“loss = criterion(outputs, targets)”行中收到上面的错误
根据 pytorch webpage 的文档和官方示例,传递给 nn.CrossEntropyLoss()
的目标应采用 torch.long 格式
# official example
import torch
import torch.nn as nn
loss = nn.CrossEntropyLoss()
input = torch.randn(3, 5, requires_grad=True)
target = torch.empty(3, dtype=torch.long).random_(5)
# if you will replace the dtype=torch.float, you will get error
output = loss(input, target)
output.backward()
将代码中的这一行更新为
label = torch.tensor(self.prediction.iloc[idx, :],dtype=torch.long) #updated torch.float to torch.long
您的代码的小变通方法如下:
for epoch in range(50): # loop over the dataset multiple times
running_loss = 0.0
for i, samp in enumerate(trainloader):
# get the inputs
ins = samp['data']
targets = samp['label'].long() # HERE IS THE CHANGE <<---------------
tmp = []
tmp = torch.squeeze(targets.float())
ins, targets = ins.cuda(), tmp.cuda()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = dense2(ins)
loss = criterion(outputs, targets) # The loss
loss.backward()
optimizer.step()
# keep track of loss
running_loss += loss.data.item()
一个简单的修复,通过替换
对我有用loss = criterion(outputs, targets)
和
loss = criterion(outputs, targets.long())