Network stops learning once batchsize is set to > 1
Today I started switching from Keras to PyTorch and tried out some simple feed-forward networks. The network is supposed to learn the square function, i.e. f(x) = x^2. However, it only learns reasonably well if I set the batch size to 1; any other batch size produces very poor results. I also tried different learning rates between 1 and 0.0001 to see whether that would somehow fix it, and tested some changes to the network, to no avail. Can anyone tell me what I am doing wrong, i.e. why my network fails to learn as soon as I set the batch size to anything greater than 1? Find a minimal working example below. Thanks for your help!
import numpy as np
from random import randint
import random
import time
from multiprocessing import Pool
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, transforms

class SquareDataset(Dataset):
    def __init__(self, num_samples):
        super(Dataset, self).__init__()
        self.num_samples = num_samples
        self.train = [None] * num_samples
        self.target = [None] * num_samples
        for i in range(0, num_samples):
            self.train[i] = random.random() * randint(1, 10)
            self.target[i] = self.train[i] ** 2

    def __len__(self):
        return self.num_samples

    def __getitem__(self, index):
        return self.train[index], self.target[index]

def trainNetwork(epochs=50):
    data_train = SquareDataset(num_samples=1000)
    data_train_loader = DataLoader(data_train, batch_size=1, shuffle=False)
    model = nn.Sequential(nn.Linear(1, 32),
                          nn.ReLU(),
                          nn.Linear(32, 32),
                          nn.ReLU(),
                          nn.Linear(32, 1))
    # Define the loss
    criterion = nn.MSELoss()
    # Optimizers require the parameters to optimize and a learning rate
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    for e in range(epochs):
        running_loss = 0
        for number, labels in data_train_loader:
            optimizer.zero_grad()
            number = number.view(number.size(0), -1)
            output = model(number.float())
            loss = criterion(output, labels.float())
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        else:
            print(f"Training loss: {running_loss/len(data_train_loader)}")

    # some test outputs
    sample = torch.tensor([0.2])
    out = model(sample.float())
    print("Out:")
    print(out.item())

    sample = torch.tensor([1])
    out = model(sample.float())
    print("Out:")
    print(out.item())

trainNetwork()
In the line

    loss = criterion(output, labels.float())

the first tensor has shape (batch_size, 1) while labels has shape (batch_size,). So when batch_size > 1, broadcasting takes place and leads to a wrong objective. To overcome the problem, rewrite the loss line so that both tensors have the same shape, e.g.:

    loss = criterion(output.squeeze(-1), labels.float())
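To see the effect concretely, here is a minimal sketch (not from the original post) of what the broadcasting does. With a (batch_size, 1) output and a (batch_size,) target, MSELoss broadcasts both tensors to (batch_size, batch_size) and compares every prediction against every label, so the loss is wrong even for perfect predictions. At batch_size = 1 the broadcast shape is (1, 1), which is why training still worked in that case. Recent PyTorch versions also emit a UserWarning when the target size differs from the input size.

import torch
from torch import nn

criterion = nn.MSELoss()

output = torch.tensor([[1.0], [2.0], [3.0]])  # shape (3, 1), like the model output
labels = torch.tensor([1.0, 2.0, 3.0])        # shape (3,), like the DataLoader labels

# Mismatched shapes: both tensors broadcast to (3, 3), so every prediction
# is compared against every label. The loss is nonzero even though each
# prediction exactly equals its own label.
print(criterion(output, labels).item())              # 1.333..., wrong objective

# Matched shapes: pairwise comparison, loss is 0 as expected.
print(criterion(output.squeeze(-1), labels).item())  # 0.0

Equivalently, you could give the labels the extra dimension instead, e.g. loss = criterion(output, labels.float().unsqueeze(-1)); either way, input and target must have the same shape.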