ANN 训练不准确,因为我没有得到更好的损失减少
ANN not training accurately as I am not getting a better loss reduction
刚开始回归,我似乎没有做对,请问我做错了什么,因为我的损失没有减少。
import torch
from torch import nn
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
# Load the dataset (presumably the UCI Combined Cycle Power Plant data,
# given the filename — TODO confirm): every column except the last is a
# feature, the last column is the regression target.
df = pd.read_excel('Folds5x2_pp.xlsx')
x = df.iloc[:,:-1].values
y = df.iloc[:,-1].values
# Hold out 20% for evaluation; fixed random_state makes the split reproducible.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
class ANN(nn.Module):
    """Two-layer feed-forward regressor: Linear(input, 6) -> ReLU -> Linear(6, output)."""

    def __init__(self, input, output):
        """
        Args:
            input: number of input features per sample.
            output: number of regression targets per sample.
        """
        # `input`/`output` shadow builtins, but the names are kept so any
        # existing keyword callers remain compatible.
        super().__init__()
        self.fc1 = nn.Linear(input, 6)
        self.r1 = nn.ReLU()
        self.fc2 = nn.Linear(6, output)

    def forward(self, x):
        """Forward pass. `x` has shape (batch, input); returns (batch, output)."""
        return self.fc2(self.r1(self.fc1(x)))
# Build the model from the number of feature columns.
f, s = x.shape
ann = ANN(s, 1)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(ann.parameters(), lr=0.01)

x = torch.from_numpy(x_train.astype(np.float32))
# BUG FIX: the target must be (N, 1) to match the model output. With `y`
# of shape (N,), nn.MSELoss broadcasts (N, 1) against (N,) into an (N, N)
# matrix, so the loss averages over every wrong prediction/target pairing
# and never decreases properly.
y = torch.from_numpy(y_train.astype(np.float32)).reshape(-1, 1)

# NOTE(review): standardizing the features (e.g. StandardScaler) would
# likely speed convergence further — worth trying, but not required for
# the loss to decrease.
for i in range(100):
    y_pred = ann(x)
    loss = criterion(y_pred, y)
    print(f"i: {i}, loss: {loss.item()}")
    # zero -> backward -> step; zeroing at the end of each iteration (as the
    # original did) is equivalent, but this ordering is conventional.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
惯例上应把 optimizer.zero_grad() 放在 loss.backward() 之前。不过注意:你的原代码在每次循环末尾也调用了 zero_grad(),所以梯度其实并没有跨迭代累积——真正导致损失不下降的更可能是形状问题:y 的形状是 (N,) 而模型输出是 (N, 1),MSELoss 会把两者广播成 (N, N),先执行 y = y.reshape(-1, 1) 再训练。
像这样:
for i in range(100):
    y_pred = ann(x)
    # Reshape the target to (N, 1) to match the prediction; otherwise
    # nn.MSELoss broadcasts (N, 1) vs (N,) into an (N, N) matrix and the
    # loss is meaningless. (reshape is a no-op if y is already (N, 1).)
    loss = criterion(y_pred, y.reshape(-1, 1))
    print(f"i: {i}, loss: {loss.item()}")
    # Conventional ordering: clear stale gradients, backprop, then step.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
刚开始回归,我似乎没有做对,请问我做错了什么,因为我的损失没有减少。
import torch
from torch import nn
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
# Load the dataset (presumably the UCI Combined Cycle Power Plant data,
# given the filename — TODO confirm): every column except the last is a
# feature, the last column is the regression target.
df = pd.read_excel('Folds5x2_pp.xlsx')
x = df.iloc[:,:-1].values
y = df.iloc[:,-1].values
# Hold out 20% for evaluation; fixed random_state makes the split reproducible.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
class ANN(nn.Module):
    """Small MLP for regression: one hidden ReLU layer of width 6."""

    def __init__(self, input, output):
        """
        Args:
            input: feature count of each input row.
            output: number of predicted values per row.
        """
        # Parameter names shadow builtins; preserved for caller compatibility.
        super().__init__()
        self.fc1 = nn.Linear(input, 6)
        self.r1 = nn.ReLU()
        self.fc2 = nn.Linear(6, output)

    def forward(self, x):
        """Map a (batch, input) tensor to a (batch, output) prediction."""
        return self.fc2(self.r1(self.fc1(x)))
# Instantiate the network with one output per sample.
f, s = x.shape
ann = ANN(s, 1)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(ann.parameters(), lr=0.01)

x = torch.from_numpy(x_train.astype(np.float32))
# BUG FIX: reshape the target to (N, 1). A 1-D target of shape (N,) gets
# broadcast against the (N, 1) model output inside nn.MSELoss, producing
# an (N, N) error matrix — the loss then compares wrong pairs and stalls.
y = torch.from_numpy(y_train.astype(np.float32)).reshape(-1, 1)

for i in range(100):
    y_pred = ann(x)
    loss = criterion(y_pred, y)
    print(f"i: {i}, loss: {loss.item()}")
    # Clear gradients before backprop; equivalent to zeroing at loop end,
    # but this is the conventional ordering.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
惯例上应把 optimizer.zero_grad() 放在 loss.backward() 之前。不过注意:你的原代码在每次循环末尾也调用了 zero_grad(),所以梯度其实并没有跨迭代累积——真正导致损失不下降的更可能是形状问题:y 的形状是 (N,) 而模型输出是 (N, 1),MSELoss 会把两者广播成 (N, N),先执行 y = y.reshape(-1, 1) 再训练。
像这样:
for i in range(100):
    y_pred = ann(x)
    # Match target shape to the (N, 1) prediction; without this,
    # nn.MSELoss broadcasts (N, 1) vs (N,) to (N, N) and the loss is
    # computed over wrong pairings. (No-op if y is already (N, 1).)
    loss = criterion(y_pred, y.reshape(-1, 1))
    print(f"i: {i}, loss: {loss.item()}")
    # Standard order: zero stale grads, backprop, then update weights.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()