Linear Regression with PyTorch: constant loss
I am working on a linear regression problem with PyTorch (y = A*x, where A is a 2x2 matrix). I wrote the code below, but I do not understand why the loss does not change... Could someone help me?
Thanks,
Thomas
import torch
import numpy as np
from scipy.integrate import odeint
from matplotlib import pyplot as plt
from torch.autograd import Variable  # unused; Variable is deprecated in recent PyTorch

def EDP(X, t):
    # right-hand side of the ODE dX/dt = -2*X used to generate the training inputs
    X_0 = -2 * X[0]
    X_1 = -2 * X[1]
    grad = np.array([X_0, X_1])
    return grad

T = np.arange(0, 10, 0.1)
X_train = odeint(EDP, [10, 20], T)

# targets: Y = A*X with A = [[2, 0], [0, 2]]
Y_train = np.zeros_like(X_train)
for i in range(Y_train.shape[0]):
    Y_train[i, :] = np.dot(np.array([[2, 0], [0, 2]]), X_train[i, :])
print(X_train, Y_train)

X_train = torch.Tensor(X_train)
torch.transpose(X_train, 0, 1)  # note: the result is not assigned, so this line has no effect
Y_train = torch.Tensor(Y_train)
print(X_train.shape)

import torch.nn as nn

class LinearRegression(torch.nn.Module):
    def __init__(self):
        super(LinearRegression, self).__init__()
        self.linear = torch.nn.Linear(2, 2, bias=False)  # bias is True by default

    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred

criterion = torch.nn.MSELoss()
our_model = LinearRegression()  # create the model before building the optimizer from its parameters
optimizer = torch.optim.SGD(our_model.parameters(), lr=0.0001)

x_train = X_train
y_train = Y_train
# x_train.requires_grad = True
print(x_train.shape)
print(y_train.shape)

ntrain = 10
for t in range(ntrain):
    y_pred = our_model(x_train)
    loss = criterion(y_train, y_pred)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    print(t, loss)

print(our_model.linear.weight)
On my laptop it works...
Since you run it for only 10 epochs, and with lr = 0.0001, you will not see the loss move within those 10 epochs.
I set optimizer = torch.optim.SGD(our_model.parameters(), lr = 0.01) (i.e. increased the lr), and the loss actually decreased in just 10 epochs.
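For reference, here is a minimal sketch of that suggestion, reusing our_model, x_train, y_train and criterion exactly as defined in the question and only raising the learning rate to 0.01. Since the targets were generated as Y = 2*X, the learned weight should drift toward [[2, 0], [0, 2]] once the loss starts dropping:

optimizer = torch.optim.SGD(our_model.parameters(), lr=0.01)  # larger step size than 0.0001

for t in range(10):
    y_pred = our_model(x_train)
    loss = criterion(y_pred, y_train)   # MSE is symmetric, so the argument order does not matter
    optimizer.zero_grad()               # clear old gradients before the backward pass
    loss.backward()
    optimizer.step()
    print(t, loss.item())               # .item() prints a plain float instead of a 0-dim tensor

print(our_model.linear.weight)          # should approach [[2, 0], [0, 2]] given enough epochs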