Linear regression using PyTorch

I have a classification problem. I am using PyTorch; my input is a sequence of length 341 and the output is one of three classes {0, 1, 2}. I want to train a linear regression model with PyTorch, and I created the class below, but during training the loss starts out as a number, then becomes inf, then NaN. I don't know how to fix this. I also tried initializing the weights of the linear model, but it made no difference. Any suggestions?

import torch
from torch import nn
from torch.autograd import Variable
from torch.optim import SGD

class regression(nn.Module):
    def __init__(self, input_dim):
        super().__init__()
        self.input_dim = input_dim
        # One layer
        self.linear = nn.Linear(input_dim, 1)

    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred

criterion = torch.nn.MSELoss()

def fit(model, data_loader, optim, epochs):
    for epoch in range(epochs):

        for i, (X, y) in enumerate(data_loader):
            X = X.float()
            y = y.unsqueeze(1).float()
            X = Variable(X, requires_grad=True)
            y = Variable(y, requires_grad=True)
            # Make a prediction for the input X
            pred = model(X)
            #loss = (y-pred).pow(2).mean()
            loss = criterion(y, pred)
            optim.zero_grad()
            loss.backward()
            optim.step()
            print(loss)
            print(type(loss))
        # Give some feedback after each 5th pass through the data
        if epoch % 5 == 0:
            print("Epoch", epoch, f"loss: {loss}")
    return None
regnet = regression(input_dim=341)
optim = SGD(regnet.parameters(), lr=0.01)
fit(regnet, data_loader, optim=optim, epochs=5)
pred = regnet(torch.Tensor(test_set.data_info).float())
pred = pred.detach().numpy()

I can't comment because of my reputation, so I'm posting this as an answer. I think there is a problem with the way you build your module; I would structure it like this:

class regression(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(regression, self).__init__()
        # one linear layer
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        return self.linear(x)

# define the model
input_dim = 341
output_dim = 3
model = regression(input_dim, output_dim)

# mean squared error
mse = nn.MSELoss()

# optimization
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# train the model
loss_list = []
iteration_number = 100  # set this to however many iterations you need
for iteration in range(iteration_number):
    # reset the gradients
    optimizer.zero_grad()

    # forward pass to get the outputs
    results = model(input_datas_tensor)  # input_datas_tensor: your (N, 341) float tensor

    # calculate the loss
    loss = mse(results, outputs_datas_tensor)  # outputs_datas_tensor: your (N, 3) float target tensor

    # backward propagation
    loss.backward()
    # update the parameters
    optimizer.step()
    # store the loss
    loss_list.append(loss.item())

    if iteration % 5 == 0:
        print("epoch {}, loss {}".format(iteration, loss.item()))

I would also suggest replacing MSE with the cross-entropy loss, since it is better suited to multi-class classification problems.
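For reference, nn.CrossEntropyLoss takes raw logits of shape (N, num_classes) together with integer class labels of shape (N,), so the targets need no one-hot encoding or float conversion; a minimal sketch:

import torch
from torch import nn

criterion = nn.CrossEntropyLoss()
logits = torch.randn(4, 3)           # (batch, num_classes) raw model outputs
labels = torch.tensor([0, 2, 1, 0])  # integer class labels in {0, 1, 2}
loss = criterion(logits, labels)     # scalar, averaged over the batch

The complete example below puts this together with training and accuracy curves.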

import random
import torch
from torch import nn, optim
from matplotlib import pyplot as plt

# Generate random dataset with your shape to test
# Replace this with your own dataset
data = []
for label in [0, 1, 2]:
    for i in range(1000):
        data.append((torch.rand(341), label))

# train test split
random.shuffle(data)
train, val = data[:1500], data[1500:]    


def run_gradient_descent(model, data_train, data_val, batch_size=64, learning_rate=0.01, weight_decay=0, num_epochs=10):
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    iters, losses = [], []
    iters_sub, train_acc, val_acc = [], [], []
    train_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size, shuffle=True)

    # training
    n = 0 # the number of iterations
    for epoch in range(num_epochs):
        for xs, ts in train_loader:
            if len(ts) != batch_size:
                continue
            zs = model(xs)
            loss = criterion(zs, ts) # compute the total loss
            loss.backward() # compute updates for each parameter
            optimizer.step() # make the updates for each parameter
            optimizer.zero_grad() # a clean up step for PyTorch
            # save the current training information
            iters.append(n)
            losses.append(float(loss)) # CrossEntropyLoss already averages over the batch
            if n % 10 == 0:
                iters_sub.append(n)
                train_acc.append(get_accuracy(model, data_train))
                val_acc.append(get_accuracy(model, data_val))
            # increment the iteration number
            n += 1

    # plotting
    plt.title("Training Curve (batch_size={}, lr={})".format(batch_size, learning_rate))
    plt.plot(iters, losses, label="Train")
    plt.xlabel("Iterations")
    plt.ylabel("Loss")
    plt.show()
    plt.title("Training Curve (batch_size={}, lr={})".format(batch_size, learning_rate))
    plt.plot(iters_sub, train_acc, label="Train")
    plt.plot(iters_sub, val_acc, label="Validation")
    plt.xlabel("Iterations")
    plt.ylabel("Accuracy")
    plt.legend(loc='best')
    plt.show()
    return model


def get_accuracy(model, data):
    loader = torch.utils.data.DataLoader(data, batch_size=500)
    correct, total = 0, 0
    for xs, ts in loader:
        zs = model(xs)
        pred = zs.max(1, keepdim=True)[1] # get the index of the max logit
        correct += pred.eq(ts.view_as(pred)).sum().item()
        total += int(ts.shape[0])
    return correct / total


class MyRegression(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(MyRegression, self).__init__()
        # One layer
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        return self.linear(x)


model = MyRegression(341, 3)
run_gradient_descent(model, train, val, batch_size=64, learning_rate=0.01, num_epochs=10)
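
Once trained, you get the predicted class by taking the argmax over the three logits; a minimal sketch, assuming a hypothetical test batch shaped like the training inputs:

# hypothetical test batch with the same 341 features
x_test = torch.rand(5, 341)
with torch.no_grad():
    logits = model(x_test)
pred_classes = logits.argmax(dim=1)  # predicted labels in {0, 1, 2}
print(pred_classes)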