如何在 nn.LSTM pytorch 中取得 R2 分数
How to make R2 score in nn.LSTM pytorch
我试图在 nn.LSTM 中用 R2 制作损失函数,但我找不到任何关于它的文档。我已经使用了 pytorch 的 RMSE 和 MAE 损失函数。
我的数据是一个时间序列,我在做时间序列预测
这是我在数据训练中使用 RMSE 损失函数的代码
# Model/optimizer setup: single-feature LSTM regressor (2 layers, 512 hidden units).
model = LSTM_model(input_size=1, output_size=1, hidden_size=512, num_layers=2, dropout=0).to(device)
# Summed squared error per batch; loop_fn applies sqrt afterwards to get an RMSE-style loss.
criterion = nn.MSELoss(reduction="sum")
optimizer = optim.Adam(model.parameters(), lr=0.001)
# NOTE(review): Callback appears to be a project-local training helper (early stop
# after 10 stagnant epochs, checkpoints under model/lstm) — confirm its API.
callback = Callback(model, early_stop_patience=10 ,outdir="model/lstm", plot_every=20,)
from tqdm.auto import tqdm
def loop_fn(mode, dataset, dataloader, model, criterion, optimizer, device):
    """Run one epoch over *dataloader* and return the average per-sample cost.

    mode: "train" enables dropout/batch-norm training behavior and performs
    optimizer updates; "test" only evaluates (caller is expected to wrap the
    call in torch.no_grad(), as the training script below does).
    """
    if mode =="train":
        model.train()
    elif mode =="test":
        model.eval()
    cost = 0
    for feature, target in tqdm(dataloader, desc=mode.title()):
        feature, target = feature.to(device), target.to(device)
        # Second arg None lets the LSTM start from a zero hidden state.
        output , hidden = model(feature,None)
        # sqrt of a sum-reduced MSE: an RMSE-like quantity over the whole batch.
        loss = torch.sqrt(criterion(output,target))
        if mode =="train":
            loss.backward()
            optimizer.step()
            # Zeroing after step (instead of before backward) is equivalent
            # here because gradients are fully consumed each iteration.
            optimizer.zero_grad()
        # Weight each batch's loss by its size so the final division by
        # len(dataset) yields a per-sample average. NOTE(review): since the
        # loss is sqrt(sum of squared errors), this is not a strict per-sample
        # RMSE — confirm this weighting is intended.
        cost += loss.item() * feature.shape[0]
    cost = cost / len(dataset)
    return cost
这是开始数据训练的代码
# Train until the callback's early-stopping criterion (monitoring test cost) fires.
while True :
    train_cost = loop_fn("train", train_set, trainloader, model, criterion, optimizer,device)
    # no_grad disables autograd during evaluation to save memory/compute.
    with torch.no_grad():
        test_cost = loop_fn("test", test_set, testloader, model, criterion, optimizer,device)
    callback.log(train_cost, test_cost)
    callback.save_checkpoint()
    callback.cost_runtime_plotting()
    # early_stopping presumably restores/keeps the best model when patience
    # (10 epochs, set above) runs out — TODO confirm against Callback source.
    if callback.early_stopping(model, monitor="test_cost"):
        callback.plot_cost()
        break
谁能帮我算一下R2损失函数?提前谢谢你
这是一个实现,
"""
From https://en.wikipedia.org/wiki/Coefficient_of_determination
"""
def r2_loss(output, target):
    """Return the coefficient of determination R^2 of *output* w.r.t. *target*.

    R^2 = 1 - SS_res / SS_tot. 1.0 is a perfect fit, 0.0 matches simply
    predicting the target mean, and it can be arbitrarily negative.

    WARNING: R^2 is a score to MAXIMIZE. Calling backward() on this value
    and stepping an optimizer will *minimize* it (make the fit worse);
    to train with it, backprop through (1 - r2) or (-r2) instead.
    """
    target_mean = torch.mean(target)
    ss_tot = torch.sum((target - target_mean) ** 2)
    ss_res = torch.sum((target - output) ** 2)
    # Guard against a constant target (ss_tot == 0), which would otherwise
    # produce inf/NaN from the division.
    r2 = 1 - ss_res / (ss_tot + torch.finfo(target.dtype).eps)
    return r2
您可以如下使用,
# NOTE(review): gradient descent MINIMIZES the value passed to backward(),
# so backpropagating r2 directly drives R^2 down (a worse fit). To train
# on this metric, use loss = 1 - r2_loss(output, target) instead.
loss = r2_loss(output, target)
loss.backward()
我试图在 nn.LSTM 中用 R2 制作损失函数,但我找不到任何关于它的文档。我已经使用了 pytorch 的 RMSE 和 MAE 损失函数。
我的数据是一个时间序列,我在做时间序列预测
这是我在数据训练中使用 RMSE 损失函数的代码
# Model/optimizer setup: single-feature LSTM regressor (2 layers, 512 hidden units).
model = LSTM_model(input_size=1, output_size=1, hidden_size=512, num_layers=2, dropout=0).to(device)
# Summed squared error per batch; loop_fn applies sqrt afterwards to get an RMSE-style loss.
criterion = nn.MSELoss(reduction="sum")
optimizer = optim.Adam(model.parameters(), lr=0.001)
# NOTE(review): Callback appears to be a project-local training helper (early stop
# after 10 stagnant epochs, checkpoints under model/lstm) — confirm its API.
callback = Callback(model, early_stop_patience=10 ,outdir="model/lstm", plot_every=20,)
from tqdm.auto import tqdm
def loop_fn(mode, dataset, dataloader, model, criterion, optimizer, device):
    """Run one epoch over *dataloader* and return the average per-sample cost.

    mode: "train" enables dropout/batch-norm training behavior and performs
    optimizer updates; "test" only evaluates (caller is expected to wrap the
    call in torch.no_grad(), as the training script below does).
    """
    if mode =="train":
        model.train()
    elif mode =="test":
        model.eval()
    cost = 0
    for feature, target in tqdm(dataloader, desc=mode.title()):
        feature, target = feature.to(device), target.to(device)
        # Second arg None lets the LSTM start from a zero hidden state.
        output , hidden = model(feature,None)
        # sqrt of a sum-reduced MSE: an RMSE-like quantity over the whole batch.
        loss = torch.sqrt(criterion(output,target))
        if mode =="train":
            loss.backward()
            optimizer.step()
            # Zeroing after step (instead of before backward) is equivalent
            # here because gradients are fully consumed each iteration.
            optimizer.zero_grad()
        # Weight each batch's loss by its size so the final division by
        # len(dataset) yields a per-sample average. NOTE(review): since the
        # loss is sqrt(sum of squared errors), this is not a strict per-sample
        # RMSE — confirm this weighting is intended.
        cost += loss.item() * feature.shape[0]
    cost = cost / len(dataset)
    return cost
这是开始数据训练的代码
# Train until the callback's early-stopping criterion (monitoring test cost) fires.
while True :
    train_cost = loop_fn("train", train_set, trainloader, model, criterion, optimizer,device)
    # no_grad disables autograd during evaluation to save memory/compute.
    with torch.no_grad():
        test_cost = loop_fn("test", test_set, testloader, model, criterion, optimizer,device)
    callback.log(train_cost, test_cost)
    callback.save_checkpoint()
    callback.cost_runtime_plotting()
    # early_stopping presumably restores/keeps the best model when patience
    # (10 epochs, set above) runs out — TODO confirm against Callback source.
    if callback.early_stopping(model, monitor="test_cost"):
        callback.plot_cost()
        break
谁能帮我算一下R2损失函数?提前谢谢你
这是一个实现,
"""
From https://en.wikipedia.org/wiki/Coefficient_of_determination
"""
def r2_loss(output, target):
    """Return the coefficient of determination R^2 of *output* w.r.t. *target*.

    R^2 = 1 - SS_res / SS_tot. 1.0 is a perfect fit, 0.0 matches simply
    predicting the target mean, and it can be arbitrarily negative.

    WARNING: R^2 is a score to MAXIMIZE. Calling backward() on this value
    and stepping an optimizer will *minimize* it (make the fit worse);
    to train with it, backprop through (1 - r2) or (-r2) instead.
    """
    target_mean = torch.mean(target)
    ss_tot = torch.sum((target - target_mean) ** 2)
    ss_res = torch.sum((target - output) ** 2)
    # Guard against a constant target (ss_tot == 0), which would otherwise
    # produce inf/NaN from the division.
    r2 = 1 - ss_res / (ss_tot + torch.finfo(target.dtype).eps)
    return r2
您可以如下使用,
# NOTE(review): gradient descent MINIMIZES the value passed to backward(),
# so backpropagating r2 directly drives R^2 down (a worse fit). To train
# on this metric, use loss = 1 - r2_loss(output, target) instead.
loss = r2_loss(output, target)
loss.backward()