How to make an R2 score loss function for nn.LSTM in PyTorch
Question:
I tried to make a loss function with R2 in nn.LSTM but I couldn't find any documentation about it. I already use the RMSE and MAE losses from PyTorch.
My data is a time series and I'm doing time series forecasting.
This is the code where I use the RMSE loss function in training:
model = LSTM_model(input_size=1, output_size=1, hidden_size=512, num_layers=2, dropout=0).to(device)
criterion = nn.MSELoss(reduction="sum")
optimizer = optim.Adam(model.parameters(), lr=0.001)
callback = Callback(model, early_stop_patience=10, outdir="model/lstm", plot_every=20)
from tqdm.auto import tqdm

def loop_fn(mode, dataset, dataloader, model, criterion, optimizer, device):
    if mode == "train":
        model.train()
    elif mode == "test":
        model.eval()
    cost = 0
    for feature, target in tqdm(dataloader, desc=mode.title()):
        feature, target = feature.to(device), target.to(device)
        output, hidden = model(feature, None)
        loss = torch.sqrt(criterion(output, target))
        if mode == "train":
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        cost += loss.item() * feature.shape[0]
    cost = cost / len(dataset)
    return cost
And this is the code that starts the training:
while True:
    train_cost = loop_fn("train", train_set, trainloader, model, criterion, optimizer, device)
    with torch.no_grad():
        test_cost = loop_fn("test", test_set, testloader, model, criterion, optimizer, device)
    callback.log(train_cost, test_cost)
    callback.save_checkpoint()
    callback.cost_runtime_plotting()
    if callback.early_stopping(model, monitor="test_cost"):
        callback.plot_cost()
        break
Can anyone help me with the R2 loss function? Thank you in advance.
Answers:
Please see the comments and tilmo’s solution below.
Here is an implementation:
"""
From https://en.wikipedia.org/wiki/Coefficient_of_determination
"""
def r2_loss(output, target):
target_mean = torch.mean(target)
ss_tot = torch.sum((target - target_mean) ** 2)
ss_res = torch.sum((target - output) ** 2)
r2 = 1 - ss_res / ss_tot
return r2
You can use it as follows:
loss = r2_loss(output, target)
loss.backward()
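Two caveats worth noting (these are additions, not part of the answer above): R2 grows toward 1 as the fit improves, so to train on it you would minimize its complement 1 - R2 rather than R2 itself, and ss_tot is zero whenever a batch's targets are constant, which makes the division blow up. A minimal sketch under those assumptions, with an arbitrary eps guard:
def r2_loss(output, target, eps=1e-8):
    # Same computation as above, but returned as a quantity to minimize:
    # ss_res / ss_tot equals 1 - R^2, so smaller means a better fit.
    target_mean = torch.mean(target)
    ss_tot = torch.sum((target - target_mean) ** 2)
    ss_res = torch.sum((target - output) ** 2)
    return ss_res / (ss_tot + eps)  # eps avoids division by zero on constant targets

loss = r2_loss(output, target)
loss.backward()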
The following library function already implements the comments I have made on Melike’s solution:
from torchmetrics.functional import r2_score
loss = r2_score(output, target)
loss.backward()
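For reference, a self-contained sketch of the torchmetrics variant (the tensors below are stand-ins for illustration, not from the question). As with the manual version, you would minimize 1 - R2 so that gradient descent pushes R2 up:
import torch
from torchmetrics.functional import r2_score

output = torch.randn(32, 1, requires_grad=True)  # stand-in for model predictions
target = torch.randn(32, 1)                      # stand-in for ground-truth values

loss = 1 - r2_score(output, target)  # minimizing 1 - R^2 maximizes R^2
loss.backward()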