Pytorch: RuntimeError: result type Float can't be cast to the desired output type Long

Question:

I have a model which looks as follows:

import torch
import torch.nn as nn

IMG_WIDTH = IMG_HEIGHT = 224

class AlexNet(nn.Module):
  def __init__(self, output_dim):
    super(AlexNet, self).__init__()
    self._to_linear = None
    self.x = torch.randn(3, IMG_WIDTH, IMG_HEIGHT).view(-1, 3, IMG_WIDTH, IMG_HEIGHT)
    self.features = nn.Sequential(
        nn.Conv2d(3, 64, 3, 2, 1), # in_channels, out_channels, kernel_size, stride, padding
        nn.MaxPool2d(2),
        nn.ReLU(inplace=True),
        nn.Conv2d(64, 192, 3, padding=1),
        nn.MaxPool2d(2),
        nn.ReLU(inplace=True), 
        nn.Conv2d(192, 384, 3, padding=1),
        nn.MaxPool2d(2),
        nn.ReLU(inplace=True), 
        nn.Conv2d(384, 256, 3, padding=1),
        nn.MaxPool2d(2),
        nn.ReLU(inplace=True),
        nn.Conv2d(256, 512, 3, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(512, 256, 3, padding=1),
        nn.MaxPool2d(2),
        nn.ReLU(inplace=True)
    )
    self.conv(self.x)
    self.classifier = nn.Sequential(
        nn.Dropout(.5),
        nn.Linear(self._to_linear, 4096),
        nn.ReLU(inplace=True),
        nn.Dropout(.5),
        nn.Linear(4096, 4096),
        nn.ReLU(inplace=True),
        nn.Linear(4096, output_dim),
    )

  def conv(self, x):
    x = self.features(x)
    if self._to_linear is None:
        self._to_linear = x.shape[1] * x.shape[2] * x.shape[3]
    return x

  def forward(self, x):
    x = self.conv(x)
    h = x.view(x.shape[0], -1)
    x = self.classifier(h)
    return x, h

Here are my optimizer and loss function:

optimizer = torch.optim.Adam(model.parameters())
criterion = nn.BCEWithLogitsLoss().to(device)

Here are my train and evaluate functions:

def train(model, iterator, optimizer, criterion, device):
  epoch_loss, epoch_acc = 0, 0
  model.train()
  for (x, y) in iterator:
    # features and labels to the device
    x = x.to(device)
    y = y.to(device).long()
    # Zero the gradients
    optimizer.zero_grad()
    y_pred, _ = model(x)
  
    # Calculate the loss and accuracy
    loss = criterion(y_pred.squeeze(), y)
    acc = binary_accuracy(y_pred, y)
    # Backward propagate
    loss.backward()
    # Update the weights
    optimizer.step()

    epoch_loss +=loss.item()
    epoch_acc += acc.item()

  return epoch_loss/len(iterator), epoch_acc/len(iterator)

def evaluate(model, iterator, criterion, device):
  epoch_loss, epoch_acc = 0, 0
  model.eval()
  with torch.no_grad():
    for (x, y) in iterator:
      x = x.to(device)
      y = y.to(device).long()
      y_pred, _ = model(x)
      loss = criterion(y_pred, y)
      acc = binary_accuracy(y_pred, y)

      epoch_loss += loss.item()
      epoch_acc += acc.item()
  return epoch_loss/len(iterator), epoch_acc/len(iterator)

This is the error that I’m getting:

RuntimeError: result type Float can't be cast to the desired output type Long

What might be causing this? I have already tried converting my labels to long tensors, as follows:

y = y.to(device).long()

But it does not seem to work.

Asked By: crispengari


Answers:

I was getting the same error doing this:

loss_fn(output, target)

where output was a torch.float32 tensor and target was a torch.int64 tensor. What solved the problem was calling the loss function like this:

loss_fn(output, target.float())
Answered By: 5ar
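
As a minimal, self-contained sketch of that fix (the shapes and values below are illustrative, not taken from the question):

import torch
import torch.nn as nn

loss_fn = nn.BCEWithLogitsLoss()
output = torch.randn(8)                  # raw logits, dtype torch.float32
target = torch.randint(0, 2, (8,))       # 0/1 labels, dtype torch.int64

# loss_fn(output, target)                # RuntimeError: result type Float can't be cast to the desired output type Long
loss = loss_fn(output, target.float())   # works once the targets are cast to float
print(loss.item())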

I encountered this error while using a library (Hugging Face). In that case you do not have access to the code that computes the loss, so you cannot cast the tensors there; instead, convert the data type of the labels before passing them to the library. What worked for me was:

labels = labels.astype(np.float32).tolist()
Answered By: Berkay Berabi
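
A minimal sketch of the same idea when the loss is computed inside the library (the label values and the numpy import are assumptions for illustration):

import numpy as np

labels = [0, 1, 1, 0]                                    # original integer labels
labels = np.asarray(labels).astype(np.float32).tolist()  # now plain Python floats
# Passing float labels to the library lets its BCE-style loss avoid the Float/Long cast.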