Runtime error in converting tensor with more than one element into scalar #7650
-
Hello, I'm a novice in AI and image segmentation. During validation I get the following error:

```
RuntimeError                              Traceback (most recent call last)
<ipython-input-17-ad51cca2723b> in <cell line: 14>()
     53
     54 # aggregate the final mean dice result
---> 55 metric = dice_metric.aggregate().item()
     56 result = IoU_metric.aggregate().item()
     57

RuntimeError: a Tensor with 20 elements cannot be converted to Scalar
```

Note that the number of elements reported varies between runs (I've also seen 40 and 60 while trying to fix the issue).

This is the code I used in the training phase:

```python
max_epochs = 150
val_interval = 1
best_metric = -1
best_IoU = -1
best_metric_epoch = -1
best_IoU_epoch = -1
epoch_loss_values = []
metric_values = []
IoU_values = []
writer = SummaryWriter()
for epoch in range(max_epochs):
print("-" * 10)
print(f"epoch {epoch + 1}/{max_epochs}")
model.train()
epoch_loss = 0
step = 0
for batch_data in train_loader:
step += 1
inputs, labels = (
batch_data["image"].to(device),
batch_data["label"].to(device),
)
optimizer.zero_grad()
outputs = model(inputs)
loss = loss_function(outputs, labels)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
print(f"{step}/{len(train_ds) // train_loader.batch_size}, " f"train_loss: {loss.item():.4f}")
epoch_loss /= step
epoch_loss_values.append(epoch_loss)
print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")
if (epoch + 1) % val_interval == 0:
model.eval()
with torch.no_grad():
for val_data in val_loader:
val_inputs, val_labels = (
val_data["image"].to(device),
val_data["label"].to(device),
)
roi_size = (128, 128, 128)
sw_batch_size = 4
val_outputs = sliding_window_inference(val_inputs, roi_size, sw_batch_size, model)
val_outputs = [post_pred(i) for i in decollate_batch(val_outputs)]
val_labels = [post_label(i) for i in decollate_batch(val_labels)]
# compute metric for current iteration
dice_metric(y_pred=val_outputs, y=val_labels)
IoU_metric(y_pred=val_outputs, y=val_labels)
# aggregate the final mean dice result
metric = dice_metric.aggregate().item()
result = IoU_metric.aggregate().item()
# reset the status for next validation round
dice_metric.reset()
IoU_metric.reset()
metric_values.append(metric)
if metric > best_metric:
best_metric = metric
best_metric_epoch = epoch + 1
torch.save(model.state_dict(), os.path.join("/content/drive/MyDrive/SegResNet", "best_DSC_model.pth"))
print("saved new best Dice model")
print(
f"current epoch: {epoch + 1} current mean dice: {metric:.4f}"
f"\nbest mean dice: {best_metric:.4f} "
f"at epoch: {best_metric_epoch}"
)
writer.add_scalar("val_mean_dice", metric, epoch + 1)
# IoU Metric
IoU_values.append(result)
if result > best_IoU:
best_IoU = result
best_IoU_epoch = epoch + 1
torch.save(model.state_dict(), os.path.join("/content/drive/MyDrive/SegResNet", "best_IoU_model.pth"))
print("saved new best IoU model")
print(
f"current epoch: {epoch + 1} current IoU: {result:.4f}"
f"\n best IoU: {best_IoU:.4f} "
f"at epoch: {best_IoU_epoch}"
)
writer.add_scalar("val_IoU", result, epoch + 1)
#
print(f"train completed, best_Dice: {best_metric:.4f} " f"at epoch: {best_metric_epoch}")
print(f"train completed, best_IoU: {best_IoU:.4f} " f"at epoch: {best_IoU_epoch}") I previously had used Thanks in advance, |
-
Hi @Kiarashdnsh, could you please share how you initialize the metric?
Hi @Kiarashdnsh, the error makes sense: `mean_channel` only averages over the channel dimension, so `aggregate()` still returns a tensor with more than one element and `.item()` fails. If you want a single scalar, you can set the reduction to `mean` instead. See https://github.com/Project-MONAI/MONAI/blob/dev/monai/metrics/utils.py#L119-L121
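For example, a minimal sketch of the metric setup, assuming the metrics come from `monai.metrics` (`DiceMetric` and `MeanIoU`; the question does not show how they were created, and `include_background` should be adjusted to your task):

```python
from monai.metrics import DiceMetric, MeanIoU

# reduction="mean" averages the buffered per-sample, per-class scores into
# a single value, so aggregate() can be converted to a float with .item().
dice_metric = DiceMetric(include_background=False, reduction="mean")
IoU_metric = MeanIoU(include_background=False, reduction="mean")

# after accumulating with dice_metric(y_pred=val_outputs, y=val_labels):
# metric = dice_metric.aggregate().item()  # now a single-element tensor -> scalar
# dice_metric.reset()
```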
Thanks.