Skip to content

Commit

Permalink
Extend dice score metric for multi-dim segmentation (#829)
Browse files · Browse the repository at this point in the history
  • Loading branch information
anwai98 authored Jan 11, 2025
1 parent bb42109 commit e0e7223
Showing 1 changed file with 16 additions and 2 deletions.
18 changes: 16 additions & 2 deletions micro_sam/evaluation/multi_dimensional_segmentation.py
Original file line number Diff line number Diff line change
Expand Up @@ -215,9 +215,23 @@ def segment_slices_from_ground_truth(
segmentation=final_segmentation, groundtruth=curr_gt, return_accuracies=True
)
results = {"mSA": msa, "SA50": sa[0], "SA75": sa[5]}
elif evaluation_metric == "dice":
dice = dice_score(segmentation=final_segmentation, groundtruth=curr_gt)

elif evaluation_metric.startswith("dice"):
if evaluation_metric == "dice":
# Calculate overall dice score (by binarizing all labels).
dice = dice_score(segmentation=final_segmentation, groundtruth=curr_gt)
elif evaluation_metric == "dice_per_class":
# Calculate dice per class.
dice = [
dice_score(segmentation=(final_segmentation == i), groundtruth=(curr_gt == i))
for i in np.unique(curr_gt)[1:]
]
dice = np.mean(dice)
else:
raise ValueError("Please choose either 'dice' / 'dice_per_class'.")

results = {"Dice": dice}

else:
raise ValueError(f"'{evaluation_metric}' is not a supported evaluation metrics. Please choose 'sa' / 'dice'.")

Expand Down

0 comments on commit e0e7223

Please sign in to comment.