# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dice Coefficient Metric."""
from typing import Dict, Optional
import numpy as np
import evaluate
import datasets
_DESCRIPTION = """\
Dice coefficient is 2 times the area of overlap divided by the total number of pixels in both segmentation maps, i.e. Dice = 2 * |A ∩ B| / (|A| + |B|).
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
    `Dict[str, float]` comprising:
    - *dice_score* (`float`):
        Mean Dice Coefficient, averaged over all categories.
Examples:
>>> import numpy as np
    >>> dice = evaluate.load("erntkn/dice_coefficient")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = dice.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
    >>> print(round(results["dice_score"], 4))
    0.545
"""
_CITATION = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union(
pred_label,
label,
    num_labels: int,
    ignore_index: int,
label_map: Optional[Dict[int, int]] = None,
reduce_labels: bool = False,
):
"""Calculate intersection and Union.
Args:
pred_label (`ndarray`):
Prediction segmentation map of shape (height, width).
label (`ndarray`):
Ground truth segmentation map of shape (height, width).
num_labels (`int`):
Number of categories.
ignore_index (`int`):
Index that will be ignored during evaluation.
        label_map (`dict`, *optional*):
            Mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
area_intersect (`ndarray`):
The intersection of prediction and ground truth histogram on all classes.
area_union (`ndarray`):
The union of prediction and ground truth histogram on all classes.
area_pred_label (`ndarray`):
The prediction histogram on all classes.
area_label (`ndarray`):
The ground truth histogram on all classes.
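
    Example (an illustrative, hand-checked sketch on a tiny 2x2 pair; `area_i` counts
    the pixels per class on which prediction and ground truth agree):
        >>> import numpy as np
        >>> pred = np.array([[0, 1], [1, 2]])
        >>> gt = np.array([[0, 1], [2, 2]])
        >>> area_i, area_u, area_p, area_l = intersect_and_union(pred, gt, num_labels=3, ignore_index=255)
        >>> (area_i.tolist(), area_u.tolist())
        ([1, 1, 1], [1, 2, 2])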
"""
    # convert to NumPy arrays first, so the boolean indexing below also works on list inputs
    pred_label = np.array(pred_label)
    label = np.array(label)
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    if reduce_labels:
        # shift every label down by 1 and map the background class (0) to 255
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    # keep only pixels whose ground-truth label is not the ignore index
    mask = label != ignore_index
    pred_label = pred_label[mask]
    label = label[mask]
    # pixels on which prediction and ground truth agree; histogramming with one bin
    # per class over [0, num_labels - 1] turns them into per-class counts
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
area_union = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(
results,
gt_seg_maps,
num_labels,
    ignore_index: int,
label_map: Optional[Dict[int, int]] = None,
reduce_labels: bool = False,
):
"""Calculate Total Intersection and Union, by calculating `intersect_and_union` for each (predicted, ground truth) pair.
Args:
        results (`List[ndarray]`):
            List of prediction segmentation maps, each of shape (height, width).
        gt_seg_maps (`List[ndarray]`):
            List of ground truth segmentation maps, each of shape (height, width).
num_labels (`int`):
Number of categories.
ignore_index (`int`):
Index that will be ignored during evaluation.
        label_map (`dict`, *optional*):
            Mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
total_area_intersect (`ndarray`):
The intersection of prediction and ground truth histogram on all classes.
total_area_union (`ndarray`):
The union of prediction and ground truth histogram on all classes.
total_area_pred_label (`ndarray`):
The prediction histogram on all classes.
total_area_label (`ndarray`):
The ground truth histogram on all classes.
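
    Example (an illustrative sketch accumulating two tiny 1x2 pairs; the four totals
    are the per-class intersection, union, prediction, and ground-truth counts):
        >>> import numpy as np
        >>> preds = [np.array([[0, 1]]), np.array([[1, 1]])]
        >>> gts = [np.array([[0, 1]]), np.array([[1, 0]])]
        >>> [t.tolist() for t in total_intersect_and_union(preds, gts, num_labels=2, ignore_index=255)]
        [[1.0, 2.0], [2.0, 3.0], [1.0, 3.0], [2.0, 2.0]]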
"""
total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
total_area_union = np.zeros((num_labels,), dtype=np.float64)
total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
total_area_label = np.zeros((num_labels,), dtype=np.float64)
for result, gt_seg_map in zip(results, gt_seg_maps):
area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
)
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def dice_coef(
results,
gt_seg_maps,
num_labels,
    ignore_index: int,
nan_to_num: Optional[int] = None,
label_map: Optional[Dict[int, int]] = None,
reduce_labels: bool = False,
):
"""Calculate Mean Dice Coefficient (mDSC).
Args:
        results (`List[ndarray]`):
            List of prediction segmentation maps, each of shape (height, width).
        gt_seg_maps (`List[ndarray]`):
            List of ground truth segmentation maps, each of shape (height, width).
num_labels (`int`):
Number of categories.
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
        label_map (`dict`, *optional*):
            Mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
    Returns:
        `Dict[str, float]` comprising:
        - *dice_score* (`float`):
            Mean Dice Coefficient (DSC averaged over all categories).
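
    Example (an illustrative sketch; one 2x2 pair, two classes, hand-checked:
    per-class dice is 2/3 and 4/5, so the mean is about 0.733):
        >>> import numpy as np
        >>> preds = [np.array([[0, 1], [1, 1]])]
        >>> gts = [np.array([[0, 1], [0, 1]])]
        >>> round(float(dice_coef(preds, gts, num_labels=2, ignore_index=255)["dice_score"]), 3)
        0.733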
"""
total_area_intersect, _, total_area_pred_label, total_area_label = total_intersect_and_union(
results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
)
    result = dict()
    # per-class dice: 2 * |intersection| / (|prediction| + |ground truth|); a class absent
    # from both predictions and references gives 0 / 0 = NaN, which nanmean skips
    dice = 2 * total_area_intersect / (total_area_pred_label + total_area_label)
    result["dice_score"] = np.nanmean(dice)
    if nan_to_num is not None:
        result = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in result.items()}
    return result
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class DiceCoefficient(evaluate.Metric):
def _info(self):
return evaluate.MetricInfo(
module_type="metric",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # each input is one 2D segmentation map: a sequence of rows of label ids
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
reference_urls=["https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/core/evaluation/metrics.py"]
)
def _compute(
self,
predictions,
references,
num_labels: int,
        ignore_index: int,
nan_to_num: Optional[int] = None,
label_map: Optional[Dict[int, int]] = None,
reduce_labels: bool = False,
):
dice = dice_coef(
results=predictions,
            gt_seg_maps=references,
num_labels=num_labels,
ignore_index=ignore_index,
nan_to_num=nan_to_num,
label_map=label_map,
reduce_labels=reduce_labels,
)
return dice
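

# A minimal usage sketch (an illustration, not part of the metric itself). It assumes
# this script is saved locally as "dice_coefficient.py"; `evaluate.load` also accepts
# a local path to a metric script.
#
#     import numpy as np
#     import evaluate
#
#     dice = evaluate.load("./dice_coefficient.py")
#     preds = [np.array([[0, 1], [1, 1]])]
#     refs = [np.array([[0, 1], [0, 1]])]
#     print(dice.compute(predictions=preds, references=refs, num_labels=2, ignore_index=255))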