import contextlib
import copy
import os
from typing import Dict, List, Union

import numpy as np
import torch

from .coco import COCO
from .cocoeval import COCOeval
from .utils import (
    _TYPING_BOX,
    _TYPING_PREDICTIONS,
    convert_to_xywh,
    create_common_coco_eval,
)

_SUPPORTED_TYPES = ["bbox"]


class COCOEvaluator(object):
    """
    Class to perform evaluation for the COCO dataset.
    """

    def __init__(self, coco_gt: COCO, iou_types: List[str] = ["bbox"]):
        """
        Initializes COCOEvaluator with the ground truth COCO dataset and IoU types.

        Args:
            coco_gt: The ground truth COCO dataset.
            iou_types: Intersection over Union (IoU) types for evaluation (Supported: "bbox").
        """
        self.coco_gt = copy.deepcopy(coco_gt)
        self.coco_eval = {}
        for iou_type in iou_types:
            assert iou_type in _SUPPORTED_TYPES, ValueError(
                f"IoU type not supported {iou_type}"
            )
            self.coco_eval[iou_type] = COCOeval(self.coco_gt, iouType=iou_type)
        self.iou_types = iou_types
        self.img_ids = []
        self.eval_imgs = {k: [] for k in iou_types}

    def update(self, predictions: _TYPING_PREDICTIONS) -> None:
        """
        Update the evaluator with new predictions.

        Args:
            predictions: The predictions to update.
        """
        img_ids = list(np.unique(list(predictions.keys())))
        self.img_ids.extend(img_ids)

        for iou_type in self.iou_types:
            results = self.prepare(predictions, iou_type)

            # suppress pycocotools prints
            with open(os.devnull, "w") as devnull:
                with contextlib.redirect_stdout(devnull):
                    coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO()
                    coco_eval = self.coco_eval[iou_type]

                    coco_eval.cocoDt = coco_dt
                    coco_eval.params.imgIds = list(img_ids)
                    eval_imgs = coco_eval.evaluate()

            self.eval_imgs[iou_type].append(eval_imgs)
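
    # Illustrative sketch (not part of the original file): the structure that
    # `update` expects for `predictions`, inferred from how
    # `prepare_for_coco_detection` below consumes it. The box layout (assumed
    # here to be COCO-style [x, y, width, height]) ultimately depends on
    # `_TYPING_PREDICTIONS` in `.utils`.
    #
    # predictions = {
    #     42: {                                 # image id present in coco_gt
    #         "boxes": [[x, y, w, h], ...],     # list or torch.Tensor, one row per detection
    #         "scores": [0.98, ...],            # list or torch.Tensor of confidences
    #         "labels": [3, ...],               # list or torch.Tensor of category ids
    #     },
    # }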

    def synchronize_between_processes(self) -> None:
        """
        Synchronizes evaluation images between processes.
        """
        for iou_type in self.iou_types:
            self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
            create_common_coco_eval(
                self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type]
            )

    def accumulate(self) -> None:
        """
        Accumulates the evaluation results.
        """
        for coco_eval in self.coco_eval.values():
            coco_eval.accumulate()

    def summarize(self) -> None:
        """
        Prints the IoU metric and summarizes the evaluation results.
        """
        for iou_type, coco_eval in self.coco_eval.items():
            print("IoU metric: {}".format(iou_type))
            coco_eval.summarize()

    def prepare(
        self, predictions: _TYPING_PREDICTIONS, iou_type: str
    ) -> List[Dict[str, Union[int, _TYPING_BOX, float]]]:
        """
        Prepares the predictions for COCO detection.

        Args:
            predictions: The predictions to prepare.
            iou_type: The Intersection over Union (IoU) type for evaluation.

        Returns:
            A list of dictionaries with the prepared predictions.
        """
        if iou_type == "bbox":
            return self.prepare_for_coco_detection(predictions)
        else:
            raise ValueError(f"IoU type not supported {iou_type}")

    def _post_process_stats(
        self, stats, coco_eval_object, iou_type="bbox"
    ) -> Dict[str, float]:
        """
        Maps the flat ``COCOeval.stats`` vector to a dictionary keyed by
        human-readable metric names.

        Args:
            stats: The stats array produced by ``COCOeval.summarize()``.
            coco_eval_object: The ``COCOeval`` object the stats came from.
            iou_type: The Intersection over Union (IoU) type for evaluation.

        Returns:
            A dictionary mapping metric names to their values.
        """
        if iou_type not in _SUPPORTED_TYPES:
            raise ValueError(f"iou_type '{iou_type}' not supported")

        current_max_dets = coco_eval_object.params.maxDets
        index_to_title = {
            "bbox": {
                0: f"AP-IoU=0.50:0.95-area=all-maxDets={current_max_dets[2]}",
                1: f"AP-IoU=0.50-area=all-maxDets={current_max_dets[2]}",
                2: f"AP-IoU=0.75-area=all-maxDets={current_max_dets[2]}",
                3: f"AP-IoU=0.50:0.95-area=small-maxDets={current_max_dets[2]}",
                4: f"AP-IoU=0.50:0.95-area=medium-maxDets={current_max_dets[2]}",
                5: f"AP-IoU=0.50:0.95-area=large-maxDets={current_max_dets[2]}",
                6: f"AR-IoU=0.50:0.95-area=all-maxDets={current_max_dets[0]}",
                7: f"AR-IoU=0.50:0.95-area=all-maxDets={current_max_dets[1]}",
                8: f"AR-IoU=0.50:0.95-area=all-maxDets={current_max_dets[2]}",
                9: f"AR-IoU=0.50:0.95-area=small-maxDets={current_max_dets[2]}",
                10: f"AR-IoU=0.50:0.95-area=medium-maxDets={current_max_dets[2]}",
                11: f"AR-IoU=0.50:0.95-area=large-maxDets={current_max_dets[2]}",
            },
            "keypoints": {
                0: "AP-IoU=0.50:0.95-area=all-maxDets=20",
                1: "AP-IoU=0.50-area=all-maxDets=20",
                2: "AP-IoU=0.75-area=all-maxDets=20",
                3: "AP-IoU=0.50:0.95-area=medium-maxDets=20",
                4: "AP-IoU=0.50:0.95-area=large-maxDets=20",
                5: "AR-IoU=0.50:0.95-area=all-maxDets=20",
                6: "AR-IoU=0.50-area=all-maxDets=20",
                7: "AR-IoU=0.75-area=all-maxDets=20",
                8: "AR-IoU=0.50:0.95-area=medium-maxDets=20",
                9: "AR-IoU=0.50:0.95-area=large-maxDets=20",
            },
        }

        output_dict: Dict[str, float] = {}
        for index, stat in enumerate(stats):
            output_dict[index_to_title[iou_type][index]] = stat

        return output_dict

    def get_results(self) -> Dict[str, Dict[str, float]]:
        """
        Gets the results of the COCO evaluation.

        Returns:
            A dictionary with the results of the COCO evaluation.
        """
        output_dict = {}
        for iou_type, coco_eval in self.coco_eval.items():
            if iou_type == "segm":
                iou_type = "bbox"
            output_dict[f"iou_{iou_type}"] = self._post_process_stats(
                coco_eval.stats, coco_eval, iou_type
            )
        return output_dict
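
    # Illustrative sketch (not part of the original file): with the default
    # pycocotools maxDets of [1, 10, 100], `get_results()` returns a nested
    # dict shaped roughly like the following, where each value is the
    # corresponding float from `COCOeval.stats`:
    #
    # {
    #     "iou_bbox": {
    #         "AP-IoU=0.50:0.95-area=all-maxDets=100": <float>,
    #         "AP-IoU=0.50-area=all-maxDets=100": <float>,
    #         ...
    #         "AR-IoU=0.50:0.95-area=large-maxDets=100": <float>,
    #     },
    # }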

    def prepare_for_coco_detection(
        self, predictions: _TYPING_PREDICTIONS
    ) -> List[Dict[str, Union[int, _TYPING_BOX, float]]]:
        """
        Prepares the predictions for COCO detection.

        Args:
            predictions: The predictions to prepare.

        Returns:
            A list of dictionaries with the prepared predictions.
        """
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue

            boxes = prediction["boxes"]
            if len(boxes) == 0:
                continue

            if not isinstance(boxes, torch.Tensor):
                boxes = torch.as_tensor(boxes)
            boxes = boxes.tolist()

            scores = prediction["scores"]
            if not isinstance(scores, list):
                scores = scores.tolist()

            labels = prediction["labels"]
            if not isinstance(labels, list):
                labels = prediction["labels"].tolist()

            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        "bbox": box,
                        "score": scores[k],
                    }
                    for k, box in enumerate(boxes)
                ]
            )
        return coco_results
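

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): the typical call
# sequence for COCOEvaluator. The annotation path, image id, and detection
# values below are hypothetical placeholders; real predictions must use the
# same image and category ids as the ground-truth annotations.
# ---------------------------------------------------------------------------
# coco_gt = COCO("annotations/instances_val2017.json")  # hypothetical path,
#                                                       # assuming this COCO
#                                                       # mirrors pycocotools
# evaluator = COCOEvaluator(coco_gt, iou_types=["bbox"])
#
# predictions = {
#     139: {                                    # image id present in coco_gt
#         "boxes": [[10.0, 20.0, 50.0, 80.0]],  # assumed COCO [x, y, w, h]
#         "scores": [0.9],
#         "labels": [1],
#     },
# }
#
# evaluator.update(predictions)                # feed one batch of predictions
# evaluator.synchronize_between_processes()    # merge results across processes
#                                              # (safe in a single-process run)
# evaluator.accumulate()
# evaluator.summarize()                        # prints the pycocotools table
# results = evaluator.get_results()            # dict of named metrics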