-
Notifications
You must be signed in to change notification settings - Fork 0
/
evaluate.py
47 lines (43 loc) · 1.44 KB
/
evaluate.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
import json
from torchmetrics.detection.mean_ap import MeanAveragePrecision
from tqdm import tqdm
import torch
import sys
# Command-line arguments:
#   argv[1] — path to a predictions JSON: {file_name: {"boxes": ..., "scores": ..., "labels": ...}}
#             (presumably boxes are already xyxy, matching torchmetrics' default — TODO confirm)
#   argv[2] — path to a COCO-format ground-truth JSON ("images", "annotations" keys).
pred_path = sys.argv[1]
target_path = sys.argv[2]
def get_gt_data(imageID, annotations):
    """Collect ground-truth detection data for a single image.

    Filters COCO-style annotation dicts down to those whose ``image_id``
    matches *imageID* and converts each ``[x, y, w, h]`` bbox to
    ``[x1, y1, x2, y2]`` (the format torchmetrics' MeanAveragePrecision
    expects by default).

    Fix vs. original: the original converted boxes by mutating
    ``annotation['bbox']`` in place, silently corrupting the shared
    ground-truth structure passed in by the caller. This version builds a
    new list per box and leaves *annotations* untouched.

    Parameters:
        imageID: COCO image id to select annotations for.
        annotations: iterable of COCO annotation dicts with keys
            'image_id', 'bbox', 'category_id', 'area', 'iscrowd'.

    Returns:
        dict with parallel lists under 'boxes', 'labels', 'image_id',
        'area', 'iscrowd'. All lists are empty when no annotation matches.
    """
    gt_data = {'boxes': [],
               'labels': [],
               'image_id': [],
               'area': [],
               'iscrowd': []}
    for annotation in annotations:
        if annotation["image_id"] != imageID:
            continue
        x, y, w, h = annotation['bbox']
        # xywh -> xyxy, built as a fresh list so the input is not mutated.
        gt_data['boxes'].append([x, y, x + w, y + h])
        gt_data['labels'].append(annotation['category_id'])
        gt_data['image_id'].append(imageID)
        gt_data['area'].append(annotation['area'])
        gt_data['iscrowd'].append(annotation['iscrowd'])
    return gt_data
# Load the per-file predictions and the COCO-format ground truth.
with open(pred_path, 'r') as f:
    preds = json.load(f)
with open(target_path, 'r') as f:
    gt = json.load(f)

# Map each image file name to its COCO image id so predictions (keyed by
# file name) can be matched against annotations (keyed by image id).
fname_to_imageID = {img["file_name"]: img["id"] for img in gt["images"]}

metric = MeanAveragePrecision()
device = 'cuda'

for fname, pred in tqdm(preds.items()):
    imageID = fname_to_imageID[fname]
    gt_entry = get_gt_data(imageID, gt["annotations"])
    # torchmetrics expects lists of per-image dicts of tensors.
    pred = [{key: torch.tensor(val).to(device) for key, val in pred.items()}]
    target = [{key: torch.tensor(val).to(device) for key, val in gt_entry.items()}]
    metric.update(pred, target)
    print(pred)
    print(target)

result = metric.compute()
print(result)