eval_cider.py (forked from AILab-CVC/SEED)
import argparse
import json

from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap


def coco_results_processor(results, annotation_file):
    # Create the COCO ground-truth object and the result object.
    coco = COCO(annotation_file)
    coco_result = coco.loadRes(results)

    # Create the COCOEvalCap object from the ground truth and the results.
    coco_eval = COCOEvalCap(coco, coco_result)

    # Restrict evaluation to the images present in the result file.
    # Remove this line to evaluate on the full validation set.
    coco_eval.params['image_id'] = coco_result.getImgIds()

    # Run the evaluation. SPICE takes a few minutes the first time,
    # but speeds up on later runs thanks to caching.
    coco_eval.evaluate()

    # Print the evaluation scores (BLEU, METEOR, ROUGE_L, CIDEr, SPICE, ...).
    for metric, score in coco_eval.eval.items():
        print(f"{metric}: {score:.3f}")

    return coco_eval


def metric_engine(result_file, annotation_file):
    # Load the predicted captions and hand them to the evaluator.
    with open(result_file, "r") as f:
        results = json.load(f)
    return coco_results_processor(results, annotation_file)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--result_file", type=str, default=None)
    parser.add_argument("--data_split", type=str, default=None)
    args = parser.parse_args()

    if args.data_split == "test":
        coco_gt_file = "/ssd0/data/coco/annotations/karpathy/dataset_coco_test.json"
    elif args.data_split == "val":
        coco_gt_file = "/ssd0/data/coco/annotations/karpathy/dataset_coco_val.json"
    else:
        print("Invalid data split")
        exit(1)

    metric_engine(args.result_file, coco_gt_file)
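For context, coco.loadRes expects the results to be a JSON list of {"image_id": int, "caption": str} records, one per image. A minimal sketch of producing such a file follows; the image IDs, captions, and output filename are illustrative, not taken from the repo:

# make_dummy_results.py -- hypothetical helper illustrating the result-file
# schema consumed by coco.loadRes() and hence by eval_cider.py.
import json

# Each record pairs a COCO image id with a single predicted caption.
predictions = [
    {"image_id": 397133, "caption": "a man riding a wave on a surfboard"},
    {"image_id": 37777, "caption": "a kitchen with a stove and a sink"},
]

with open("results_val.json", "w") as f:
    json.dump(predictions, f)

# Assuming the Karpathy-split annotation paths hard-coded above exist,
# the evaluation would then be run as:
#   python eval_cider.py --result_file results_val.json --data_split val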