diff --git a/coco_explorer.py b/coco_explorer.py
index fe4e83c..1dc36c6 100644
--- a/coco_explorer.py
+++ b/coco_explorer.py
@@ -1,4 +1,5 @@
 import argparse
+import json
 import os
 import re
 
@@ -12,10 +13,28 @@
 
 
 @st.cache(allow_output_mutation=True)
-def get_inspector(coco_train, coco_predictions, images_path, eval_type, iou_min, iou_max):
-    coco = COCO(coco_train)
-    coco_dt = coco.loadRes(coco_predictions)
-    inspector = CoCoInspector(coco, coco_dt, base_path=images_path,
+def get_inspector(coco_train, coco_predictions, images_path, eval_type,
+                  iou_min, iou_max, filter_categories):
+    coco_gt = COCO(coco_train)
+    if coco_predictions is None:
+        coco_dt = coco_gt
+    else:
+        coco = json.load(open(coco_predictions))
+        if isinstance(coco, dict) and 'annotations' in coco:
+            coco = coco['annotations']
+        coco_dt = coco_gt.loadRes(coco)
+    if filter_categories:
+        filter_catids = [cat['id'] for cat in coco_gt.dataset['categories']
+                         if cat['name'] in filter_categories.split(',')]
+        for ann in coco_gt.anns.values():
+            if ann['category_id'] in filter_catids:
+                coco_gt.dataset['annotations'].remove(ann)
+        coco_gt.createIndex()
+        for ann in coco_dt.anns.values():
+            if ann['category_id'] in filter_catids:
+                coco_dt.dataset['annotations'].remove(ann)
+        coco_dt.createIndex()
+    inspector = CoCoInspector(coco_gt, coco_dt, base_path=images_path,
                               iou_type=eval_type, iou_min=iou_min, iou_max=iou_max)
     inspector.evaluate()
     inspector.calculate_stats()
@@ -33,7 +52,7 @@ def app(args):
         'CoCo scores'
     ])
    inspector = get_inspector(args.coco_train, args.coco_predictions, args.images_path,
-                              args.eval_type, ioumin, ioumax)
+                              args.eval_type, ioumin, ioumax, args.filter_categories)
 
     if topbox == 'inspect predictions visually':
         st.sidebar.subheader('Inspect predictions')
@@ -192,7 +211,7 @@ def app(args):
 parser = argparse.ArgumentParser()
 parser.add_argument("--coco_train", type=str, required=True, metavar="PATH/TO/COCO.json",
                     help="COCO dataset to inspect")
-parser.add_argument("--coco_predictions", type=str, required=True, metavar="PATH/TO/COCO.json",
+parser.add_argument("--coco_predictions", type=str, default=None, metavar="PATH/TO/COCO.json",
                     help="COCO annotations to compare to")
 parser.add_argument("--images_path", type=str, default=os.getcwd(), metavar="PATH/TO/IMAGES/",
                     help="Directory path to prepend to file_name paths in COCO")
@@ -202,6 +221,8 @@ def app(args):
                     help="Initial minimum IoU (overlap) (what constitutes a 'match')")
 parser.add_argument("--iou_max", type=float, default=0.95,
                     help="Initial maximum IoU (overlap) (what constitutes a 'match')")
+parser.add_argument("--filter_categories", type=str, default="", metavar="COMMA-SEPD-LIST",
+                    help="Strip annotations for these categories after loading")
 args = parser.parse_args()
 if args.images_path[-1] != '/':
     args.images_path += '/'
diff --git a/cocoinspector.py b/cocoinspector.py
index ff979c5..1221900 100644
--- a/cocoinspector.py
+++ b/cocoinspector.py
@@ -61,7 +61,7 @@ def calculate_stats(self):
 
         all_anns = self.coco_gt.loadAnns(self.coco_gt.getAnnIds())
         dfannot = pd.DataFrame.from_records(all_anns)[['area', 'category_id', 'bbox']]
-        dfannot['ann_ar'] = dfannot.bbox.apply(lambda x: x[2] / x[3])
+        dfannot['ann_ar'] = dfannot.bbox.apply(lambda x: x[2] / x[3] if x[2] * x[3] else -1)
         dfannot['category_name'] = dfannot.category_id.apply(lambda x: self.coco_gt.cats[x]['name'])
         self.annot_df = dfannot
 
@@ -151,21 +151,20 @@ def get_detection_matches(self, image_id):
             dtmatches = []
         return list(set(gtmatches)), list(set(dtmatches))
 
-    def organize_annotations(self, all_annotations, gtmatches, dtmatches):
+    def organize_annotations(self, annotations, gtmatches, dtmatches, is_gt=True):
         collect = []
-        for a in all_annotations:
+        for a in annotations:
             a['label'] = self.coco_gt.cats[a['category_id']]['name']
-            if 'score' not in a:
+            if is_gt:
                 if a['id'] in dtmatches:
                     a['type'] = 'gt'
                 else:
                     a['type'] = 'fn'
-                collect.append(a)
-                continue
-            if a['id'] in gtmatches:
-                a['type'] = 'tp'
             else:
-                a['type'] = 'fp'
+                if a['id'] in gtmatches:
+                    a['type'] = 'tp'
+                else:
+                    a['type'] = 'fp'
             collect.append(a)
         return collect
 
@@ -186,8 +185,8 @@ def visualize_image(self, image_id,
         dt_annotations = self._get_detections(self.coco_dt, image_id,
                                               cat_ids=[self.cat2id[cat] for cat in only_categories or []])
         gtmatches, dtmatches = self.get_detection_matches(image_id)
-        annotations = annotations + dt_annotations
-        annotations = self.organize_annotations(annotations, gtmatches, dtmatches)
+        annotations = (self.organize_annotations(annotations, gtmatches, dtmatches, True) +
+                       self.organize_annotations(dt_annotations, gtmatches, dtmatches, False))
 
         image = Image.open(self._imageid2path(image_id))
         # cannot work with 16/32 bit or float images due to Pillow#3011 Pillow#3159 Pillow#3838
diff --git a/pycoco.py b/pycoco.py
index a86ac2b..a55c2de 100644
--- a/pycoco.py
+++ b/pycoco.py
@@ -170,7 +170,7 @@ def computeIoU(self, imgId, catId):
             dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
         if len(gt) == 0 and len(dt) == 0:
             return []
-        inds = np.argsort([-d['score'] for d in dt], kind='mergesort')
+        inds = np.argsort([-d.get('score', 0) for d in dt], kind='mergesort')
         dt = [dt[i] for i in inds]
         if len(dt) > p.maxDets[-1]:
             dt = dt[0:p.maxDets[-1]]
@@ -194,7 +194,7 @@ def computeOks(self, imgId, catId):
         # dimention here should be Nxm
         gts = self._gts[imgId, catId]
         dts = self._dts[imgId, catId]
-        inds = np.argsort([-d['score'] for d in dts], kind='mergesort')
+        inds = np.argsort([-d.get('score', 0) for d in dts], kind='mergesort')
         dts = [dts[i] for i in inds]
         if len(dts) > p.maxDets[-1]:
             dts = dts[0:p.maxDets[-1]]
@@ -261,7 +261,7 @@ def evaluateImg(self, imgId, catId, aRng, maxDet):
         # sort dt highest score first, sort gt ignore last
         gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')
         gt = [gt[i] for i in gtind]
-        dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')
+        dtind = np.argsort([-d.get('score', 0) for d in dt], kind='mergesort')
         dt = [dt[i] for i in dtind[0:maxDet]]
         iscrowd = [int(o['iscrowd']) for o in gt]
         # load computed ious
@@ -312,7 +312,7 @@
             'gtIds': [g['id'] for g in gt],
             'dtMatches': dtm,
             'gtMatches': gtm,
-            'dtScores': [d['score'] for d in dt],
+            'dtScores': [d.get('score', 0) for d in dt],
             'gtIgnore': gtIg,
             'dtIgnore': dtIg,
         }
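
Usage note: with `--coco_predictions` now optional, the explorer can be launched against a ground-truth file alone, in which case the GT is loaded as its own "result" set (hence the score-less code paths patched in `pycoco.py`). An invocation along the lines of `streamlit run coco_explorer.py -- --coco_train instances_val2017.json --images_path val2017/ --filter_categories person,car` should exercise both new flags; the file and category names here are placeholders.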
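
For reviewers, a small self-contained check of the contract that the reworked `organize_annotations` makes explicit. The stub class and the IDs below are invented for illustration; note the naming quirk inherited from COCOeval: `gtmatches` carries *detection* ids that found a ground truth, while `dtmatches` carries *ground-truth* ids that were found.

```python
from cocoinspector import CoCoInspector

# Minimal stand-in for an inspector: organize_annotations only touches
# self.coco_gt.cats, so a stub exposing one category is enough.
class _Stub:
    coco_gt = type('C', (), {'cats': {1: {'name': 'cat'}}})()

gts = [{'id': 10, 'category_id': 1}, {'id': 11, 'category_id': 1}]
dts = [{'id': 20, 'category_id': 1, 'score': 0.9}]

# Detection 20 matched ground truth 10; ground truth 11 went unmatched.
gtmatches, dtmatches = [20], [10]

gt_out = CoCoInspector.organize_annotations(_Stub(), gts, gtmatches, dtmatches, is_gt=True)
dt_out = CoCoInspector.organize_annotations(_Stub(), dts, gtmatches, dtmatches, is_gt=False)
print([a['type'] for a in gt_out])  # ['gt', 'fn'] -> matched GT, then missed GT
print([a['type'] for a in dt_out])  # ['tp']       -> the detection found a match
```

Passing an explicit `is_gt` flag also fixes the old `'score' not in a` heuristic, which silently misclassified score-less prediction files.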
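
And a quick illustration, on synthetic dicts, of why the `pycoco.py` hunks switch to `d.get('score', 0)`: in GT-only mode the "detections" are plain ground-truth annotations with no `score` key, so the old `d['score']` raised `KeyError` inside these argsorts. The stable mergesort keeps score-less entries in their input order:

```python
import numpy as np

gt_style = [{'id': 1}, {'id': 2}]                              # no 'score' key
dt_style = [{'id': 1, 'score': 0.4}, {'id': 2, 'score': 0.9}]

for dets in (dt_style, gt_style):
    # [-d['score'] for d in gt_style] would raise KeyError here
    inds = np.argsort([-d.get('score', 0) for d in dets], kind='mergesort')
    print([dets[i]['id'] for i in inds])
# -> [2, 1] (highest score first), then [1, 2] (input order preserved)
```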