# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import contextlib
import io
import logging
import os

import numpy as np
from fvcore.common.file_io import PathManager
from fvcore.common.timer import Timer

from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode

"""
This file contains functions to parse COCO-format text annotations into dicts in
"Detectron2 format".
"""

logger = logging.getLogger(__name__)

__all__ = ["load_text_json", "register_text_instances"]


def register_text_instances(name, metadata, json_file, image_root, voc_size_cfg, num_pts_cfg):
    """
    Register a dataset in COCO-style json annotation format for text detection and
    recognition.

    Args:
        name (str): a name that identifies the dataset, e.g. "totaltext_train".
        metadata (dict): extra metadata associated with this dataset. It can be an
            empty dict.
        json_file (str): path to the json instance annotation file.
        image_root (str or path-like): directory which contains all the images.
        voc_size_cfg (int): vocabulary size; character indices equal to this value
            mark an unlabeled character in the "rec" field.
        num_pts_cfg (int): number of points to sample along each Bezier curve.
    """
    DatasetCatalog.register(
        name,
        lambda: load_text_json(
            json_file, image_root, name, voc_size_cfg=voc_size_cfg, num_pts_cfg=num_pts_cfg
        ),
    )
    MetadataCatalog.get(name).set(
        json_file=json_file, image_root=image_root, evaluator_type="text", **metadata
    )
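

# Example usage (a minimal sketch; the dataset name, paths, and sizes below are
# hypothetical placeholders, not shipped with this file):
#
#   register_text_instances(
#       "totaltext_train",
#       metadata={},
#       json_file="datasets/totaltext/train.json",
#       image_root="datasets/totaltext/train_images",
#       voc_size_cfg=37,
#       num_pts_cfg=25,
#   )
#   dataset_dicts = DatasetCatalog.get("totaltext_train")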
def load_text_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None,
                   voc_size_cfg=37, num_pts_cfg=25):
    """
    Load a json file with totaltext annotation format.
    Currently supports text detection and recognition.

    Args:
        json_file (str): full path to the json file in totaltext annotation format.
        image_root (str or path-like): the directory where the images in this json
            file exist.
        dataset_name (str): the name of the dataset (e.g., coco_2017_train).
            If provided, this function will also put "thing_classes" into
            the metadata associated with this dataset.
        extra_annotation_keys (list[str]): list of per-annotation keys that should
            also be loaded into the dataset dict (besides "iscrowd", "bbox",
            "keypoints", "category_id", "segmentation"). The values for these keys
            will be returned as-is. For example, the densepose annotations are
            loaded in this way.
        voc_size_cfg (int): vocabulary size; character indices equal to this value
            mark an unlabeled character in the "rec" field.
        num_pts_cfg (int): number of points to sample along each Bezier curve.

    Returns:
        list[dict]: a list of dicts in Detectron2 standard dataset dicts format
        (See `Using Custom Datasets </tutorials/datasets.html>`_ )

    Notes:
        1. This function does not read the image files.
           The results do not have the "image" field.
    """
    from pycocotools.coco import COCO

    timer = Timer()
    json_file = PathManager.get_local_path(json_file)
    with contextlib.redirect_stdout(io.StringIO()):
        coco_api = COCO(json_file)
    if timer.seconds() > 1:
        logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))

    id_map = None
    if dataset_name is not None:
        meta = MetadataCatalog.get(dataset_name)
        cat_ids = sorted(coco_api.getCatIds())
        cats = coco_api.loadCats(cat_ids)
        # The categories in a custom json file may not be sorted.
        thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])]
        meta.thing_classes = thing_classes

        # In COCO, certain category ids are artificially removed,
        # and by convention they are always ignored.
        # We deal with COCO's id issue and translate
        # the category ids to contiguous ids in [0, 80).

        # It works by looking at the "categories" field in the json, therefore
        # if users' own json also has non-contiguous ids, we'll
        # apply this mapping as well but print a warning.
        if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):
            if "coco" not in dataset_name:
                logger.warning(
                    """
Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.
"""
                )
        id_map = {v: i for i, v in enumerate(cat_ids)}
        meta.thing_dataset_id_to_contiguous_id = id_map

    # sort indices for reproducible results
    img_ids = sorted(coco_api.imgs.keys())
    # imgs is a list of dicts, each looks something like:
    # {'license': 4,
    #  'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
    #  'file_name': 'COCO_val2014_000000001268.jpg',
    #  'height': 427,
    #  'width': 640,
    #  'date_captured': '2013-11-17 05:57:24',
    #  'id': 1268}
    imgs = coco_api.loadImgs(img_ids)
    # anns is a list[list[dict]], where each dict is an annotation
    # record for an object. The inner list enumerates the objects in an image
    # and the outer list enumerates over images. Example of anns[0]:
    # [{'segmentation': [[192.81,
    #     247.09,
    #     ...
    #     219.03,
    #     249.06]],
    #   'area': 1035.749,
    #   'rec': [84, 72, ... 96],
    #   'bezier_pts': [169.0, 425.0, ..., ]
    #   'iscrowd': 0,
    #   'image_id': 1268,
    #   'bbox': [192.81, 224.8, 74.73, 33.43],
    #   'category_id': 16,
    #   'id': 42986},
    #  ...]
    anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]

    if "minival" not in json_file:
        # The popular valminusminival & minival annotations for COCO2014 contain this bug.
        # However the ratio of buggy annotations there is tiny and does not affect accuracy.
        # Therefore we explicitly white-list them.
        ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
        assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format(
            json_file
        )

    imgs_anns = list(zip(imgs, anns))

    logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file))

    dataset_dicts = []

    ann_keys = ["iscrowd", "category_id"] + (extra_annotation_keys or [])

    num_instances_without_valid_segmentation = 0

    for (img_dict, anno_dict_list) in imgs_anns:
        record = {}
        record["file_name"] = os.path.join(image_root, img_dict["file_name"])
        record["height"] = img_dict["height"]
        record["width"] = img_dict["width"]
        image_id = record["image_id"] = img_dict["id"]

        objs = []
        for anno in anno_dict_list:
            # Check that the image_id in this annotation is the same as
            # the image_id we're looking at.
            # This fails only when the data parsing logic or the annotation file is buggy.

            # The original COCO valminusminival2014 & minival2014 annotation files
            # actually contain bugs that, together with certain ways of using COCO API,
            # can trigger this assertion.
            assert anno["image_id"] == image_id

            assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.'

            obj = {key: anno[key] for key in ann_keys if key in anno}

            segm = anno.get("segmentation", None)
            if segm:  # either list[list[float]] or dict(RLE)
                if not isinstance(segm, dict):
                    # filter out invalid polygons (< 3 points)
                    segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
                    if len(segm) == 0:
                        num_instances_without_valid_segmentation += 1
                        continue  # ignore this instance
                obj["segmentation"] = segm

            bbox = anno.get("bbox", None)
            if bbox:
                obj["bbox"] = bbox
                obj["bbox_mode"] = BoxMode.XYWH_ABS
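            # The next block converts the 8 Bezier control points stored in
            # "bezier_pts" (4 for the top curve, 4 for the bottom curve, each an
            # (x, y) pair) into dense boundary/centerline samples. Each cubic
            # Bezier curve is evaluated at num_pts_cfg parameter values u in [0, 1]:
            #
            #   B(u) = (1-u)^3 * P0 + 3u(1-u)^2 * P1 + 3u^2(1-u) * P2 + u^3 * P3
            #
            # The np.outer calls below evaluate this for all u at once; the four
            # columns of `boundary` hold (x_top, y_top, x_bottom, y_bottom).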
            bezierpts = anno.get("bezier_pts", None)
            if bezierpts:
                bezierpts = np.array(bezierpts).reshape(-1, 2)
                # Average the top control points with the reversed bottom control
                # points to get the control points of the center curve.
                center_bezierpts = (bezierpts[:4] + bezierpts[4:][::-1, :]) / 2
                obj["beziers"] = center_bezierpts
                # Rearrange into 4 rows of control-point coordinates
                # (x_top, y_top, x_bottom, y_bottom), 4 control points per row.
                bezierpts = bezierpts.reshape(2, 4, 2).transpose(0, 2, 1).reshape(4, 4)
                u = np.linspace(0, 1, num_pts_cfg)
                boundary = np.outer((1 - u) ** 3, bezierpts[:, 0]) \
                    + np.outer(3 * u * ((1 - u) ** 2), bezierpts[:, 1]) \
                    + np.outer(3 * (u ** 2) * (1 - u), bezierpts[:, 2]) \
                    + np.outer(u ** 3, bezierpts[:, 3])
                # Combine the top-curve samples with the reversed bottom-curve
                # samples into a (2 * num_pts_cfg, 2) boundary point array.
                obj["boundary"] = np.hstack(
                    [boundary[:, :2], boundary[:, 2:][::-1, :]]
                ).reshape(-1, 2)
                # The centerline is the pointwise average of the two curves.
                obj["polyline"] = (boundary[:, :2] + boundary[:, 2:][::-1, :]) / 2

            text = anno.get("rec", None)
            if text:
                # Filter out instances whose transcription consists entirely of
                # the unlabeled-character index (i.e., carries no text content).
                text_check = np.array(text)
                text_check = np.sum(text_check != voc_size_cfg)
                if text_check == 0:
                    continue
                obj["text"] = text

            if id_map:
                obj["category_id"] = id_map[obj["category_id"]]
            objs.append(obj)

        if not objs:
            # Keep images without annotations only for test/val splits; skip
            # them otherwise. dataset_name may be None, so guard against that.
            if dataset_name is None or (
                "test" not in dataset_name and "val" not in dataset_name
            ):
                continue
        record["annotations"] = objs
        dataset_dicts.append(record)

    if num_instances_without_valid_segmentation > 0:
        logger.warning(
            "Filtered out {} instances without valid segmentation. "
            "There might be issues in your dataset generation process.".format(
                num_instances_without_valid_segmentation
            )
        )
    return dataset_dicts
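

if __name__ == "__main__":
    # A minimal smoke test of the Bezier sampling performed in load_text_json,
    # using toy control points (hypothetical values, not from any real
    # annotation). It verifies that 8 control points expand into num_pts
    # samples per side curve, with columns (x_top, y_top, x_bottom, y_bottom).
    num_pts = 25
    toy_bezier = np.arange(16, dtype=np.float64).reshape(-1, 2)  # 8 (x, y) control points
    ctrl = toy_bezier.reshape(2, 4, 2).transpose(0, 2, 1).reshape(4, 4)
    u = np.linspace(0, 1, num_pts)
    boundary = (
        np.outer((1 - u) ** 3, ctrl[:, 0])
        + np.outer(3 * u * ((1 - u) ** 2), ctrl[:, 1])
        + np.outer(3 * (u ** 2) * (1 - u), ctrl[:, 2])
        + np.outer(u ** 3, ctrl[:, 3])
    )
    assert boundary.shape == (num_pts, 4)
    print("Bezier sampling sanity check passed:", boundary.shape)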