Commit b634945d authored by limm

support v0.6

parent 5b3792fc
_BASE_: "../Base-RCNN-C4.yaml"
MODEL:
WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
ROI_HEADS:
BATCH_SIZE_PER_IMAGE: 256
MASK_ON: True
DATASETS:
TRAIN: ("coco_2017_val",)
TEST: ("coco_2017_val",)
INPUT:
MIN_SIZE_TRAIN: (600,)
MAX_SIZE_TRAIN: 1000
MIN_SIZE_TEST: 800
MAX_SIZE_TEST: 1000
SOLVER:
IMS_PER_BATCH: 8 # base uses 16
WARMUP_FACTOR: 0.33333
WARMUP_ITERS: 100
STEPS: (11000, 11600)
MAX_ITER: 12000
TEST:
EXPECTED_RESULTS: [["bbox", "AP", 41.88, 0.7], ["segm", "AP", 33.79, 0.5]]
_BASE_: "../COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml"
MODEL:
WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x/137849551/model_final_84107b.pkl"
DATASETS:
TEST: ("coco_2017_val_100",)
TEST:
EXPECTED_RESULTS: [["bbox", "AP", 47.44, 0.02], ["segm", "AP", 42.94, 0.02]]
_BASE_: "../COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
MODEL:
WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl"
DATASETS:
TEST: ("coco_2017_val_100",)
TEST:
EXPECTED_RESULTS: [["bbox", "AP", 47.34, 0.02], ["segm", "AP", 42.67, 0.02], ["bbox_TTA", "AP", 49.11, 0.02], ["segm_TTA", "AP", 45.04, 0.02]]
AUG:
ENABLED: True
MIN_SIZES: (700, 800) # to save some time
_BASE_: "../Base-RCNN-FPN.yaml"
MODEL:
WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
MASK_ON: True
DATASETS:
TRAIN: ("coco_2017_val_100",)
TEST: ("coco_2017_val_100",)
SOLVER:
BASE_LR: 0.005
STEPS: (30,)
MAX_ITER: 40
IMS_PER_BATCH: 4
DATALOADER:
NUM_WORKERS: 2
_BASE_: "./mask_rcnn_R_50_FPN_training_acc_test.yaml"
MODEL:
ROI_BOX_HEAD:
TRAIN_ON_PRED_BOXES: True
TEST:
EXPECTED_RESULTS: [["bbox", "AP", 42.6, 1.0], ["segm", "AP", 35.8, 0.8]]
_BASE_: "../Base-RCNN-FPN.yaml"
MODEL:
WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
ROI_HEADS:
BATCH_SIZE_PER_IMAGE: 256
MASK_ON: True
DATASETS:
TRAIN: ("coco_2017_val",)
TEST: ("coco_2017_val",)
INPUT:
MIN_SIZE_TRAIN: (600,)
MAX_SIZE_TRAIN: 1000
MIN_SIZE_TEST: 800
MAX_SIZE_TEST: 1000
SOLVER:
WARMUP_FACTOR: 0.3333333
WARMUP_ITERS: 100
STEPS: (5500, 5800)
MAX_ITER: 6000
TEST:
EXPECTED_RESULTS: [["bbox", "AP", 42.5, 1.0], ["segm", "AP", 35.8, 0.8]]
_BASE_: "../COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml"
MODEL:
WEIGHTS: "detectron2://COCO-PanopticSegmentation/panoptic_fpn_R_50_3x/139514569/model_final_c10459.pkl"
DATASETS:
TEST: ("coco_2017_val_100_panoptic_separated",)
TEST:
EXPECTED_RESULTS: [["bbox", "AP", 46.47, 0.02], ["segm", "AP", 43.39, 0.02], ["sem_seg", "mIoU", 42.55, 0.02], ["panoptic_seg", "PQ", 38.99, 0.02]]
_BASE_: "../Base-RCNN-FPN.yaml"
MODEL:
META_ARCHITECTURE: "PanopticFPN"
WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
MASK_ON: True
RESNETS:
DEPTH: 50
SEM_SEG_HEAD:
LOSS_WEIGHT: 0.5
DATASETS:
TRAIN: ("coco_2017_val_100_panoptic_separated",)
TEST: ("coco_2017_val_100_panoptic_separated",)
SOLVER:
BASE_LR: 0.005
STEPS: (30,)
MAX_ITER: 40
IMS_PER_BATCH: 4
DATALOADER:
NUM_WORKERS: 1
_BASE_: "../Base-RCNN-FPN.yaml"
MODEL:
META_ARCHITECTURE: "PanopticFPN"
WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
MASK_ON: True
RESNETS:
DEPTH: 50
SEM_SEG_HEAD:
LOSS_WEIGHT: 0.5
DATASETS:
TRAIN: ("coco_2017_val_panoptic_separated",)
TEST: ("coco_2017_val_panoptic_separated",)
SOLVER:
BASE_LR: 0.01
WARMUP_FACTOR: 0.001
WARMUP_ITERS: 500
STEPS: (5500,)
MAX_ITER: 7000
TEST:
EXPECTED_RESULTS: [["bbox", "AP", 46.70, 1.1], ["segm", "AP", 39.0, 0.7], ["sem_seg", "mIoU", 64.73, 1.3], ["panoptic_seg", "PQ", 48.13, 0.8]]
_BASE_: "../COCO-Detection/retinanet_R_50_FPN_3x.yaml"
MODEL:
WEIGHTS: "detectron2://COCO-Detection/retinanet_R_50_FPN_3x/190397829/model_final_5bd44e.pkl"
DATASETS:
TEST: ("coco_2017_val_100",)
TEST:
EXPECTED_RESULTS: [["bbox", "AP", 44.45, 0.02]]
_BASE_: "../COCO-Detection/retinanet_R_50_FPN_1x.yaml"
MODEL:
WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
DATASETS:
TRAIN: ("coco_2017_val_100",)
TEST: ("coco_2017_val_100",)
SOLVER:
BASE_LR: 0.005
STEPS: (30,)
MAX_ITER: 40
IMS_PER_BATCH: 4
DATALOADER:
NUM_WORKERS: 2
_BASE_: "../COCO-Detection/rpn_R_50_FPN_1x.yaml"
MODEL:
WEIGHTS: "detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/model_final_02ce48.pkl"
DATASETS:
TEST: ("coco_2017_val_100",)
TEST:
EXPECTED_RESULTS: [["box_proposals", "AR@1000", 58.16, 0.02]]
_BASE_: "../COCO-Detection/rpn_R_50_FPN_1x.yaml"
MODEL:
WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
DATASETS:
TRAIN: ("coco_2017_val_100",)
TEST: ("coco_2017_val_100",)
SOLVER:
STEPS: (30,)
MAX_ITER: 40
BASE_LR: 0.005
IMS_PER_BATCH: 4
DATALOADER:
NUM_WORKERS: 2
_BASE_: "../Base-RCNN-FPN.yaml"
MODEL:
META_ARCHITECTURE: "SemanticSegmentor"
WEIGHTS: "detectron2://semantic_R_50_FPN_1x/111802073/model_final_c18079783c55a94968edc28b7101c5f0.pkl"
RESNETS:
DEPTH: 50
DATASETS:
TEST: ("coco_2017_val_100_panoptic_stuffonly",)
TEST:
EXPECTED_RESULTS: [["sem_seg", "mIoU", 39.53, 0.02], ["sem_seg", "mACC", 51.50, 0.02]]
_BASE_: "../Base-RCNN-FPN.yaml"
MODEL:
META_ARCHITECTURE: "SemanticSegmentor"
WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
RESNETS:
DEPTH: 50
DATASETS:
TRAIN: ("coco_2017_val_100_panoptic_stuffonly",)
TEST: ("coco_2017_val_100_panoptic_stuffonly",)
INPUT:
MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
SOLVER:
BASE_LR: 0.005
STEPS: (30,)
MAX_ITER: 40
IMS_PER_BATCH: 4
DATALOADER:
NUM_WORKERS: 2
_BASE_: "../Base-RCNN-FPN.yaml"
MODEL:
META_ARCHITECTURE: "SemanticSegmentor"
WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
RESNETS:
DEPTH: 50
DATASETS:
TRAIN: ("coco_2017_val_panoptic_stuffonly",)
TEST: ("coco_2017_val_panoptic_stuffonly",)
SOLVER:
BASE_LR: 0.01
WARMUP_FACTOR: 0.001
WARMUP_ITERS: 300
STEPS: (5500,)
MAX_ITER: 7000
TEST:
EXPECTED_RESULTS: [["sem_seg", "mIoU", 76.51, 1.0], ["sem_seg", "mACC", 83.25, 1.0]]
INPUT:
# no scale augmentation
MIN_SIZE_TRAIN: (800, )
# Use Builtin Datasets
A dataset can be used by accessing [DatasetCatalog](https://detectron2.readthedocs.io/modules/data.html#detectron2.data.DatasetCatalog)
for its data, or [MetadataCatalog](https://detectron2.readthedocs.io/modules/data.html#detectron2.data.MetadataCatalog) for its metadata (class names, etc).
This document explains how to set up the builtin datasets so they can be used by the above APIs.
[Use Custom Datasets](https://detectron2.readthedocs.io/tutorials/datasets.html) gives a deeper dive on how to use `DatasetCatalog` and `MetadataCatalog`,
and how to add new datasets to them.
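For example, a minimal sketch of both catalogs (assuming the COCO files described below are in place, so the builtin `coco_2017_val` registration resolves):

```
from detectron2.data import DatasetCatalog, MetadataCatalog

# Returns a list[dict], one dict per image, in detectron2's standard format.
dataset_dicts = DatasetCatalog.get("coco_2017_val")

# Metadata is shared per dataset: class names, id mappings, etc.
metadata = MetadataCatalog.get("coco_2017_val")
print(metadata.thing_classes[:5])
```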
Detectron2 has builtin support for a few datasets.
The datasets are assumed to exist in a directory specified by the environment variable
`DETECTRON2_DATASETS`.
Under this directory, detectron2 will look for datasets in the structure described below, if needed.
```
$DETECTRON2_DATASETS/
  coco/
  lvis/
  cityscapes/
  VOC20{07,12}/
```
You can set the location for builtin datasets by `export DETECTRON2_DATASETS=/path/to/datasets`.
If left unset, the default is `./datasets` relative to your current working directory.
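Because the builtin registrations read this variable when `detectron2.data` is first imported, a script can also set it programmatically, as long as it does so before that import. A sketch (one way among several, under that import-order assumption):

```
import os

# Must run before detectron2.data is imported: the builtin registrations
# read DETECTRON2_DATASETS at import time.
os.environ["DETECTRON2_DATASETS"] = "/path/to/datasets"

from detectron2.data import DatasetCatalog  # noqa: E402

print("coco_2017_val" in DatasetCatalog.list())
```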
The [model zoo](https://github.com/facebookresearch/detectron2/blob/master/MODEL_ZOO.md)
contains configs and models that use these builtin datasets.
## Expected dataset structure for [COCO instance/keypoint detection](https://cocodataset.org/#download):
```
coco/
  annotations/
    instances_{train,val}2017.json
    person_keypoints_{train,val}2017.json
  {train,val}2017/
    # image files that are mentioned in the corresponding json
```
You can use the 2014 version of the dataset as well.
Some of the builtin tests (`dev/run_*_tests.sh`) use a tiny version of the COCO dataset,
which you can download with `./datasets/prepare_for_tests.sh`.
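As a sanity check that the files landed in the right place, the builtin registrations point at exactly the paths above; a small sketch (`json_file` and `image_root` are the metadata attributes detectron2 sets for its builtin COCO datasets):

```
from detectron2.data import MetadataCatalog

meta = MetadataCatalog.get("coco_2017_val")
# Both paths are resolved relative to $DETECTRON2_DATASETS.
print(meta.json_file)   # .../coco/annotations/instances_val2017.json
print(meta.image_root)  # .../coco/val2017
```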
## Expected dataset structure for PanopticFPN:
Extract panoptic annotations from [COCO website](https://cocodataset.org/#download)
into the following structure:
```
coco/
  annotations/
    panoptic_{train,val}2017.json
    panoptic_{train,val}2017/  # png annotations
    panoptic_stuff_{train,val}2017/  # generated by the script mentioned below
```
Install panopticapi by:
```
pip install git+https://github.com/cocodataset/panopticapi.git
```
Then run `python datasets/prepare_panoptic_fpn.py` to extract semantic annotations from the panoptic annotations.
## Expected dataset structure for [LVIS instance segmentation](https://www.lvisdataset.org/dataset):
```
coco/
  {train,val,test}2017/
lvis/
  lvis_v0.5_{train,val}.json
  lvis_v0.5_image_info_test.json
  lvis_v1_{train,val}.json
  lvis_v1_image_info_test{,_challenge}.json
```
Install lvis-api by:
```
pip install git+https://github.com/lvis-dataset/lvis-api.git
```
To evaluate models trained on the COCO dataset using LVIS annotations,
run `python datasets/prepare_cocofied_lvis.py` to prepare "cocofied" LVIS annotations.
## Expected dataset structure for [cityscapes](https://www.cityscapes-dataset.com/downloads/):
```
cityscapes/
  gtFine/
    train/
      aachen/
        color.png, instanceIds.png, labelIds.png, polygons.json,
        labelTrainIds.png
      ...
    val/
    test/
    # below are generated Cityscapes panoptic annotation
    cityscapes_panoptic_train.json
    cityscapes_panoptic_train/
    cityscapes_panoptic_val.json
    cityscapes_panoptic_val/
    cityscapes_panoptic_test.json
    cityscapes_panoptic_test/
  leftImg8bit/
    train/
    val/
    test/
```
Install cityscapes scripts by:
```
pip install git+https://github.com/mcordts/cityscapesScripts.git
```
Note: to create labelTrainIds.png, first prepare the above structure, then run the cityscapes scripts with:
```
CITYSCAPES_DATASET=/path/to/abovementioned/cityscapes python cityscapesscripts/preparation/createTrainIdLabelImgs.py
```
These files are not needed for instance segmentation.
Note: to generate the Cityscapes panoptic dataset, run the cityscapes scripts with:
```
CITYSCAPES_DATASET=/path/to/abovementioned/cityscapes python cityscapesscripts/preparation/createPanopticImgs.py
```
These files are not needed for semantic and instance segmentation.
## Expected dataset structure for [Pascal VOC](http://host.robots.ox.ac.uk/pascal/VOC/index.html):
```
VOC20{07,12}/
  Annotations/
  ImageSets/
    Main/
      trainval.txt
      test.txt
      # train.txt or val.txt, if you use these splits
  JPEGImages/
```
## Expected dataset structure for [ADE20k Scene Parsing](http://sceneparsing.csail.mit.edu/):
```
ADEChallengeData2016/
  annotations/
  annotations_detectron2/
  images/
  objectInfo150.txt
```
The directory `annotations_detectron2` is generated by running `python datasets/prepare_ade20k_sem_seg.py`.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import os
from pathlib import Path
import tqdm
from PIL import Image


def convert(input, output):
    img = np.asarray(Image.open(input))
    assert img.dtype == np.uint8
    img = img - 1  # 0 (ignore) becomes 255. others are shifted by 1
    Image.fromarray(img).save(output)


if __name__ == "__main__":
    dataset_dir = Path(os.getenv("DETECTRON2_DATASETS", "datasets")) / "ADEChallengeData2016"
    for name in ["training", "validation"]:
        annotation_dir = dataset_dir / "annotations" / name
        output_dir = dataset_dir / "annotations_detectron2" / name
        output_dir.mkdir(parents=True, exist_ok=True)
        for file in tqdm.tqdm(list(annotation_dir.iterdir())):
            output_file = output_dir / file.name
            convert(file, output_file)
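The `img - 1` step above relies on unsigned 8-bit wraparound; a quick standalone check of the behavior it depends on (255 is detectron2's default `SEM_SEG_HEAD.IGNORE_VALUE`):

```
import numpy as np

# uint8 arithmetic wraps modulo 256: label 0 ("ignore" in ADE20k's raw
# annotations) becomes 255, while real labels shift from 1..150 to 0..149.
labels = np.array([0, 1, 150], dtype=np.uint8)
print(labels - 1)  # -> [255   0 149]
```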
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import json
import os
from collections import defaultdict
# This mapping is extracted from the official LVIS mapping:
# https://github.com/lvis-dataset/lvis-api/blob/master/data/coco_to_synset.json
COCO_SYNSET_CATEGORIES = [
    {"synset": "person.n.01", "coco_cat_id": 1},
    {"synset": "bicycle.n.01", "coco_cat_id": 2},
    {"synset": "car.n.01", "coco_cat_id": 3},
    {"synset": "motorcycle.n.01", "coco_cat_id": 4},
    {"synset": "airplane.n.01", "coco_cat_id": 5},
    {"synset": "bus.n.01", "coco_cat_id": 6},
    {"synset": "train.n.01", "coco_cat_id": 7},
    {"synset": "truck.n.01", "coco_cat_id": 8},
    {"synset": "boat.n.01", "coco_cat_id": 9},
    {"synset": "traffic_light.n.01", "coco_cat_id": 10},
    {"synset": "fireplug.n.01", "coco_cat_id": 11},
    {"synset": "stop_sign.n.01", "coco_cat_id": 13},
    {"synset": "parking_meter.n.01", "coco_cat_id": 14},
    {"synset": "bench.n.01", "coco_cat_id": 15},
    {"synset": "bird.n.01", "coco_cat_id": 16},
    {"synset": "cat.n.01", "coco_cat_id": 17},
    {"synset": "dog.n.01", "coco_cat_id": 18},
    {"synset": "horse.n.01", "coco_cat_id": 19},
    {"synset": "sheep.n.01", "coco_cat_id": 20},
    {"synset": "beef.n.01", "coco_cat_id": 21},
    {"synset": "elephant.n.01", "coco_cat_id": 22},
    {"synset": "bear.n.01", "coco_cat_id": 23},
    {"synset": "zebra.n.01", "coco_cat_id": 24},
    {"synset": "giraffe.n.01", "coco_cat_id": 25},
    {"synset": "backpack.n.01", "coco_cat_id": 27},
    {"synset": "umbrella.n.01", "coco_cat_id": 28},
    {"synset": "bag.n.04", "coco_cat_id": 31},
    {"synset": "necktie.n.01", "coco_cat_id": 32},
    {"synset": "bag.n.06", "coco_cat_id": 33},
    {"synset": "frisbee.n.01", "coco_cat_id": 34},
    {"synset": "ski.n.01", "coco_cat_id": 35},
    {"synset": "snowboard.n.01", "coco_cat_id": 36},
    {"synset": "ball.n.06", "coco_cat_id": 37},
    {"synset": "kite.n.03", "coco_cat_id": 38},
    {"synset": "baseball_bat.n.01", "coco_cat_id": 39},
    {"synset": "baseball_glove.n.01", "coco_cat_id": 40},
    {"synset": "skateboard.n.01", "coco_cat_id": 41},
    {"synset": "surfboard.n.01", "coco_cat_id": 42},
    {"synset": "tennis_racket.n.01", "coco_cat_id": 43},
    {"synset": "bottle.n.01", "coco_cat_id": 44},
    {"synset": "wineglass.n.01", "coco_cat_id": 46},
    {"synset": "cup.n.01", "coco_cat_id": 47},
    {"synset": "fork.n.01", "coco_cat_id": 48},
    {"synset": "knife.n.01", "coco_cat_id": 49},
    {"synset": "spoon.n.01", "coco_cat_id": 50},
    {"synset": "bowl.n.03", "coco_cat_id": 51},
    {"synset": "banana.n.02", "coco_cat_id": 52},
    {"synset": "apple.n.01", "coco_cat_id": 53},
    {"synset": "sandwich.n.01", "coco_cat_id": 54},
    {"synset": "orange.n.01", "coco_cat_id": 55},
    {"synset": "broccoli.n.01", "coco_cat_id": 56},
    {"synset": "carrot.n.01", "coco_cat_id": 57},
    {"synset": "frank.n.02", "coco_cat_id": 58},
    {"synset": "pizza.n.01", "coco_cat_id": 59},
    {"synset": "doughnut.n.02", "coco_cat_id": 60},
    {"synset": "cake.n.03", "coco_cat_id": 61},
    {"synset": "chair.n.01", "coco_cat_id": 62},
    {"synset": "sofa.n.01", "coco_cat_id": 63},
    {"synset": "pot.n.04", "coco_cat_id": 64},
    {"synset": "bed.n.01", "coco_cat_id": 65},
    {"synset": "dining_table.n.01", "coco_cat_id": 67},
    {"synset": "toilet.n.02", "coco_cat_id": 70},
    {"synset": "television_receiver.n.01", "coco_cat_id": 72},
    {"synset": "laptop.n.01", "coco_cat_id": 73},
    {"synset": "mouse.n.04", "coco_cat_id": 74},
    {"synset": "remote_control.n.01", "coco_cat_id": 75},
    {"synset": "computer_keyboard.n.01", "coco_cat_id": 76},
    {"synset": "cellular_telephone.n.01", "coco_cat_id": 77},
    {"synset": "microwave.n.02", "coco_cat_id": 78},
    {"synset": "oven.n.01", "coco_cat_id": 79},
    {"synset": "toaster.n.02", "coco_cat_id": 80},
    {"synset": "sink.n.01", "coco_cat_id": 81},
    {"synset": "electric_refrigerator.n.01", "coco_cat_id": 82},
    {"synset": "book.n.01", "coco_cat_id": 84},
    {"synset": "clock.n.01", "coco_cat_id": 85},
    {"synset": "vase.n.01", "coco_cat_id": 86},
    {"synset": "scissors.n.01", "coco_cat_id": 87},
    {"synset": "teddy.n.01", "coco_cat_id": 88},
    {"synset": "hand_blower.n.01", "coco_cat_id": 89},
    {"synset": "toothbrush.n.01", "coco_cat_id": 90},
]
def cocofy_lvis(input_filename, output_filename):
    """
    Filter LVIS instance segmentation annotations to remove all categories that are not included in
    COCO. The new json files can be used to evaluate COCO AP using `lvis-api`. The category ids in
    the output json are the non-contiguous COCO dataset ids.

    Args:
        input_filename (str): path to the LVIS json file.
        output_filename (str): path to the COCOfied json file.
    """
    with open(input_filename, "r") as f:
        lvis_json = json.load(f)

    lvis_annos = lvis_json.pop("annotations")
    cocofied_lvis = copy.deepcopy(lvis_json)
    lvis_json["annotations"] = lvis_annos

    # Mapping from lvis cat id to coco cat id via synset
    lvis_cat_id_to_synset = {cat["id"]: cat["synset"] for cat in lvis_json["categories"]}
    synset_to_coco_cat_id = {x["synset"]: x["coco_cat_id"] for x in COCO_SYNSET_CATEGORIES}
    # Synsets that we will keep in the dataset
    synsets_to_keep = set(synset_to_coco_cat_id.keys())
    coco_cat_id_with_instances = defaultdict(int)

    new_annos = []
    ann_id = 1
    for ann in lvis_annos:
        lvis_cat_id = ann["category_id"]
        synset = lvis_cat_id_to_synset[lvis_cat_id]
        if synset not in synsets_to_keep:
            continue
        coco_cat_id = synset_to_coco_cat_id[synset]
        new_ann = copy.deepcopy(ann)
        new_ann["category_id"] = coco_cat_id
        new_ann["id"] = ann_id
        ann_id += 1
        new_annos.append(new_ann)
        coco_cat_id_with_instances[coco_cat_id] += 1
    cocofied_lvis["annotations"] = new_annos

    for image in cocofied_lvis["images"]:
        for key in ["not_exhaustive_category_ids", "neg_category_ids"]:
            new_category_list = []
            for lvis_cat_id in image[key]:
                synset = lvis_cat_id_to_synset[lvis_cat_id]
                if synset not in synsets_to_keep:
                    continue
                coco_cat_id = synset_to_coco_cat_id[synset]
                new_category_list.append(coco_cat_id)
                coco_cat_id_with_instances[coco_cat_id] += 1
            image[key] = new_category_list

    coco_cat_id_with_instances = set(coco_cat_id_with_instances.keys())

    new_categories = []
    for cat in lvis_json["categories"]:
        synset = cat["synset"]
        if synset not in synsets_to_keep:
            continue
        coco_cat_id = synset_to_coco_cat_id[synset]
        if coco_cat_id not in coco_cat_id_with_instances:
            continue
        new_cat = copy.deepcopy(cat)
        new_cat["id"] = coco_cat_id
        new_categories.append(new_cat)
    cocofied_lvis["categories"] = new_categories

    with open(output_filename, "w") as f:
        json.dump(cocofied_lvis, f)
    print("{} is COCOfied and stored in {}.".format(input_filename, output_filename))


if __name__ == "__main__":
    dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "lvis")
    for s in ["lvis_v0.5_train", "lvis_v0.5_val"]:
        print("Start COCOfying {}.".format(s))
        cocofy_lvis(
            os.path.join(dataset_dir, "{}.json".format(s)),
            os.path.join(dataset_dir, "{}_cocofied.json".format(s)),
        )
#!/bin/bash -e
# Copyright (c) Facebook, Inc. and its affiliates.
# Download some files needed for running tests.

cd "${0%/*}"

BASE=https://dl.fbaipublicfiles.com/detectron2
mkdir -p coco/annotations

for anno in instances_val2017_100 \
  person_keypoints_val2017_100 \
  instances_minival2014_100 \
  person_keypoints_minival2014_100; do

  dest=coco/annotations/$anno.json
  [[ -s $dest ]] && {
    echo "$dest exists. Skipping ..."
  } || {
    wget $BASE/annotations/coco/$anno.json -O $dest
  }
done