Commit fb3ba095 authored by Yanghan Wang, committed by Facebook GitHub Bot
Browse files

reduce memory usage and speed up TestToolsExporter

Reviewed By: zhanghang1989

Differential Revision: D27783989

fbshipit-source-id: f05c11e396a2f62366721b365929b29f05d5bc02
parent c4f0fbe6
......@@ -83,7 +83,7 @@ def create_toy_dataset(
@contextlib.contextmanager
def register_toy_dataset(
def _register_toy_dataset(
dataset_name, image_generator, num_images, num_classes=-1, num_keypoints=0
):
json_dataset, meta_data = create_toy_dataset(
......@@ -112,6 +112,26 @@ def register_toy_dataset(
MetadataCatalog.remove(dataset_name)
@contextlib.contextmanager
def register_toy_coco_dataset(
    dataset_name, num_images=3, image_size=(5, 10), num_classes=-1, num_keypoints=0
):
    """Register a toy COCO-style dataset for the duration of the context.

    Creates a temporary directory with generated local images, wraps them in a
    LocalImageGenerator, and delegates registration/cleanup to
    ``_register_toy_dataset``.

    Args:
        dataset_name: name to register in the dataset/metadata catalogs.
        num_images: number of toy images to generate.
        image_size: (width, height) of every generated image.
        num_classes: forwarded to ``_register_toy_dataset`` (-1 means default).
        num_keypoints: forwarded to ``_register_toy_dataset``.

    Yields:
        None; the dataset stays registered while the context is active.
    """
    img_width, img_height = image_size
    with make_temp_directory("detectron2go_tmp_dataset") as tmp_dir:
        img_dir = os.path.join(tmp_dir, "images")
        os.makedirs(img_dir)
        generator = LocalImageGenerator(img_dir, width=img_width, height=img_height)
        with _register_toy_dataset(
            dataset_name,
            generator,
            num_images=num_images,
            num_classes=num_classes,
            num_keypoints=num_keypoints,
        ):
            yield
def create_local_dataset(
out_dir,
num_images,
......@@ -170,34 +190,24 @@ class LocalImageGenerator:
@contextlib.contextmanager
def create_fake_detection_data_loader(height, width, is_train):
    """Context manager yielding a detection data loader over a toy dataset.

    NOTE: the span previously contained two concatenated bodies for this
    function (the pre-refactor version that built its own temp directory and
    LocalImageGenerator, and the post-refactor version that delegates to
    ``register_toy_coco_dataset``); only the current version is kept.

    Args:
        height: image height used to derive the resize min/max config.
        width: image width used to derive the resize min/max config.
        is_train: when True yield a (infinite) train loader over
            "default_dataset_train"; otherwise a test loader over
            "default_dataset_test".

    Yields:
        A detectron2 train or test data loader built by the GeneralizedRCNN
        runner; the backing toy dataset is unregistered on exit.
    """
    runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
    cfg = runner.get_default_cfg()
    cfg.DATASETS.TRAIN = ["default_dataset_train"]
    cfg.DATASETS.TEST = ["default_dataset_test"]

    # Pin resize transforms so loaded images keep their generated size.
    min_size = min(width, height)
    max_size = max(width, height)
    cfg.INPUT.MIN_SIZE_TRAIN = (min_size,)
    cfg.INPUT.MAX_SIZE_TRAIN = max_size
    cfg.INPUT.MIN_SIZE_TEST = min_size
    cfg.INPUT.MAX_SIZE_TEST = max_size

    if is_train:
        with register_toy_coco_dataset("default_dataset_train", num_images=3):
            train_loader = runner.build_detection_train_loader(cfg)
            yield train_loader
    else:
        with register_toy_coco_dataset("default_dataset_test", num_images=3):
            test_loader = runner.build_detection_test_loader(
                cfg, dataset_name="default_dataset_test"
            )
            yield test_loader
......@@ -184,33 +184,55 @@ def _validate_outputs(inputs, outputs):
# TODO: figure out how to validate outputs
def get_quick_test_config_opts(
    fixed_single_proposals=True,
    small_pooler_resolution=True,
    small_resize_resolution=True,
):
    """Return config overrides (as a flat key/value opts list of strings)
    that make RCNN tests fast and memory-light.

    NOTE: the span previously contained two definitions of this function (the
    old zero-argument version was dead code shadowed by this generalized,
    backward-compatible one); only the current definition is kept.

    Args:
        fixed_single_proposals: force exactly one proposal/detection to
            survive NMS and score thresholding, making outputs deterministic
            and tiny.
        small_pooler_resolution: shrink all ROI pooler resolutions to 1.
        small_resize_resolution: resize inputs to 10px on both dimensions.

    Returns:
        list[str]: alternating config keys and stringified values, suitable
        for ``cfg.merge_from_list`` / command-line ``opts``.
    """
    ret = []
    if fixed_single_proposals:
        # Nudge NMS/score thresholds past their valid range so nothing is
        # filtered, then cap the surviving count at exactly 1.
        epsilon = 1e-4
        ret.extend(
            [
                "MODEL.RPN.POST_NMS_TOPK_TEST",
                1,
                "TEST.DETECTIONS_PER_IMAGE",
                1,
                "MODEL.PROPOSAL_GENERATOR.MIN_SIZE",
                0,
                "MODEL.RPN.NMS_THRESH",
                1.0 + epsilon,
                "MODEL.ROI_HEADS.NMS_THRESH_TEST",
                1.0 + epsilon,
                "MODEL.ROI_HEADS.SCORE_THRESH_TEST",
                0.0 - epsilon,
            ]
        )
    if small_pooler_resolution:
        ret.extend(
            [
                "MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION",
                1,
                "MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION",
                1,
                "MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION",
                1,
            ]
        )
    if small_resize_resolution:
        ret.extend(
            [
                "INPUT.MIN_SIZE_TRAIN",
                (10,),
                "INPUT.MAX_SIZE_TRAIN",
                10,
                "INPUT.MIN_SIZE_TEST",
                10,
                "INPUT.MAX_SIZE_TEST",
                10,
            ]
        )
    return [str(x) for x in ret]
class RCNNBaseTestCases:
......
......@@ -2,15 +2,10 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import unittest
from d2go.runner import create_runner
from d2go.utils.testing.data_loader_helper import (
LocalImageGenerator,
register_toy_dataset,
)
from mobile_cv.common.misc.file_utils import make_temp_directory
from d2go.utils.testing.data_loader_helper import register_toy_coco_dataset
class TestD2GoDatasetMapper(unittest.TestCase):
......@@ -25,28 +20,19 @@ class TestD2GoDatasetMapper(unittest.TestCase):
cfg.DATASETS.TRAIN = ["default_dataset_train"]
cfg.DATASETS.TEST = ["default_dataset_test"]
with make_temp_directory("detectron2go_tmp_dataset") as dataset_dir:
image_dir = os.path.join(dataset_dir, "images")
os.makedirs(image_dir)
image_generator = LocalImageGenerator(image_dir, width=80, height=60)
with register_toy_dataset(
"default_dataset_train", image_generator, num_images=3
):
train_loader = runner.build_detection_train_loader(cfg)
for i, data in enumerate(train_loader):
self.assertIsNotNone(data)
# for training loader, it has infinite length
if i == 6:
break
with register_toy_dataset(
"default_dataset_test", image_generator, num_images=3
):
test_loader = runner.build_detection_test_loader(
cfg, dataset_name="default_dataset_test"
)
all_data = []
for data in test_loader:
all_data.append(data)
self.assertEqual(len(all_data), 3)
with register_toy_coco_dataset("default_dataset_train", num_images=3):
train_loader = runner.build_detection_train_loader(cfg)
for i, data in enumerate(train_loader):
self.assertIsNotNone(data)
# for training loader, it has infinite length
if i == 6:
break
with register_toy_coco_dataset("default_dataset_test", num_images=3):
test_loader = runner.build_detection_test_loader(
cfg, dataset_name="default_dataset_test"
)
all_data = []
for data in test_loader:
all_data.append(data)
self.assertEqual(len(all_data), 3)
......@@ -2,7 +2,6 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import unittest
import numpy as np
......@@ -13,13 +12,9 @@ from d2go.modeling.kmeans_anchors import (
compute_kmeans_anchors_hook,
)
from d2go.runner import GeneralizedRCNNRunner
from d2go.utils.testing.data_loader_helper import (
LocalImageGenerator,
register_toy_dataset,
)
from d2go.utils.testing.data_loader_helper import register_toy_coco_dataset
from detectron2.data import DatasetCatalog, DatasetFromList, MapDataset
from detectron2.engine import SimpleTrainer
from mobile_cv.common.misc.file_utils import make_temp_directory
from torch.utils.data.sampler import BatchSampler, Sampler
......@@ -123,29 +118,25 @@ class TestKmeansAnchors(unittest.TestCase):
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL.ANCHOR_GENERATOR.NAME = "KMeansAnchorGenerator"
with make_temp_directory("detectron2go_tmp_dataset") as dataset_dir:
image_dir = os.path.join(dataset_dir, "images")
os.makedirs(image_dir)
image_generator = LocalImageGenerator(image_dir, width=80, height=60)
with register_toy_dataset(
"toy_dataset",
image_generator,
num_images=cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG,
):
model = self.runner.build_model(cfg)
trainer = SimpleTrainer(model, data_loader=[], optimizer=None)
trainer_hooks = [compute_kmeans_anchors_hook(self.runner, cfg)]
trainer.register_hooks(trainer_hooks)
trainer.before_train()
anchor_generator = model.proposal_generator.anchor_generator
cell_anchors = [x for x in anchor_generator.cell_anchors]
gt_anchors = np.array(
[
[-20, -15, 20, 15] # toy_dataset's bbox is half size of image
for _ in range(cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS)
]
)
np.testing.assert_allclose(cell_anchors[0], gt_anchors)
with register_toy_coco_dataset(
"toy_dataset",
image_size=(80, 60), # w, h
num_images=cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG,
):
model = self.runner.build_model(cfg)
trainer = SimpleTrainer(model, data_loader=[], optimizer=None)
trainer_hooks = [compute_kmeans_anchors_hook(self.runner, cfg)]
trainer.register_hooks(trainer_hooks)
trainer.before_train()
anchor_generator = model.proposal_generator.anchor_generator
cell_anchors = list(anchor_generator.cell_anchors)
gt_anchors = np.array(
[
[-20, -15, 20, 15] # toy_dataset's bbox is half size of image
for _ in range(cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS)
]
)
np.testing.assert_allclose(cell_anchors[0], gt_anchors)
if __name__ == "__main__":
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment