Commit fb3ba095 authored by Yanghan Wang's avatar Yanghan Wang Committed by Facebook GitHub Bot
Browse files

reduce memory usage and speed up TestToolsExporter

Reviewed By: zhanghang1989

Differential Revision: D27783989

fbshipit-source-id: f05c11e396a2f62366721b365929b29f05d5bc02
parent c4f0fbe6
...@@ -83,7 +83,7 @@ def create_toy_dataset( ...@@ -83,7 +83,7 @@ def create_toy_dataset(
@contextlib.contextmanager @contextlib.contextmanager
def register_toy_dataset( def _register_toy_dataset(
dataset_name, image_generator, num_images, num_classes=-1, num_keypoints=0 dataset_name, image_generator, num_images, num_classes=-1, num_keypoints=0
): ):
json_dataset, meta_data = create_toy_dataset( json_dataset, meta_data = create_toy_dataset(
...@@ -112,6 +112,26 @@ def register_toy_dataset( ...@@ -112,6 +112,26 @@ def register_toy_dataset(
MetadataCatalog.remove(dataset_name) MetadataCatalog.remove(dataset_name)
@contextlib.contextmanager
def register_toy_coco_dataset(
    dataset_name, num_images=3, image_size=(5, 10), num_classes=-1, num_keypoints=0
):
    """Register a toy COCO-style dataset backed by locally generated images.

    Creates a temporary directory of synthetic images sized ``image_size``
    (width, height), registers the dataset under ``dataset_name`` for the
    duration of the ``with`` block, and tears everything down on exit.

    Args:
        dataset_name: name to register in the Dataset/Metadata catalogs.
        num_images: number of synthetic images to generate.
        image_size: (width, height) of each generated image.
        num_classes: forwarded to the underlying registration helper.
        num_keypoints: forwarded to the underlying registration helper.
    """
    img_w, img_h = image_size
    with make_temp_directory("detectron2go_tmp_dataset") as tmp_dir:
        images_root = os.path.join(tmp_dir, "images")
        os.makedirs(images_root)
        generator = LocalImageGenerator(images_root, width=img_w, height=img_h)
        with _register_toy_dataset(
            dataset_name,
            generator,
            num_images=num_images,
            num_classes=num_classes,
            num_keypoints=num_keypoints,
        ):
            yield
def create_local_dataset( def create_local_dataset(
out_dir, out_dir,
num_images, num_images,
...@@ -170,7 +190,6 @@ class LocalImageGenerator: ...@@ -170,7 +190,6 @@ class LocalImageGenerator:
@contextlib.contextmanager @contextlib.contextmanager
def create_fake_detection_data_loader(height, width, is_train): def create_fake_detection_data_loader(height, width, is_train):
with make_temp_directory("detectron2go_tmp_dataset") as dataset_dir:
runner = create_runner("d2go.runner.GeneralizedRCNNRunner") runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
cfg = runner.get_default_cfg() cfg = runner.get_default_cfg()
cfg.DATASETS.TRAIN = ["default_dataset_train"] cfg.DATASETS.TRAIN = ["default_dataset_train"]
...@@ -182,21 +201,12 @@ def create_fake_detection_data_loader(height, width, is_train): ...@@ -182,21 +201,12 @@ def create_fake_detection_data_loader(height, width, is_train):
cfg.INPUT.MIN_SIZE_TEST = min_size cfg.INPUT.MIN_SIZE_TEST = min_size
cfg.INPUT.MAX_SIZE_TEST = max_size cfg.INPUT.MAX_SIZE_TEST = max_size
with make_temp_directory("detectron2go_tmp_dataset") as dataset_dir:
image_dir = os.path.join(dataset_dir, "images")
os.makedirs(image_dir)
image_generator = LocalImageGenerator(image_dir, width=width, height=height)
if is_train: if is_train:
with register_toy_dataset( with register_toy_coco_dataset("default_dataset_train", num_images=3):
"default_dataset_train", image_generator, num_images=3
):
train_loader = runner.build_detection_train_loader(cfg) train_loader = runner.build_detection_train_loader(cfg)
yield train_loader yield train_loader
else: else:
with register_toy_dataset( with register_toy_coco_dataset("default_dataset_test", num_images=3):
"default_dataset_test", image_generator, num_images=3
):
test_loader = runner.build_detection_test_loader( test_loader = runner.build_detection_test_loader(
cfg, dataset_name="default_dataset_test" cfg, dataset_name="default_dataset_test"
) )
......
...@@ -184,11 +184,16 @@ def _validate_outputs(inputs, outputs): ...@@ -184,11 +184,16 @@ def _validate_outputs(inputs, outputs):
# TODO: figure out how to validate outputs # TODO: figure out how to validate outputs
def get_quick_test_config_opts(): def get_quick_test_config_opts(
fixed_single_proposals=True,
small_pooler_resolution=True,
small_resize_resolution=True,
):
ret = []
if fixed_single_proposals:
epsilon = 1e-4 epsilon = 1e-4
return [ ret.extend(
str(x) [
for x in [
"MODEL.RPN.POST_NMS_TOPK_TEST", "MODEL.RPN.POST_NMS_TOPK_TEST",
1, 1,
"TEST.DETECTIONS_PER_IMAGE", "TEST.DETECTIONS_PER_IMAGE",
...@@ -202,7 +207,10 @@ def get_quick_test_config_opts(): ...@@ -202,7 +207,10 @@ def get_quick_test_config_opts():
"MODEL.ROI_HEADS.SCORE_THRESH_TEST", "MODEL.ROI_HEADS.SCORE_THRESH_TEST",
0.0 - epsilon, 0.0 - epsilon,
] ]
+ [ )
if small_pooler_resolution:
ret.extend(
[
"MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION", "MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION",
1, 1,
"MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION", "MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION",
...@@ -210,7 +218,21 @@ def get_quick_test_config_opts(): ...@@ -210,7 +218,21 @@ def get_quick_test_config_opts():
"MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION", "MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION",
1, 1,
] ]
)
if small_resize_resolution:
ret.extend(
[
"INPUT.MIN_SIZE_TRAIN",
(10,),
"INPUT.MAX_SIZE_TRAIN",
10,
"INPUT.MIN_SIZE_TEST",
10,
"INPUT.MAX_SIZE_TEST",
10,
] ]
)
return [str(x) for x in ret]
class RCNNBaseTestCases: class RCNNBaseTestCases:
......
...@@ -2,15 +2,10 @@ ...@@ -2,15 +2,10 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import unittest import unittest
from d2go.runner import create_runner from d2go.runner import create_runner
from d2go.utils.testing.data_loader_helper import ( from d2go.utils.testing.data_loader_helper import register_toy_coco_dataset
LocalImageGenerator,
register_toy_dataset,
)
from mobile_cv.common.misc.file_utils import make_temp_directory
class TestD2GoDatasetMapper(unittest.TestCase): class TestD2GoDatasetMapper(unittest.TestCase):
...@@ -25,14 +20,7 @@ class TestD2GoDatasetMapper(unittest.TestCase): ...@@ -25,14 +20,7 @@ class TestD2GoDatasetMapper(unittest.TestCase):
cfg.DATASETS.TRAIN = ["default_dataset_train"] cfg.DATASETS.TRAIN = ["default_dataset_train"]
cfg.DATASETS.TEST = ["default_dataset_test"] cfg.DATASETS.TEST = ["default_dataset_test"]
with make_temp_directory("detectron2go_tmp_dataset") as dataset_dir: with register_toy_coco_dataset("default_dataset_train", num_images=3):
image_dir = os.path.join(dataset_dir, "images")
os.makedirs(image_dir)
image_generator = LocalImageGenerator(image_dir, width=80, height=60)
with register_toy_dataset(
"default_dataset_train", image_generator, num_images=3
):
train_loader = runner.build_detection_train_loader(cfg) train_loader = runner.build_detection_train_loader(cfg)
for i, data in enumerate(train_loader): for i, data in enumerate(train_loader):
self.assertIsNotNone(data) self.assertIsNotNone(data)
...@@ -40,9 +28,7 @@ class TestD2GoDatasetMapper(unittest.TestCase): ...@@ -40,9 +28,7 @@ class TestD2GoDatasetMapper(unittest.TestCase):
if i == 6: if i == 6:
break break
with register_toy_dataset( with register_toy_coco_dataset("default_dataset_test", num_images=3):
"default_dataset_test", image_generator, num_images=3
):
test_loader = runner.build_detection_test_loader( test_loader = runner.build_detection_test_loader(
cfg, dataset_name="default_dataset_test" cfg, dataset_name="default_dataset_test"
) )
......
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import unittest import unittest
import numpy as np import numpy as np
...@@ -13,13 +12,9 @@ from d2go.modeling.kmeans_anchors import ( ...@@ -13,13 +12,9 @@ from d2go.modeling.kmeans_anchors import (
compute_kmeans_anchors_hook, compute_kmeans_anchors_hook,
) )
from d2go.runner import GeneralizedRCNNRunner from d2go.runner import GeneralizedRCNNRunner
from d2go.utils.testing.data_loader_helper import ( from d2go.utils.testing.data_loader_helper import register_toy_coco_dataset
LocalImageGenerator,
register_toy_dataset,
)
from detectron2.data import DatasetCatalog, DatasetFromList, MapDataset from detectron2.data import DatasetCatalog, DatasetFromList, MapDataset
from detectron2.engine import SimpleTrainer from detectron2.engine import SimpleTrainer
from mobile_cv.common.misc.file_utils import make_temp_directory
from torch.utils.data.sampler import BatchSampler, Sampler from torch.utils.data.sampler import BatchSampler, Sampler
...@@ -123,13 +118,9 @@ class TestKmeansAnchors(unittest.TestCase): ...@@ -123,13 +118,9 @@ class TestKmeansAnchors(unittest.TestCase):
cfg.MODEL.DEVICE = "cpu" cfg.MODEL.DEVICE = "cpu"
cfg.MODEL.ANCHOR_GENERATOR.NAME = "KMeansAnchorGenerator" cfg.MODEL.ANCHOR_GENERATOR.NAME = "KMeansAnchorGenerator"
with make_temp_directory("detectron2go_tmp_dataset") as dataset_dir: with register_toy_coco_dataset(
image_dir = os.path.join(dataset_dir, "images")
os.makedirs(image_dir)
image_generator = LocalImageGenerator(image_dir, width=80, height=60)
with register_toy_dataset(
"toy_dataset", "toy_dataset",
image_generator, image_size=(80, 60), # w, h
num_images=cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG, num_images=cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG,
): ):
model = self.runner.build_model(cfg) model = self.runner.build_model(cfg)
...@@ -138,7 +129,7 @@ class TestKmeansAnchors(unittest.TestCase): ...@@ -138,7 +129,7 @@ class TestKmeansAnchors(unittest.TestCase):
trainer.register_hooks(trainer_hooks) trainer.register_hooks(trainer_hooks)
trainer.before_train() trainer.before_train()
anchor_generator = model.proposal_generator.anchor_generator anchor_generator = model.proposal_generator.anchor_generator
cell_anchors = [x for x in anchor_generator.cell_anchors] cell_anchors = list(anchor_generator.cell_anchors)
gt_anchors = np.array( gt_anchors = np.array(
[ [
[-20, -15, 20, 15] # toy_dataset's bbox is half size of image [-20, -15, 20, 15] # toy_dataset's bbox is half size of image
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment