"examples/pytorch/gatv2/train.py" did not exist on "82499e602bbf9de8f2ec50a92f30512f7a2b14ef"
Commit 647a3fdf authored by Yanghan Wang's avatar Yanghan Wang Committed by Facebook GitHub Bot
Browse files

support do_postprocess when tracing rcnn model in D2 style

Summary:
Pull Request resolved: https://github.com/facebookresearch/d2go/pull/200

Currently when exporting the RCNN model, we call it with `self.model.inference(inputs, do_postprocess=False)[0]`, therefore the output of the exported model is not post-processed, e.g. the mask is in the squared shape. This diff adds the option to include the postprocess step in the exported model.

Worth noting that since the input is a single tensor, the post-process doesn't resize the output to the original resolution, and we can't apply the post-process twice to further resize it in the Predictor's PostProcessFunc, so an assertion is added to raise an error in this case. But this is fine for most production use cases where the input is not resized.

Set `RCNN_EXPORT.INCLUDE_POSTPROCESS` to `True` to enable this.

Reviewed By: tglik

Differential Revision: D34904058

fbshipit-source-id: 65f120eadc9747e9918d26ce0bd7dd265931cfb5
parent 312c6b62
......@@ -106,17 +106,22 @@ def default_rcnn_prepare_for_export(self, cfg, inputs, predictor_type):
)
else:
do_postprocess = cfg.RCNN_EXPORT.INCLUDE_POSTPROCESS
preprocess_info = FuncInfo.gen_func_info(
D2RCNNInferenceWrapper.Preprocess, params={}
)
preprocess_func = preprocess_info.instantiate()
return PredictorExportConfig(
model=D2RCNNInferenceWrapper(pytorch_model),
model=D2RCNNInferenceWrapper(
pytorch_model,
do_postprocess=do_postprocess,
),
data_generator=lambda x: (preprocess_func(x),),
model_export_method=predictor_type,
preprocess_info=preprocess_info,
postprocess_info=FuncInfo.gen_func_info(
D2RCNNInferenceWrapper.Postprocess, params={}
D2RCNNInferenceWrapper.Postprocess,
params={"detector_postprocess_done_in_model": do_postprocess},
),
)
......@@ -407,9 +412,14 @@ class D2Caffe2MetaArchPostprocessFunc(object):
class D2RCNNInferenceWrapper(nn.Module):
def __init__(self, model):
def __init__(
self,
model,
do_postprocess=False,
):
super().__init__()
self.model = model
self.do_postprocess = do_postprocess
def forward(self, image):
"""
......@@ -417,6 +427,20 @@ class D2RCNNInferenceWrapper(nn.Module):
contains non-tensor, therefore the TracingAdaptedTorchscriptExport must be used in
order to convert the output back from flattened tensors.
"""
if self.do_postprocess:
inputs = [
{
"image": image,
# NOTE: the width/height is not available since the model takes a
# single image tensor as input. Therefore even though post-process
# is specified, the wrapped model doesn't resize the output to its
# original width/height.
# TODO: If this is needed, we might make the model take extra
# width/height info like the C2-style inputs.
}
]
return self.model.forward(inputs)[0]["instances"]
else:
inputs = [{"image": image}]
return self.model.inference(inputs, do_postprocess=False)[0]
......@@ -432,6 +456,14 @@ class D2RCNNInferenceWrapper(nn.Module):
return batch[0]["image"]
class Postprocess(object):
def __init__(self, detector_postprocess_done_in_model=False):
"""
Args:
detector_postprocess_done_in_model (bool): whether `detector_postprocess`
has already been applied in the D2RCNNInferenceWrapper
"""
self.detector_postprocess_done_in_model = detector_postprocess_done_in_model
def __call__(self, batch, inputs, outputs):
"""
This function describes how to run the predictor using exported model. Note
......@@ -440,6 +472,18 @@ class D2RCNNInferenceWrapper(nn.Module):
"""
assert len(batch) == 1, "only support single batch"
width, height = batch[0]["width"], batch[0]["height"]
if self.detector_postprocess_done_in_model:
image_shape = batch[0]["image"].shape # chw
if image_shape[1] != height or image_shape[2] != width:
raise NotImplementedError(
f"Image tensor (shape: {image_shape}) doesn't match the"
f" input width ({width}) height ({height}). Since post-process"
f" has been done inside the torchscript without width/height"
f" information, can't recover the post-processed output to "
f"orignail resolution."
)
return [{"instances": outputs}]
else:
r = detector_postprocess(outputs, height, width)
return [{"instances": r}]
......
......@@ -618,6 +618,12 @@ def _add_rcnn_default_config(_C):
_C.EXPORT_CAFFE2 = CfgNode()
_C.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT = False
# Options about how to export the model
_C.RCNN_EXPORT = CfgNode()
# whether or not to include the postprocess (GeneralizedRCNN._postprocess) step
# inside the exported model
_C.RCNN_EXPORT.INCLUDE_POSTPROCESS = False
_C.RCNN_PREPARE_FOR_EXPORT = "default_rcnn_prepare_for_export"
_C.RCNN_PREPARE_FOR_QUANT = "default_rcnn_prepare_for_quant"
_C.RCNN_PREPARE_FOR_QUANT_CONVERT = "default_rcnn_prepare_for_quant_convert"
......
......@@ -229,13 +229,13 @@ def get_quick_test_config_opts(
ret.extend(
[
"INPUT.MIN_SIZE_TRAIN",
(10,),
(8,),
"INPUT.MAX_SIZE_TRAIN",
10,
9,
"INPUT.MIN_SIZE_TEST",
10,
"INPUT.MAX_SIZE_TEST",
10,
11,
]
)
return [str(x) for x in ret]
......@@ -279,6 +279,29 @@ class RCNNBaseTestCases:
self.test_dir = tempfile.mkdtemp(prefix="test_export_")
self.addCleanup(shutil.rmtree, self.test_dir)
def _get_test_image_sizes_default(self, is_train):
# model should work for any size, so don't always use power of 2 or multiple
# of size_divisibility for testing.
side_length = max(self.test_model.backbone.size_divisibility, 10)
# make it non-square to cover error caused by messing up width & height
h, w = side_length, side_length * 2
return h, w
def _get_test_image_size_no_resize(self, is_train):
# use cfg.INPUT to make sure data loader doesn't resize the image
if is_train:
assert len(self.cfg.INPUT.MAX_SIZE_TRAIN) == 1
h = self.cfg.INPUT.MIN_SIZE_TRAIN[0]
w = self.cfg.INPUT.MAX_SIZE_TRAIN
else:
h = self.cfg.INPUT.MIN_SIZE_TEST
w = self.cfg.INPUT.MAX_SIZE_TEST
return h, w
def _get_test_image_sizes(self, is_train):
"""override this method to use other image size strategy"""
return self._get_test_image_sizes_default(is_train)
def setup_custom_test(self):
"""
Override this when using different runner, using different base config file,
......@@ -299,11 +322,12 @@ class RCNNBaseTestCases:
self.cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
@contextlib.contextmanager
def _create_data_loader(self, image_height, image_width, is_train):
def _create_data_loader(self, is_train):
"""
Creating the data loader used for the test case. Note that it's better
to use "fake" data for quick test and isolating I/O.
"""
image_height, image_width = self._get_test_image_sizes(is_train=False)
with create_detection_data_loader_on_toy_dataset(
self.cfg,
image_height,
......@@ -314,9 +338,7 @@ class RCNNBaseTestCases:
yield data_loader
def _test_export(self, predictor_type, compare_match=True):
size_divisibility = max(self.test_model.backbone.size_divisibility, 10)
h, w = size_divisibility, size_divisibility * 2
with self._create_data_loader(h, w, is_train=False) as data_loader:
with self._create_data_loader(is_train=False) as data_loader:
inputs = next(iter(data_loader))
# TODO: the export may change model it self, need to fix this
......@@ -341,6 +363,7 @@ class RCNNBaseTestCases:
assert_instances_allclose(
predictor_outputs[0]["instances"],
pytorch_outputs[0]["instances"],
size_as_tensor=True,
)
return predictor_path
......@@ -348,10 +371,7 @@ class RCNNBaseTestCases:
# TODO: add test_train
def _test_inference(self):
size_divisibility = max(self.test_model.backbone.size_divisibility, 10)
h, w = size_divisibility, size_divisibility * 2
with self._create_data_loader(h, w, is_train=False) as data_loader:
with self._create_data_loader(is_train=False) as data_loader:
inputs = next(iter(data_loader))
with torch.no_grad():
......
......@@ -191,5 +191,19 @@ class TestTorchVisionExport(unittest.TestCase):
scripted_model.save(os.path.join(tmp_dir, "new_file.pt"))
class TestMaskRCNNExportOptions(RCNNBaseTestCases.TemplateTestCase):
def setup_custom_test(self):
super().setup_custom_test()
self.cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml")
def _get_test_image_sizes(self, is_train):
# postprocessing requires no resize from "data loader"
return self._get_test_image_size_no_resize(is_train)
def test_tracing_with_postprocess(self):
self.cfg.merge_from_list(["RCNN_EXPORT.INCLUDE_POSTPROCESS", True])
self._test_export("torchscript@tracing", compare_match=True)
if __name__ == "__main__":
unittest.main()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment