Unverified Commit 2ab93592 authored by Vasilis Vryniotis, committed by GitHub

Improve model parameterization on tests (#3926)

* Improve model parameterization on tests.

* Code review changes.
parent c2cdad4f
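
In case it helps review, here is a minimal, self-contained sketch of the override pattern this change introduces: shared defaults are merged with an optional per-model entry from _model_params, and keys that are not constructor arguments (such as 'input_shape') are popped before the remaining kwargs reach the model builder. The 'some_model' entry and the resolve_params helper below are illustrative stand-ins only; they are not part of the test suite.

# Hypothetical per-model overrides, keyed by model name (illustrative only).
_model_params = {
    'some_model': {
        'input_shape': (1, 3, 299, 299),   # override the default input size
        'score_thresh': 0.01,              # extra constructor kwarg for this model only
    },
}


def resolve_params(name, defaults):
    # Later entries win in a dict merge, so per-model overrides take precedence.
    kwargs = {**defaults, **_model_params.get(name, {})}
    # 'input_shape' drives the test input, not the model constructor, so pop it out.
    input_shape = kwargs.pop('input_shape')
    return input_shape, kwargs


defaults = {'num_classes': 50, 'input_shape': (1, 3, 224, 224)}
input_shape, kwargs = resolve_params('some_model', defaults)
assert input_shape == (1, 3, 299, 299)
assert kwargs == {'num_classes': 50, 'score_thresh': 0.01}

The merge order is the key design choice: because the per-model dict is unpacked second, its values override the shared defaults without any if/elif branching in the test helpers.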
@@ -74,12 +74,37 @@ autocast_flaky_numerics = (
 )
 
 
+# The following contains configuration parameters for all models which are used by
+# the _test_*_model methods.
+_model_params = {
+    'inception_v3': {
+        'input_shape': (1, 3, 299, 299)
+    },
+    'retinanet_resnet50_fpn': {
+        'score_thresh': 0.01,
+    },
+    'fasterrcnn_mobilenet_v3_large_fpn': {
+        'box_score_thresh': 0.02076,
+    },
+    'fasterrcnn_mobilenet_v3_large_320_fpn': {
+        'box_score_thresh': 0.02076,
+        'rpn_pre_nms_top_n_test': 1000,
+        'rpn_post_nms_top_n_test': 1000,
+    }
+}
+
+
 class ModelTester(TestCase):
-    def _test_classification_model(self, name, input_shape, dev):
+    def _test_classification_model(self, name, dev):
         set_rng_seed(0)
-        # passing num_class equal to a number other than 1000 helps in making the test
-        # more enforcing in nature
-        model = models.__dict__[name](num_classes=50)
+        defaults = {
+            'num_classes': 50,
+            'input_shape': (1, 3, 224, 224),
+        }
+        kwargs = {**defaults, **_model_params.get(name, {})}
+        input_shape = kwargs.pop('input_shape')
+        model = models.__dict__[name](**kwargs)
         model.eval().to(device=dev)
         # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
         x = torch.rand(input_shape).to(device=dev)
@@ -98,11 +123,16 @@ class ModelTester(TestCase):
     def _test_segmentation_model(self, name, dev):
         set_rng_seed(0)
-        # passing num_classes equal to a number other than 21 helps in making the test's
-        # expected file size smaller
-        model = models.segmentation.__dict__[name](num_classes=10, pretrained_backbone=False)
+        defaults = {
+            'num_classes': 10,
+            'pretrained_backbone': False,
+            'input_shape': (1, 3, 32, 32),
+        }
+        kwargs = {**defaults, **_model_params.get(name, {})}
+        input_shape = kwargs.pop('input_shape')
+        model = models.segmentation.__dict__[name](**kwargs)
         model.eval().to(device=dev)
-        input_shape = (1, 3, 32, 32)
         # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
         x = torch.rand(input_shape).to(device=dev)
         out = model(x)["out"]
@@ -146,18 +176,16 @@ class ModelTester(TestCase):
     def _test_detection_model(self, name, dev):
         set_rng_seed(0)
-        kwargs = {}
-        if "retinanet" in name:
-            # Reduce the default threshold to ensure the returned boxes are not empty.
-            kwargs["score_thresh"] = 0.01
-        elif "fasterrcnn_mobilenet_v3_large" in name:
-            kwargs["box_score_thresh"] = 0.02076
-            if "fasterrcnn_mobilenet_v3_large_320_fpn" in name:
-                kwargs["rpn_pre_nms_top_n_test"] = 1000
-                kwargs["rpn_post_nms_top_n_test"] = 1000
-        model = models.detection.__dict__[name](num_classes=50, pretrained_backbone=False, **kwargs)
+        defaults = {
+            'num_classes': 50,
+            'pretrained_backbone': False,
+            'input_shape': (3, 300, 300),
+        }
+        kwargs = {**defaults, **_model_params.get(name, {})}
+        input_shape = kwargs.pop('input_shape')
+
+        model = models.detection.__dict__[name](**kwargs)
         model.eval().to(device=dev)
-        input_shape = (3, 300, 300)
         # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
         x = torch.rand(input_shape).to(device=dev)
         model_input = [x]
@@ -435,8 +463,7 @@ _devs = [torch.device("cpu"), torch.device("cuda")] if torch.cuda.is_available()
 @pytest.mark.parametrize('model_name', get_available_classification_models())
 @pytest.mark.parametrize('dev', _devs)
 def test_classification_model(model_name, dev):
-    input_shape = (1, 3, 299, 299) if model_name == 'inception_v3' else (1, 3, 224, 224)
-    ModelTester()._test_classification_model(model_name, input_shape, dev)
+    ModelTester()._test_classification_model(model_name, dev)
 
 
 @pytest.mark.parametrize('model_name', get_available_segmentation_models())