Unverified commit 3855901e authored by Joao Gomes, committed by GitHub

Add a test that compares the output of our quantized models against expected cached values (#4597)



* adding tests to check output of quantized models

* adding test quantized model weights

* merge test_new_quantized_classification_model with test_quantized_classification_model

* adding skipif removed by mistake

* addressing comments from PR

* removing unused argument

* fixing lint errors

* changing model to eval model and updating weights

* Update test/test_models.py
Co-authored-by: Vasilis Vryniotis <datumbox@users.noreply.github.com>

* enforce single test in circleci

* changing random seed

* updating weights for new seed

* adding missing empty line

* try 128 random seed

* try 256 random seed

* try 16 random seed

* disable inception_v3 input/output quantization tests

* removing ModelTester.test_inception_v3_quantized_expect.pkl

* reverting temporary ci run_test.sh changes
Co-authored-by: Vasilis Vryniotis <datumbox@users.noreply.github.com>
parent 203671ab
10 files suppressed by a .gitattributes entry or because the file's encoding is unsupported (the binary files added by this commit, likely the cached expected-output .pkl weights mentioned in the commit message).
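The suppressed binaries are, per the commit message, cached expected-output files (the log names ModelTester.test_inception_v3_quantized_expect.pkl, for example). As a rough illustration of the mechanism, here is a minimal sketch of how an _assert_expected-style helper can cache a tensor on first run and compare against it afterwards; the function name, file naming scheme, and serialization format here are assumptions, not the actual torchvision implementation.

import os

import torch


def assert_expected(output, name, prec=0.1, expect_dir="expect"):
    # Hypothetical stand-in for the _assert_expected helper used in the
    # diff below: compare `output` against a tensor cached on disk,
    # creating the cache on the first run.
    path = os.path.join(expect_dir, "ModelTester.test_{}_expect.pkl".format(name))
    if os.path.exists(path):
        expected = torch.load(path)
        # Element-wise comparison with an absolute tolerance, mirroring prec=0.1.
        assert torch.allclose(output, expected, rtol=0.0, atol=prec)
    else:
        # First run on this machine: record the output as the expected value.
        os.makedirs(expect_dir, exist_ok=True)
        torch.save(output, path)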
@@ -220,6 +220,11 @@ autocast_flaky_numerics = (
     "maskrcnn_resnet50_fpn",
 )
 
+# The tests for the following quantized models are flaky, possibly due to inconsistent
+# rounding errors on different platforms. For this reason the input/output consistency
+# tests under test_quantized_classification_model are skipped for these models.
+quantized_flaky_models = ("inception_v3",)
+
 # The following contains configuration parameters for all models which are used by
 # the _test_*_model methods.
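The new quantized_flaky_models tuple gates inception_v3 out of the exact-output comparison because quantized arithmetic can round differently across platforms. As a hedged illustration (not taken from this commit), a value that lands near a quantization bucket boundary shows why bitwise-stable expected outputs are hard to guarantee:

import torch

# Illustrative only: inputs straddling a rounding boundary (half a bucket,
# i.e. scale / 2) can fall into different integer buckets if platforms
# round slightly differently upstream of the quantization step.
x = torch.tensor([0.049999, 0.050001])
q = torch.quantize_per_tensor(x, scale=0.1, zero_point=0, dtype=torch.qint8)
print(q.int_repr())  # tensor([0, 1], dtype=torch.int8): a tiny float change flips the bucket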
@@ -687,7 +692,9 @@ def test_video_model(model_name, dev):
 )
 @pytest.mark.parametrize("model_name", get_available_quantizable_models())
 def test_quantized_classification_model(model_name):
+    set_rng_seed(0)
     defaults = {
+        "num_classes": 5,
         "input_shape": (1, 3, 224, 224),
         "pretrained": False,
         "quantize": True,
@@ -697,8 +704,15 @@ def test_quantized_classification_model(model_name):
     # First check if quantize=True provides models that can run with input data
     model = torchvision.models.quantization.__dict__[model_name](**kwargs)
+    model.eval()
     x = torch.rand(input_shape)
-    model(x)
+    out = model(x)
+
+    if model_name not in quantized_flaky_models:
+        _assert_expected(out, model_name + "_quantized", prec=0.1)
+        assert out.shape[-1] == 5
+        _check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(model_name, None))
+        _check_fx_compatible(model, x)
 
     kwargs["quantize"] = False
     for eval_mode in [True, False]:
...
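The test now calls set_rng_seed(0) before building the model, so both the randomly initialised weights and the torch.rand input are deterministic; that is what makes comparing against cached outputs meaningful (the commit log shows several seeds were tried before settling on 0). A minimal sketch of such a helper, assuming it only needs to seed torch and Python's random module; the real torchvision test utility may seed more generators:

import random

import torch


def set_rng_seed(seed):
    # Seed every RNG the test touches so the model's random weights and
    # the random input tensor are reproducible across runs and platforms.
    torch.manual_seed(seed)
    random.seed(seed)

Shrinking the models to num_classes=5 keeps the cached expected-output tensors small, and the assert out.shape[-1] == 5 check in the diff confirms the override actually took effect.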