Unverified commit 3a1f05ed authored by Ambuj Pawar, committed by GitHub

Remove warnings in pytest model tests (#6593)



* ADD: init_weights config for googlenet

* Fix: Inception and googlenet warnings

* Fix: warning in test_datasets.py

* Fix: Formatting error with ufmt

* Fix: Failing tests in quantized_classification_model

* Update test/test_models.py to define the googlenet entry in one line
Co-authored-by: Philip Meier <github.pmeier@posteo.de>

* Refactor: Change inception quantization class initialization to use args/kwargs

* Resolve mypy issue

* Move *args before inception_blocks

* Move *args before keyword arguments
Co-authored-by: Ambuj Pawar <your_email@abc.example>
Co-authored-by: Philip Meier <github.pmeier@posteo.de>
Co-authored-by: Vasilis Vryniotis <datumbox@users.noreply.github.com>
parent 6ebbdfe8
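
The warnings this commit removes come from GoogLeNet and Inception3: when `init_weights` is left unset, both models emit a FutureWarning announcing that the default weight initialization will change. A minimal sketch, assuming the torchvision behavior this PR targets, of surfacing and then silencing that warning:

```python
# Sketch: trigger the FutureWarning that GoogLeNet emits when init_weights
# is left unset, then construct the model with the flag set explicitly.
import warnings

from torchvision.models import GoogLeNet

with warnings.catch_warnings():
    warnings.simplefilter("error", FutureWarning)  # escalate the warning to an exception
    try:
        GoogLeNet()  # init_weights unset -> FutureWarning about the changing default
    except FutureWarning as exc:
        print(f"caught: {exc}")

    GoogLeNet(init_weights=True)  # explicit choice, no warning emitted
```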
test/test_datasets.py
@@ -617,7 +617,6 @@ class VOCSegmentationTestCase(datasets_utils.ImageDatasetTestCase):
             year=[f"20{year:02d}" for year in range(7, 13)], image_set=("train", "val", "trainval")
         ),
         dict(year="2007", image_set="test"),
-        dict(year="2007-test", image_set="test"),
     )
 
     def inject_fake_data(self, tmpdir, config):
test/test_models.py
@@ -244,7 +244,7 @@ quantized_flaky_models = ("inception_v3", "resnet50")
 # The following contains configuration parameters for all models which are used by
 # the _test_*_model methods.
 _model_params = {
-    "inception_v3": {"input_shape": (1, 3, 299, 299)},
+    "inception_v3": {"input_shape": (1, 3, 299, 299), "init_weights": True},
     "retinanet_resnet50_fpn": {
         "num_classes": 20,
         "score_thresh": 0.01,
@@ -318,6 +318,7 @@ _model_params = {
     "s3d": {
         "input_shape": (1, 3, 16, 224, 224),
     },
+    "googlenet": {"init_weights": True},
 }
 # speeding up slow models:
 slow_models = [
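
For context, a hedged sketch of how a test helper might consume these `_model_params` entries; `build_and_probe` is hypothetical, not the actual helper in test/test_models.py:

```python
# Hypothetical helper: pops test-only keys such as input_shape and forwards
# the rest (e.g. init_weights=True) to the torchvision builder, so model
# construction in the tests no longer emits FutureWarnings.
import torch
import torchvision.models as models

def build_and_probe(name: str, params: dict) -> torch.Tensor:
    kwargs = dict(params)
    input_shape = kwargs.pop("input_shape", (1, 3, 224, 224))  # fallback shape is an assumption
    builder = getattr(models, name)   # e.g. models.googlenet, models.inception_v3
    model = builder(**kwargs).eval()  # init_weights=True reaches the model class
    with torch.no_grad():
        return model(torch.rand(input_shape))
```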
torchvision/models/quantization/googlenet.py
@@ -39,7 +39,7 @@ class QuantizableBasicConv2d(BasicConv2d):
 class QuantizableInception(Inception):
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
         self.cat = nn.quantized.FloatFunctional()
 
     def forward(self, x: Tensor) -> Tensor:
@@ -50,7 +50,7 @@ class QuantizableInception(Inception):
 class QuantizableInceptionAux(InceptionAux):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
         self.relu = nn.ReLU()
 
     def forward(self, x: Tensor) -> Tensor:
@@ -75,7 +75,7 @@ class QuantizableGoogLeNet(GoogLeNet):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(  # type: ignore[misc]
-            blocks=[QuantizableBasicConv2d, QuantizableInception, QuantizableInceptionAux], *args, **kwargs
+            *args, blocks=[QuantizableBasicConv2d, QuantizableInception, QuantizableInceptionAux], **kwargs
         )
         self.quant = torch.ao.quantization.QuantStub()
         self.dequant = torch.ao.quantization.DeQuantStub()
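
The reorderings above are cosmetic at runtime: Python binds positional arguments, including an unpacked `*args`, before applying keywords, no matter where the keyword appears lexically in the call. Putting `*args` first simply reads in call order and matches convention. A minimal sketch with stand-in classes (not the torchvision ones) showing both the normal path and the collision case, which raises under either spelling:

```python
# Stand-ins for the pattern above; Base/Quantizable are illustrative only.
class Base:
    def __init__(self, in_channels: int, pool_features: int, conv_block=None):
        self.conv_block = conv_block

class Quantizable(Base):
    def __init__(self, *args, **kwargs):
        # Binds exactly like the old `super().__init__(conv_block=..., *args, **kwargs)`:
        # keywords are applied after positionals in either spelling.
        super().__init__(*args, conv_block="quantizable", **kwargs)

Quantizable(192, 32)  # fine: positionals fill in_channels and pool_features

try:
    Quantizable(192, 32, "custom")  # third positional also targets conv_block
except TypeError as exc:
    print(exc)  # ... got multiple values for argument 'conv_block'
```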
torchvision/models/quantization/inception.py
@@ -41,7 +41,7 @@ class QuantizableBasicConv2d(inception_module.BasicConv2d):
 class QuantizableInceptionA(inception_module.InceptionA):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
         self.myop = nn.quantized.FloatFunctional()
 
     def forward(self, x: Tensor) -> Tensor:
@@ -52,7 +52,7 @@ class QuantizableInceptionA(inception_module.InceptionA):
 class QuantizableInceptionB(inception_module.InceptionB):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
         self.myop = nn.quantized.FloatFunctional()
 
     def forward(self, x: Tensor) -> Tensor:
@@ -63,7 +63,7 @@ class QuantizableInceptionB(inception_module.InceptionB):
 class QuantizableInceptionC(inception_module.InceptionC):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
         self.myop = nn.quantized.FloatFunctional()
 
     def forward(self, x: Tensor) -> Tensor:
@@ -74,7 +74,7 @@ class QuantizableInceptionC(inception_module.InceptionC):
 class QuantizableInceptionD(inception_module.InceptionD):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
         self.myop = nn.quantized.FloatFunctional()
 
     def forward(self, x: Tensor) -> Tensor:
@@ -85,7 +85,7 @@ class QuantizableInceptionD(inception_module.InceptionD):
 class QuantizableInceptionE(inception_module.InceptionE):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
         self.myop1 = nn.quantized.FloatFunctional()
         self.myop2 = nn.quantized.FloatFunctional()
         self.myop3 = nn.quantized.FloatFunctional()
@@ -119,20 +119,13 @@ class QuantizableInceptionE(inception_module.InceptionE):
 class QuantizableInceptionAux(inception_module.InceptionAux):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
 
 
 class QuantizableInception3(inception_module.Inception3):
-    def __init__(
-        self,
-        num_classes: int = 1000,
-        aux_logits: bool = True,
-        transform_input: bool = False,
-    ) -> None:
-        super().__init__(
-            num_classes=num_classes,
-            aux_logits=aux_logits,
-            transform_input=transform_input,
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(  # type: ignore[misc]
+            *args,
             inception_blocks=[
                 QuantizableBasicConv2d,
                 QuantizableInceptionA,
@@ -142,6 +135,7 @@ class QuantizableInception3(inception_module.Inception3):
                 QuantizableInceptionE,
                 QuantizableInceptionAux,
             ],
+            **kwargs,
         )
         self.quant = torch.ao.quantization.QuantStub()
         self.dequant = torch.ao.quantization.DeQuantStub()
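
With the `*args`/`**kwargs` signature, `QuantizableInception3` forwards any `Inception3` keyword, so options such as `init_weights` now reach the base class instead of being rejected by the old fixed parameter list. A short usage sketch, assuming the post-refactor signature shown above:

```python
# Before the refactor this raised a TypeError: the old __init__ accepted only
# num_classes, aux_logits and transform_input.
from torchvision.models.quantization.inception import QuantizableInception3

model = QuantizableInception3(num_classes=10, init_weights=True)
```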
torchvision/models/quantization/mobilenetv3.py
@@ -83,7 +83,7 @@ class QuantizableSqueezeExcitation(SqueezeExcitation):
 class QuantizableInvertedResidual(InvertedResidual):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(se_layer=QuantizableSqueezeExcitation, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, se_layer=QuantizableSqueezeExcitation, **kwargs)  # type: ignore[misc]
         self.skip_add = nn.quantized.FloatFunctional()
 
     def forward(self, x: Tensor) -> Tensor: