"...text-generation-inference.git" did not exist on "37df6df38edb4dc8eee89a42ec3791e89442c851"
Unverified Commit 1b7c0f54 authored by Vasilis Vryniotis's avatar Vasilis Vryniotis Committed by GitHub
Browse files

Temporarily removing quantized mobilenet_v3_small. (#3366)

parent 97885cb1
...@@ -280,7 +280,6 @@ a model with random weights by calling its constructor: ...@@ -280,7 +280,6 @@ a model with random weights by calling its constructor:
inception_v3 = models.quantization.inception_v3() inception_v3 = models.quantization.inception_v3()
mobilenet_v2 = models.quantization.mobilenet_v2() mobilenet_v2 = models.quantization.mobilenet_v2()
mobilenet_v3_large = models.quantization.mobilenet_v3_large() mobilenet_v3_large = models.quantization.mobilenet_v3_large()
mobilenet_v3_small = models.quantization.mobilenet_v3_small()
resnet18 = models.quantization.resnet18() resnet18 = models.quantization.resnet18()
resnet50 = models.quantization.resnet50() resnet50 = models.quantization.resnet50()
resnext101_32x8d = models.quantization.resnext101_32x8d() resnext101_32x8d = models.quantization.resnext101_32x8d()
......
from .mobilenetv2 import QuantizableMobileNetV2, mobilenet_v2, __all__ as mv2_all from .mobilenetv2 import QuantizableMobileNetV2, mobilenet_v2, __all__ as mv2_all
from .mobilenetv3 import QuantizableMobileNetV3, mobilenet_v3_large, mobilenet_v3_small, __all__ as mv3_all from .mobilenetv3 import QuantizableMobileNetV3, mobilenet_v3_large, __all__ as mv3_all
__all__ = mv2_all + mv3_all __all__ = mv2_all + mv3_all
...@@ -8,12 +8,11 @@ from typing import Any, List, Optional ...@@ -8,12 +8,11 @@ from typing import Any, List, Optional
from .utils import _replace_relu from .utils import _replace_relu
__all__ = ['QuantizableMobileNetV3', 'mobilenet_v3_large', 'mobilenet_v3_small'] __all__ = ['QuantizableMobileNetV3', 'mobilenet_v3_large']
quant_model_urls = { quant_model_urls = {
'mobilenet_v3_large_qnnpack': 'mobilenet_v3_large_qnnpack':
"https://download.pytorch.org/models/quantized/mobilenet_v3_large_qnnpack-5bcacf28.pth", "https://download.pytorch.org/models/quantized/mobilenet_v3_large_qnnpack-5bcacf28.pth",
'mobilenet_v3_small_qnnpack': None,
} }
...@@ -130,22 +129,3 @@ def mobilenet_v3_large(pretrained=False, progress=True, quantize=False, **kwargs ...@@ -130,22 +129,3 @@ def mobilenet_v3_large(pretrained=False, progress=True, quantize=False, **kwargs
arch = "mobilenet_v3_large" arch = "mobilenet_v3_large"
inverted_residual_setting, last_channel = _mobilenet_v3_conf(arch, kwargs) inverted_residual_setting, last_channel = _mobilenet_v3_conf(arch, kwargs)
return _mobilenet_v3_model(arch, inverted_residual_setting, last_channel, pretrained, progress, quantize, **kwargs) return _mobilenet_v3_model(arch, inverted_residual_setting, last_channel, pretrained, progress, quantize, **kwargs)
def mobilenet_v3_small(pretrained=False, progress=True, quantize=False, **kwargs):
    """Build a (optionally quantized) MobileNetV3-Small model.

    Architecture from `"Searching for MobileNetV3"
    <https://arxiv.org/abs/1905.02244>`_.

    When ``quantize=True`` the returned model uses 8-bit weights; such
    quantized models support inference only and must run on CPU (GPU
    inference is not yet supported).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
        progress (bool): If True, displays a progress bar of the download to stderr
        quantize (bool): If True, returns a quantized model, else returns a float model
    """
    model_arch = "mobilenet_v3_small"
    settings, last_channel = _mobilenet_v3_conf(model_arch, kwargs)
    return _mobilenet_v3_model(
        model_arch, settings, last_channel, pretrained, progress, quantize, **kwargs
    )
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment