OpenDAS / vision · Commit 1b7c0f54 (unverified)
Authored Feb 09, 2021 by Vasilis Vryniotis; committed via GitHub on Feb 09, 2021
Temporarily removing quantized mobilenet_v3_small. (#3366)
Parent: 97885cb1
Showing 3 changed files, with 2 additions and 23 deletions:

    docs/source/models.rst                            +0  -1
    torchvision/models/quantization/mobilenet.py      +1  -1
    torchvision/models/quantization/mobilenetv3.py    +1  -21
docs/source/models.rst

@@ -280,7 +280,6 @@ a model with random weights by calling its constructor:
     inception_v3 = models.quantization.inception_v3()
     mobilenet_v2 = models.quantization.mobilenet_v2()
     mobilenet_v3_large = models.quantization.mobilenet_v3_large()
-    mobilenet_v3_small = models.quantization.mobilenet_v3_small()
     resnet18 = models.quantization.resnet18()
     resnet50 = models.quantization.resnet50()
     resnext101_32x8d = models.quantization.resnext101_32x8d()
torchvision/models/quantization/mobilenet.py

 from .mobilenetv2 import QuantizableMobileNetV2, mobilenet_v2, __all__ as mv2_all
-from .mobilenetv3 import QuantizableMobileNetV3, mobilenet_v3_large, mobilenet_v3_small, __all__ as mv3_all
+from .mobilenetv3 import QuantizableMobileNetV3, mobilenet_v3_large, __all__ as mv3_all

 __all__ = mv2_all + mv3_all
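The net effect of this import change, together with the mobilenetv3.py change below, is that mobilenet_v3_small is no longer exported from the quantization namespace. A minimal sketch of what calling code can expect after this commit (the try/except wrapper is illustrative only, not part of torchvision):

    from torchvision.models import quantization

    # Still exported after this commit; returns a float model with random weights.
    model = quantization.mobilenet_v3_large()

    # Temporarily removed by this commit; expected to fail until the
    # quantized small variant is reintroduced.
    try:
        quantization.mobilenet_v3_small()
    except AttributeError:
        print("quantized mobilenet_v3_small is not available in this build")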
torchvision/models/quantization/mobilenetv3.py

@@ -8,12 +8,11 @@ from typing import Any, List, Optional

 from .utils import _replace_relu

-__all__ = ['QuantizableMobileNetV3', 'mobilenet_v3_large', 'mobilenet_v3_small']
+__all__ = ['QuantizableMobileNetV3', 'mobilenet_v3_large']

 quant_model_urls = {
     'mobilenet_v3_large_qnnpack':
         "https://download.pytorch.org/models/quantized/mobilenet_v3_large_qnnpack-5bcacf28.pth",
-    'mobilenet_v3_small_qnnpack': None,
 }

@@ -130,22 +129,3 @@ def mobilenet_v3_large(pretrained=False, progress=True, quantize=False, **kwargs
     arch = "mobilenet_v3_large"
     inverted_residual_setting, last_channel = _mobilenet_v3_conf(arch, kwargs)
     return _mobilenet_v3_model(arch, inverted_residual_setting, last_channel, pretrained, progress, quantize, **kwargs)
-
-
-def mobilenet_v3_small(pretrained=False, progress=True, quantize=False, **kwargs):
-    """
-    Constructs a MobileNetV3 Small architecture from
-    `"Searching for MobileNetV3" <https://arxiv.org/abs/1905.02244>`_.
-
-    Note that quantize = True returns a quantized model with 8 bit
-    weights. Quantized models only support inference and run on CPUs.
-    GPU inference is not yet supported
-
-    Args:
-        pretrained (bool): If True, returns a model pre-trained on ImageNet.
-        progress (bool): If True, displays a progress bar of the download to stderr
-        quantize (bool): If True, returns a quantized model, else returns a float model
-    """
-    arch = "mobilenet_v3_small"
-    inverted_residual_setting, last_channel = _mobilenet_v3_conf(arch, kwargs)
-    return _mobilenet_v3_model(arch, inverted_residual_setting, last_channel, pretrained, progress, quantize, **kwargs)
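For reference, the quantized large variant kept by this commit remains usable for CPU inference; the removed docstring above describes the relevant flags (pretrained, progress, quantize). A minimal sketch of loading it with the qnnpack weights referenced in quant_model_urls (hedged: exact behaviour depends on the installed torchvision build):

    import torch
    from torchvision import models

    # torchvision publishes these quantized weights for the qnnpack backend.
    torch.backends.quantized.engine = "qnnpack"

    # quantize=True returns a model with 8-bit weights; inference only, CPU only.
    model = models.quantization.mobilenet_v3_large(pretrained=True, quantize=True)
    model.eval()

    # Dummy batch of one 224x224 RGB image, just to show the call shape.
    with torch.no_grad():
        logits = model(torch.rand(1, 3, 224, 224))
    print(logits.shape)  # expected: torch.Size([1, 1000])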