OpenDAS / vision / Commits / d367a01a

Unverified commit d367a01a, authored Oct 28, 2021 by Jirka Borovec, committed via GitHub on Oct 28, 2021.

    Use f-strings almost everywhere, and other cleanups by applying pyupgrade (#4585)

    Co-authored-by: Nicolas Hug <nicolashug@fb.com>

parent 50dfe207
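Most of the diff below consists of three mechanical rewrites that pyupgrade performs on Python 3-only code. Representative before/after pairs, lifted from the hunks that follow:

    # str.format() replaced by an f-string
    raise ValueError("Unsupported model type {}".format(arch))
    raise ValueError(f"Unsupported model type {arch}")

    # two-argument super() replaced by zero-argument super()
    super(QuantizableResNet, self).__init__(*args, **kwargs)
    super().__init__(*args, **kwargs)

    # explicit object base class dropped
    class LevelMapper(object): ...
    class LevelMapper: ...

A few hunks additionally merge adjacent string literals that had been split mid-sentence.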
Changes: 136. Showing 20 changed files with 61 additions and 81 deletions (+61, -81).
torchvision/models/mobilenetv3.py                 +2  -2
torchvision/models/quantization/googlenet.py      +6  -10
torchvision/models/quantization/inception.py      +9  -21
torchvision/models/quantization/mobilenetv2.py    +2  -2
torchvision/models/quantization/mobilenetv3.py    +1  -1
torchvision/models/quantization/resnet.py         +3  -3
torchvision/models/quantization/shufflenetv2.py   +2  -4
torchvision/models/resnet.py                      +4  -4
torchvision/models/segmentation/_utils.py         +2  -2
torchvision/models/segmentation/deeplabv3.py      +4  -4
torchvision/models/segmentation/fcn.py            +1  -1
torchvision/models/shufflenetv2.py                +4  -4
torchvision/models/squeezenet.py                  +3  -3
torchvision/models/vgg.py                         +1  -1
torchvision/models/video/resnet.py                +8  -8
torchvision/ops/_register_onnx_ops.py             +1  -1
torchvision/ops/deform_conv.py                    +2  -4
torchvision/ops/feature_pyramid_network.py        +2  -2
torchvision/ops/misc.py                           +2  -2
torchvision/ops/poolers.py                        +2  -2
torchvision/models/mobilenetv3.py
@@ -278,7 +278,7 @@ def _mobilenet_v3_conf(
         ]
         last_channel = adjust_channels(1024 // reduce_divider)  # C5
     else:
-        raise ValueError("Unsupported model type {}".format(arch))
+        raise ValueError(f"Unsupported model type {arch}")

     return inverted_residual_setting, last_channel
@@ -294,7 +294,7 @@ def _mobilenet_v3(
     model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs)
     if pretrained:
         if model_urls.get(arch, None) is None:
-            raise ValueError("No checkpoint is available for model type {}".format(arch))
+            raise ValueError(f"No checkpoint is available for model type {arch}")
         state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
         model.load_state_dict(state_dict)
     return model
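The rewrite above does not change behavior: the f-string formats the same value the old str.format() call did. A minimal check (illustrative only; the arch value is a placeholder):

    arch = "example_arch"  # placeholder value, any string behaves the same
    assert "Unsupported model type {}".format(arch) == f"Unsupported model type {arch}"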
torchvision/models/quantization/googlenet.py
@@ -49,7 +49,7 @@ def googlenet(
         kwargs["aux_logits"] = False
     if kwargs["aux_logits"]:
         warnings.warn(
-            "auxiliary heads in the pretrained googlenet model are NOT pretrained, " "so make sure to train them"
+            "auxiliary heads in the pretrained googlenet model are NOT pretrained, so make sure to train them"
         )
     original_aux_logits = kwargs["aux_logits"]
     kwargs["aux_logits"] = True
@@ -67,7 +67,7 @@ def googlenet(
     if pretrained:
         if quantize:
-            model_url = quant_model_urls["googlenet" + "_" + backend]
+            model_url = quant_model_urls["googlenet_" + backend]
         else:
             model_url = model_urls["googlenet"]
@@ -84,7 +84,7 @@ def googlenet(
 class QuantizableBasicConv2d(BasicConv2d):
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super(QuantizableBasicConv2d, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self.relu = nn.ReLU()

     def forward(self, x: Tensor) -> Tensor:
@@ -99,9 +99,7 @@ class QuantizableBasicConv2d(BasicConv2d):
 class QuantizableInception(Inception):
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super(QuantizableInception, self).__init__(  # type: ignore[misc]
-            conv_block=QuantizableBasicConv2d, *args, **kwargs
-        )
+        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
         self.cat = nn.quantized.FloatFunctional()

     def forward(self, x: Tensor) -> Tensor:
@@ -112,9 +110,7 @@ class QuantizableInception(Inception):
 class QuantizableInceptionAux(InceptionAux):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super(QuantizableInceptionAux, self).__init__(  # type: ignore[misc]
-            conv_block=QuantizableBasicConv2d, *args, **kwargs
-        )
+        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
         self.relu = nn.ReLU()

     def forward(self, x: Tensor) -> Tensor:
@@ -138,7 +134,7 @@ class QuantizableInceptionAux(InceptionAux):
 class QuantizableGoogLeNet(GoogLeNet):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super(QuantizableGoogLeNet, self).__init__(  # type: ignore[misc]
+        super().__init__(  # type: ignore[misc]
             blocks=[QuantizableBasicConv2d, QuantizableInception, QuantizableInceptionAux], *args, **kwargs
         )
         self.quant = torch.quantization.QuantStub()
torchvision/models/quantization/inception.py
@@ -75,7 +75,7 @@ def inception_v3(
             if not original_aux_logits:
                 model.aux_logits = False
                 model.AuxLogits = None
-            model_url = quant_model_urls["inception_v3_google" + "_" + backend]
+            model_url = quant_model_urls["inception_v3_google_" + backend]
         else:
             model_url = inception_module.model_urls["inception_v3_google"]
@@ -92,7 +92,7 @@ def inception_v3(
 class QuantizableBasicConv2d(inception_module.BasicConv2d):
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super(QuantizableBasicConv2d, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self.relu = nn.ReLU()

     def forward(self, x: Tensor) -> Tensor:
@@ -108,9 +108,7 @@ class QuantizableBasicConv2d(inception_module.BasicConv2d):
 class QuantizableInceptionA(inception_module.InceptionA):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super(QuantizableInceptionA, self).__init__(  # type: ignore[misc]
-            conv_block=QuantizableBasicConv2d, *args, **kwargs
-        )
+        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
         self.myop = nn.quantized.FloatFunctional()

     def forward(self, x: Tensor) -> Tensor:
@@ -121,9 +119,7 @@ class QuantizableInceptionA(inception_module.InceptionA):
 class QuantizableInceptionB(inception_module.InceptionB):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super(QuantizableInceptionB, self).__init__(  # type: ignore[misc]
-            conv_block=QuantizableBasicConv2d, *args, **kwargs
-        )
+        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
         self.myop = nn.quantized.FloatFunctional()

     def forward(self, x: Tensor) -> Tensor:
@@ -134,9 +130,7 @@ class QuantizableInceptionB(inception_module.InceptionB):
 class QuantizableInceptionC(inception_module.InceptionC):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super(QuantizableInceptionC, self).__init__(  # type: ignore[misc]
-            conv_block=QuantizableBasicConv2d, *args, **kwargs
-        )
+        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
         self.myop = nn.quantized.FloatFunctional()

     def forward(self, x: Tensor) -> Tensor:
@@ -147,9 +141,7 @@ class QuantizableInceptionC(inception_module.InceptionC):
 class QuantizableInceptionD(inception_module.InceptionD):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super(QuantizableInceptionD, self).__init__(  # type: ignore[misc]
-            conv_block=QuantizableBasicConv2d, *args, **kwargs
-        )
+        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
         self.myop = nn.quantized.FloatFunctional()

     def forward(self, x: Tensor) -> Tensor:
@@ -160,9 +152,7 @@ class QuantizableInceptionD(inception_module.InceptionD):
 class QuantizableInceptionE(inception_module.InceptionE):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super(QuantizableInceptionE, self).__init__(  # type: ignore[misc]
-            conv_block=QuantizableBasicConv2d, *args, **kwargs
-        )
+        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
         self.myop1 = nn.quantized.FloatFunctional()
         self.myop2 = nn.quantized.FloatFunctional()
         self.myop3 = nn.quantized.FloatFunctional()
@@ -196,9 +186,7 @@ class QuantizableInceptionE(inception_module.InceptionE):
 class QuantizableInceptionAux(inception_module.InceptionAux):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super(QuantizableInceptionAux, self).__init__(  # type: ignore[misc]
-            conv_block=QuantizableBasicConv2d, *args, **kwargs
-        )
+        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]


 class QuantizableInception3(inception_module.Inception3):
@@ -208,7 +196,7 @@ class QuantizableInception3(inception_module.Inception3):
         aux_logits: bool = True,
         transform_input: bool = False,
     ) -> None:
-        super(QuantizableInception3, self).__init__(
+        super().__init__(
             num_classes=num_classes,
             aux_logits=aux_logits,
             transform_input=transform_input,
torchvision/models/quantization/mobilenetv2.py
@@ -19,7 +19,7 @@ quant_model_urls = {
 class QuantizableInvertedResidual(InvertedResidual):
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super(QuantizableInvertedResidual, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self.skip_add = nn.quantized.FloatFunctional()

     def forward(self, x: Tensor) -> Tensor:
@@ -42,7 +42,7 @@ class QuantizableMobileNetV2(MobileNetV2):
         Args:
            Inherits args from floating point MobileNetV2
         """
-        super(QuantizableMobileNetV2, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self.quant = QuantStub()
         self.dequant = DeQuantStub()
torchvision/models/quantization/mobilenetv3.py
@@ -110,7 +110,7 @@ class QuantizableMobileNetV3(MobileNetV3):
 def _load_weights(arch: str, model: QuantizableMobileNetV3, model_url: Optional[str], progress: bool) -> None:
     if model_url is None:
-        raise ValueError("No checkpoint is available for {}".format(arch))
+        raise ValueError(f"No checkpoint is available for {arch}")
     state_dict = load_state_dict_from_url(model_url, progress=progress)
     model.load_state_dict(state_dict)
torchvision/models/quantization/resnet.py
@@ -21,7 +21,7 @@ quant_model_urls = {
 class QuantizableBasicBlock(BasicBlock):
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super(QuantizableBasicBlock, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self.add_relu = torch.nn.quantized.FloatFunctional()

     def forward(self, x: Tensor) -> Tensor:
@@ -49,7 +49,7 @@ class QuantizableBasicBlock(BasicBlock):
 class QuantizableBottleneck(Bottleneck):
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super(QuantizableBottleneck, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self.skip_add_relu = nn.quantized.FloatFunctional()
         self.relu1 = nn.ReLU(inplace=False)
         self.relu2 = nn.ReLU(inplace=False)
@@ -80,7 +80,7 @@ class QuantizableBottleneck(Bottleneck):
 class QuantizableResNet(ResNet):
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super(QuantizableResNet, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self.quant = torch.quantization.QuantStub()
         self.dequant = torch.quantization.DeQuantStub()
torchvision/models/quantization/shufflenetv2.py
@@ -26,7 +26,7 @@ quant_model_urls = {
 class QuantizableInvertedResidual(shufflenetv2.InvertedResidual):
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super(QuantizableInvertedResidual, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self.cat = nn.quantized.FloatFunctional()

     def forward(self, x: Tensor) -> Tensor:
@@ -44,9 +44,7 @@ class QuantizableInvertedResidual(shufflenetv2.InvertedResidual):
 class QuantizableShuffleNetV2(shufflenetv2.ShuffleNetV2):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super(QuantizableShuffleNetV2, self).__init__(  # type: ignore[misc]
-            *args, inverted_residual=QuantizableInvertedResidual, **kwargs
-        )
+        super().__init__(*args, inverted_residual=QuantizableInvertedResidual, **kwargs)  # type: ignore[misc]
         self.quant = torch.quantization.QuantStub()
         self.dequant = torch.quantization.DeQuantStub()
torchvision/models/resnet.py
@@ -68,7 +68,7 @@ class BasicBlock(nn.Module):
         dilation: int = 1,
         norm_layer: Optional[Callable[..., nn.Module]] = None,
     ) -> None:
-        super(BasicBlock, self).__init__()
+        super().__init__()
         if norm_layer is None:
             norm_layer = nn.BatchNorm2d
         if groups != 1 or base_width != 64:
@@ -123,7 +123,7 @@ class Bottleneck(nn.Module):
         dilation: int = 1,
         norm_layer: Optional[Callable[..., nn.Module]] = None,
     ) -> None:
-        super(Bottleneck, self).__init__()
+        super().__init__()
         if norm_layer is None:
             norm_layer = nn.BatchNorm2d
         width = int(planes * (base_width / 64.0)) * groups
@@ -173,7 +173,7 @@ class ResNet(nn.Module):
         replace_stride_with_dilation: Optional[List[bool]] = None,
         norm_layer: Optional[Callable[..., nn.Module]] = None,
     ) -> None:
-        super(ResNet, self).__init__()
+        super().__init__()
         _log_api_usage_once(self)
         if norm_layer is None:
             norm_layer = nn.BatchNorm2d
@@ -188,7 +188,7 @@ class ResNet(nn.Module):
         if len(replace_stride_with_dilation) != 3:
             raise ValueError(
                 "replace_stride_with_dilation should be None "
-                "or a 3-element tuple, got {}".format(replace_stride_with_dilation)
+                f"or a 3-element tuple, got {replace_stride_with_dilation}"
             )
         self.groups = groups
         self.base_width = width_per_group
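All of the super() changes above rely on Python 3's zero-argument form, which inside a method body is equivalent to naming the class and instance explicitly. A self-contained sketch (illustrative only, not torchvision code):

    class Base:
        def __init__(self) -> None:
            self.tag = "base"

    class OldStyle(Base):
        def __init__(self) -> None:
            super(OldStyle, self).__init__()  # pre-cleanup spelling

    class NewStyle(Base):
        def __init__(self) -> None:
            super().__init__()  # post-cleanup spelling

    # both spellings run the same base initializer
    assert OldStyle().tag == NewStyle().tag == "base"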
torchvision/models/segmentation/_utils.py
@@ -11,7 +11,7 @@ class _SimpleSegmentationModel(nn.Module):
     __constants__ = ["aux_classifier"]

     def __init__(self, backbone: nn.Module, classifier: nn.Module, aux_classifier: Optional[nn.Module] = None) -> None:
-        super(_SimpleSegmentationModel, self).__init__()
+        super().__init__()
         self.backbone = backbone
         self.classifier = classifier
         self.aux_classifier = aux_classifier
@@ -38,6 +38,6 @@ class _SimpleSegmentationModel(nn.Module):
 def _load_weights(arch: str, model: nn.Module, model_url: Optional[str], progress: bool) -> None:
     if model_url is None:
-        raise ValueError("No checkpoint is available for {}".format(arch))
+        raise ValueError(f"No checkpoint is available for {arch}")
     state_dict = load_state_dict_from_url(model_url, progress=progress)
     model.load_state_dict(state_dict)
torchvision/models/segmentation/deeplabv3.py
@@ -47,7 +47,7 @@ class DeepLabV3(_SimpleSegmentationModel):
 class DeepLabHead(nn.Sequential):
     def __init__(self, in_channels: int, num_classes: int) -> None:
-        super(DeepLabHead, self).__init__(
+        super().__init__(
             ASPP(in_channels, [12, 24, 36]),
             nn.Conv2d(256, 256, 3, padding=1, bias=False),
             nn.BatchNorm2d(256),
@@ -63,12 +63,12 @@ class ASPPConv(nn.Sequential):
             nn.BatchNorm2d(out_channels),
             nn.ReLU(),
         ]
-        super(ASPPConv, self).__init__(*modules)
+        super().__init__(*modules)


 class ASPPPooling(nn.Sequential):
     def __init__(self, in_channels: int, out_channels: int) -> None:
-        super(ASPPPooling, self).__init__(
+        super().__init__(
             nn.AdaptiveAvgPool2d(1),
             nn.Conv2d(in_channels, out_channels, 1, bias=False),
             nn.BatchNorm2d(out_channels),
@@ -84,7 +84,7 @@ class ASPPPooling(nn.Sequential):
 class ASPP(nn.Module):
     def __init__(self, in_channels: int, atrous_rates: List[int], out_channels: int = 256) -> None:
-        super(ASPP, self).__init__()
+        super().__init__()
         modules = []
         modules.append(
             nn.Sequential(nn.Conv2d(in_channels, out_channels, 1, bias=False), nn.BatchNorm2d(out_channels), nn.ReLU())
torchvision/models/segmentation/fcn.py
@@ -44,7 +44,7 @@ class FCNHead(nn.Sequential):
             nn.Conv2d(inter_channels, channels, 1),
         ]

-        super(FCNHead, self).__init__(*layers)
+        super().__init__(*layers)


 def _fcn_resnet(
torchvision/models/shufflenetv2.py
@@ -35,7 +35,7 @@ def channel_shuffle(x: Tensor, groups: int) -> Tensor:
 class InvertedResidual(nn.Module):
     def __init__(self, inp: int, oup: int, stride: int) -> None:
-        super(InvertedResidual, self).__init__()
+        super().__init__()

         if not (1 <= stride <= 3):
             raise ValueError("illegal stride value")
@@ -99,7 +99,7 @@ class ShuffleNetV2(nn.Module):
         num_classes: int = 1000,
         inverted_residual: Callable[..., nn.Module] = InvertedResidual,
     ) -> None:
-        super(ShuffleNetV2, self).__init__()
+        super().__init__()
         _log_api_usage_once(self)

         if len(stages_repeats) != 3:
@@ -123,7 +123,7 @@ class ShuffleNetV2(nn.Module):
         self.stage2: nn.Sequential
         self.stage3: nn.Sequential
         self.stage4: nn.Sequential
-        stage_names = ["stage{}".format(i) for i in [2, 3, 4]]
+        stage_names = [f"stage{i}" for i in [2, 3, 4]]
         for name, repeats, output_channels in zip(stage_names, stages_repeats, self._stage_out_channels[1:]):
             seq = [inverted_residual(input_channels, output_channels, 2)]
             for i in range(repeats - 1):
@@ -162,7 +162,7 @@ def _shufflenetv2(arch: str, pretrained: bool, progress: bool, *args: Any, **kwa
     if pretrained:
         model_url = model_urls[arch]
         if model_url is None:
-            raise NotImplementedError("pretrained {} is not supported as of now".format(arch))
+            raise NotImplementedError(f"pretrained {arch} is not supported as of now")
         else:
             state_dict = load_state_dict_from_url(model_url, progress=progress)
             model.load_state_dict(state_dict)
torchvision/models/squeezenet.py
@@ -17,7 +17,7 @@ model_urls = {
 class Fire(nn.Module):
     def __init__(self, inplanes: int, squeeze_planes: int, expand1x1_planes: int, expand3x3_planes: int) -> None:
-        super(Fire, self).__init__()
+        super().__init__()
         self.inplanes = inplanes
         self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
         self.squeeze_activation = nn.ReLU(inplace=True)
@@ -35,7 +35,7 @@ class Fire(nn.Module):
 class SqueezeNet(nn.Module):
     def __init__(self, version: str = "1_0", num_classes: int = 1000, dropout: float = 0.5) -> None:
-        super(SqueezeNet, self).__init__()
+        super().__init__()
         _log_api_usage_once(self)
         self.num_classes = num_classes
         if version == "1_0":
@@ -74,7 +74,7 @@ class SqueezeNet(nn.Module):
             # FIXME: Is this needed? SqueezeNet should only be called from the
             # FIXME: squeezenet1_x() functions
             # FIXME: This checking is not done for the other models
-            raise ValueError("Unsupported SqueezeNet version {version}: " "1_0 or 1_1 expected".format(version=version))
+            raise ValueError(f"Unsupported SqueezeNet version {version}: 1_0 or 1_1 expected")

         # Final convolution is initialized differently from the rest
         final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1)
torchvision/models/vgg.py
@@ -36,7 +36,7 @@ class VGG(nn.Module):
     def __init__(
         self, features: nn.Module, num_classes: int = 1000, init_weights: bool = True, dropout: float = 0.5
     ) -> None:
-        super(VGG, self).__init__()
+        super().__init__()
         _log_api_usage_once(self)
         self.features = features
         self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
torchvision/models/video/resnet.py
@@ -20,7 +20,7 @@ class Conv3DSimple(nn.Conv3d):
         self, in_planes: int, out_planes: int, midplanes: Optional[int] = None, stride: int = 1, padding: int = 1
     ) -> None:
-        super(Conv3DSimple, self).__init__(
+        super().__init__(
             in_channels=in_planes,
             out_channels=out_planes,
             kernel_size=(3, 3, 3),
@@ -36,7 +36,7 @@ class Conv3DSimple(nn.Conv3d):
 class Conv2Plus1D(nn.Sequential):
     def __init__(self, in_planes: int, out_planes: int, midplanes: int, stride: int = 1, padding: int = 1) -> None:
-        super(Conv2Plus1D, self).__init__(
+        super().__init__(
             nn.Conv3d(
                 in_planes,
                 midplanes,
@@ -62,7 +62,7 @@ class Conv3DNoTemporal(nn.Conv3d):
         self, in_planes: int, out_planes: int, midplanes: Optional[int] = None, stride: int = 1, padding: int = 1
     ) -> None:
-        super(Conv3DNoTemporal, self).__init__(
+        super().__init__(
             in_channels=in_planes,
             out_channels=out_planes,
             kernel_size=(1, 3, 3),
@@ -90,7 +90,7 @@ class BasicBlock(nn.Module):
     ) -> None:
         midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)

-        super(BasicBlock, self).__init__()
+        super().__init__()
         self.conv1 = nn.Sequential(
             conv_builder(inplanes, planes, midplanes, stride), nn.BatchNorm3d(planes), nn.ReLU(inplace=True)
         )
@@ -125,7 +125,7 @@ class Bottleneck(nn.Module):
         downsample: Optional[nn.Module] = None,
     ) -> None:
-        super(Bottleneck, self).__init__()
+        super().__init__()
         midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)

         # 1x1x1
@@ -166,7 +166,7 @@ class BasicStem(nn.Sequential):
     """The default conv-batchnorm-relu stem"""

     def __init__(self) -> None:
-        super(BasicStem, self).__init__(
+        super().__init__(
             nn.Conv3d(3, 64, kernel_size=(3, 7, 7), stride=(1, 2, 2), padding=(1, 3, 3), bias=False),
             nn.BatchNorm3d(64),
             nn.ReLU(inplace=True),
@@ -177,7 +177,7 @@ class R2Plus1dStem(nn.Sequential):
     """R(2+1)D stem is different than the default one as it uses separated 3D convolution"""

     def __init__(self) -> None:
-        super(R2Plus1dStem, self).__init__(
+        super().__init__(
             nn.Conv3d(3, 45, kernel_size=(1, 7, 7), stride=(1, 2, 2), padding=(0, 3, 3), bias=False),
             nn.BatchNorm3d(45),
             nn.ReLU(inplace=True),
@@ -208,7 +208,7 @@ class VideoResNet(nn.Module):
             num_classes (int, optional): Dimension of the final FC layer. Defaults to 400.
             zero_init_residual (bool, optional): Zero init bottleneck residual BN. Defaults to False.
         """
-        super(VideoResNet, self).__init__()
+        super().__init__()
         _log_api_usage_once(self)
         self.inplanes = 64
torchvision/ops/_register_onnx_ops.py
@@ -38,7 +38,7 @@ def _register_custom_op():
         # ONNX doesn't support negative sampling_ratio
         if sampling_ratio < 0:
             warnings.warn(
-                "ONNX doesn't support negative sampling ratio, " "therefore is is set to 0 in order to be exported."
+                "ONNX doesn't support negative sampling ratio, therefore is is set to 0 in order to be exported."
             )
             sampling_ratio = 0
         return g.op(
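The warning text above was previously split across two adjacent string literals, which Python concatenates at compile time, so joining them into one literal leaves the message unchanged (including the pre-existing "is is" wording). A quick check (illustrative only):

    split = "ONNX doesn't support negative sampling ratio, " "therefore is is set to 0 in order to be exported."
    merged = "ONNX doesn't support negative sampling ratio, therefore is is set to 0 in order to be exported."
    assert split == merged  # adjacent literals and a single literal produce the same string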
torchvision/ops/deform_conv.py
@@ -83,9 +83,7 @@ def deform_conv2d(
         raise RuntimeError(
             "the shape of the offset tensor at dimension 1 is not valid. It should "
             "be a multiple of 2 * weight.size[2] * weight.size[3].\n"
-            "Got offset.shape[1]={}, while 2 * weight.size[2] * weight.size[3]={}".format(
-                offset.shape[1], 2 * weights_h * weights_w
-            )
+            f"Got offset.shape[1]={offset.shape[1]}, while 2 * weight.size[2] * weight.size[3]={2 * weights_h * weights_w}"
         )

     return torch.ops.torchvision.deform_conv2d(
@@ -122,7 +120,7 @@ class DeformConv2d(nn.Module):
         groups: int = 1,
         bias: bool = True,
     ):
-        super(DeformConv2d, self).__init__()
+        super().__init__()

         if in_channels % groups != 0:
             raise ValueError("in_channels must be divisible by groups")
torchvision/ops/feature_pyramid_network.py
@@ -74,7 +74,7 @@ class FeaturePyramidNetwork(nn.Module):
         out_channels: int,
         extra_blocks: Optional[ExtraFPNBlock] = None,
     ):
-        super(FeaturePyramidNetwork, self).__init__()
+        super().__init__()
         self.inner_blocks = nn.ModuleList()
         self.layer_blocks = nn.ModuleList()
         for in_channels in in_channels_list:
@@ -180,7 +180,7 @@ class LastLevelP6P7(ExtraFPNBlock):
     """

     def __init__(self, in_channels: int, out_channels: int):
-        super(LastLevelP6P7, self).__init__()
+        super().__init__()
         self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
         self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
         for module in [self.p6, self.p7]:
View file @
d367a01a
...
@@ -65,7 +65,7 @@ class FrozenBatchNorm2d(torch.nn.Module):
...
@@ -65,7 +65,7 @@ class FrozenBatchNorm2d(torch.nn.Module):
if
n
is
not
None
:
if
n
is
not
None
:
warnings
.
warn
(
"`n` argument is deprecated and has been renamed `num_features`"
,
DeprecationWarning
)
warnings
.
warn
(
"`n` argument is deprecated and has been renamed `num_features`"
,
DeprecationWarning
)
num_features
=
n
num_features
=
n
super
(
FrozenBatchNorm2d
,
self
).
__init__
()
super
().
__init__
()
self
.
eps
=
eps
self
.
eps
=
eps
self
.
register_buffer
(
"weight"
,
torch
.
ones
(
num_features
))
self
.
register_buffer
(
"weight"
,
torch
.
ones
(
num_features
))
self
.
register_buffer
(
"bias"
,
torch
.
zeros
(
num_features
))
self
.
register_buffer
(
"bias"
,
torch
.
zeros
(
num_features
))
...
@@ -86,7 +86,7 @@ class FrozenBatchNorm2d(torch.nn.Module):
...
@@ -86,7 +86,7 @@ class FrozenBatchNorm2d(torch.nn.Module):
if
num_batches_tracked_key
in
state_dict
:
if
num_batches_tracked_key
in
state_dict
:
del
state_dict
[
num_batches_tracked_key
]
del
state_dict
[
num_batches_tracked_key
]
super
(
FrozenBatchNorm2d
,
self
).
_load_from_state_dict
(
super
().
_load_from_state_dict
(
state_dict
,
prefix
,
local_metadata
,
strict
,
missing_keys
,
unexpected_keys
,
error_msgs
state_dict
,
prefix
,
local_metadata
,
strict
,
missing_keys
,
unexpected_keys
,
error_msgs
)
)
...
...
torchvision/ops/poolers.py
@@ -42,7 +42,7 @@ def initLevelMapper(
     return LevelMapper(k_min, k_max, canonical_scale, canonical_level, eps)


-class LevelMapper(object):
+class LevelMapper:
     """Determine which FPN level each RoI in a set of RoIs should map to based
     on the heuristic in the FPN paper.
@@ -129,7 +129,7 @@ class MultiScaleRoIAlign(nn.Module):
         canonical_scale: int = 224,
         canonical_level: int = 4,
     ):
-        super(MultiScaleRoIAlign, self).__init__()
+        super().__init__()
         if isinstance(output_size, int):
             output_size = (output_size, output_size)
         self.featmap_names = featmap_names