Commit d367a01a (unverified) in OpenDAS / vision

Use f-strings almost everywhere, and other cleanups by applying pyupgrade (#4585)

Co-authored-by: Nicolas Hug <nicolashug@fb.com>

Authored Oct 28, 2021 by Jirka Borovec; committed by GitHub, Oct 28, 2021.
Parent commit: 50dfe207
Changes: 136 changed files in this commit; this page shows 20 of them, with 77 additions and 87 deletions (+77 / -87).
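For context, pyupgrade mechanically rewrites code to newer Python idioms; it is typically invoked as, e.g., `pyupgrade --py36-plus <files>` (the exact invocation used for this commit is not stated here). A minimal sketch of the rewrites it applies throughout this diff; the class below is illustrative, not taken from torchvision:

# Before: Python 2-era idioms that pyupgrade rewrites.
class Sampler(object):                                 # redundant `object` base
    def __init__(self, ratio):
        super(Sampler, self).__init__()                # verbose two-argument super()
        self.msg = "ratio is {}".format(ratio)         # str.format()
        self.total = sum([x * x for x in range(10)])   # needless temporary list

# After: the equivalent Python 3 spellings, as seen in the hunks below.
class Sampler:
    def __init__(self, ratio):
        super().__init__()
        self.msg = f"ratio is {ratio}"
        self.total = sum(x * x for x in range(10))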
Files changed on this page:

torchvision/models/detection/_utils.py            +5  -5
torchvision/models/detection/anchor_utils.py      +3  -2
torchvision/models/detection/backbone_utils.py    +2  -2
torchvision/models/detection/faster_rcnn.py       +5  -5
torchvision/models/detection/generalized_rcnn.py  +4  -6
torchvision/models/detection/image_list.py        +1  -1
torchvision/models/detection/keypoint_rcnn.py     +3  -3
torchvision/models/detection/mask_rcnn.py         +5  -5
torchvision/models/detection/retinanet.py         +3  -5
torchvision/models/detection/roi_heads.py         +1  -1
torchvision/models/detection/rpn.py               +2  -2
torchvision/models/detection/ssd.py               +4  -6
torchvision/models/detection/ssdlite.py           +1  -1
torchvision/models/detection/transform.py         +4  -8
torchvision/models/efficientnet.py                +2  -2
torchvision/models/feature_extraction.py          +12 -12
torchvision/models/googlenet.py                   +5  -5
torchvision/models/inception.py                   +8  -8
torchvision/models/mnasnet.py                     +4  -4
torchvision/models/mobilenetv2.py                 +3  -4
torchvision/models/detection/_utils.py

@@ -7,7 +7,7 @@ from torch import Tensor, nn
 from torchvision.ops.misc import FrozenBatchNorm2d


-class BalancedPositiveNegativeSampler(object):
+class BalancedPositiveNegativeSampler:
     """
     This class samples batches, ensuring that they contain a fixed proportion of positives
     """

@@ -118,7 +118,7 @@ def encode_boxes(reference_boxes: Tensor, proposals: Tensor, weights: Tensor) ->
     return targets


-class BoxCoder(object):
+class BoxCoder:
     """
     This class encodes and decodes a set of bounding boxes into
     the representation used for training the regressors.

@@ -217,7 +217,7 @@ class BoxCoder(object):
         return pred_boxes


-class Matcher(object):
+class Matcher:
     """
     This class assigns to each predicted "element" (e.g., a box) a ground-truth
     element. Each predicted element will have exactly zero or one matches; each

@@ -275,9 +275,9 @@ class Matcher(object):
         if match_quality_matrix.numel() == 0:
             # empty targets or proposals not supported during training
             if match_quality_matrix.shape[0] == 0:
-                raise ValueError("No ground-truth boxes available for one of the images " "during training")
+                raise ValueError("No ground-truth boxes available for one of the images during training")
             else:
-                raise ValueError("No proposal boxes available for one of the images " "during training")
+                raise ValueError("No proposal boxes available for one of the images during training")

         # match_quality_matrix is M (gt) x N (predicted)
         # Max over gt elements (dim 0) to find best gt candidate for each prediction
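Aside: the `(object)` removals above are purely cosmetic in Python 3, where every class is new-style. A quick illustration (class names are hypothetical):

# Both spellings produce identical classes; `object` is always the MRO root.
class WithBase(object):
    pass

class WithoutBase:
    pass

assert WithBase.__mro__[-1] is object
assert WithoutBase.__mro__[-1] is object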
torchvision/models/detection/anchor_utils.py

@@ -37,7 +37,7 @@ class AnchorGenerator(nn.Module):
         sizes=((128, 256, 512),),
         aspect_ratios=((0.5, 1.0, 2.0),),
     ):
-        super(AnchorGenerator, self).__init__()
+        super().__init__()
         if not isinstance(sizes[0], (list, tuple)):
             # TODO change this

@@ -216,7 +216,8 @@ class DefaultBoxGenerator(nn.Module):
         for k, f_k in enumerate(grid_sizes):
             # Now add the default boxes for each width-height pair
             if self.steps is not None:
-                x_f_k, y_f_k = [img_shape / self.steps[k] for img_shape in image_size]
+                x_f_k = image_size[0] / self.steps[k]
+                y_f_k = image_size[1] / self.steps[k]
             else:
                 y_f_k, x_f_k = f_k
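The second hunk above replaces tuple-unpacking from a list comprehension with two explicit assignments. For a two-element `image_size` the forms compute the same values; the explicit version simply reads more directly. A small check under hypothetical values:

# Hypothetical image size and step, for illustration only.
image_size, step = (300, 300), 8
x_a, y_a = [s / step for s in image_size]              # old style
x_b, y_b = image_size[0] / step, image_size[1] / step  # new style
assert (x_a, y_a) == (x_b, y_b)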
torchvision/models/detection/backbone_utils.py

@@ -37,7 +37,7 @@ class BackboneWithFPN(nn.Module):
         out_channels: int,
         extra_blocks: Optional[ExtraFPNBlock] = None,
     ) -> None:
-        super(BackboneWithFPN, self).__init__()
+        super().__init__()
         if extra_blocks is None:
             extra_blocks = LastLevelMaxPool()

@@ -145,7 +145,7 @@ def _validate_trainable_layers(
         warnings.warn(
             "Changing trainable_backbone_layers has not effect if "
             "neither pretrained nor pretrained_backbone have been set to True, "
-            "falling back to trainable_backbone_layers={} so that all layers are trainable".format(max_value)
+            f"falling back to trainable_backbone_layers={max_value} so that all layers are trainable"
         )
         trainable_backbone_layers = max_value
torchvision/models/detection/faster_rcnn.py

@@ -195,7 +195,7 @@ class FasterRCNN(GeneralizedRCNN):
                 raise ValueError("num_classes should be None when box_predictor is specified")
         else:
             if box_predictor is None:
-                raise ValueError("num_classes should not be None when box_predictor " "is not specified")
+                raise ValueError("num_classes should not be None when box_predictor is not specified")

         out_channels = backbone.out_channels

@@ -255,7 +255,7 @@ class FasterRCNN(GeneralizedRCNN):
             image_std = [0.229, 0.224, 0.225]
         transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)

-        super(FasterRCNN, self).__init__(backbone, rpn, roi_heads, transform)
+        super().__init__(backbone, rpn, roi_heads, transform)


 class TwoMLPHead(nn.Module):

@@ -268,7 +268,7 @@ class TwoMLPHead(nn.Module):
     """

     def __init__(self, in_channels, representation_size):
-        super(TwoMLPHead, self).__init__()
+        super().__init__()

         self.fc6 = nn.Linear(in_channels, representation_size)
         self.fc7 = nn.Linear(representation_size, representation_size)

@@ -293,7 +293,7 @@ class FastRCNNPredictor(nn.Module):
     """

     def __init__(self, in_channels, num_classes):
-        super(FastRCNNPredictor, self).__init__()
+        super().__init__()
         self.cls_score = nn.Linear(in_channels, num_classes)
         self.bbox_pred = nn.Linear(in_channels, num_classes * 4)

@@ -436,7 +436,7 @@ def _fasterrcnn_mobilenet_v3_large_fpn(
     )
     if pretrained:
         if model_urls.get(weights_name, None) is None:
-            raise ValueError("No checkpoint is available for model {}".format(weights_name))
+            raise ValueError(f"No checkpoint is available for model {weights_name}")
         state_dict = load_state_dict_from_url(model_urls[weights_name], progress=progress)
         model.load_state_dict(state_dict)
     return model
torchvision/models/detection/generalized_rcnn.py

@@ -26,7 +26,7 @@ class GeneralizedRCNN(nn.Module):
     """

     def __init__(self, backbone, rpn, roi_heads, transform):
-        super(GeneralizedRCNN, self).__init__()
+        super().__init__()
         _log_api_usage_once(self)
         self.transform = transform
         self.backbone = backbone

@@ -65,11 +65,9 @@ class GeneralizedRCNN(nn.Module):
                 boxes = target["boxes"]
                 if isinstance(boxes, torch.Tensor):
                     if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
-                        raise ValueError(
-                            "Expected target boxes to be a tensor" "of shape [N, 4], got {:}.".format(boxes.shape)
-                        )
+                        raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.")
                 else:
-                    raise ValueError("Expected target boxes to be of type " "Tensor, got {:}.".format(type(boxes)))
+                    raise ValueError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")

         original_image_sizes: List[Tuple[int, int]] = []
         for img in images:

@@ -91,7 +89,7 @@ class GeneralizedRCNN(nn.Module):
                     degen_bb: List[float] = boxes[bb_idx].tolist()
                     raise ValueError(
                         "All bounding boxes should have positive height and width."
-                        " Found invalid box {} for target at index {}.".format(degen_bb, target_idx)
+                        f" Found invalid box {degen_bb} for target at index {target_idx}."
                     )

         features = self.backbone(images.tensors)
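Worth noting: the second hunk above fixes a real message bug, not just style. The old adjacent literals "Expected target boxes to be a tensor" "of shape [N, 4] ..." concatenate with no separator, so the raised message read "a tensorof shape". A minimal demonstration of the pitfall:

# Adjacent string literals are joined with nothing in between.
old_msg = "Expected target boxes to be a tensor" "of shape [N, 4], got {:}."
assert "tensorof shape" in old_msg  # the space was silently missing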
torchvision/models/detection/image_list.py

@@ -4,7 +4,7 @@ import torch
 from torch import Tensor


-class ImageList(object):
+class ImageList:
     """
     Structure that holds a list of images (of possibly
     varying sizes) as a single tensor.
torchvision/models/detection/keypoint_rcnn.py

@@ -212,7 +212,7 @@ class KeypointRCNN(FasterRCNN):
             keypoint_dim_reduced = 512  # == keypoint_layers[-1]
             keypoint_predictor = KeypointRCNNPredictor(keypoint_dim_reduced, num_keypoints)

-        super(KeypointRCNN, self).__init__(
+        super().__init__(
             backbone,
             num_classes,
             # transform parameters

@@ -260,7 +260,7 @@ class KeypointRCNNHeads(nn.Sequential):
             d.append(nn.Conv2d(next_feature, out_channels, 3, stride=1, padding=1))
             d.append(nn.ReLU(inplace=True))
             next_feature = out_channels
-        super(KeypointRCNNHeads, self).__init__(*d)
+        super().__init__(*d)
         for m in self.children():
             if isinstance(m, nn.Conv2d):
                 nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")

@@ -269,7 +269,7 @@ class KeypointRCNNHeads(nn.Sequential):
 class KeypointRCNNPredictor(nn.Module):
     def __init__(self, in_channels, num_keypoints):
-        super(KeypointRCNNPredictor, self).__init__()
+        super().__init__()
         input_features = in_channels
         deconv_kernel = 4
         self.kps_score_lowres = nn.ConvTranspose2d(
torchvision/models/detection/mask_rcnn.py

@@ -212,7 +212,7 @@ class MaskRCNN(FasterRCNN):
             mask_dim_reduced = 256
             mask_predictor = MaskRCNNPredictor(mask_predictor_in_channels, mask_dim_reduced, num_classes)

-        super(MaskRCNN, self).__init__(
+        super().__init__(
             backbone,
             num_classes,
             # transform parameters

@@ -263,13 +263,13 @@ class MaskRCNNHeads(nn.Sequential):
         d = OrderedDict()
         next_feature = in_channels
         for layer_idx, layer_features in enumerate(layers, 1):
-            d["mask_fcn{}".format(layer_idx)] = nn.Conv2d(
+            d[f"mask_fcn{layer_idx}"] = nn.Conv2d(
                 next_feature, layer_features, kernel_size=3, stride=1, padding=dilation, dilation=dilation
             )
-            d["relu{}".format(layer_idx)] = nn.ReLU(inplace=True)
+            d[f"relu{layer_idx}"] = nn.ReLU(inplace=True)
             next_feature = layer_features

-        super(MaskRCNNHeads, self).__init__(d)
+        super().__init__(d)
         for name, param in self.named_parameters():
             if "weight" in name:
                 nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")

@@ -279,7 +279,7 @@ class MaskRCNNHeads(nn.Sequential):
 class MaskRCNNPredictor(nn.Sequential):
     def __init__(self, in_channels, dim_reduced, num_classes):
-        super(MaskRCNNPredictor, self).__init__(
+        super().__init__(
             OrderedDict(
                 [
                     ("conv5_mask", nn.ConvTranspose2d(in_channels, dim_reduced, 2, 2, 0)),
torchvision/models/detection/retinanet.py

@@ -493,11 +493,9 @@ class RetinaNet(nn.Module):
                 boxes = target["boxes"]
                 if isinstance(boxes, torch.Tensor):
                     if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
-                        raise ValueError(
-                            "Expected target boxes to be a tensor" "of shape [N, 4], got {:}.".format(boxes.shape)
-                        )
+                        raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.")
                 else:
-                    raise ValueError("Expected target boxes to be of type " "Tensor, got {:}.".format(type(boxes)))
+                    raise ValueError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")

         # get the original image sizes
         original_image_sizes: List[Tuple[int, int]] = []

@@ -521,7 +519,7 @@ class RetinaNet(nn.Module):
                     degen_bb: List[float] = boxes[bb_idx].tolist()
                     raise ValueError(
                         "All bounding boxes should have positive height and width."
-                        " Found invalid box {} for target at index {}.".format(degen_bb, target_idx)
+                        f" Found invalid box {degen_bb} for target at index {target_idx}."
                     )

         # get the features from the backbone
torchvision/models/detection/roi_heads.py

@@ -517,7 +517,7 @@ class RoIHeads(nn.Module):
         keypoint_head=None,
         keypoint_predictor=None,
     ):
-        super(RoIHeads, self).__init__()
+        super().__init__()
         self.box_similarity = box_ops.box_iou
         # assign ground-truth boxes for each proposal
torchvision/models/detection/rpn.py

@@ -34,7 +34,7 @@ class RPNHead(nn.Module):
     """

     def __init__(self, in_channels: int, num_anchors: int) -> None:
-        super(RPNHead, self).__init__()
+        super().__init__()
         self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
         self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1)
         self.bbox_pred = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=1, stride=1)

@@ -132,7 +132,7 @@ class RegionProposalNetwork(torch.nn.Module):
         nms_thresh: float,
         score_thresh: float = 0.0,
     ) -> None:
-        super(RegionProposalNetwork, self).__init__()
+        super().__init__()
         self.anchor_generator = anchor_generator
         self.head = head
         self.box_coder = det_utils.BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
torchvision/models/detection/ssd.py

@@ -313,11 +313,9 @@ class SSD(nn.Module):
                 boxes = target["boxes"]
                 if isinstance(boxes, torch.Tensor):
                     if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
-                        raise ValueError(
-                            "Expected target boxes to be a tensor" "of shape [N, 4], got {:}.".format(boxes.shape)
-                        )
+                        raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.")
                 else:
-                    raise ValueError("Expected target boxes to be of type " "Tensor, got {:}.".format(type(boxes)))
+                    raise ValueError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")

         # get the original image sizes
         original_image_sizes: List[Tuple[int, int]] = []

@@ -339,7 +337,7 @@ class SSD(nn.Module):
                     degen_bb: List[float] = boxes[bb_idx].tolist()
                     raise ValueError(
                         "All bounding boxes should have positive height and width."
-                        " Found invalid box {} for target at index {}.".format(degen_bb, target_idx)
+                        f" Found invalid box {degen_bb} for target at index {target_idx}."
                     )

         # get the features from the backbone

@@ -625,7 +623,7 @@ def ssd300_vgg16(
     if pretrained:
         weights_name = "ssd300_vgg16_coco"
         if model_urls.get(weights_name, None) is None:
-            raise ValueError("No checkpoint is available for model {}".format(weights_name))
+            raise ValueError(f"No checkpoint is available for model {weights_name}")
         state_dict = load_state_dict_from_url(model_urls[weights_name], progress=progress)
         model.load_state_dict(state_dict)
     return model
torchvision/models/detection/ssdlite.py

@@ -268,7 +268,7 @@ def ssdlite320_mobilenet_v3_large(
     if pretrained:
         weights_name = "ssdlite320_mobilenet_v3_large_coco"
        if model_urls.get(weights_name, None) is None:
-            raise ValueError("No checkpoint is available for model {}".format(weights_name))
+            raise ValueError(f"No checkpoint is available for model {weights_name}")
         state_dict = load_state_dict_from_url(model_urls[weights_name], progress=progress)
         model.load_state_dict(state_dict)
     return model
torchvision/models/detection/transform.py

@@ -92,7 +92,7 @@ class GeneralizedRCNNTransform(nn.Module):
         size_divisible: int = 32,
         fixed_size: Optional[Tuple[int, int]] = None,
     ):
-        super(GeneralizedRCNNTransform, self).__init__()
+        super().__init__()
         if not isinstance(min_size, (list, tuple)):
             min_size = (min_size,)
         self.min_size = min_size

@@ -123,9 +123,7 @@ class GeneralizedRCNNTransform(nn.Module):
             target_index = targets[i] if targets is not None else None

             if image.dim() != 3:
-                raise ValueError(
-                    "images is expected to be a list of 3d tensors " "of shape [C, H, W], got {}".format(image.shape)
-                )
+                raise ValueError(f"images is expected to be a list of 3d tensors of shape [C, H, W], got {image.shape}")
             image = self.normalize(image)
             image, target_index = self.resize(image, target_index)
             images[i] = image

@@ -264,10 +262,8 @@ class GeneralizedRCNNTransform(nn.Module):
     def __repr__(self) -> str:
         format_string = self.__class__.__name__ + "("
         _indent = "\n    "
-        format_string += "{0}Normalize(mean={1}, std={2})".format(_indent, self.image_mean, self.image_std)
-        format_string += "{0}Resize(min_size={1}, max_size={2}, mode='bilinear')".format(
-            _indent, self.min_size, self.max_size
-        )
+        format_string += f"{_indent}Normalize(mean={self.image_mean}, std={self.image_std})"
+        format_string += f"{_indent}Resize(min_size={self.min_size}, max_size={self.max_size}, mode='bilinear')"
         format_string += "\n)"
         return format_string
torchvision/models/efficientnet.py

@@ -197,7 +197,7 @@ class EfficientNet(nn.Module):
         )

         # building inverted residual blocks
-        total_stage_blocks = sum([cnf.num_layers for cnf in inverted_residual_setting])
+        total_stage_blocks = sum(cnf.num_layers for cnf in inverted_residual_setting)
         stage_block_id = 0
         for cnf in inverted_residual_setting:
             stage: List[nn.Module] = []

@@ -287,7 +287,7 @@ def _efficientnet(
     model = EfficientNet(inverted_residual_setting, dropout, **kwargs)
     if pretrained:
         if model_urls.get(arch, None) is None:
-            raise ValueError("No checkpoint is available for model type {}".format(arch))
+            raise ValueError(f"No checkpoint is available for model type {arch}")
         state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
         model.load_state_dict(state_dict)
     return model
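The `sum([...])` to `sum(...)` change above drops the square brackets because `sum` accepts any iterable; a generator expression yields the same total without materializing a temporary list. For illustration, with a hypothetical per-stage layer count:

num_layers = [3, 4, 6, 3]  # hypothetical stage configuration, not from this file
assert sum([n for n in num_layers]) == sum(n for n in num_layers) == 16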
torchvision/models/feature_extraction.py

@@ -26,7 +26,7 @@ class LeafModuleAwareTracer(fx.Tracer):
         if "leaf_modules" in kwargs:
             leaf_modules = kwargs.pop("leaf_modules")
             self.leaf_modules = leaf_modules
-        super(LeafModuleAwareTracer, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)

     def is_leaf_module(self, m: nn.Module, module_qualname: str) -> bool:
         if isinstance(m, tuple(self.leaf_modules)):

@@ -54,7 +54,7 @@ class NodePathTracer(LeafModuleAwareTracer):
     """

     def __init__(self, *args, **kwargs):
-        super(NodePathTracer, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         # Track the qualified name of the Node being traced
         self.current_module_qualname = ""
         # A map from FX Node to the qualified name\#

@@ -168,7 +168,7 @@ def _warn_graph_differences(train_tracer: NodePathTracer, eval_tracer: NodePathT
             "are a subsequence of those obtained in eval mode. "
         )
     else:
-        msg = "The nodes obtained by tracing the model in train mode " "are different to those obtained in eval mode. "
+        msg = "The nodes obtained by tracing the model in train mode are different to those obtained in eval mode. "
     warnings.warn(msg + suggestion_msg)

@@ -399,17 +399,17 @@ def create_feature_extractor(
     """
     is_training = model.training

-    assert any(arg is not None for arg in [return_nodes, train_return_nodes, eval_return_nodes]), (
-        "Either `return_nodes` or `train_return_nodes` and "
-        "`eval_return_nodes` together, should be specified"
-    )
+    assert any(
+        arg is not None for arg in [return_nodes, train_return_nodes, eval_return_nodes]
+    ), "Either `return_nodes` or `train_return_nodes` and `eval_return_nodes` together, should be specified"

-    assert not ((train_return_nodes is None) ^ (eval_return_nodes is None)), (
-        "If any of `train_return_nodes` and `eval_return_nodes` are "
-        "specified, then both should be specified"
-    )
+    assert not (
+        (train_return_nodes is None) ^ (eval_return_nodes is None)
+    ), "If any of `train_return_nodes` and `eval_return_nodes` are specified, then both should be specified"

-    assert (return_nodes is None) ^ (train_return_nodes is None), (
-        "If `train_return_nodes` and `eval_return_nodes` are specified, "
-        "then both should be specified"
-    )
+    assert (return_nodes is None) ^ (
+        train_return_nodes is None
+    ), "If `train_return_nodes` and `eval_return_nodes` are specified, then both should be specified"

     # Put *_return_nodes into Dict[str, str] format
     def to_strdict(n) -> Dict[str, str]:
torchvision/models/googlenet.py

@@ -45,7 +45,7 @@ def googlenet(pretrained: bool = False, progress: bool = True, **kwargs: Any) ->
            kwargs["aux_logits"] = False
        if kwargs["aux_logits"]:
            warnings.warn(
-                "auxiliary heads in the pretrained googlenet model are NOT pretrained, " "so make sure to train them"
+                "auxiliary heads in the pretrained googlenet model are NOT pretrained, so make sure to train them"
            )
        original_aux_logits = kwargs["aux_logits"]
        kwargs["aux_logits"] = True

@@ -75,7 +75,7 @@ class GoogLeNet(nn.Module):
         dropout: float = 0.2,
         dropout_aux: float = 0.7,
     ) -> None:
-        super(GoogLeNet, self).__init__()
+        super().__init__()
         _log_api_usage_once(self)
         if blocks is None:
             blocks = [BasicConv2d, Inception, InceptionAux]

@@ -231,7 +231,7 @@ class Inception(nn.Module):
         pool_proj: int,
         conv_block: Optional[Callable[..., nn.Module]] = None,
     ) -> None:
-        super(Inception, self).__init__()
+        super().__init__()
         if conv_block is None:
             conv_block = BasicConv2d
         self.branch1 = conv_block(in_channels, ch1x1, kernel_size=1)

@@ -274,7 +274,7 @@ class InceptionAux(nn.Module):
         conv_block: Optional[Callable[..., nn.Module]] = None,
         dropout: float = 0.7,
     ) -> None:
-        super(InceptionAux, self).__init__()
+        super().__init__()
         if conv_block is None:
             conv_block = BasicConv2d
         self.conv = conv_block(in_channels, 128, kernel_size=1)

@@ -303,7 +303,7 @@ class InceptionAux(nn.Module):
 class BasicConv2d(nn.Module):
     def __init__(self, in_channels: int, out_channels: int, **kwargs: Any) -> None:
-        super(BasicConv2d, self).__init__()
+        super().__init__()
         self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
         self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
torchvision/models/inception.py

@@ -73,7 +73,7 @@ class Inception3(nn.Module):
         init_weights: Optional[bool] = None,
         dropout: float = 0.5,
     ) -> None:
-        super(Inception3, self).__init__()
+        super().__init__()
         _log_api_usage_once(self)
         if inception_blocks is None:
             inception_blocks = [BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD, InceptionE, InceptionAux]

@@ -214,7 +214,7 @@ class InceptionA(nn.Module):
     def __init__(
         self, in_channels: int, pool_features: int, conv_block: Optional[Callable[..., nn.Module]] = None
     ) -> None:
-        super(InceptionA, self).__init__()
+        super().__init__()
         if conv_block is None:
             conv_block = BasicConv2d
         self.branch1x1 = conv_block(in_channels, 64, kernel_size=1)

@@ -251,7 +251,7 @@ class InceptionA(nn.Module):
 class InceptionB(nn.Module):
     def __init__(self, in_channels: int, conv_block: Optional[Callable[..., nn.Module]] = None) -> None:
-        super(InceptionB, self).__init__()
+        super().__init__()
         if conv_block is None:
             conv_block = BasicConv2d
         self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2)

@@ -281,7 +281,7 @@ class InceptionC(nn.Module):
     def __init__(
         self, in_channels: int, channels_7x7: int, conv_block: Optional[Callable[..., nn.Module]] = None
     ) -> None:
-        super(InceptionC, self).__init__()
+        super().__init__()
         if conv_block is None:
             conv_block = BasicConv2d
         self.branch1x1 = conv_block(in_channels, 192, kernel_size=1)

@@ -325,7 +325,7 @@ class InceptionC(nn.Module):
 class InceptionD(nn.Module):
     def __init__(self, in_channels: int, conv_block: Optional[Callable[..., nn.Module]] = None) -> None:
-        super(InceptionD, self).__init__()
+        super().__init__()
         if conv_block is None:
             conv_block = BasicConv2d
         self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1)

@@ -356,7 +356,7 @@ class InceptionD(nn.Module):
 class InceptionE(nn.Module):
     def __init__(self, in_channels: int, conv_block: Optional[Callable[..., nn.Module]] = None) -> None:
-        super(InceptionE, self).__init__()
+        super().__init__()
         if conv_block is None:
             conv_block = BasicConv2d
         self.branch1x1 = conv_block(in_channels, 320, kernel_size=1)

@@ -405,7 +405,7 @@ class InceptionAux(nn.Module):
     def __init__(
         self, in_channels: int, num_classes: int, conv_block: Optional[Callable[..., nn.Module]] = None
     ) -> None:
-        super(InceptionAux, self).__init__()
+        super().__init__()
         if conv_block is None:
             conv_block = BasicConv2d
         self.conv0 = conv_block(in_channels, 128, kernel_size=1)

@@ -434,7 +434,7 @@ class InceptionAux(nn.Module):
 class BasicConv2d(nn.Module):
     def __init__(self, in_channels: int, out_channels: int, **kwargs: Any) -> None:
-        super(BasicConv2d, self).__init__()
+        super().__init__()
         self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
         self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
torchvision/models/mnasnet.py

@@ -26,7 +26,7 @@ class _InvertedResidual(nn.Module):
     def __init__(
         self, in_ch: int, out_ch: int, kernel_size: int, stride: int, expansion_factor: int, bn_momentum: float = 0.1
     ) -> None:
-        super(_InvertedResidual, self).__init__()
+        super().__init__()
         assert stride in [1, 2]
         assert kernel_size in [3, 5]
         mid_ch = in_ch * expansion_factor

@@ -97,7 +97,7 @@ class MNASNet(torch.nn.Module):
     _version = 2

     def __init__(self, alpha: float, num_classes: int = 1000, dropout: float = 0.2) -> None:
-        super(MNASNet, self).__init__()
+        super().__init__()
         _log_api_usage_once(self)
         assert alpha > 0.0
         self.alpha = alpha

@@ -193,14 +193,14 @@ class MNASNet(torch.nn.Module):
                 UserWarning,
             )
-        super(MNASNet, self)._load_from_state_dict(
+        super()._load_from_state_dict(
             state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
         )


 def _load_pretrained(model_name: str, model: nn.Module, progress: bool) -> None:
     if model_name not in _MODEL_URLS or _MODEL_URLS[model_name] is None:
-        raise ValueError("No checkpoint is available for model type {}".format(model_name))
+        raise ValueError(f"No checkpoint is available for model type {model_name}")
     checkpoint_url = _MODEL_URLS[model_name]
     model.load_state_dict(load_state_dict_from_url(checkpoint_url, progress=progress))
torchvision/models/mobilenetv2.py

@@ -42,7 +42,7 @@ class InvertedResidual(nn.Module):
     def __init__(
         self, inp: int, oup: int, stride: int, expand_ratio: int, norm_layer: Optional[Callable[..., nn.Module]] = None
     ) -> None:
-        super(InvertedResidual, self).__init__()
+        super().__init__()
         self.stride = stride
         assert stride in [1, 2]

@@ -110,7 +110,7 @@ class MobileNetV2(nn.Module):
            dropout (float): The droupout probability
        """
-        super(MobileNetV2, self).__init__()
+        super().__init__()
        _log_api_usage_once(self)
        if block is None:

@@ -137,8 +137,7 @@ class MobileNetV2(nn.Module):
         # only check the first element, assuming user knows t,c,n,s are required
         if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
-            raise ValueError(
-                "inverted_residual_setting should be non-empty "
-                "or a 4-element list, got {}".format(inverted_residual_setting)
-            )
+            raise ValueError(
+                f"inverted_residual_setting should be non-empty or a 4-element list, got {inverted_residual_setting}"
+            )

         # building first layer