OpenDAS / vision / Commits

Commit bac678c8 (unverified signature)
remove functionality scheduled for 0.15 after deprecation (#7176)
Authored Feb 07, 2023 by Philip Meier; committed via GitHub on Feb 07, 2023
Parent: a05d8179
Changes: 35 files in the commit; this page shows 20 changed files with 0 additions and 321 deletions (+0 -321).
Changed files on this page:

test/test_functional_tensor.py  +0 -61
test/test_prototype_transforms_consistency.py  +0 -6
test/test_transforms.py  +0 -22
torchvision/datasets/utils.py  +0 -13
torchvision/models/alexnet.py  +0 -11
torchvision/models/densenet.py  +0 -13
torchvision/models/detection/faster_rcnn.py  +0 -13
torchvision/models/detection/fcos.py  +0 -11
torchvision/models/detection/keypoint_rcnn.py  +0 -13
torchvision/models/detection/mask_rcnn.py  +0 -11
torchvision/models/detection/retinanet.py  +0 -11
torchvision/models/detection/ssd.py  +0 -22
torchvision/models/detection/ssdlite.py  +0 -11
torchvision/models/efficientnet.py  +0 -30
torchvision/models/googlenet.py  +0 -12
torchvision/models/inception.py  +0 -12
torchvision/models/mobilenetv2.py  +0 -11
torchvision/models/mobilenetv3.py  +0 -12
torchvision/models/quantization/googlenet.py  +0 -13
torchvision/models/quantization/inception.py  +0 -13
test/test_functional_tensor.py

@@ -2,7 +2,6 @@ import colorsys
import itertools
import math
import os
import re
from functools import partial
from typing import Sequence

@@ -144,20 +143,6 @@ class TestRotate:
        center = (20, 22)
        _test_fn_on_batch(
            batch_tensors, F.rotate, angle=32, interpolation=NEAREST, expand=True, center=center
        )

    def test_rotate_interpolation_type(self):
        tensor, _ = _create_data(26, 26)
        # assert changed type warning
        with pytest.warns(
            UserWarning,
            match=re.escape(
                "Argument 'interpolation' of type int is deprecated since 0.13 and will be removed in 0.15. "
                "Please use InterpolationMode enum."
            ),
        ):
            res1 = F.rotate(tensor, 45, interpolation=2)
            res2 = F.rotate(tensor, 45, interpolation=BILINEAR)
            assert_equal(res1, res2)


class TestAffine:

@@ -364,22 +349,6 @@ class TestAffine:
        _test_fn_on_batch(
            batch_tensors, F.affine, angle=-43, translate=[-3, 4], scale=1.2, shear=[4.0, 5.0]
        )

    @pytest.mark.parametrize("device", cpu_and_gpu())
    def test_warnings(self, device):
        tensor, pil_img = _create_data(26, 26, device=device)

        # assert changed type warning
        with pytest.warns(
            UserWarning,
            match=re.escape(
                "Argument 'interpolation' of type int is deprecated since 0.13 and will be removed in 0.15. "
                "Please use InterpolationMode enum."
            ),
        ):
            res1 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=2)
            res2 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=BILINEAR)
            assert_equal(res1, res2)


def _get_data_dims_and_points_for_perspective():
    # Ideally we would parametrize independently over data dims and points, but

@@ -478,23 +447,6 @@ def test_perspective_batch(device, dims_and_points, dt):
        )


def test_perspective_interpolation_warning():
    # assert changed type warning
    spoints = [[0, 0], [33, 0], [33, 25], [0, 25]]
    epoints = [[3, 2], [32, 3], [30, 24], [2, 25]]
    tensor = torch.randint(0, 256, (3, 26, 26))
    with pytest.warns(
        UserWarning,
        match=re.escape(
            "Argument 'interpolation' of type int is deprecated since 0.13 and will be removed in 0.15. "
            "Please use InterpolationMode enum."
        ),
    ):
        res1 = F.perspective(tensor, startpoints=spoints, endpoints=epoints, interpolation=2)
        res2 = F.perspective(tensor, startpoints=spoints, endpoints=epoints, interpolation=BILINEAR)
        assert_equal(res1, res2)


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("dt", [None, torch.float32, torch.float64, torch.float16])
@pytest.mark.parametrize(

@@ -568,19 +520,6 @@ def test_resize_asserts(device):
    tensor, pil_img = _create_data(26, 36, device=device)

    # assert changed type warning
    with pytest.warns(
        UserWarning,
        match=re.escape(
            "Argument 'interpolation' of type int is deprecated since 0.13 and will be removed in 0.15. "
            "Please use InterpolationMode enum."
        ),
    ):
        res1 = F.resize(tensor, size=32, interpolation=2)
        res2 = F.resize(tensor, size=32, interpolation=BILINEAR)
        assert_equal(res1, res2)

    for img in (tensor, pil_img):
        exp_msg = "max_size should only be passed if size specifies the length of the smaller edge"
        with pytest.raises(ValueError, match=exp_msg):
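Note: the deleted tests above only exercised the deprecation warning for integer interpolation codes. As a point of reference, a minimal sketch of the enum-based functional calls the warning text pointed to (tensor shape and parameter values are illustrative, not taken from the diff):

import torch
from torchvision.transforms import InterpolationMode
from torchvision.transforms import functional as F

img = torch.randint(0, 256, (3, 26, 26), dtype=torch.uint8)

# per the deleted warning text, integer codes such as interpolation=2
# are replaced by the InterpolationMode enum
rotated = F.rotate(img, angle=45, interpolation=InterpolationMode.BILINEAR)
resized = F.resize(img, size=32, interpolation=InterpolationMode.BILINEAR)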
test/test_prototype_transforms_consistency.py

@@ -87,12 +87,6 @@ CONSISTENCY_CONFIGS = [
            ArgsKwargs((32, 29)),
            ArgsKwargs((31, 28), interpolation=prototype_transforms.InterpolationMode.NEAREST),
            ArgsKwargs((33, 26), interpolation=prototype_transforms.InterpolationMode.BICUBIC),
            # FIXME: these are currently failing, since the new transform only supports the enum. The int input is
            # already deprecated and scheduled to be removed in 0.15. Should we support ints on the prototype
            # transform? I guess it depends if we roll out before 0.15 or not.
            # ArgsKwargs((30, 27), interpolation=0),
            # ArgsKwargs((35, 29), interpolation=2),
            # ArgsKwargs((34, 25), interpolation=3),
            NotScriptableArgsKwargs(31, max_size=32),
            ArgsKwargs([31], max_size=32),
            NotScriptableArgsKwargs(30, max_size=100),
test/test_transforms.py

@@ -1872,17 +1872,6 @@ def test_random_rotation():
    # Checking if RandomRotation can be printed as string
    t.__repr__()

    # assert changed type warning
    with pytest.warns(
        UserWarning,
        match=re.escape(
            "Argument 'interpolation' of type int is deprecated since 0.13 and will be removed in 0.15. "
            "Please use InterpolationMode enum."
        ),
    ):
        t = transforms.RandomRotation((-10, 10), interpolation=2)
        assert t.interpolation == transforms.InterpolationMode.BILINEAR


def test_random_rotation_error():
    # assert fill being either a Sequence or a Number

@@ -2212,17 +2201,6 @@ def test_random_affine():
    t = transforms.RandomAffine(10, interpolation=transforms.InterpolationMode.BILINEAR)
    assert "bilinear" in t.__repr__()

    # assert changed type warning
    with pytest.warns(
        UserWarning,
        match=re.escape(
            "Argument 'interpolation' of type int is deprecated since 0.13 and will be removed in 0.15. "
            "Please use InterpolationMode enum."
        ),
    ):
        t = transforms.RandomAffine(10, interpolation=2)
        assert t.interpolation == transforms.InterpolationMode.BILINEAR


def test_elastic_transformation():
    with pytest.raises(TypeError, match=r"alpha should be float or a sequence of floats"):
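The same migration applies to the class-based transforms whose warning tests are deleted here; a sketch mirroring the arguments used in the removed tests:

from torchvision import transforms
from torchvision.transforms import InterpolationMode

# replaces RandomRotation((-10, 10), interpolation=2) and RandomAffine(10, interpolation=2)
rotation = transforms.RandomRotation((-10, 10), interpolation=InterpolationMode.BILINEAR)
affine = transforms.RandomAffine(10, interpolation=InterpolationMode.BILINEAR)
assert rotation.interpolation == InterpolationMode.BILINEAR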
torchvision/datasets/utils.py

@@ -48,19 +48,6 @@ def _urlretrieve(url: str, filename: str, chunk_size: int = 1024 * 32) -> None:
        _save_response_content(iter(lambda: response.read(chunk_size), b""), filename, length=response.length)


def gen_bar_updater() -> Callable[[int, int, int], None]:
    warnings.warn("The function `gen_bar_update` is deprecated since 0.13 and will be removed in 0.15.")
    pbar = tqdm(total=None)

    def bar_update(count, block_size, total_size):
        if pbar.total is None and total_size:
            pbar.total = total_size
        progress_bytes = count * block_size
        pbar.update(progress_bytes - pbar.n)

    return bar_update


def calculate_md5(fpath: str, chunk_size: int = 1024 * 1024) -> str:
    # Setting the `usedforsecurity` flag does not change anything about the functionality, but indicates that we are
    # not using the MD5 checksum for cryptography. This enables its usage in restricted environments like FIPS. Without
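gen_bar_updater is removed without a public replacement in this diff. Code that still relied on it can recreate the hook locally; a sketch copied from the deleted implementation (the wrapper name make_bar_updater is made up here), usable for example as the reporthook of urllib.request.urlretrieve:

from tqdm import tqdm

def make_bar_updater():
    # local stand-in for the removed torchvision.datasets.utils.gen_bar_updater
    pbar = tqdm(total=None)

    def bar_update(count, block_size, total_size):
        # reporthook signature: (blocks transferred, block size in bytes, total size in bytes)
        if pbar.total is None and total_size:
            pbar.total = total_size
        pbar.update(count * block_size - pbar.n)

    return bar_update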
torchvision/models/alexnet.py

@@ -117,14 +117,3 @@ def alexnet(*, weights: Optional[AlexNet_Weights] = None, progress: bool = True,
        model.load_state_dict(weights.get_state_dict(progress=progress))

    return model


# The dictionary below is internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "alexnet": AlexNet_Weights.IMAGENET1K_V1.url,
    }
)
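The model_urls dictionary deleted here (and the analogous ones in the model files below) duplicated information that the weights enums already carry. A sketch of the enum-based equivalent for AlexNet; the same pattern applies to the other classification models in this commit:

from torchvision.models import alexnet, AlexNet_Weights

weights = AlexNet_Weights.IMAGENET1K_V1
model = alexnet(weights=weights)   # instead of resolving model_urls["alexnet"] manually
checkpoint_url = weights.url       # the URL the deleted dictionary exposed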
torchvision/models/densenet.py

@@ -446,16 +446,3 @@ def densenet201(*, weights: Optional[DenseNet201_Weights] = None, progress: bool
    weights = DenseNet201_Weights.verify(weights)

    return _densenet(32, (6, 12, 48, 32), 64, weights, progress, **kwargs)


# The dictionary below is internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "densenet121": DenseNet121_Weights.IMAGENET1K_V1.url,
        "densenet169": DenseNet169_Weights.IMAGENET1K_V1.url,
        "densenet201": DenseNet201_Weights.IMAGENET1K_V1.url,
        "densenet161": DenseNet161_Weights.IMAGENET1K_V1.url,
    }
)
torchvision/models/detection/faster_rcnn.py

@@ -841,16 +841,3 @@ def fasterrcnn_mobilenet_v3_large_fpn(
        trainable_backbone_layers=trainable_backbone_layers,
        **kwargs,
    )


# The dictionary below is internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "fasterrcnn_resnet50_fpn_coco": FasterRCNN_ResNet50_FPN_Weights.COCO_V1.url,
        "fasterrcnn_mobilenet_v3_large_320_fpn_coco": FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.COCO_V1.url,
        "fasterrcnn_mobilenet_v3_large_fpn_coco": FasterRCNN_MobileNet_V3_Large_FPN_Weights.COCO_V1.url,
    }
)
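For the detection models the replacement looks the same; a sketch using the Faster R-CNN weights referenced by the deleted keys (the .transforms() call is the usual companion of the weights enums):

from torchvision.models.detection import fasterrcnn_resnet50_fpn, FasterRCNN_ResNet50_FPN_Weights

weights = FasterRCNN_ResNet50_FPN_Weights.COCO_V1
model = fasterrcnn_resnet50_fpn(weights=weights).eval()
preprocess = weights.transforms()  # preprocessing matching these weights
checkpoint_url = weights.url       # what model_urls["fasterrcnn_resnet50_fpn_coco"] pointed at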
torchvision/models/detection/fcos.py

@@ -769,14 +769,3 @@ def fcos_resnet50_fpn(
        model.load_state_dict(weights.get_state_dict(progress=progress))

    return model


# The dictionary below is internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "fcos_resnet50_fpn_coco": FCOS_ResNet50_FPN_Weights.COCO_V1.url,
    }
)
torchvision/models/detection/keypoint_rcnn.py

@@ -470,16 +470,3 @@ def keypointrcnn_resnet50_fpn(
            overwrite_eps(model, 0.0)

    return model


# The dictionary below is internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        # legacy model for BC reasons, see https://github.com/pytorch/vision/issues/1606
        "keypointrcnn_resnet50_fpn_coco_legacy": KeypointRCNN_ResNet50_FPN_Weights.COCO_LEGACY.url,
        "keypointrcnn_resnet50_fpn_coco": KeypointRCNN_ResNet50_FPN_Weights.COCO_V1.url,
    }
)
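The legacy checkpoint behind the deleted "keypointrcnn_resnet50_fpn_coco_legacy" key remains reachable through its own enum member; a sketch:

from torchvision.models.detection import keypointrcnn_resnet50_fpn, KeypointRCNN_ResNet50_FPN_Weights

# COCO_LEGACY corresponds to the legacy URL kept for BC (see the deleted comment above)
model = keypointrcnn_resnet50_fpn(weights=KeypointRCNN_ResNet50_FPN_Weights.COCO_LEGACY)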
torchvision/models/detection/mask_rcnn.py

@@ -585,14 +585,3 @@ def maskrcnn_resnet50_fpn_v2(
        model.load_state_dict(weights.get_state_dict(progress=progress))

    return model


# The dictionary below is internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "maskrcnn_resnet50_fpn_coco": MaskRCNN_ResNet50_FPN_Weights.COCO_V1.url,
    }
)
torchvision/models/detection/retinanet.py

@@ -897,14 +897,3 @@ def retinanet_resnet50_fpn_v2(
        model.load_state_dict(weights.get_state_dict(progress=progress))

    return model


# The dictionary below is internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "retinanet_resnet50_fpn_coco": RetinaNet_ResNet50_FPN_Weights.COCO_V1.url,
    }
)
torchvision/models/detection/ssd.py

@@ -680,25 +680,3 @@ def ssd300_vgg16(
        model.load_state_dict(weights.get_state_dict(progress=progress))

    return model


# The dictionary below is internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "ssd300_vgg16_coco": SSD300_VGG16_Weights.COCO_V1.url,
    }
)

backbone_urls = _ModelURLs(
    {
        # We port the features of a VGG16 backbone trained by amdegroot because unlike the one on TorchVision, it uses
        # the same input standardization method as the paper.
        # Ref: https://s3.amazonaws.com/amdegroot-models/vgg16_reducedfc.pth
        # Only the `features` weights have proper values, those on the `classifier` module are filled with nans.
        "vgg16_features": VGG16_Weights.IMAGENET1K_FEATURES.url,
    }
)
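Besides model_urls, this file also drops backbone_urls; the ported VGG16 feature weights it pointed at are available through a weights enum as well. A sketch:

from torchvision.models import VGG16_Weights
from torchvision.models.detection import ssd300_vgg16, SSD300_VGG16_Weights

backbone_url = VGG16_Weights.IMAGENET1K_FEATURES.url  # what backbone_urls["vgg16_features"] exposed
model = ssd300_vgg16(weights=SSD300_VGG16_Weights.COCO_V1)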
torchvision/models/detection/ssdlite.py

@@ -329,14 +329,3 @@ def ssdlite320_mobilenet_v3_large(
        model.load_state_dict(weights.get_state_dict(progress=progress))

    return model


# The dictionary below is internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "ssdlite320_mobilenet_v3_large_coco": SSDLite320_MobileNet_V3_Large_Weights.COCO_V1.url,
    }
)
torchvision/models/efficientnet.py

import copy
import math
import warnings
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union

@@ -239,7 +238,6 @@ class EfficientNet(nn.Module):
        num_classes: int = 1000,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        last_channel: Optional[int] = None,
        **kwargs: Any,
    ) -> None:
        """
        EfficientNet V1 and V2 main class

@@ -263,16 +261,6 @@ class EfficientNet(nn.Module):
        ):
            raise TypeError("The inverted_residual_setting should be List[MBConvConfig]")

        if "block" in kwargs:
            warnings.warn(
                "The parameter 'block' is deprecated since 0.13 and will be removed 0.15. "
                "Please pass this information on 'MBConvConfig.block' instead."
            )
            if kwargs["block"] is not None:
                for s in inverted_residual_setting:
                    if isinstance(s, MBConvConfig):
                        s.block = kwargs["block"]

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

@@ -1141,21 +1129,3 @@ def efficientnet_v2_l(
        norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
        **kwargs,
    )


# The dictionary below is internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "efficientnet_b0": EfficientNet_B0_Weights.IMAGENET1K_V1.url,
        "efficientnet_b1": EfficientNet_B1_Weights.IMAGENET1K_V1.url,
        "efficientnet_b2": EfficientNet_B2_Weights.IMAGENET1K_V1.url,
        "efficientnet_b3": EfficientNet_B3_Weights.IMAGENET1K_V1.url,
        "efficientnet_b4": EfficientNet_B4_Weights.IMAGENET1K_V1.url,
        "efficientnet_b5": EfficientNet_B5_Weights.IMAGENET1K_V1.url,
        "efficientnet_b6": EfficientNet_B6_Weights.IMAGENET1K_V1.url,
        "efficientnet_b7": EfficientNet_B7_Weights.IMAGENET1K_V1.url,
    }
)
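The deleted branch shows the migration for the 'block' kwarg: it is now configured per stage via MBConvConfig.block rather than on the EfficientNet constructor. A rough sketch, assuming MBConvConfig and MBConv are importable as below; the one-stage setting is made up for illustration and is not a real EfficientNet variant:

from torchvision.models.efficientnet import EfficientNet, MBConv, MBConvConfig

# hypothetical single-stage setting: (expand_ratio, kernel, stride, in_channels, out_channels, num_layers)
setting = [MBConvConfig(1, 3, 1, 32, 16, 1)]
for cfg in setting:
    cfg.block = MBConv  # replaces the removed EfficientNet(..., block=MBConv) kwarg

model = EfficientNet(setting, dropout=0.2)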
torchvision/models/googlenet.py

@@ -343,15 +343,3 @@ def googlenet(*, weights: Optional[GoogLeNet_Weights] = None, progress: bool = T
        )

    return model


# The dictionary below is internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        # GoogLeNet ported from TensorFlow
        "googlenet": GoogLeNet_Weights.IMAGENET1K_V1.url,
    }
)
torchvision/models/inception.py

@@ -476,15 +476,3 @@ def inception_v3(*, weights: Optional[Inception_V3_Weights] = None, progress: bo
        model.AuxLogits = None

    return model


# The dictionary below is internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        # Inception v3 ported from TensorFlow
        "inception_v3_google": Inception_V3_Weights.IMAGENET1K_V1.url,
    }
)
torchvision/models/mobilenetv2.py

@@ -258,14 +258,3 @@ def mobilenet_v2(
        model.load_state_dict(weights.get_state_dict(progress=progress))

    return model


# The dictionary below is internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "mobilenet_v2": MobileNet_V2_Weights.IMAGENET1K_V1.url,
    }
)
torchvision/models/mobilenetv3.py

@@ -421,15 +421,3 @@ def mobilenet_v3_small(
    inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_small", **kwargs)
    return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs)


# The dictionary below is internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "mobilenet_v3_large": MobileNet_V3_Large_Weights.IMAGENET1K_V1.url,
        "mobilenet_v3_small": MobileNet_V3_Small_Weights.IMAGENET1K_V1.url,
    }
)
torchvision/models/quantization/googlenet.py

@@ -208,16 +208,3 @@ def googlenet(
        )

    return model


# The dictionary below is internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs
from ..googlenet import model_urls  # noqa: F401


quant_model_urls = _ModelURLs(
    {
        # fp32 GoogLeNet ported from TensorFlow, with weights quantized in PyTorch
        "googlenet_fbgemm": GoogLeNet_QuantizedWeights.IMAGENET1K_FBGEMM_V1.url,
    }
)
torchvision/models/quantization/inception.py

@@ -271,16 +271,3 @@ def inception_v3(
        model.AuxLogits = None

    return model


# The dictionary below is internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs
from ..inception import model_urls  # noqa: F401


quant_model_urls = _ModelURLs(
    {
        # fp32 weights ported from TensorFlow, quantized in PyTorch
        "inception_v3_google_fbgemm": Inception_V3_QuantizedWeights.IMAGENET1K_FBGEMM_V1.url,
    }
)
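The quantized model files follow the same pattern, with the quantized weights enums replacing quant_model_urls. A sketch for the quantized GoogLeNet; the quantized Inception v3 above is analogous:

from torchvision.models.quantization import googlenet, GoogLeNet_QuantizedWeights

weights = GoogLeNet_QuantizedWeights.IMAGENET1K_FBGEMM_V1
model = googlenet(weights=weights, quantize=True)  # instead of quant_model_urls["googlenet_fbgemm"]
checkpoint_url = weights.url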