OpenDAS / vision / Commits / b45969a7

Commit b45969a7 (unverified), authored Oct 25, 2022 by Vasilis Vryniotis, committed by GitHub on Oct 25, 2022.

[prototype] Clean up `features` area (#6834)

* Clean ups on the `features` area
* Remove unnecessary imports
parent 7de68b0d

Changes: 7 changed files with 11 additions and 46 deletions (+11 / -46)

torchvision/prototype/features/__init__.py                   +2  -19
torchvision/prototype/features/_bounding_box.py              +0  -12
torchvision/prototype/features/_image.py                     +2   -4
torchvision/prototype/features/_mask.py                      +2   -2
torchvision/prototype/features/_video.py                     +2   -4
torchvision/prototype/transforms/functional/_deprecated.py   +2   -2
torchvision/prototype/transforms/functional/_misc.py         +1   -3
torchvision/prototype/features/__init__.py

 from ._bounding_box import BoundingBox, BoundingBoxFormat
 from ._encoded import EncodedData, EncodedImage
 from ._feature import _Feature, FillType, FillTypeJIT, InputType, InputTypeJIT, is_simple_tensor
-from ._image import (
-    ColorSpace,
-    Image,
-    ImageType,
-    ImageTypeJIT,
-    LegacyImageType,
-    LegacyImageTypeJIT,
-    TensorImageType,
-    TensorImageTypeJIT,
-)
+from ._image import ColorSpace, Image, ImageType, ImageTypeJIT, TensorImageType, TensorImageTypeJIT
 from ._label import Label, OneHotLabel
 from ._mask import Mask
-from ._video import (
-    LegacyVideoType,
-    LegacyVideoTypeJIT,
-    TensorVideoType,
-    TensorVideoTypeJIT,
-    Video,
-    VideoType,
-    VideoTypeJIT,
-)
+from ._video import TensorVideoType, TensorVideoTypeJIT, Video, VideoType, VideoTypeJIT
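
The practical effect of this export change is that the `Legacy*` aliases can no longer be imported from `torchvision.prototype.features`. A minimal sketch of a downstream annotation after this commit (the helper function below is hypothetical, not part of the diff):

    # Hypothetical downstream helper, only to illustrate the export change: the
    # Legacy* aliases are gone, so annotations use the remaining exported aliases.
    import torch

    from torchvision.prototype import features


    def spatial_size(video: features.VideoType) -> tuple:
        # features.VideoType is Union[torch.Tensor, Video] per _video.py below;
        # features.LegacyVideoType (a bare torch.Tensor alias) no longer exists.
        return tuple(video.shape[-2:])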
torchvision/prototype/features/_bounding_box.py

...
@@ -61,18 +61,6 @@ class BoundingBox(_Feature):
     def __repr__(self, *, tensor_contents: Any = None) -> str:  # type: ignore[override]
         return self._make_repr(format=self.format, spatial_size=self.spatial_size)

-    def to_format(self, format: Union[str, BoundingBoxFormat]) -> BoundingBox:
-        if isinstance(format, str):
-            format = BoundingBoxFormat.from_str(format.upper())
-
-        return BoundingBox.wrap_like(
-            self,
-            self._F.convert_format_bounding_box(
-                self.as_subclass(torch.Tensor), old_format=self.format, new_format=format
-            ),
-            format=format,
-        )
-
     def horizontal_flip(self) -> BoundingBox:
         output = self._F.horizontal_flip_bounding_box(
             self.as_subclass(torch.Tensor), format=self.format, spatial_size=self.spatial_size
...
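
With `BoundingBox.to_format` removed, the equivalent conversion presumably goes through the functional op that the deleted body wrapped. A minimal sketch under that assumption; `wrap_like`, `convert_format_bounding_box`, and its `old_format`/`new_format` keywords come from the removed code, while the `BoundingBox` constructor arguments and the functional import path are assumptions about the prototype API at this commit:

    # Sketch only, not taken from the diff.
    import torch

    from torchvision.prototype import features
    from torchvision.prototype.transforms import functional as F  # assumed import path

    box = features.BoundingBox(
        torch.tensor([[10.0, 20.0, 30.0, 40.0]]),
        format=features.BoundingBoxFormat.XYXY,
        spatial_size=(100, 100),  # assumed constructor signature
    )

    # What the removed to_format() did internally: convert via the functional op
    # and re-wrap the plain tensor as a BoundingBox with the new format.
    converted = features.BoundingBox.wrap_like(
        box,
        F.convert_format_bounding_box(
            box.as_subclass(torch.Tensor),
            old_format=box.format,
            new_format=features.BoundingBoxFormat.CXCYWH,
        ),
        format=features.BoundingBoxFormat.CXCYWH,
    )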
torchvision/prototype/features/_image.py

 from __future__ import annotations

 import warnings
-from typing import Any, cast, List, Optional, Tuple, Union
+from typing import Any, List, Optional, Tuple, Union

 import PIL.Image
 import torch
...
@@ -104,7 +104,7 @@ class Image(_Feature):
     @property
     def spatial_size(self) -> Tuple[int, int]:
-        return cast(Tuple[int, int], tuple(self.shape[-2:]))
+        return tuple(self.shape[-2:])  # type: ignore[return-value]

     @property
     def num_channels(self) -> int:
...
@@ -285,7 +285,5 @@ class Image(_Feature):

 ImageType = Union[torch.Tensor, PIL.Image.Image, Image]
 ImageTypeJIT = torch.Tensor
-LegacyImageType = Union[torch.Tensor, PIL.Image.Image]
-LegacyImageTypeJIT = torch.Tensor
 TensorImageType = Union[torch.Tensor, Image]
 TensorImageTypeJIT = torch.Tensor
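
The `spatial_size` edit trades a runtime `typing.cast()` call for a comment-only suppression; both forms return the same tuple. A standalone illustration, not tied to the `Image` class (a plain assignment needs a different mypy error code than the `return-value` one used in the diff):

    # typing.cast is a no-op function call at runtime; a "type: ignore[...]" comment
    # costs nothing and silences only the targeted checker error.
    from typing import Tuple, cast

    import torch

    shape = torch.rand(3, 480, 640).shape  # torch.Size([3, 480, 640])

    # Before the commit: narrow tuple[int, ...] to Tuple[int, int] with cast().
    before: Tuple[int, int] = cast(Tuple[int, int], tuple(shape[-2:]))

    # After the commit: same value, no extra call; suppress the width complaint
    # in place (here with [assignment]; the property uses [return-value]).
    after: Tuple[int, int] = tuple(shape[-2:])  # type: ignore[assignment]

    assert before == after == (480, 640)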
torchvision/prototype/features/_mask.py

 from __future__ import annotations

-from typing import Any, cast, List, Optional, Tuple, Union
+from typing import Any, List, Optional, Tuple, Union

 import torch
 from torchvision.transforms import InterpolationMode
...
@@ -34,7 +34,7 @@ class Mask(_Feature):
     @property
     def spatial_size(self) -> Tuple[int, int]:
-        return cast(Tuple[int, int], tuple(self.shape[-2:]))
+        return tuple(self.shape[-2:])  # type: ignore[return-value]

     def horizontal_flip(self) -> Mask:
         output = self._F.horizontal_flip_mask(self.as_subclass(torch.Tensor))
...
torchvision/prototype/features/_video.py

 from __future__ import annotations

 import warnings
-from typing import Any, cast, List, Optional, Tuple, Union
+from typing import Any, List, Optional, Tuple, Union

 import torch
 from torchvision.transforms.functional import InterpolationMode
...
@@ -56,7 +56,7 @@ class Video(_Feature):
     @property
     def spatial_size(self) -> Tuple[int, int]:
-        return cast(Tuple[int, int], tuple(self.shape[-2:]))
+        return tuple(self.shape[-2:])  # type: ignore[return-value]

     @property
     def num_channels(self) -> int:
...
@@ -237,7 +237,5 @@ class Video(_Feature):

 VideoType = Union[torch.Tensor, Video]
 VideoTypeJIT = torch.Tensor
-LegacyVideoType = torch.Tensor
-LegacyVideoTypeJIT = torch.Tensor
 TensorVideoType = Union[torch.Tensor, Video]
 TensorVideoTypeJIT = torch.Tensor
torchvision/prototype/transforms/functional/_deprecated.py

...
@@ -23,8 +23,8 @@ def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Ima

 def rgb_to_grayscale(
-    inpt: Union[features.LegacyImageTypeJIT, features.LegacyVideoTypeJIT], num_output_channels: int = 1
-) -> Union[features.LegacyImageTypeJIT, features.LegacyVideoTypeJIT]:
+    inpt: Union[features.ImageTypeJIT, features.VideoTypeJIT], num_output_channels: int = 1
+) -> Union[features.ImageTypeJIT, features.VideoTypeJIT]:
     if not torch.jit.is_scripting() and isinstance(inpt, (features.Image, features.Video)):
         inpt = inpt.as_subclass(torch.Tensor)
         old_color_space = None
...
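
A hedged usage sketch of the retyped deprecated helper. The annotation now uses the regular `ImageTypeJIT`/`VideoTypeJIT` aliases instead of the removed `Legacy*` ones; whether `rgb_to_grayscale` is re-exported from `torchvision.prototype.transforms.functional` and the exact `features.Image` construction are assumptions:

    # Sketch only: call the deprecated helper with a prototype Image feature.
    import torch

    from torchvision.prototype import features
    from torchvision.prototype.transforms import functional as F  # assumed re-export

    img = features.Image(torch.rand(3, 32, 32))  # assumed constructor
    gray = F.rgb_to_grayscale(img, num_output_channels=1)  # inpt: Union[ImageTypeJIT, VideoTypeJIT]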
torchvision/prototype/transforms/functional/_misc.py

...
@@ -16,9 +16,7 @@ def normalize_image_tensor(
         raise TypeError(f"Input tensor should be a float tensor. Got {image.dtype}.")

     if image.ndim < 3:
-        raise ValueError(
-            f"Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = {image.size()}"
-        )
+        raise ValueError(f"Expected tensor to be a tensor image of size (..., C, H, W). Got {image.shape}.")

     if isinstance(std, (tuple, list)):
         divzero = not all(std)
...
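
The reworded `ValueError` keeps the same information, since `Tensor.shape` and `Tensor.size()` report the same `torch.Size`; the new message simply fits on one line and matches the punctuation of the `TypeError` above it. A quick check:

    # .shape and .size() are the same torch.Size; only the surrounding text changed.
    import torch

    image = torch.rand(32, 32)  # ndim == 2, so the guard in normalize_image_tensor would fire

    assert image.shape == image.size()
    print(f"Expected tensor to be a tensor image of size (..., C, H, W). Got {image.shape}.")
    # -> Expected tensor to be a tensor image of size (..., C, H, W). Got torch.Size([32, 32]).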