OpenDAS / vision · commit d4d20f01 (unverified)

make type alias private (#7266)

Authored Feb 16, 2023 by Philip Meier; committed by GitHub on Feb 16, 2023.
Parent: e405f3c3
Changes: 24 · Showing 4 changed files with 55 additions and 55 deletions on this page.

torchvision/transforms/v2/functional/_geometry.py   +42 -42
torchvision/transforms/v2/functional/_meta.py         +9  -9
torchvision/transforms/v2/functional/_misc.py         +3  -3
torchvision/transforms/v2/functional/_temporal.py     +1  -1
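Every hunk below applies the same mechanical change: annotations that used the public-looking aliases datapoints.InputTypeJIT, FillTypeJIT, ImageTypeJIT, VideoTypeJIT, TensorImageTypeJIT and TensorVideoTypeJIT now use the underscore-prefixed variants, marking the aliases as private. A minimal sketch of the pattern; the alias members below are illustrative assumptions, not the torchvision definitions, which live in the datapoints module referenced by the diff:

from typing import Union

import PIL.Image
import torch

# Illustrative members only; the real alias may include more types.
_InputTypeJIT = Union[torch.Tensor, PIL.Image.Image]


def horizontal_flip(inpt: _InputTypeJIT) -> _InputTypeJIT:
    # Only the annotation name changes in the commit; the public dispatcher
    # keeps its name and runtime behaviour.
    return inpt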
torchvision/transforms/v2/functional/_geometry.py
@@ -71,7 +71,7 @@ def horizontal_flip_video(video: torch.Tensor) -> torch.Tensor:
     return horizontal_flip_image_tensor(video)


-def horizontal_flip(inpt: datapoints.InputTypeJIT) -> datapoints.InputTypeJIT:
+def horizontal_flip(inpt: datapoints._InputTypeJIT) -> datapoints._InputTypeJIT:
     if not torch.jit.is_scripting():
         _log_api_usage_once(horizontal_flip)
@@ -120,7 +120,7 @@ def vertical_flip_video(video: torch.Tensor) -> torch.Tensor:
     return vertical_flip_image_tensor(video)


-def vertical_flip(inpt: datapoints.InputTypeJIT) -> datapoints.InputTypeJIT:
+def vertical_flip(inpt: datapoints._InputTypeJIT) -> datapoints._InputTypeJIT:
     if not torch.jit.is_scripting():
         _log_api_usage_once(vertical_flip)
@@ -255,12 +255,12 @@ def resize_video(
 def resize(
-    inpt: datapoints.InputTypeJIT,
+    inpt: datapoints._InputTypeJIT,
     size: List[int],
     interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
     max_size: Optional[int] = None,
     antialias: Optional[Union[str, bool]] = "warn",
-) -> datapoints.InputTypeJIT:
+) -> datapoints._InputTypeJIT:
     if not torch.jit.is_scripting():
         _log_api_usage_once(resize)
     if torch.jit.is_scripting() or is_simple_tensor(inpt):
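The resize hunk above also shows the dispatch guard used by these dispatchers: under TorchScript, or when the input is a plain tensor rather than a datapoint, the call goes straight to the tensor kernel. A self-contained sketch of that control flow with stand-in names (none of the helpers below are torchvision code):

import torch
import torch.nn.functional as F


def _is_simple_tensor(inpt) -> bool:
    # Stand-in for torchvision's is_simple_tensor(): a bare Tensor, not a
    # datapoint subclass.
    return type(inpt) is torch.Tensor


def resize_kernel(image: torch.Tensor, size: int) -> torch.Tensor:
    # Stand-in tensor kernel; expects a (C, H, W) image.
    return F.interpolate(image.unsqueeze(0).float(), size=(size, size)).squeeze(0)


def resize_dispatcher(inpt, size: int):
    # Mirrors the guard in the hunk: scripting or plain tensors bypass
    # datapoint dispatch entirely.
    if torch.jit.is_scripting() or _is_simple_tensor(inpt):
        return resize_kernel(inpt, size)
    raise TypeError(f"unsupported input type {type(inpt).__name__}")


print(resize_dispatcher(torch.rand(3, 32, 32), 16).shape)  # torch.Size([3, 16, 16])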
@@ -428,7 +428,7 @@ def _compute_affine_output_size(matrix: List[float], w: int, h: int) -> Tuple[in
 def _apply_grid_transform(
-    img: torch.Tensor, grid: torch.Tensor, mode: str, fill: datapoints.FillTypeJIT
+    img: torch.Tensor, grid: torch.Tensor, mode: str, fill: datapoints._FillTypeJIT
 ) -> torch.Tensor:
     # We are using context knowledge that grid should have float dtype
@@ -470,7 +470,7 @@ def _assert_grid_transform_inputs(
     image: torch.Tensor,
     matrix: Optional[List[float]],
     interpolation: str,
-    fill: datapoints.FillTypeJIT,
+    fill: datapoints._FillTypeJIT,
     supported_interpolation_modes: List[str],
     coeffs: Optional[List[float]] = None,
 ) -> None:
@@ -533,7 +533,7 @@ def affine_image_tensor(
     scale: float,
     shear: List[float],
     interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
-    fill: datapoints.FillTypeJIT = None,
+    fill: datapoints._FillTypeJIT = None,
     center: Optional[List[float]] = None,
 ) -> torch.Tensor:
     interpolation = _check_interpolation(interpolation)
@@ -585,7 +585,7 @@ def affine_image_pil(
     scale: float,
     shear: List[float],
     interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
-    fill: datapoints.FillTypeJIT = None,
+    fill: datapoints._FillTypeJIT = None,
     center: Optional[List[float]] = None,
 ) -> PIL.Image.Image:
     interpolation = _check_interpolation(interpolation)
@@ -721,7 +721,7 @@ def affine_mask(
     translate: List[float],
     scale: float,
     shear: List[float],
-    fill: datapoints.FillTypeJIT = None,
+    fill: datapoints._FillTypeJIT = None,
     center: Optional[List[float]] = None,
 ) -> torch.Tensor:
     if mask.ndim < 3:
@@ -754,7 +754,7 @@ def affine_video(
     scale: float,
     shear: List[float],
     interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
-    fill: datapoints.FillTypeJIT = None,
+    fill: datapoints._FillTypeJIT = None,
     center: Optional[List[float]] = None,
 ) -> torch.Tensor:
     return affine_image_tensor(
@@ -770,15 +770,15 @@ def affine_video(
 def affine(
-    inpt: datapoints.InputTypeJIT,
+    inpt: datapoints._InputTypeJIT,
     angle: Union[int, float],
     translate: List[float],
     scale: float,
     shear: List[float],
     interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
-    fill: datapoints.FillTypeJIT = None,
+    fill: datapoints._FillTypeJIT = None,
     center: Optional[List[float]] = None,
-) -> datapoints.InputTypeJIT:
+) -> datapoints._InputTypeJIT:
     if not torch.jit.is_scripting():
         _log_api_usage_once(affine)
@@ -822,7 +822,7 @@ def rotate_image_tensor(
     interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
     expand: bool = False,
     center: Optional[List[float]] = None,
-    fill: datapoints.FillTypeJIT = None,
+    fill: datapoints._FillTypeJIT = None,
 ) -> torch.Tensor:
     interpolation = _check_interpolation(interpolation)
@@ -867,7 +867,7 @@ def rotate_image_pil(
     interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
     expand: bool = False,
     center: Optional[List[float]] = None,
-    fill: datapoints.FillTypeJIT = None,
+    fill: datapoints._FillTypeJIT = None,
 ) -> PIL.Image.Image:
     interpolation = _check_interpolation(interpolation)
@@ -910,7 +910,7 @@ def rotate_mask(
     angle: float,
     expand: bool = False,
     center: Optional[List[float]] = None,
-    fill: datapoints.FillTypeJIT = None,
+    fill: datapoints._FillTypeJIT = None,
 ) -> torch.Tensor:
     if mask.ndim < 3:
         mask = mask.unsqueeze(0)
@@ -939,19 +939,19 @@ def rotate_video(
     interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
     expand: bool = False,
     center: Optional[List[float]] = None,
-    fill: datapoints.FillTypeJIT = None,
+    fill: datapoints._FillTypeJIT = None,
 ) -> torch.Tensor:
     return rotate_image_tensor(video, angle, interpolation=interpolation, expand=expand, fill=fill, center=center)


 def rotate(
-    inpt: datapoints.InputTypeJIT,
+    inpt: datapoints._InputTypeJIT,
     angle: float,
     interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
     expand: bool = False,
     center: Optional[List[float]] = None,
-    fill: datapoints.FillTypeJIT = None,
+    fill: datapoints._FillTypeJIT = None,
-) -> datapoints.InputTypeJIT:
+) -> datapoints._InputTypeJIT:
     if not torch.jit.is_scripting():
         _log_api_usage_once(rotate)
@@ -1156,11 +1156,11 @@ def pad_video(
 def pad(
-    inpt: datapoints.InputTypeJIT,
+    inpt: datapoints._InputTypeJIT,
     padding: List[int],
     fill: Optional[Union[int, float, List[float]]] = None,
     padding_mode: str = "constant",
-) -> datapoints.InputTypeJIT:
+) -> datapoints._InputTypeJIT:
     if not torch.jit.is_scripting():
         _log_api_usage_once(pad)
@@ -1239,7 +1239,7 @@ def crop_video(video: torch.Tensor, top: int, left: int, height: int, width: int
     return crop_image_tensor(video, top, left, height, width)


-def crop(inpt: datapoints.InputTypeJIT, top: int, left: int, height: int, width: int) -> datapoints.InputTypeJIT:
+def crop(inpt: datapoints._InputTypeJIT, top: int, left: int, height: int, width: int) -> datapoints._InputTypeJIT:
     if not torch.jit.is_scripting():
         _log_api_usage_once(crop)
@@ -1308,7 +1308,7 @@ def perspective_image_tensor(
     startpoints: Optional[List[List[int]]],
     endpoints: Optional[List[List[int]]],
     interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
-    fill: datapoints.FillTypeJIT = None,
+    fill: datapoints._FillTypeJIT = None,
     coefficients: Optional[List[float]] = None,
 ) -> torch.Tensor:
     perspective_coeffs = _perspective_coefficients(startpoints, endpoints, coefficients)
@@ -1355,7 +1355,7 @@ def perspective_image_pil(
     startpoints: Optional[List[List[int]]],
     endpoints: Optional[List[List[int]]],
     interpolation: Union[InterpolationMode, int] = InterpolationMode.BICUBIC,
-    fill: datapoints.FillTypeJIT = None,
+    fill: datapoints._FillTypeJIT = None,
     coefficients: Optional[List[float]] = None,
 ) -> PIL.Image.Image:
     perspective_coeffs = _perspective_coefficients(startpoints, endpoints, coefficients)
@@ -1461,7 +1461,7 @@ def perspective_mask(
     mask: torch.Tensor,
     startpoints: Optional[List[List[int]]],
     endpoints: Optional[List[List[int]]],
-    fill: datapoints.FillTypeJIT = None,
+    fill: datapoints._FillTypeJIT = None,
     coefficients: Optional[List[float]] = None,
 ) -> torch.Tensor:
     if mask.ndim < 3:
@@ -1485,7 +1485,7 @@ def perspective_video(
     startpoints: Optional[List[List[int]]],
     endpoints: Optional[List[List[int]]],
     interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
-    fill: datapoints.FillTypeJIT = None,
+    fill: datapoints._FillTypeJIT = None,
     coefficients: Optional[List[float]] = None,
 ) -> torch.Tensor:
     return perspective_image_tensor(
@@ -1494,13 +1494,13 @@ def perspective_video(
 def perspective(
-    inpt: datapoints.InputTypeJIT,
+    inpt: datapoints._InputTypeJIT,
     startpoints: Optional[List[List[int]]],
     endpoints: Optional[List[List[int]]],
     interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
-    fill: datapoints.FillTypeJIT = None,
+    fill: datapoints._FillTypeJIT = None,
     coefficients: Optional[List[float]] = None,
-) -> datapoints.InputTypeJIT:
+) -> datapoints._InputTypeJIT:
     if not torch.jit.is_scripting():
         _log_api_usage_once(perspective)
     if torch.jit.is_scripting() or is_simple_tensor(inpt):
@@ -1526,7 +1526,7 @@ def elastic_image_tensor(
     image: torch.Tensor,
     displacement: torch.Tensor,
     interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
-    fill: datapoints.FillTypeJIT = None,
+    fill: datapoints._FillTypeJIT = None,
 ) -> torch.Tensor:
     interpolation = _check_interpolation(interpolation)
@@ -1583,7 +1583,7 @@ def elastic_image_pil(
     image: PIL.Image.Image,
     displacement: torch.Tensor,
     interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
-    fill: datapoints.FillTypeJIT = None,
+    fill: datapoints._FillTypeJIT = None,
 ) -> PIL.Image.Image:
     t_img = pil_to_tensor(image)
     output = elastic_image_tensor(t_img, displacement, interpolation=interpolation, fill=fill)
@@ -1656,7 +1656,7 @@ def elastic_bounding_box(
 def elastic_mask(
     mask: torch.Tensor,
     displacement: torch.Tensor,
-    fill: datapoints.FillTypeJIT = None,
+    fill: datapoints._FillTypeJIT = None,
 ) -> torch.Tensor:
     if mask.ndim < 3:
         mask = mask.unsqueeze(0)
@@ -1676,17 +1676,17 @@ def elastic_video(
     video: torch.Tensor,
     displacement: torch.Tensor,
     interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
-    fill: datapoints.FillTypeJIT = None,
+    fill: datapoints._FillTypeJIT = None,
 ) -> torch.Tensor:
     return elastic_image_tensor(video, displacement, interpolation=interpolation, fill=fill)


 def elastic(
-    inpt: datapoints.InputTypeJIT,
+    inpt: datapoints._InputTypeJIT,
     displacement: torch.Tensor,
     interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
-    fill: datapoints.FillTypeJIT = None,
+    fill: datapoints._FillTypeJIT = None,
-) -> datapoints.InputTypeJIT:
+) -> datapoints._InputTypeJIT:
     if not torch.jit.is_scripting():
         _log_api_usage_once(elastic)
@@ -1802,7 +1802,7 @@ def center_crop_video(video: torch.Tensor, output_size: List[int]) -> torch.Tens
     return center_crop_image_tensor(video, output_size)


-def center_crop(inpt: datapoints.InputTypeJIT, output_size: List[int]) -> datapoints.InputTypeJIT:
+def center_crop(inpt: datapoints._InputTypeJIT, output_size: List[int]) -> datapoints._InputTypeJIT:
     if not torch.jit.is_scripting():
         _log_api_usage_once(center_crop)
@@ -1888,7 +1888,7 @@ def resized_crop_video(
 def resized_crop(
-    inpt: datapoints.InputTypeJIT,
+    inpt: datapoints._InputTypeJIT,
     top: int,
     left: int,
     height: int,
@@ -1896,7 +1896,7 @@ def resized_crop(
     size: List[int],
     interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
     antialias: Optional[Union[str, bool]] = "warn",
-) -> datapoints.InputTypeJIT:
+) -> datapoints._InputTypeJIT:
     if not torch.jit.is_scripting():
         _log_api_usage_once(resized_crop)
@@ -1972,7 +1972,7 @@ def five_crop_video(
     return five_crop_image_tensor(video, size)


-ImageOrVideoTypeJIT = Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT]
+ImageOrVideoTypeJIT = Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT]


 def five_crop(
@@ -2069,7 +2069,7 @@ def ten_crop_video(
 def ten_crop(
-    inpt: Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT], size: List[int], vertical_flip: bool = False
+    inpt: Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT], size: List[int], vertical_flip: bool = False
 ) -> Tuple[
     ImageOrVideoTypeJIT,
     ImageOrVideoTypeJIT,
torchvision/transforms/v2/functional/_meta.py
@@ -27,7 +27,7 @@ def get_dimensions_image_tensor(image: torch.Tensor) -> List[int]:
 get_dimensions_image_pil = _FP.get_dimensions


-def get_dimensions(inpt: Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT]) -> List[int]:
+def get_dimensions(inpt: Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT]) -> List[int]:
     if not torch.jit.is_scripting():
         _log_api_usage_once(get_dimensions)
@@ -64,7 +64,7 @@ def get_num_channels_video(video: torch.Tensor) -> int:
     return get_num_channels_image_tensor(video)


-def get_num_channels(inpt: Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT]) -> int:
+def get_num_channels(inpt: Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT]) -> int:
     if not torch.jit.is_scripting():
         _log_api_usage_once(get_num_channels)
@@ -114,7 +114,7 @@ def get_spatial_size_bounding_box(bounding_box: datapoints.BoundingBox) -> List[
     return list(bounding_box.spatial_size)


-def get_spatial_size(inpt: datapoints.InputTypeJIT) -> List[int]:
+def get_spatial_size(inpt: datapoints._InputTypeJIT) -> List[int]:
     if not torch.jit.is_scripting():
         _log_api_usage_once(get_spatial_size)
@@ -135,7 +135,7 @@ def get_num_frames_video(video: torch.Tensor) -> int:
     return video.shape[-4]


-def get_num_frames(inpt: datapoints.VideoTypeJIT) -> int:
+def get_num_frames(inpt: datapoints._VideoTypeJIT) -> int:
     if not torch.jit.is_scripting():
         _log_api_usage_once(get_num_frames)
@@ -208,11 +208,11 @@ def _convert_format_bounding_box(
 def convert_format_bounding_box(
-    inpt: datapoints.InputTypeJIT,
+    inpt: datapoints._InputTypeJIT,
     old_format: Optional[BoundingBoxFormat] = None,
     new_format: Optional[BoundingBoxFormat] = None,
     inplace: bool = False,
-) -> datapoints.InputTypeJIT:
+) -> datapoints._InputTypeJIT:
     # This being a kernel / dispatcher hybrid, we need an option to pass `old_format` explicitly for simple tensor
     # inputs as well as extract it from `datapoints.BoundingBox` inputs. However, putting a default value on
     # `old_format` means we also need to put one on `new_format` to have syntactically correct Python. Here we mimic the
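The comment above explains why this kernel/dispatcher hybrid accepts old_format explicitly. A hedged usage sketch of the two call styles; the import paths and the BoundingBox constructor follow the torchvision.transforms.v2 layout implied by this diff's file names and may differ in other releases:

import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F

boxes = torch.tensor([[10.0, 10.0, 20.0, 20.0]])

# Plain tensor input: the caller must name the current format explicitly.
xywh = F.convert_format_bounding_box(
    boxes,
    old_format=datapoints.BoundingBoxFormat.XYXY,
    new_format=datapoints.BoundingBoxFormat.XYWH,
)

# Datapoint input: the BoundingBox already carries its format, so only the
# target format is needed.
bbox = datapoints.BoundingBox(
    boxes, format=datapoints.BoundingBoxFormat.XYXY, spatial_size=(32, 32)
)
xywh_bbox = F.convert_format_bounding_box(
    bbox, new_format=datapoints.BoundingBoxFormat.XYWH
)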
@@ -259,10 +259,10 @@ def _clamp_bounding_box(
 def clamp_bounding_box(
-    inpt: datapoints.InputTypeJIT,
+    inpt: datapoints._InputTypeJIT,
     format: Optional[BoundingBoxFormat] = None,
     spatial_size: Optional[Tuple[int, int]] = None,
-) -> datapoints.InputTypeJIT:
+) -> datapoints._InputTypeJIT:
     if not torch.jit.is_scripting():
         _log_api_usage_once(clamp_bounding_box)
@@ -355,7 +355,7 @@ def convert_dtype_video(video: torch.Tensor, dtype: torch.dtype = torch.float) -
 def convert_dtype(
-    inpt: Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT], dtype: torch.dtype = torch.float
+    inpt: Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT], dtype: torch.dtype = torch.float
 ) -> torch.Tensor:
     if not torch.jit.is_scripting():
         _log_api_usage_once(convert_dtype)
torchvision/transforms/v2/functional/_misc.py
@@ -53,7 +53,7 @@ def normalize_video(video: torch.Tensor, mean: List[float], std: List[float], in
 def normalize(
-    inpt: Union[datapoints.TensorImageTypeJIT, datapoints.TensorVideoTypeJIT],
+    inpt: Union[datapoints._TensorImageTypeJIT, datapoints._TensorVideoTypeJIT],
     mean: List[float],
     std: List[float],
     inplace: bool = False,
@@ -166,8 +166,8 @@ def gaussian_blur_video(
 def gaussian_blur(
-    inpt: datapoints.InputTypeJIT, kernel_size: List[int], sigma: Optional[List[float]] = None
+    inpt: datapoints._InputTypeJIT, kernel_size: List[int], sigma: Optional[List[float]] = None
-) -> datapoints.InputTypeJIT:
+) -> datapoints._InputTypeJIT:
     if not torch.jit.is_scripting():
         _log_api_usage_once(gaussian_blur)
torchvision/transforms/v2/functional/_temporal.py
@@ -14,7 +14,7 @@ def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int) -> t
     return torch.index_select(video, -4, indices)


-def uniform_temporal_subsample(inpt: datapoints.VideoTypeJIT, num_samples: int) -> datapoints.VideoTypeJIT:
+def uniform_temporal_subsample(inpt: datapoints._VideoTypeJIT, num_samples: int) -> datapoints._VideoTypeJIT:
     if not torch.jit.is_scripting():
         _log_api_usage_once(uniform_temporal_subsample)