OpenDAS / vision
Commit 03f2a8b7
"src/git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "e30d3bf5442fbdbee899e8a5da0b11b621d54f1b"
Unverified; authored May 09, 2023 by Philip Meier, committed by GitHub on May 09, 2023
add same size shortcut for resize in transforms v2 (#7521)
Parent: 205602af
Showing 2 changed files with 22 additions and 3 deletions:

- test/transforms_v2_kernel_infos.py (+3, -0)
- torchvision/transforms/v2/functional/_geometry.py (+19, -3)
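At a high level, the change makes a resize whose target size equals the input size a no-op. A minimal sketch of the user-facing effect, assuming a torchvision build that includes this commit and exposes the v2 dispatcher as torchvision.transforms.v2.functional.resize (the import path and the dispatch to the kernels patched below are assumptions, not part of this diff):

import torch
from torchvision.transforms.v2 import functional as F

img = torch.rand(3, 224, 224)

# Asking for the size the image already has should now hit the new shortcut
# and come back without any interpolation work.
out = F.resize(img, size=[224, 224])
assert out.shape == (3, 224, 224)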
test/transforms_v2_kernel_infos.py
@@ -321,6 +321,9 @@ def reference_resize_bounding_box(bounding_box, *, spatial_size, size, max_size=
     old_height, old_width = spatial_size
     new_height, new_width = F._geometry._compute_resized_output_size(spatial_size, size=size, max_size=max_size)

+    if (old_height, old_width) == (new_height, new_width):
+        return bounding_box, (old_height, old_width)
+
     affine_matrix = np.array(
         [
             [new_width / old_width, 0, 0],
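For intuition on why the early return keeps the reference equivalent: when the computed output size equals the input size, the scaling affine matrix the reference would otherwise build degenerates to the identity. A small standalone sketch; the second matrix row follows the usual [[sx, 0, 0], [0, sy, 0]] scaling form and is an assumption, since it is not visible in this hunk:

import numpy as np

old_height, old_width = 240, 320
new_height, new_width = 240, 320  # same size as the input

affine_matrix = np.array(
    [
        [new_width / old_width, 0, 0],
        [0, new_height / old_height, 0],  # assumed second row, not shown in the diff
    ]
)
print(affine_matrix)  # [[1. 0. 0.]
                      #  [0. 1. 0.]]  -> a no-op scaling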
torchvision/transforms/v2/functional/_geometry.py
@@ -179,7 +179,9 @@ def resize_image_tensor(
     num_channels, old_height, old_width = shape[-3:]
     new_height, new_width = _compute_resized_output_size((old_height, old_width), size=size, max_size=max_size)

-    if image.numel() > 0:
+    if (new_height, new_width) == (old_height, old_width):
+        return image
+    elif image.numel() > 0:
         image = image.reshape(-1, num_channels, old_height, old_width)

         dtype = image.dtype
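A hedged sketch of the new fast path at the kernel level, assuming the tensor kernel is importable as torchvision.transforms.v2.functional.resize_image_tensor (the public exposure under that name is an assumption):

import torch
from torchvision.transforms.v2.functional import resize_image_tensor  # assumed import path

img = torch.rand(3, 240, 320)

# Target size equals the current size, so the kernel returns early.
out = resize_image_tensor(img, size=[240, 320])

# With the shortcut, no interpolation (and no copy) happens:
assert out is img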
@@ -210,9 +212,19 @@ def resize_image_pil(
     interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
     max_size: Optional[int] = None,
 ) -> PIL.Image.Image:
+    old_height, old_width = image.height, image.width
+    new_height, new_width = _compute_resized_output_size(
+        (old_height, old_width),
+        size=size,  # type: ignore[arg-type]
+        max_size=max_size,
+    )
+
     interpolation = _check_interpolation(interpolation)
-    size = _compute_resized_output_size(image.size[::-1], size=size, max_size=max_size)  # type: ignore[arg-type]
-    return _FP.resize(image, size, interpolation=pil_modes_mapping[interpolation])
+
+    if (new_height, new_width) == (old_height, old_width):
+        return image
+
+    return image.resize((new_width, new_height), resample=pil_modes_mapping[interpolation])


 def resize_mask(mask: torch.Tensor, size: List[int], max_size: Optional[int] = None) -> torch.Tensor:
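The PIL kernel gets the same treatment: when the computed size matches the input, the original PIL.Image object is handed back instead of going through Image.resize. A sketch assuming the kernel is exposed as torchvision.transforms.v2.functional.resize_image_pil:

import PIL.Image
from torchvision.transforms.v2.functional import resize_image_pil  # assumed import path

img = PIL.Image.new("RGB", (320, 240))  # PIL sizes are (width, height)

out = resize_image_pil(img, size=[240, 320])  # size is given as (height, width)

# The shortcut returns the very same Image instance:
assert out is img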
@@ -235,6 +247,10 @@ def resize_bounding_box(
 ) -> Tuple[torch.Tensor, Tuple[int, int]]:
     old_height, old_width = spatial_size
     new_height, new_width = _compute_resized_output_size(spatial_size, size=size, max_size=max_size)
+
+    if (new_height, new_width) == (old_height, old_width):
+        return bounding_box, spatial_size
+
     w_ratio = new_width / old_width
     h_ratio = new_height / old_height
     ratios = torch.tensor([w_ratio, h_ratio, w_ratio, h_ratio], device=bounding_box.device)
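Finally, the bounding box kernel, which returns a (boxes, spatial_size) tuple, now hands both back unchanged. A sketch assuming the kernel is exposed as torchvision.transforms.v2.functional.resize_bounding_box and takes the box tensor, its spatial_size, and the target size in that order (the exact signature is an assumption; only the body is shown in this hunk):

import torch
from torchvision.transforms.v2.functional import resize_bounding_box  # assumed import path

boxes = torch.tensor([[10.0, 20.0, 50.0, 80.0]])  # XYXY coordinates assumed
spatial_size = (240, 320)

out_boxes, out_spatial_size = resize_bounding_box(boxes, spatial_size, size=[240, 320])

# Same size requested, so both outputs are returned as-is:
assert out_boxes is boxes and out_spatial_size == spatial_size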