Commit a0cd96ff (unverified)
Port test_ops.py to pytest (#3953)

Authored Jun 04, 2021 by Nicolas Hug; committed by GitHub Jun 04, 2021
Parent: 2a52c2dc

Changes: 1 changed file, with 208 additions and 208 deletions (test/test_ops.py: +208, -208)
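The port follows one recurring pattern throughout the diff below: unittest.TestCase subclasses become plain pytest classes, per-device and per-layout test methods collapse into a single method driven by @pytest.mark.parametrize, and self.assert* calls become plain assert statements. A minimal, self-contained sketch of that pattern (the names are illustrative, not taken from the commit):

import unittest

import pytest
import torch


# Before: one method per (device, availability) combination.
class OpTesterOld(unittest.TestCase):
    def test_forward_cpu(self):
        self._check_forward(torch.device('cpu'))

    @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
    def test_forward_cuda(self):
        self._check_forward(torch.device('cuda'))

    def _check_forward(self, device):
        self.assertTrue(torch.ones(2, device=device).sum().item() == 2)


# After: one parametrized method; pytest generates one test item per value.
class TestOpNew:
    @pytest.mark.parametrize('device', ['cpu'] + (['cuda'] if torch.cuda.is_available() else []))
    def test_forward(self, device):
        assert torch.ones(2, device=torch.device(device)).sum().item() == 2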
test/test_ops.py (view file @ a0cd96ff)
-from common_utils import needs_cuda, cpu_only
+from common_utils import needs_cuda, cpu_only, cpu_and_gpu
 from _assert_utils import assert_equal
 import math
-import unittest
+from abc import ABC, abstractmethod
 import pytest
 import numpy as np
@@ -15,48 +15,12 @@ from torchvision import ops
 from typing import Tuple


-class OpTester(object):
-    @classmethod
-    def setUpClass(cls):
-        cls.dtype = torch.float64
-
-    def test_forward_cpu_contiguous(self):
-        self._test_forward(device=torch.device('cpu'), contiguous=True)
-
-    def test_forward_cpu_non_contiguous(self):
-        self._test_forward(device=torch.device('cpu'), contiguous=False)
-
-    def test_backward_cpu_contiguous(self):
-        self._test_backward(device=torch.device('cpu'), contiguous=True)
-
-    def test_backward_cpu_non_contiguous(self):
-        self._test_backward(device=torch.device('cpu'), contiguous=False)
-
-    @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
-    def test_forward_cuda_contiguous(self):
-        self._test_forward(device=torch.device('cuda'), contiguous=True)
-
-    @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
-    def test_forward_cuda_non_contiguous(self):
-        self._test_forward(device=torch.device('cuda'), contiguous=False)
-
-    @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
-    def test_backward_cuda_contiguous(self):
-        self._test_backward(device=torch.device('cuda'), contiguous=True)
-
-    @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
-    def test_backward_cuda_non_contiguous(self):
-        self._test_backward(device=torch.device('cuda'), contiguous=False)
-
-    def _test_forward(self, device, contiguous):
-        pass
-
-    def _test_backward(self, device, contiguous):
-        pass
-
-
-class RoIOpTester(OpTester):
-    def _test_forward(self, device, contiguous, x_dtype=None, rois_dtype=None, **kwargs):
+class RoIOpTester(ABC):
+    dtype = torch.float64
+
+    @pytest.mark.parametrize('device', cpu_and_gpu())
+    @pytest.mark.parametrize('contiguous', (True, False))
+    def test_forward(self, device, contiguous, x_dtype=None, rois_dtype=None, **kwargs):
         x_dtype = self.dtype if x_dtype is None else x_dtype
         rois_dtype = self.dtype if rois_dtype is None else rois_dtype
         pool_size = 5
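Stacked parametrize decorators, as on the new RoIOpTester.test_forward above, take the cross product of their argument lists, so one method covers the same device-by-layout matrix that previously needed eight hand-written methods. A quick sketch of the expansion (assuming only pytest is installed):

import pytest


@pytest.mark.parametrize('device', ('cpu', 'cuda'))
@pytest.mark.parametrize('contiguous', (True, False))
def test_matrix(device, contiguous):
    # pytest collects four independent items: (cpu, True), (cpu, False),
    # (cuda, True), (cuda, False) -- each reported and filterable on its own.
    assert device in ('cpu', 'cuda')
    assert contiguous in (True, False)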
@@ -74,14 +38,16 @@ class RoIOpTester(OpTester):
         pool_h, pool_w = pool_size, pool_size
         y = self.fn(x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, **kwargs)
         # the following should be true whether we're running an autocast test or not.
-        self.assertTrue(y.dtype == x.dtype)
+        assert y.dtype == x.dtype
         gt_y = self.expected_fn(x, rois, pool_h, pool_w, spatial_scale=1,
                                 sampling_ratio=-1, device=device, dtype=self.dtype, **kwargs)

         tol = 1e-3 if (x_dtype is torch.half or rois_dtype is torch.half) else 1e-5
         torch.testing.assert_close(gt_y.to(y), y, rtol=tol, atol=tol)

-    def _test_backward(self, device, contiguous):
+    @pytest.mark.parametrize('device', cpu_and_gpu())
+    @pytest.mark.parametrize('contiguous', (True, False))
+    def test_backward(self, device, contiguous):
         pool_size = 2
         x = torch.rand(1, 2 * (pool_size ** 2), 5, 5, dtype=self.dtype, device=device, requires_grad=True)
         if not contiguous:
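The `assert y.dtype == x.dtype` rewrite above works because pytest rewrites plain assert statements to show operand values on failure, so nothing is lost relative to self.assertTrue. A passing toy test illustrating what the failure report would contain:

import torch


def test_dtype_preserved():
    x = torch.zeros(2, dtype=torch.float32)
    y = x.clone()
    # On a mismatch, pytest would report e.g.
    #   assert torch.float64 == torch.float32
    # with both operands shown -- diagnostics assertTrue could not give for free.
    assert y.dtype == x.dtype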
@@ -96,43 +62,43 @@ class RoIOpTester(OpTester):
         script_func = self.get_script_fn(rois, pool_size)

-        self.assertTrue(gradcheck(func, (x,)))
-        self.assertTrue(gradcheck(script_func, (x,)))
+        gradcheck(func, (x,))
+        gradcheck(script_func, (x,))

-    def test_boxes_shape(self):
-        self._test_boxes_shape()
+    @needs_cuda
+    @pytest.mark.parametrize('x_dtype', (torch.float, torch.half))
+    @pytest.mark.parametrize('rois_dtype', (torch.float, torch.half))
+    def test_autocast(self, x_dtype, rois_dtype):
+        with torch.cuda.amp.autocast():
+            self.test_forward(torch.device("cuda"), contiguous=False, x_dtype=x_dtype, rois_dtype=rois_dtype)

     def _helper_boxes_shape(self, func):
         # test boxes as Tensor[N, 5]
-        with self.assertRaises(AssertionError):
+        with pytest.raises(AssertionError):
             a = torch.linspace(1, 8 * 8, 8 * 8).reshape(1, 1, 8, 8)
             boxes = torch.tensor([[0, 0, 3, 3]], dtype=a.dtype)
             func(a, boxes, output_size=(2, 2))

         # test boxes as List[Tensor[N, 4]]
-        with self.assertRaises(AssertionError):
+        with pytest.raises(AssertionError):
             a = torch.linspace(1, 8 * 8, 8 * 8).reshape(1, 1, 8, 8)
             boxes = torch.tensor([[0, 0, 3]], dtype=a.dtype)
             ops.roi_pool(a, [boxes], output_size=(2, 2))

+    @abstractmethod
     def fn(*args, **kwargs):
         pass

+    @abstractmethod
     def get_script_fn(*args, **kwargs):
         pass

+    @abstractmethod
     def expected_fn(*args, **kwargs):
         pass

-    @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
-    def test_autocast(self):
-        for x_dtype in (torch.float, torch.half):
-            for rois_dtype in (torch.float, torch.half):
-                with torch.cuda.amp.autocast():
-                    self._test_forward(torch.device("cuda"), contiguous=False, x_dtype=x_dtype, rois_dtype=rois_dtype)

-class RoIPoolTester(RoIOpTester, unittest.TestCase):
+class TestRoiPool(RoIOpTester):
     def fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, **kwargs):
         return ops.RoIPool((pool_h, pool_w), spatial_scale)(x, rois)
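Marking fn, get_script_fn and expected_fn with @abstractmethod documents that every concrete tester must supply them; pytest itself only collects classes whose names start with Test, so the RoIOpTester base never runs on its own. A small sketch of the shared-suite idiom (illustrative names):

from abc import ABC, abstractmethod


class SuiteBase(ABC):
    @abstractmethod
    def fn(self, v):
        ...

    def test_roundtrip(self):  # inherited and run by every Test* subclass
        assert self.fn(3) == 3


class TestIdentity(SuiteBase):  # collected by pytest; must implement fn
    def fn(self, v):
        return v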
@@ -167,11 +133,12 @@ class RoIPoolTester(RoIOpTester, unittest.TestCase):
                     y[roi_idx, :, i, j] = bin_x.reshape(n_channels, -1).max(dim=1)[0]
         return y

-    def _test_boxes_shape(self):
+    @cpu_only
+    def test_boxes_shape(self):
         self._helper_boxes_shape(ops.roi_pool)


-class PSRoIPoolTester(RoIOpTester, unittest.TestCase):
+class TestPSRoIPool(RoIOpTester):
     def fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, **kwargs):
         return ops.PSRoIPool((pool_h, pool_w), 1)(x, rois)
@@ -184,7 +151,7 @@ class PSRoIPoolTester(RoIOpTester, unittest.TestCase):
         if device is None:
             device = torch.device("cpu")
         n_input_channels = x.size(1)
-        self.assertEqual(n_input_channels % (pool_h * pool_w), 0, "input channels must be divisible by ph * pw")
+        assert n_input_channels % (pool_h * pool_w) == 0, "input channels must be divisible by ph * pw"
         n_output_channels = int(n_input_channels / (pool_h * pool_w))
         y = torch.zeros(rois.size(0), n_output_channels, pool_h, pool_w, dtype=dtype, device=device)
@@ -211,7 +178,8 @@ class PSRoIPoolTester(RoIOpTester, unittest.TestCase):
                         y[roi_idx, c_out, i, j] = t / area
         return y

-    def _test_boxes_shape(self):
+    @cpu_only
+    def test_boxes_shape(self):
         self._helper_boxes_shape(ops.ps_roi_pool)
@@ -247,7 +215,7 @@ def bilinear_interpolate(data, y, x, snap_border=False):
     return val


-class RoIAlignTester(RoIOpTester, unittest.TestCase):
+class TestRoIAlign(RoIOpTester):
     def fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, aligned=False, **kwargs):
         return ops.RoIAlign((pool_h, pool_w), spatial_scale=spatial_scale,
                             sampling_ratio=sampling_ratio, aligned=aligned)(x, rois)
@@ -294,35 +262,47 @@ class RoIAlignTester(RoIOpTester, unittest.TestCase):
                     out_data[r, channel, i, j] = val
         return out_data

-    def _test_boxes_shape(self):
+    @cpu_only
+    def test_boxes_shape(self):
         self._helper_boxes_shape(ops.roi_align)

-    def _test_forward(self, device, contiguous, x_dtype=None, rois_dtype=None, **kwargs):
-        for aligned in (True, False):
-            super()._test_forward(device, contiguous, x_dtype, rois_dtype, aligned=aligned)
+    @pytest.mark.parametrize('aligned', (True, False))
+    @pytest.mark.parametrize('device', cpu_and_gpu())
+    @pytest.mark.parametrize('contiguous', (True, False))
+    def test_forward(self, device, contiguous, aligned, x_dtype=None, rois_dtype=None):
+        super().test_forward(device=device, contiguous=contiguous, x_dtype=x_dtype, rois_dtype=rois_dtype,
+                             aligned=aligned)

-    def test_qroialign(self):
-        """Make sure quantized version of RoIAlign is close to float version"""
-        pool_size = 5
-        img_size = 10
-        n_channels = 2
-        num_imgs = 1
-        dtype = torch.float
+    @needs_cuda
+    @pytest.mark.parametrize('aligned', (True, False))
+    @pytest.mark.parametrize('x_dtype', (torch.float, torch.half))
+    @pytest.mark.parametrize('rois_dtype', (torch.float, torch.half))
+    def test_autocast(self, aligned, x_dtype, rois_dtype):
+        with torch.cuda.amp.autocast():
+            self.test_forward(torch.device("cuda"), contiguous=False, aligned=aligned, x_dtype=x_dtype,
+                              rois_dtype=rois_dtype)

-        def make_rois(num_rois=1000):
-            rois = torch.randint(0, img_size // 2, size=(num_rois, 5)).to(dtype)
-            rois[:, 0] = torch.randint(0, num_imgs, size=(num_rois,))  # set batch index
-            rois[:, 3:] += rois[:, 1:3]  # make sure boxes aren't degenerate
-            return rois
+    def _make_rois(self, img_size, num_imgs, dtype, num_rois=1000):
+        rois = torch.randint(0, img_size // 2, size=(num_rois, 5)).to(dtype)
+        rois[:, 0] = torch.randint(0, num_imgs, size=(num_rois,))  # set batch index
+        rois[:, 3:] += rois[:, 1:3]  # make sure boxes aren't degenerate
+        return rois

-        for aligned in (True, False):
-            for scale, zero_point in ((1, 0), (2, 10), (0.1, 50)):
-                for qdtype in (torch.qint8, torch.quint8, torch.qint32):
-
-                    x = torch.randint(50, 100, size=(num_imgs, n_channels, img_size, img_size)).to(dtype)
-                    qx = torch.quantize_per_tensor(x, scale=scale, zero_point=zero_point, dtype=qdtype)
-
-                    rois = make_rois()
-                    qrois = torch.quantize_per_tensor(rois, scale=scale, zero_point=zero_point, dtype=qdtype)
-
-                    x, rois = qx.dequantize(), qrois.dequantize()  # we want to pass the same inputs
+    @pytest.mark.parametrize('aligned', (True, False))
+    @pytest.mark.parametrize('scale, zero_point', ((1, 0), (2, 10), (0.1, 50)))
+    @pytest.mark.parametrize('qdtype', (torch.qint8, torch.quint8, torch.qint32))
+    def test_qroialign(self, aligned, scale, zero_point, qdtype):
+        """Make sure quantized version of RoIAlign is close to float version"""
+        pool_size = 5
+        img_size = 10
+        n_channels = 2
+        num_imgs = 1
+        dtype = torch.float
+
+        x = torch.randint(50, 100, size=(num_imgs, n_channels, img_size, img_size)).to(dtype)
+        qx = torch.quantize_per_tensor(x, scale=scale, zero_point=zero_point, dtype=qdtype)
+
+        rois = self._make_rois(img_size, num_imgs, dtype)
+        qrois = torch.quantize_per_tensor(rois, scale=scale, zero_point=zero_point, dtype=qdtype)
+
+        x, rois = qx.dequantize(), qrois.dequantize()  # we want to pass the same inputs
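The three nested for loops of the old test_qroialign become three stacked parametrize decorators. The same 2 x 3 x 3 = 18 combinations run, but as separate test items, so one failing (aligned, scale/zero_point, qdtype) combination no longer hides the rest. A reduced sketch of the equivalence:

import pytest


def check(aligned, scale, zero_point):
    assert scale * 0 == 0  # stand-in for the real quantized-vs-float check


# Old shape: one test item; the first failing combination stops the loop.
def test_all_in_one():
    for aligned in (True, False):
        for scale, zero_point in ((1, 0), (2, 10), (0.1, 50)):
            check(aligned, scale, zero_point)


# New shape: six independent, individually reported test items.
@pytest.mark.parametrize('aligned', (True, False))
@pytest.mark.parametrize('scale, zero_point', ((1, 0), (2, 10), (0.1, 50)))
def test_parametrized(aligned, scale, zero_point):
    check(aligned, scale, zero_point)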
@@ -350,7 +330,7 @@ class RoIAlignTester(RoIOpTester, unittest.TestCase):
         try:
             # Ideally, we would assert this, which passes with (scale, zero) == (1, 0)
-            self.assertTrue((qy == quantized_float_y).all())
+            assert (qy == quantized_float_y).all()
         except AssertionError:
             # But because the computation aren't exactly the same between the 2 RoIAlign procedures, some
             # rounding error may lead to a difference of 2 in the output.
@@ -360,21 +340,23 @@ class RoIAlignTester(RoIOpTester, unittest.TestCase):
             # - any difference between qy and quantized_float_y is == scale
             diff_idx = torch.where(qy != quantized_float_y)
             num_diff = diff_idx[0].numel()
-            self.assertTrue(num_diff / qy.numel() < .05)
+            assert num_diff / qy.numel() < .05
             abs_diff = torch.abs(qy[diff_idx].dequantize() - quantized_float_y[diff_idx].dequantize())
             t_scale = torch.full_like(abs_diff, fill_value=scale)
             torch.testing.assert_close(abs_diff, t_scale, rtol=1e-5, atol=1e-5)

+    def test_qroi_align_multiple_images(self):
+        dtype = torch.float
         x = torch.randint(50, 100, size=(2, 3, 10, 10)).to(dtype)
         qx = torch.quantize_per_tensor(x, scale=1, zero_point=0, dtype=torch.qint8)
-        rois = make_rois(10)
+        rois = self._make_rois(img_size=10, num_imgs=2, dtype=dtype, num_rois=10)
         qrois = torch.quantize_per_tensor(rois, scale=1, zero_point=0, dtype=torch.qint8)
-        with self.assertRaisesRegex(RuntimeError, "Only one image per batch is allowed"):
-            ops.roi_align(qx, qrois, output_size=pool_size)
+        with pytest.raises(RuntimeError, match="Only one image per batch is allowed"):
+            ops.roi_align(qx, qrois, output_size=5)


-class PSRoIAlignTester(RoIOpTester, unittest.TestCase):
+class TestPSRoIAlign(RoIOpTester):
     def fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, **kwargs):
         return ops.PSRoIAlign((pool_h, pool_w), spatial_scale=spatial_scale,
                               sampling_ratio=sampling_ratio)(x, rois)
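pytest.raises(..., match=...) searches the exception message with a regular expression, replacing assertRaisesRegex; because match is a regex, literal brackets must be escaped, as in the mask.shape\[1\] pattern later in this diff. Sketch:

import pytest


def divide(a, b):
    if b == 0:
        raise RuntimeError("Only non-zero divisors are allowed")
    return a / b


def test_divide_by_zero():
    # match= is searched as a regex against str(exc)
    with pytest.raises(RuntimeError, match="non-zero divisors"):
        divide(1, 0)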
@@ -388,7 +370,7 @@ class PSRoIAlignTester(RoIOpTester, unittest.TestCase):
         if device is None:
             device = torch.device("cpu")
         n_input_channels = in_data.size(1)
-        self.assertEqual(n_input_channels % (pool_h * pool_w), 0, "input channels must be divisible by ph * pw")
+        assert n_input_channels % (pool_h * pool_w) == 0, "input channels must be divisible by ph * pw"
         n_output_channels = int(n_input_channels / (pool_h * pool_w))
         out_data = torch.zeros(rois.size(0), n_output_channels, pool_h, pool_w, dtype=dtype, device=device)
@@ -421,11 +403,13 @@ class PSRoIAlignTester(RoIOpTester, unittest.TestCase):
                         out_data[r, c_out, i, j] = val
         return out_data

-    def _test_boxes_shape(self):
+    @cpu_only
+    def test_boxes_shape(self):
         self._helper_boxes_shape(ops.ps_roi_align)


-class MultiScaleRoIAlignTester(unittest.TestCase):
+@cpu_only
+class TestMultiScaleRoIAlign:
     def test_msroialign_repr(self):
         fmap_names = ['0']
         output_size = (7, 7)
@@ -436,7 +420,7 @@ class MultiScaleRoIAlignTester(unittest.TestCase):
         # Check integrity of object __repr__ attribute
         expected_string = (f"MultiScaleRoIAlign(featmap_names={fmap_names}, output_size={output_size}, "
                            f"sampling_ratio={sampling_ratio})")
-        self.assertEqual(t.__repr__(), expected_string)
+        assert repr(t) == expected_string


 class TestNMS:
@@ -583,7 +567,9 @@ class TestNMS:
         torch.testing.assert_close(empty, ops.batched_nms(empty, None, None, None))


-class DeformConvTester(OpTester, unittest.TestCase):
+class TestDeformConv:
+    dtype = torch.float64
+
     def expected_fn(self, x, weight, offset, mask, bias, stride=1, padding=0, dilation=1):
         stride_h, stride_w = _pair(stride)
         pad_h, pad_w = _pair(padding)
@@ -671,12 +657,11 @@ class DeformConvTester(OpTester, unittest.TestCase):
         return x, weight, offset, mask, bias, stride, pad, dilation

-    def _test_forward(self, device, contiguous, dtype=None):
-        dtype = self.dtype if dtype is None else dtype
-        for batch_sz in [0, 33]:
-            self._test_forward_with_batchsize(device, contiguous, batch_sz, dtype)
-
-    def _test_forward_with_batchsize(self, device, contiguous, batch_sz, dtype):
+    @pytest.mark.parametrize('device', cpu_and_gpu())
+    @pytest.mark.parametrize('contiguous', (True, False))
+    @pytest.mark.parametrize('batch_sz', (0, 33))
+    def test_forward(self, device, contiguous, batch_sz, dtype=None):
+        dtype = dtype or self.dtype
         x, _, offset, mask, _, stride, padding, dilation = self.get_fn_args(device, contiguous, batch_sz, dtype)
         in_channels = 6
         out_channels = 2
@@ -704,20 +689,28 @@ class DeformConvTester(OpTester, unittest.TestCase):
                                    res.to(expected), expected, rtol=tol, atol=tol,
                                    msg='\nres:\n{}\nexpected:\n{}'.format(res, expected))

-        # test for wrong sizes
-        with self.assertRaises(RuntimeError):
-            wrong_offset = torch.rand_like(offset[:, :2])
-            res = layer(x, wrong_offset)
-
-        with self.assertRaises(RuntimeError):
-            wrong_mask = torch.rand_like(mask[:, :2])
-            res = layer(x, offset, wrong_mask)
+    @cpu_only
+    def test_wrong_sizes(self):
+        in_channels = 6
+        out_channels = 2
+        kernel_size = (3, 2)
+        groups = 2
+        x, _, offset, mask, _, stride, padding, dilation = self.get_fn_args('cpu', contiguous=True, batch_sz=10,
+                                                                            dtype=self.dtype)
+        layer = ops.DeformConv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding,
+                                 dilation=dilation, groups=groups)
+        with pytest.raises(RuntimeError, match="the shape of the offset"):
+            wrong_offset = torch.rand_like(offset[:, :2])
+            layer(x, wrong_offset)
+        with pytest.raises(RuntimeError, match=r'mask.shape\[1\] is not valid'):
+            wrong_mask = torch.rand_like(mask[:, :2])
+            layer(x, offset, wrong_mask)

-    def _test_backward(self, device, contiguous):
-        for batch_sz in [0, 33]:
-            self._test_backward_with_batchsize(device, contiguous, batch_sz)
-
-    def _test_backward_with_batchsize(self, device, contiguous, batch_sz):
+    @pytest.mark.parametrize('device', cpu_and_gpu())
+    @pytest.mark.parametrize('contiguous', (True, False))
+    @pytest.mark.parametrize('batch_sz', (0, 33))
+    def test_backward(self, device, contiguous, batch_sz):
         x, weight, offset, mask, bias, stride, padding, dilation = self.get_fn_args(device, contiguous,
                                                                                     batch_sz, self.dtype)
@@ -751,11 +744,12 @@ class DeformConvTester(OpTester, unittest.TestCase):
         gradcheck(lambda z, off, wei, bi: script_func_no_mask(z, off, wei, bi, stride, padding, dilation),
                   (x, offset, weight, bias), nondet_tol=1e-5, fast_mode=True)

-    @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
-    def test_compare_cpu_cuda_grads(self):
+    @needs_cuda
+    @pytest.mark.parametrize('contiguous', (True, False))
+    def test_compare_cpu_cuda_grads(self, contiguous):
         # Test from https://github.com/pytorch/vision/issues/2598
         # Run on CUDA only
-        for contiguous in [False, True]:
-            # compare grads computed on CUDA with grads computed on CPU
-            true_cpu_grads = None
+        # compare grads computed on CUDA with grads computed on CPU
+        true_cpu_grads = None
@@ -778,20 +772,22 @@ class DeformConvTester(OpTester, unittest.TestCase):
             out.mean().backward()
             if true_cpu_grads is None:
                 true_cpu_grads = init_weight.grad
-                self.assertTrue(true_cpu_grads is not None)
+                assert true_cpu_grads is not None
             else:
-                self.assertTrue(init_weight.grad is not None)
+                assert init_weight.grad is not None
                 res_grads = init_weight.grad.to("cpu")
                 torch.testing.assert_close(true_cpu_grads, res_grads)

-    @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
-    def test_autocast(self):
-        for dtype in (torch.float, torch.half):
-            with torch.cuda.amp.autocast():
-                self._test_forward(torch.device("cuda"), False, dtype=dtype)
+    @needs_cuda
+    @pytest.mark.parametrize('batch_sz', (0, 33))
+    @pytest.mark.parametrize('dtype', (torch.float, torch.half))
+    def test_autocast(self, batch_sz, dtype):
+        with torch.cuda.amp.autocast():
+            self.test_forward(torch.device("cuda"), contiguous=False, batch_sz=batch_sz, dtype=dtype)


-class FrozenBNTester(unittest.TestCase):
+@cpu_only
+class TestFrozenBNT:
     def test_frozenbatchnorm2d_repr(self):
         num_features = 32
         eps = 1e-5
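needs_cuda, cpu_only and cpu_and_gpu come from the suite's local common_utils module, which this diff does not touch. As a rough, assumed sketch of what such helpers typically look like (torchvision's actual implementations may differ):

import pytest
import torch

# assumption: simplified stand-ins for the common_utils helpers used above
needs_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA unavailable")


def cpu_and_gpu():
    # parametrize over 'cpu' always, plus 'cuda' when available
    return ('cpu', 'cuda') if torch.cuda.is_available() else ('cpu',)

cpu_only, judging from its use here, pins a test or class to CPU-only runs; its definition is not visible in this commit.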
@@ -799,7 +795,7 @@ class FrozenBNTester(unittest.TestCase):
         # Check integrity of object __repr__ attribute
         expected_string = f"FrozenBatchNorm2d({num_features}, eps={eps})"
-        self.assertEqual(t.__repr__(), expected_string)
+        assert repr(t) == expected_string

     def test_frozenbatchnorm2d_eps(self):
         sample_size = (4, 32, 28, 28)
@@ -828,11 +824,12 @@ class FrozenBNTester(unittest.TestCase):
     def test_frozenbatchnorm2d_n_arg(self):
         """Ensure a warning is thrown when passing `n` kwarg
         (remove this when support of `n` is dropped)"""
-        self.assertWarns(DeprecationWarning, ops.misc.FrozenBatchNorm2d, 32, eps=1e-5, n=32)
+        with pytest.warns(DeprecationWarning):
+            ops.misc.FrozenBatchNorm2d(32, eps=1e-5, n=32)


-class BoxConversionTester(unittest.TestCase):
-    @staticmethod
+@cpu_only
+class TestBoxConversion:
     def _get_box_sequences():
         # Define here the argument type of `boxes` supported by region pooling operations
         box_tensor = torch.tensor([[0, 0, 0, 100, 100], [1, 0, 0, 100, 100]], dtype=torch.float)
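assertWarns called in its callable form becomes a pytest.warns context manager wrapping an ordinary call, which reads closer to the code under test. Sketch:

import warnings

import pytest


def legacy(n=None):
    if n is not None:
        warnings.warn("`n` is deprecated", DeprecationWarning)


def test_legacy_warns():
    with pytest.warns(DeprecationWarning):
        legacy(n=32)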
@@ -841,22 +838,23 @@ class BoxConversionTester(unittest.TestCase):
         box_tuple = tuple(box_list)
         return box_tensor, box_list, box_tuple

-    def test_check_roi_boxes_shape(self):
+    @pytest.mark.parametrize('box_sequence', _get_box_sequences())
+    def test_check_roi_boxes_shape(self, box_sequence):
         # Ensure common sequences of tensors are supported
-        for box_sequence in self._get_box_sequences():
-            self.assertIsNone(ops._utils.check_roi_boxes_shape(box_sequence))
+        ops._utils.check_roi_boxes_shape(box_sequence)

-    def test_convert_boxes_to_roi_format(self):
+    @pytest.mark.parametrize('box_sequence', _get_box_sequences())
+    def test_convert_boxes_to_roi_format(self, box_sequence):
         # Ensure common sequences of tensors yield the same result
         ref_tensor = None
-        for box_sequence in self._get_box_sequences():
-            if ref_tensor is None:
-                ref_tensor = box_sequence
-            else:
-                self.assertTrue(torch.equal(ref_tensor, ops._utils.convert_boxes_to_roi_format(box_sequence)))
+        if ref_tensor is None:
+            ref_tensor = box_sequence
+        else:
+            assert_equal(ref_tensor, ops._utils.convert_boxes_to_roi_format(box_sequence))


-class BoxTester(unittest.TestCase):
+@cpu_only
+class TestBox:
     def test_bbox_same(self):
         box_tensor = torch.tensor([[0, 0, 100, 100], [0, 0, 0, 0],
                                    [10, 15, 30, 35], [23, 35, 93, 95]], dtype=torch.float)
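Because _get_box_sequences() now feeds @pytest.mark.parametrize, it is called while the class body executes, as a bare name rather than through self; that is why the old @staticmethod wrapper is dropped. A sketch of collection-time parameter generation:

import pytest


class TestSequences:
    def _cases():  # note: no self; called below during class-body execution
        return [(1, 1), (2, 4), (3, 9)]

    @pytest.mark.parametrize('n, sq', _cases())
    def test_square(self, n, sq):
        assert n * n == sq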
@@ -917,15 +915,14 @@ class BoxTester(unittest.TestCase):
         box_xywh = ops.box_convert(box_cxcywh, in_fmt="cxcywh", out_fmt="xywh")
         assert_equal(box_xywh, box_tensor)

-    def test_bbox_invalid(self):
+    @pytest.mark.parametrize('inv_infmt', ["xwyh", "cxwyh"])
+    @pytest.mark.parametrize('inv_outfmt', ["xwcx", "xhwcy"])
+    def test_bbox_invalid(self, inv_infmt, inv_outfmt):
         box_tensor = torch.tensor([[0, 0, 100, 100], [0, 0, 0, 0],
                                    [10, 15, 20, 20], [23, 35, 70, 60]], dtype=torch.float)

-        invalid_infmts = ["xwyh", "cxwyh"]
-        invalid_outfmts = ["xwcx", "xhwcy"]
-        for inv_infmt in invalid_infmts:
-            for inv_outfmt in invalid_outfmts:
-                self.assertRaises(ValueError, ops.box_convert, box_tensor, inv_infmt, inv_outfmt)
+        with pytest.raises(ValueError):
+            ops.box_convert(box_tensor, inv_infmt, inv_outfmt)

     def test_bbox_convert_jit(self):
         box_tensor = torch.tensor([[0, 0, 100, 100], [0, 0, 0, 0],
@@ -943,7 +940,8 @@ class BoxTester(unittest.TestCase):
         torch.testing.assert_close(scripted_cxcywh, box_cxcywh, rtol=0.0, atol=TOLERANCE)


-class BoxAreaTester(unittest.TestCase):
+@cpu_only
+class TestBoxArea:
     def test_box_area(self):
         def area_check(box, expected, tolerance=1e-4):
             out = ops.box_area(box)
@@ -971,7 +969,8 @@ class BoxAreaTester(unittest.TestCase):
         area_check(box_tensor, expected)


-class BoxIouTester(unittest.TestCase):
+@cpu_only
+class TestBoxIou:
     def test_iou(self):
         def iou_check(box, expected, tolerance=1e-4):
             out = ops.box_iou(box, box)
@@ -992,7 +991,8 @@ class BoxIouTester(unittest.TestCase):
         iou_check(box_tensor, expected, tolerance=0.002 if dtype == torch.float16 else 1e-4)


-class GenBoxIouTester(unittest.TestCase):
+@cpu_only
+class TestGenBoxIou:
     def test_gen_iou(self):
         def gen_iou_check(box, expected, tolerance=1e-4):
             out = ops.generalized_box_iou(box, box)
@@ -1014,4 +1014,4 @@ class GenBoxIouTester(unittest.TestCase):

 if __name__ == '__main__':
-    unittest.main()
+    pytest.main([__file__])
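After the port, the file runs the same way under either entry point (assuming pytest is installed):

python test/test_ops.py        # pytest.main([__file__]) collects this file
pytest test/test_ops.py -v     # the usual pytest invocation, with verbose test ids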