Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
jerrrrry
infinicore
Commits
fe164657
Commit
fe164657
authored
Nov 20, 2025
by
Your Name
Committed by
MaYuhang
Nov 22, 2025
Browse files
Add max_unpool 1D/2D/3D and stack operator test cases
parent
0298925d
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
592 additions
and
0 deletions
+592
-0
test/infinicore/ops/max_unpool1d.py
test/infinicore/ops/max_unpool1d.py
+148
-0
test/infinicore/ops/max_unpool2d.py
test/infinicore/ops/max_unpool2d.py
+162
-0
test/infinicore/ops/max_unpool3d.py
test/infinicore/ops/max_unpool3d.py
+159
-0
test/infinicore/ops/stack.py
test/infinicore/ops/stack.py
+123
-0
No files found.
test/infinicore/ops/max_unpool1d.py
0 → 100644
View file @
fe164657
import
sys
import
os
sys
.
path
.
insert
(
0
,
os
.
path
.
join
(
os
.
path
.
dirname
(
__file__
),
".."
))
import
torch
import
infinicore
from
framework.base
import
BaseOperatorTest
,
TensorSpec
,
TestCase
from
framework.runner
import
GenericTestRunner
from
framework.tensor
import
TensorInitializer
# Test cases format:
# (input_shape, kernel_size, stride_or_None, padding_or_None,
#  use_output_size, output_length_or_None)
#
# input_shape     : (N, C, L_out)
# kernel_size     : int
# stride_or_None  : int or None (None -> default stride == kernel_size)
# padding_or_None : int or None (None -> default padding == 0)
# use_output_size : bool
# output_length   : L_in when use_output_size is True, otherwise None
#
# torch.nn.functional.max_unpool1d(
#     input, indices, kernel_size, stride=None, padding=0, output_size=None
# )
_TEST_CASES_DATA = [
    # ========== Basic cases ==========
    # small sizes and various stride/padding combinations
    ((1, 1, 4), 2, 2, 0, False, None),
    ((2, 3, 8), 2, 2, 0, False, None),
    ((2, 3, 8), 3, 2, 1, False, None),
    ((1, 4, 6), 2, None, 0, False, None),  # default stride
    ((4, 8, 16), 2, 2, 0, True, 32),       # L_in = (16-1)*2 - 0 + 2 = 32
    ((4, 8, 16), 3, 2, 1, True, 31),       # L_in = (16-1)*2 - 2 + 3 = 31
    ((2, 1, 10), 3, 1, 1, True, 10),       # L_in = (10-1)*1 - 2 + 3 = 10
    ((2, 1, 5), 2, None, 1, True, 8),      # L_in = (5-1)*2 - 2 + 2 = 8
    # ========== Large-scale performance test cases ==========
    # medium to large sizes for performance and stability
    ((8, 64, 128), 2, 2, 0, False, None),
    ((8, 64, 256), 3, 2, 1, False, None),
    ((4, 128, 512), 2, 2, 0, False, None),
    ((4, 128, 512), 3, 2, 1, False, None),
    ((16, 32, 256), 4, 4, 0, False, None),
    ((16, 32, 256), 3, 1, 1, False, None),
    ((32, 16, 1024), 2, 2, 0, True, 2048),  # L_in = (1024-1)*2 + 2 = 2048
    ((32, 16, 512), 3, 2, 1, True, 1023),   # L_in = (512-1)*2 - 2 + 3 = 1023
    ((2, 256, 2048), 2, 2, 0, True, 4096),  # L_in = (2048-1)*2 + 2 = 4096
    ((2, 256, 2048), 3, 2, 1, True, 4095),  # L_in = (2048-1)*2 - 2 + 3 = 4095
    ((1, 64, 16384), 2, 2, 0, False, None),
    ((1, 64, 8192), 3, 2, 1, False, None),
    # ========== Edge cases ==========
    # extreme small / boundary sizes
    ((1, 1, 1), 1, 1, 0, False, None),
    ((1, 4, 2), 2, 2, 0, True, 4),    # L_in = (2-1)*2 + 2 = 4
    ((2, 1, 64), 3, 2, 1, False, None),
    ((1, 1, 3), 2, 2, 1, True, 4),    # L_in = (3-1)*2 - 2 + 2 = 4
]
# Per-dtype comparison tolerances; reduced-precision dtypes get looser bounds.
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}

# Floating-point dtypes exercised for the pooled data tensor.
_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]

# dtype used for the indices tensor (torch's max_unpool* takes int64 indices).
_INDEX_DTYPE = infinicore.int64
def parse_test_cases():
    """
    Parse test case data and return list of TestCase objects.

    max_unpool1d takes two inputs:
      - pooled input tensor
      - indices tensor (int64), values in [0, kernel_size)
    """
    cases = []
    for case in _TEST_CASES_DATA:
        shape, kernel, stride, padding, with_output_size, out_len = case
        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})

            # Pooled data tensor spec.
            data_spec = TensorSpec.from_tensor(shape, None, dtype)

            # Indices tensor: same shape, int64, random integers in
            # [0, kernel_size).
            # NOTE(review): torch documents max_unpool1d indices as the flat
            # positions produced by max_pool1d; random values below
            # kernel_size stay in range, but confirm this matches the
            # framework's intended semantics.
            idx_spec = TensorSpec.from_tensor(
                shape,
                None,
                _INDEX_DTYPE,
                init_mode=TensorInitializer.RANDINT,
                low=0,
                high=int(kernel),
            )

            call_kwargs = {"kernel_size": kernel}
            if stride is not None:
                call_kwargs["stride"] = stride
            if padding is not None:
                call_kwargs["padding"] = padding
            if with_output_size and out_len is not None:
                batch, channels, _ = shape
                call_kwargs["output_size"] = (batch, channels, out_len)

            cases.append(
                TestCase(
                    inputs=[data_spec, idx_spec],
                    kwargs=call_kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="MaxUnpool1d - OUT_OF_PLACE",
                )
            )
    return cases
class OpTest(BaseOperatorTest):
    """Operator test wrapper for MaxUnpool1d."""

    def __init__(self):
        super().__init__("MaxUnpool1d")

    def get_test_cases(self):
        # Cases are generated fresh from the module-level tables.
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """Reference implementation: delegate to PyTorch."""
        unpool = torch.nn.functional.max_unpool1d
        return unpool(*args, **kwargs)

    # Uncomment if InfiniCore implementation is available
    # def infinicore_operator(self, *args, **kwargs):
    #     return infinicore.nn.functional.max_unpool1d(*args, **kwargs)
def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
test/infinicore/ops/max_unpool2d.py
0 → 100644
View file @
fe164657
import
sys
import
os
sys
.
path
.
insert
(
0
,
os
.
path
.
join
(
os
.
path
.
dirname
(
__file__
),
".."
))
import
torch
import
infinicore
from
framework.base
import
BaseOperatorTest
,
TensorSpec
,
TestCase
from
framework.runner
import
GenericTestRunner
from
framework.tensor
import
TensorInitializer
# Test cases format:
# (input_shape, kernel_size, stride_or_None, padding_or_None,
#  use_output_size, output_hw_or_None)
#
# input_shape      : (N, C, H_out, W_out), pooled feature map
# kernel_size      : int or (kh, kw)
# stride_or_None   : int / (sh, sw) / None (None -> stride == kernel_size)
# padding_or_None  : int / (ph, pw) / None (None -> padding == 0)
# use_output_size  : bool, whether to pass output_size explicitly
# output_hw_or_None: (H_in, W_in) if use_output_size is True
#
# torch.nn.functional.max_unpool2d(
#     input, indices, kernel_size, stride=None, padding=0, output_size=None
# )
_TEST_CASES_DATA = [
    # ========== Basic cases ==========
    # small sizes with different stride/padding and optional output_size
    ((1, 1, 16, 16), (2, 2), (2, 2), (0, 0), False, None),
    ((2, 3, 16, 16), (2, 2), None, None, False, None),  # default stride / padding
    ((2, 3, 8, 8), (3, 3), (2, 2), (1, 1), False, None),
    ((1, 4, 7, 9), (2, 2), (2, 2), (0, 0), False, None),
    ((4, 8, 14, 14), (3, 3), (2, 2), (1, 1), True, (27, 27)),  # H,W: (14-1)*2 - 2 + 3 = 27
    ((4, 8, 14, 14), (2, 2), None, (1, 1), True, (26, 26)),    # H,W: (14-1)*2 - 2 + 2 = 26
    ((2, 16, 10, 12), (2, 2), (2, 2), (0, 0), True, (20, 24)),  # H: (10-1)*2+2, W: (12-1)*2+2
    ((2, 16, 10, 12), (3, 3), (2, 2), (1, 1), True, (19, 23)),  # H: (10-1)*2-2+3, W: (12-1)*2-2+3
    # ========== Large-scale performance test cases ==========
    # typical CNN activation map sizes and larger inputs
    ((32, 64, 56, 56), (2, 2), (2, 2), (0, 0), False, None),
    ((32, 64, 56, 56), (3, 3), (2, 2), (1, 1), False, None),
    ((64, 128, 28, 28), (2, 2), (2, 2), (0, 0), False, None),
    ((64, 128, 28, 28), (3, 3), (2, 2), (1, 1), False, None),
    ((128, 256, 14, 14), (2, 2), (2, 2), (0, 0), False, None),
    ((128, 256, 14, 14), (3, 3), (2, 2), (1, 1), False, None),
    ((256, 512, 7, 7), (2, 2), (2, 2), (0, 0), False, None),
    ((256, 512, 7, 7), (3, 3), (2, 2), (1, 1), False, None),
    # large inputs with explicit output_size
    ((16, 32, 64, 64), (2, 2), (2, 2), (0, 0), True, (128, 128)),  # H,W: (64-1)*2 - 0 + 2 = 128
    ((16, 32, 64, 64), (3, 3), (2, 2), (1, 1), True, (127, 127)),  # H,W: (64-1)*2 - 2 + 3 = 127
    ((8, 64, 32, 48), (2, 2), (2, 2), (0, 0), True, (64, 96)),     # H: (32-1)*2+2=64, W: (48-1)*2+2=96
    ((8, 64, 32, 48), (3, 3), (2, 2), (1, 1), True, (63, 95)),     # H: (32-1)*2-2+3=63, W: (48-1)*2-2+3=95
    # ========== Edge cases ==========
    ((1, 1, 1, 1), (1, 1), (1, 1), (0, 0), False, None),
    ((1, 4, 2, 2), (2, 2), (2, 2), (0, 0), True, (4, 4)),    # H,W: (2-1)*2+2=4
    ((1, 2, 1, 8), (2, 2), (2, 2), (0, 1), False, None),
    ((1, 2, 3, 5), (2, 2), (2, 2), (1, 0), True, (4, 10)),   # H: (3-1)*2-2+2, W: (5-1)*2+2
]
# Per-dtype comparison tolerances; reduced-precision dtypes get looser bounds.
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-2, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}

# Floating-point dtypes exercised for the pooled data tensor.
_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]

# dtype used for the indices tensor (torch's max_unpool* takes int64 indices).
_INDEX_DTYPE = infinicore.int64
def
_kernel_elems_2d
(
kernel_size
):
"""Return number of elements in a 2D kernel."""
if
isinstance
(
kernel_size
,
int
):
kh
=
kw
=
kernel_size
else
:
kh
,
kw
=
kernel_size
return
int
(
kh
*
kw
)
def parse_test_cases():
    """
    Parse test case data and return list of TestCase objects.

    max_unpool2d takes two inputs:
      - pooled input tensor
      - indices tensor (int64), values in [0, kh * kw)
    """
    cases = []
    for case in _TEST_CASES_DATA:
        shape, kernel, stride, padding, with_output_size, out_hw = case
        # Exclusive upper bound for the randomly generated indices.
        idx_high = _kernel_elems_2d(kernel)
        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-2, "rtol": 1e-2})

            data_spec = TensorSpec.from_tensor(shape, None, dtype)

            # NOTE(review): torch documents max_unpool2d indices as the flat
            # positions produced by max_pool2d; random values in [0, kh * kw)
            # stay in range, but confirm this matches the framework's
            # intended semantics.
            idx_spec = TensorSpec.from_tensor(
                shape,
                None,
                _INDEX_DTYPE,
                init_mode=TensorInitializer.RANDINT,
                low=0,
                high=idx_high,
            )

            call_kwargs = {"kernel_size": kernel}
            if stride is not None:
                call_kwargs["stride"] = stride
            if padding is not None:
                call_kwargs["padding"] = padding
            if with_output_size and out_hw is not None:
                batch, channels, _, _ = shape
                height_in, width_in = out_hw
                call_kwargs["output_size"] = (batch, channels, height_in, width_in)

            cases.append(
                TestCase(
                    inputs=[data_spec, idx_spec],
                    kwargs=call_kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="MaxUnpool2d - OUT_OF_PLACE",
                )
            )
    return cases
class OpTest(BaseOperatorTest):
    """Operator test wrapper for MaxUnpool2d."""

    def __init__(self):
        super().__init__("MaxUnpool2d")

    def get_test_cases(self):
        # Cases are generated fresh from the module-level tables.
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """Reference implementation: delegate to PyTorch."""
        unpool = torch.nn.functional.max_unpool2d
        return unpool(*args, **kwargs)

    # Uncomment if InfiniCore implementation is available
    # def infinicore_operator(self, *args, **kwargs):
    #     return infinicore.nn.functional.max_unpool2d(*args, **kwargs)
def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
test/infinicore/ops/max_unpool3d.py
0 → 100644
View file @
fe164657
import
sys
import
os
sys
.
path
.
insert
(
0
,
os
.
path
.
join
(
os
.
path
.
dirname
(
__file__
),
".."
))
import
torch
import
infinicore
from
framework.base
import
BaseOperatorTest
,
TensorSpec
,
TestCase
from
framework.runner
import
GenericTestRunner
from
framework.tensor
import
TensorInitializer
# ==============================================================================
# Operator-specific configuration for max_unpool3d
# ==============================================================================
# Test cases format:
# (input_shape, kernel_size, stride_or_None, padding_or_None,
#  use_output_size, output_dhw_or_None)
#
# input_shape       : (N, C, D_out, H_out, W_out), pooled feature map
# kernel_size       : int or (kd, kh, kw)
# stride_or_None    : int / (sd, sh, sw) / None (None -> stride == kernel_size)
# padding_or_None   : int / (pd, ph, pw) / None (None -> padding == 0)
# use_output_size   : bool, whether to pass output_size explicitly
# output_dhw_or_None: (D_in, H_in, W_in) if use_output_size is True
#
# torch.nn.functional.max_unpool3d(
#     input, indices, kernel_size, stride=None, padding=0, output_size=None
# )
_TEST_CASES_DATA = [
    # ========== Basic cases ==========
    # small sizes with different stride/padding and optional output_size
    ((1, 1, 4, 4, 4), (2, 2, 2), (2, 2, 2), (0, 0, 0), False, None),
    ((2, 3, 2, 4, 4), (2, 2, 2), None, None, False, None),  # default stride / padding
    ((2, 3, 4, 4, 4), (3, 3, 3), (2, 2, 2), (1, 1, 1), False, None),
    ((2, 3, 4, 4, 4), (3, 3, 3), (2, 2, 2), (1, 1, 1), True, (7, 7, 7)),
    ((1, 4, 3, 5, 7), (2, 2, 2), (2, 2, 2), (0, 0, 0), False, None),
    ((1, 4, 3, 5, 7), (2, 2, 2), None, (1, 1, 1), True, (4, 8, 12)),
    # ========== Large-scale performance test cases ==========
    # larger volumes and batches for performance and stability
    ((4, 8, 8, 16, 16), (2, 2, 2), (2, 2, 2), (0, 0, 0), False, None),
    ((4, 8, 8, 16, 16), (3, 3, 3), (2, 2, 2), (1, 1, 1), False, None),
    ((8, 16, 4, 32, 32), (2, 2, 2), (2, 2, 2), (0, 0, 0), False, None),
    ((8, 16, 4, 32, 32), (2, 2, 2), (2, 2, 2), (0, 0, 0), True, (8, 64, 64)),
    ((8, 16, 4, 32, 32), (3, 3, 3), (2, 2, 2), (1, 1, 1), True, (7, 63, 63)),
    ((2, 32, 16, 16, 16), (2, 2, 2), (2, 2, 2), (0, 0, 0), False, None),
    ((2, 32, 16, 16, 16), (2, 2, 2), None, None, False, None),
    ((2, 32, 8, 32, 32), (2, 2, 2), (2, 2, 2), (0, 0, 0), False, None),
    ((2, 32, 8, 32, 32), (3, 3, 3), (2, 2, 2), (1, 1, 1), False, None),
    ((1, 64, 8, 64, 64), (2, 2, 2), (2, 2, 2), (0, 0, 0), False, None),
    ((1, 64, 8, 64, 64), (2, 2, 2), (2, 2, 2), (0, 0, 0), True, (16, 128, 128)),
    ((1, 64, 4, 64, 128), (2, 2, 2), (2, 2, 2), (0, 0, 0), False, None),
    # ========== Edge cases ==========
    # very small shapes and asymmetric sizes
    ((1, 1, 1, 1, 1), (1, 1, 1), (1, 1, 1), (0, 0, 0), False, None),
    ((1, 1, 1, 2, 2), (2, 2, 2), (2, 2, 2), (0, 0, 0), True, (2, 4, 4)),
    ((1, 1, 2, 2, 8), (2, 2, 2), (2, 2, 2), (0, 0, 0), False, None),
    ((1, 1, 2, 2, 8), (2, 2, 2), (2, 2, 2), (1, 1, 1), True, (2, 2, 14)),
]
# Per-dtype comparison tolerances; reduced-precision dtypes get looser bounds.
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-2, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}

# Floating-point dtypes exercised for the pooled data tensor.
_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]

# dtype used for the indices tensor (torch's max_unpool* takes int64 indices).
_INDEX_DTYPE = infinicore.int64
def
_kernel_elems_3d
(
kernel_size
):
"""Return number of elements in a 3D kernel."""
if
isinstance
(
kernel_size
,
int
):
kd
=
kh
=
kw
=
kernel_size
else
:
kd
,
kh
,
kw
=
kernel_size
return
int
(
kd
*
kh
*
kw
)
def parse_test_cases():
    """
    Parse test case data and return list of TestCase objects.

    max_unpool3d takes two inputs:
      - pooled input tensor
      - indices tensor (int64), values in [0, kd * kh * kw)
    """
    cases = []
    for case in _TEST_CASES_DATA:
        shape, kernel, stride, padding, with_output_size, out_dhw = case
        # Exclusive upper bound for the randomly generated indices.
        idx_high = _kernel_elems_3d(kernel)
        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-2, "rtol": 1e-2})

            data_spec = TensorSpec.from_tensor(shape, None, dtype)

            # NOTE(review): torch documents max_unpool3d indices as the flat
            # positions produced by max_pool3d; random values in
            # [0, kd * kh * kw) stay in range, but confirm this matches the
            # framework's intended semantics.
            idx_spec = TensorSpec.from_tensor(
                shape,
                None,
                _INDEX_DTYPE,
                init_mode=TensorInitializer.RANDINT,
                low=0,
                high=idx_high,
            )

            call_kwargs = {"kernel_size": kernel}
            if stride is not None:
                call_kwargs["stride"] = stride
            if padding is not None:
                call_kwargs["padding"] = padding
            if with_output_size and out_dhw is not None:
                batch, channels, _, _, _ = shape
                depth_in, height_in, width_in = out_dhw
                call_kwargs["output_size"] = (
                    batch,
                    channels,
                    depth_in,
                    height_in,
                    width_in,
                )

            cases.append(
                TestCase(
                    inputs=[data_spec, idx_spec],
                    kwargs=call_kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="MaxUnpool3d - OUT_OF_PLACE",
                )
            )
    return cases
class OpTest(BaseOperatorTest):
    """Operator test wrapper for MaxUnpool3d."""

    def __init__(self):
        super().__init__("MaxUnpool3d")

    def get_test_cases(self):
        # Cases are generated fresh from the module-level tables.
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """Reference implementation: delegate to PyTorch."""
        unpool = torch.nn.functional.max_unpool3d
        return unpool(*args, **kwargs)

    # Uncomment if InfiniCore implementation is available
    # def infinicore_operator(self, *args, **kwargs):
    #     return infinicore.nn.functional.max_unpool3d(*args, **kwargs)
def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
test/infinicore/ops/stack.py
0 → 100644
View file @
fe164657
import
sys
import
os
sys
.
path
.
insert
(
0
,
os
.
path
.
join
(
os
.
path
.
dirname
(
__file__
),
".."
))
import
torch
import
infinicore
from
framework.base
import
BaseOperatorTest
,
TensorSpec
,
TestCase
from
framework.runner
import
GenericTestRunner
# ==============================================================================
# Operator-specific configuration for stack
# ==============================================================================
# Test cases format: (base_shape, num_tensors, dim)
#
# base_shape  : shape of each input tensor
# num_tensors : number of tensors to stack
# dim         : dimension at which to insert the new axis
_TEST_CASES_DATA = [
    # ========== Basic cases ==========
    ((8,), 2, 0),
    ((8,), 4, 1),  # stack 1D tensors along a new last dim
    ((2, 3), 3, 0),
    ((2, 3), 3, 1),
    ((2, 3), 3, 2),
    ((4, 5, 6), 2, -1),
    ((4, 5, 6), 4, 0),
    ((3, 4, 5, 6), 2, 2),
    # ========== Large-scale performance test cases ==========
    ((1024,), 8, 0),
    ((2048,), 16, 0),
    ((256, 256), 4, 0),
    ((256, 256), 8, 1),
    ((64, 128, 128), 4, 0),
    ((64, 128, 128), 8, 1),
    ((32, 64, 64, 64), 4, 0),
    ((32, 64, 64, 64), 4, 2),
    ((16, 32, 64, 128), 8, 1),
    ((16, 32, 64, 128), 8, -1),
    ((8, 16, 32, 64), 16, 0),
    ((8, 16, 32, 64), 16, 3),
    # ========== Edge cases ==========
    ((1,), 2, 0),       # single element
    ((0, 3), 3, 0),     # zero-length dimension
    ((2, 0, 4), 4, 1),  # zero in middle dimension
    ((1, 1, 1), 1, 0),  # single tensor
]
# Per-dtype comparison tolerances; reduced-precision dtypes get looser bounds.
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}

# Floating-point dtypes exercised for the stacked input tensors.
_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]
def parse_test_cases():
    """
    Parse test case data and return list of TestCase objects for stack.
    """
    cases = []
    for base_shape, num_tensors, dim in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP[dtype]

            # One spec per tensor to stack; all share shape and dtype.
            specs = [
                TensorSpec.from_tensor(
                    base_shape,
                    None,
                    dtype,
                    name=f"input_{i}",
                )
                for i in range(num_tensors)
            ]

            cases.append(
                TestCase(
                    inputs=specs,
                    kwargs={"dim": dim},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="Stack - OUT_OF_PLACE",
                )
            )
    return cases
class OpTest(BaseOperatorTest):
    """Stack operator test with simplified implementation"""

    def __init__(self):
        super().__init__("Stack")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """Reference implementation via torch.stack.

        torch.stack takes a single sequence of tensors plus an optional
        ``dim``, while the sibling max_unpool tests show the framework
        forwarding each TestCase input as a separate positional argument.
        The previous ``torch.stack(*args, **kwargs)`` would therefore
        unpack the tensors into separate positionals and fail whenever
        more than one tensor is stacked; collect them into one sequence
        before delegating.
        """
        if len(args) == 1 and isinstance(args[0], (list, tuple)):
            # Already passed as a single sequence of tensors.
            tensors = args[0]
        else:
            # Each tensor passed as its own positional argument.
            tensors = args
        return torch.stack(tensors, **kwargs)

    # Uncomment if InfiniCore implementation is available
    # def infinicore_operator(self, *args, **kwargs):
    #     return infinicore.stack(*args, **kwargs)
def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment