gaoqiong / MIGraphX
Unverified commit a6fa5e4b, authored Oct 23, 2023 by Chris Austen; committed by GitHub on Oct 23, 2023.

Merge branch 'develop' into enable_navi_32_ci

Parents: b7a7cd3c, 7604ecf5
Changes: 247
Showing 20 changed files with 1542 additions and 32 deletions (+1542 -32):
test/onnx/gen_onnx.py                                       +1062 -32
test/onnx/group_norm_3d_half_test.onnx                      +30 -0
test/onnx/group_norm_3d_test.onnx                           +25 -0
test/onnx/group_norm_4d_half_test.onnx                      +32 -0
test/onnx/group_norm_4d_test.onnx                           +27 -0
test/onnx/group_norm_5d_half_test.onnx                      +34 -0
test/onnx/group_norm_5d_test.onnx                           +29 -0
test/onnx/group_norm_invalid_bias_shape_test.onnx           +27 -0
test/onnx/group_norm_invalid_input_count_error_test.onnx    +22 -0
test/onnx/group_norm_invalid_input_shape_error_test.onnx    +23 -0
test/onnx/group_norm_invalid_num_groups_error_test.onnx     +27 -0
test/onnx/group_norm_invalid_scale_shape_test.onnx          +27 -0
test/onnx/group_norm_missing_attribute_error_test.onnx      +21 -0
test/onnx/group_norm_small_eps_half_test.onnx               +30 -0
test/onnx/layer_norm_2d_axis_minus_one_test.onnx            +22 -0
test/onnx/layer_norm_2d_axis_one_test.onnx                  +22 -0
test/onnx/layer_norm_2d_axis_zero_test.onnx                 +0 -0
test/onnx/layer_norm_3d_half_test.onnx                      +28 -0
test/onnx/layer_norm_3d_test.onnx                           +24 -0
test/onnx/layer_norm_4d_half_test.onnx                      +30 -0
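Each generator in gen_onnx.py below returns a tuple of ([nodes], [graph inputs], [graph outputs]), plus an optional fourth list of initializer tensors for the quantized and Trilu cases; the @onnx_test() decorator (defined earlier in gen_onnx.py and not part of this diff) registers the function and turns that tuple into a saved .onnx file named after the test. As a rough, hypothetical sketch of that mechanism (onnx_test_sketch and wrapper are illustrative names, not MIGraphX code), assuming only the standard onnx helper APIs:

import onnx
from onnx import helper


def onnx_test_sketch():
    # Assumed behaviour: build and save a model named after the decorated
    # test function from its (nodes, inputs, outputs[, initializers]) tuple.
    def decorate(fn):
        def wrapper():
            ret = fn()
            nodes, inputs, outputs = ret[0], ret[1], ret[2]
            initializers = ret[3] if len(ret) > 3 else None
            graph = helper.make_graph(nodes,
                                      fn.__name__,
                                      inputs,
                                      outputs,
                                      initializer=initializers)
            model = helper.make_model(graph)
            onnx.save(model, fn.__name__ + '.onnx')
            return model
        return wrapper
    return decorate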
test/onnx/gen_onnx.py
@@ -149,6 +149,21 @@ def argmax_test():
    return ([node], [x], [y])

+@onnx_test()
+def argmax_select_last_index_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])
+    node = onnx.helper.make_node('ArgMax',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 axis=2,
+                                 keepdims=0,
+                                 select_last_index=1)
+    return ([node], [x], [y])
+
@onnx_test()
def argmax_dyn_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 4, 5, 6])
@@ -177,6 +192,21 @@ def argmin_test():
    return ([node], [x], [y])

+@onnx_test()
+def argmin_select_last_index_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 5])
+    node = onnx.helper.make_node('ArgMin',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 axis=3,
+                                 keepdims=0,
+                                 select_last_index=1)
+    return ([node], [x], [y])
+
@onnx_test()
def asin_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
@@ -582,6 +612,29 @@ def cast_test():
    return ([node], [x], [y])

+@onnx_test()
+def castlike_test():
+    input = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [10])
+    target_type = helper.make_tensor_value_info('1', TensorProto.FLOAT, [10])
+    output = helper.make_tensor_value_info('out', TensorProto.FLOAT, [10])
+    node = onnx.helper.make_node('CastLike', inputs=['0', '1'], outputs=['out'])
+    return ([node], [input, target_type], [output])
+
+@onnx_test()
+def castlike_error_test():
+    input = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [10])
+    output = helper.make_tensor_value_info('out', TensorProto.FLOAT, [10])
+    node = onnx.helper.make_node('CastLike', inputs=['0'], outputs=['out'])
+    return ([node], [input], [output])
+
@onnx_test()
def ceil_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
@@ -1007,9 +1060,9 @@ def const_of_shape_empty_input_test():
                                         [10])
    empty_val = np.array([]).astype(np.int64)
    empty_ts = helper.make_tensor(name='empty_tensor',
-                                  data_type=TensorProto.INT32,
+                                  data_type=TensorProto.INT64,
                                  dims=empty_val.shape,
-                                  vals=empty_val.flatten().astype(int))
+                                  vals=empty_val.flatten().astype(np.int64))
    shape_const = helper.make_node(
        'Constant',
        inputs=[],
@@ -1035,9 +1088,9 @@ def const_of_shape_float_test():
    shape_val = np.array([2, 3, 4]).astype(np.int64)
    shape_ts = helper.make_tensor(name='shape_tensor',
-                                  data_type=TensorProto.INT32,
+                                  data_type=TensorProto.INT64,
                                  dims=shape_val.shape,
-                                  vals=shape_val.flatten().astype(int))
+                                  vals=shape_val.flatten().astype(np.int64))
    shape_const = helper.make_node(
        'Constant',
@@ -1055,22 +1108,44 @@ def const_of_shape_float_test():
    return ([shape_const, node], [], [y])

+@onnx_test()
+def const_of_shape_default_test():
+    shape_val = np.array([2, 3, 4]).astype(np.int64)
+    shape_ts = helper.make_tensor(name='shape_tensor',
+                                  data_type=TensorProto.INT64,
+                                  dims=shape_val.shape,
+                                  vals=shape_val.flatten().astype(np.int64))
+    shape_const = helper.make_node('Constant',
+                                   inputs=[],
+                                   outputs=['shape'],
+                                   value=shape_ts)
+    y = helper.make_tensor_value_info('y', TensorProto.INT64, [2, 3, 4])
+    node = onnx.helper.make_node('ConstantOfShape', inputs=['shape'], outputs=['y'])
+    return ([shape_const, node], [], [y])
+
@onnx_test()
def const_of_shape_int64_test():
    tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.INT64, [1],
                                         [10])
    shape_val = np.array([2, 3, 4]).astype(np.int64)
    shape_ts = helper.make_tensor(name='shape_tensor',
-                                  data_type=TensorProto.INT32,
+                                  data_type=TensorProto.INT64,
                                  dims=shape_val.shape,
-                                  vals=shape_val.flatten().astype(int))
+                                  vals=shape_val.flatten().astype(np.int64))
    shape_const = helper.make_node('Constant',
                                   inputs=[],
                                   outputs=['shape'],
                                   value=shape_ts)
-    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4])
+    y = helper.make_tensor_value_info('y', TensorProto.INT64, [2, 3, 4])
    node = onnx.helper.make_node('ConstantOfShape',
                                 inputs=['shape'],
@@ -1084,9 +1159,9 @@ def const_of_shape_int64_test():
def const_of_shape_no_value_attr_test():
    shape_val = np.array([2, 3, 4]).astype(np.int64)
    shape_ts = helper.make_tensor(name='shape_tensor',
-                                  data_type=TensorProto.INT32,
+                                  data_type=TensorProto.INT64,
                                  dims=shape_val.shape,
-                                  vals=shape_val.flatten().astype(int))
+                                  vals=shape_val.flatten().astype(np.int64))
    shape_const = helper.make_node(
        'Constant',
        inputs=[],
@@ -1104,6 +1179,40 @@ def const_of_shape_no_value_attr_test():
    return ([shape_const, node], [], [y])

+@onnx_test()
+def const_of_shape_dyn_float_test():
+    tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.FLOAT, [1], [10])
+    output_dims = helper.make_tensor_value_info('output_dims', TensorProto.INT64, [3])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4])
+    node = onnx.helper.make_node('ConstantOfShape',
+                                 inputs=['output_dims'],
+                                 outputs=['y'],
+                                 value=tensor_val)
+    return ([node], [output_dims], [y])
+
+@onnx_test()
+def const_of_shape_dyn_int64_test():
+    tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.INT64, [1], [10])
+    output_dims = helper.make_tensor_value_info('output_dims', TensorProto.INT64, [3])
+    y = helper.make_tensor_value_info('y', TensorProto.INT64, [2, 3, 4])
+    node = onnx.helper.make_node('ConstantOfShape',
+                                 inputs=['output_dims'],
+                                 outputs=['y'],
+                                 value=tensor_val)
+    return ([node], [output_dims], [y])
+
@onnx_test()
def conv_1d_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5])
@@ -2643,6 +2752,119 @@ def group_conv_test():
    return ([node], [x, y], [z])

+def group_norm_test(x_dims, scale_dims, bias_dims, y_dims, num_groups,
+                    eps_value=1e-5, dtype=TensorProto.FLOAT):
+    x = helper.make_tensor_value_info('x', dtype, x_dims)
+    scale = helper.make_tensor_value_info('scale', dtype, scale_dims)
+    bias = helper.make_tensor_value_info('bias', dtype, bias_dims)
+    y = helper.make_tensor_value_info('y', dtype, y_dims)
+    node = onnx.helper.make_node('GroupNormalization',
+                                 inputs=['x', 'scale', 'bias'],
+                                 outputs=['y'],
+                                 num_groups=num_groups,
+                                 epsilon=eps_value)
+    return ([node], [x, scale, bias], [y])
+
+@onnx_test()
+def group_norm_3d_test():
+    return group_norm_test([1, 4, 2], [2], [2], [1, 4, 2], 2)
+
+@onnx_test()
+def group_norm_3d_half_test():
+    return group_norm_test([1, 4, 2], [2], [2], [1, 4, 2], 2, dtype=TensorProto.FLOAT16)
+
+@onnx_test()
+def group_norm_4d_test():
+    return group_norm_test([1, 4, 3, 3], [2], [2], [1, 4, 3, 3], 2)
+
+@onnx_test()
+def group_norm_4d_half_test():
+    return group_norm_test([1, 4, 3, 3], [2], [2], [1, 4, 3, 3], 2, dtype=TensorProto.FLOAT16)
+
+@onnx_test()
+def group_norm_5d_test():
+    return group_norm_test([3, 3, 3, 3, 3], [1], [1], [3, 3, 3, 3, 3], 1)
+
+@onnx_test()
+def group_norm_5d_half_test():
+    return group_norm_test([3, 3, 3, 3, 3], [1], [1], [3, 3, 3, 3, 3], 1, dtype=TensorProto.FLOAT16)
+
+@onnx_test()
+def group_norm_small_eps_half_test():
+    return group_norm_test([1, 4, 2], [2], [2], [1, 4, 2], 2,
+                           eps_value=1e-12, dtype=TensorProto.FLOAT16)
+
+@onnx_test()
+def group_norm_invalid_num_groups_error_test():
+    return group_norm_test([1, 4, 3, 3], [2], [2], [1, 4, 3, 3], 3)
+
+@onnx_test()
+def group_norm_missing_attribute_error_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4])
+    scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [2])
+    bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [2])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4])
+    node = onnx.helper.make_node('GroupNormalization',
+                                 inputs=['x', 'scale', 'bias'],
+                                 outputs=['y'])
+    return ([node], [x, scale, bias], [y])
+
+@onnx_test()
+def group_norm_invalid_input_count_error_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4, 3, 3])
+    scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [2])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4, 3, 3])
+    node = onnx.helper.make_node('GroupNormalization',
+                                 inputs=['x', 'scale'],
+                                 outputs=['y'],
+                                 num_groups=2)
+    return ([node], [x, scale], [y])
+
+@onnx_test()
+def group_norm_invalid_input_shape_error_test():
+    return group_norm_test([1, 4], [2], [2], [1, 4], 2)
+
+@onnx_test()
+def group_norm_invalid_scale_shape_test():
+    return group_norm_test([1, 4, 3, 3], [1], [2], [1, 4, 3, 3], 2)
+
+@onnx_test()
+def group_norm_invalid_bias_shape_test():
+    return group_norm_test([1, 4, 3, 3], [2], [3], [1, 4, 3, 3], 2)
+
@onnx_test()
def hardsigmoid_default_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 3, 4, 5])
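The group_norm_* cases above exercise the ONNX GroupNormalization operator with per-group scale and bias (note the scale_dims/bias_dims of [2] against 4 channels and num_groups=2). A minimal NumPy sketch of that semantics, as a reading aid only and not MIGraphX's implementation:

import numpy as np


def group_norm_reference(x, scale, bias, num_groups, eps=1e-5):
    # Normalize each group of channels to zero mean / unit variance,
    # then apply the per-group scale and bias.
    n = x.shape[0]
    grouped = x.reshape(n, num_groups, -1)
    mean = grouped.mean(axis=-1, keepdims=True)
    var = grouped.var(axis=-1, keepdims=True)
    normed = (grouped - mean) / np.sqrt(var + eps)
    normed = normed * scale.reshape(1, num_groups, 1) + bias.reshape(1, num_groups, 1)
    return normed.reshape(x.shape)


# e.g. group_norm_3d_test: x of shape [1, 4, 2], scale and bias of shape [2], num_groups=2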
@@ -3725,6 +3947,110 @@ def layernorm_test():
            bias_add], [x, scale, bias], [y], [pow_tensor, epsilon_tensor])

+def make_layer_norm(shape, axis, dtype=TensorProto.FLOAT):
+    norm_axis = axis + len(shape) if axis < 0 else axis
+    x = helper.make_tensor_value_info('x', dtype, shape)
+    scale = helper.make_tensor_value_info('scale', dtype, shape[norm_axis:])
+    bias = helper.make_tensor_value_info('bias', dtype, shape[norm_axis:])
+    y = helper.make_tensor_value_info('y', dtype, shape)
+    node = onnx.helper.make_node('LayerNormalization',
+                                 inputs=['x', 'scale', 'bias'],
+                                 outputs=['y'],
+                                 axis=axis)
+    return ([node], [x, scale, bias], [y])
+
+@onnx_test()
+def layer_norm_invalid_shape_error_test():
+    return make_layer_norm([3], 0)
+
+@onnx_test()
+def layer_norm_2d_axis_zero_test():
+    return make_layer_norm([3, 4], 0)
+
+@onnx_test()
+def layer_norm_2d_axis_one_test():
+    return make_layer_norm([3, 4], 1)
+
+@onnx_test()
+def layer_norm_2d_axis_minus_one_test():
+    return make_layer_norm([3, 4], -1)
+
+@onnx_test()
+def layer_norm_3d_test():
+    return make_layer_norm([1, 4, 2], -1)
+
+@onnx_test()
+def layer_norm_3d_half_test():
+    return make_layer_norm([1, 4, 2], -1, TensorProto.FLOAT16)
+
+@onnx_test()
+def layer_norm_4d_test():
+    return make_layer_norm([3, 3, 3, 3], -1)
+
+@onnx_test()
+def layer_norm_4d_half_test():
+    return make_layer_norm([3, 3, 3, 3], -1, TensorProto.FLOAT16)
+
+@onnx_test()
+def layer_norm_invalid_axis_error_test():
+    return make_layer_norm([1, 4, 2], 1000)
+
+@onnx_test()
+def layer_norm_invalid_minus_axis_error_test():
+    return make_layer_norm([1, 4, 2], -1000)
+
+@onnx_test()
+def layer_norm_invalid_input_count_error_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 2])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2])
+    node = onnx.helper.make_node('LayerNormalization', inputs=['x'], outputs=['y'])
+    return ([node], [x], [y])
+
+@onnx_test()
+def layer_norm_without_bias_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 2])
+    scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [2])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2])
+    node = onnx.helper.make_node('LayerNormalization', inputs=['x', 'scale'], outputs=['y'])
+    return ([node], [x, scale], [y])
+
+@onnx_test()
+def layer_norm_small_eps_half_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [1, 2])
+    scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT16, [2])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [1, 2])
+    node = onnx.helper.make_node('LayerNormalization',
+                                 inputs=['x', 'scale'],
+                                 outputs=['y'],
+                                 epsilon=1e-12)
+    return ([node], [x, scale], [y])
+
@onnx_test()
def leaky_relu_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
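In make_layer_norm above, a negative axis is first folded into norm_axis (for example, axis=-1 on a [3, 4] input gives norm_axis=1), and scale/bias take the trailing shape shape[norm_axis:]. A minimal NumPy sketch of the LayerNormalization semantics these cases target, assumed from the standard ONNX definition and offered as a reference only:

import numpy as np


def layer_norm_reference(x, scale, bias, axis=-1, eps=1e-5):
    # Normalize over all dimensions from `axis` onward.
    norm_axis = axis + x.ndim if axis < 0 else axis
    axes = tuple(range(norm_axis, x.ndim))
    mean = x.mean(axis=axes, keepdims=True)
    var = x.var(axis=axes, keepdims=True)
    return (x - mean) / np.sqrt(var + eps) * scale + bias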
@@ -4385,6 +4711,77 @@ def mean_integral_test():
    return ([node], data, [mean])

+def mvn_default_axes_test_base(dims, type=TensorProto.FLOAT):
+    data = helper.make_tensor_value_info("data", type, dims)
+    out = helper.make_tensor_value_info("out", type, dims)
+    node = helper.make_node("MeanVarianceNormalization",
+                            inputs=["data"],
+                            outputs=["out"])
+    return ([node], [data], [out])
+
+@onnx_test()
+def mvn_default_axes_test():
+    return mvn_default_axes_test_base([2, 2, 2, 2])
+
+@onnx_test()
+def mvn_default_axes_fp16_test():
+    return mvn_default_axes_test_base([2, 2, 2, 2], TensorProto.FLOAT16)
+
+@onnx_test()
+def mvn_default_axes_rank_too_small_test():
+    return mvn_default_axes_test_base([2, 2, 2])
+
+@onnx_test()
+def mvn_default_axes_rank_too_big_test():
+    return mvn_default_axes_test_base([2, 2, 2, 2, 2])
+
+def mvn_n_rank_test_base(axes, dims, type=TensorProto.FLOAT):
+    data = helper.make_tensor_value_info("data", type, dims)
+    out = helper.make_tensor_value_info("out", type, dims)
+    node = helper.make_node("MeanVarianceNormalization",
+                            inputs=["data"],
+                            outputs=["out"],
+                            axes=axes)
+    return ([node], [data], [out])
+
+@onnx_test()
+def mvn_rank_2_test():
+    return mvn_n_rank_test_base([1], [2, 2])
+
+@onnx_test()
+def mvn_rank_2_fp16_test():
+    return mvn_n_rank_test_base([1], [2, 2], TensorProto.FLOAT16)
+
+@onnx_test()
+def mvn_rank_3_test():
+    return mvn_n_rank_test_base([0, 1], [2, 2, 2])
+
+@onnx_test()
+def mvn_rank_3_fp16_test():
+    return mvn_n_rank_test_base([0, 1], [2, 2, 2], TensorProto.FLOAT16)
+
+@onnx_test()
+def mvn_axes_rank_too_small_test():
+    return mvn_n_rank_test_base([0, 1, 2], [2, 2, 2])
+
+@onnx_test()
+def mvn_axes_rank_too_big_test():
+    return mvn_n_rank_test_base([0], [2, 2, 2])
+
@onnx_test()
def min_test():
    a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
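For the mvn_* cases above, MeanVarianceNormalization standardizes the input to zero mean and unit variance over the given axes; the ONNX default axes are [0, 2, 3], which is presumably why the default-axes variants treat rank-3 and rank-5 inputs as error cases. A one-function NumPy sketch of that semantics (reference only):

import numpy as np


def mvn_reference(data, axes=(0, 2, 3)):
    mean = data.mean(axis=axes, keepdims=True)
    std = data.std(axis=axes, keepdims=True)
    return (data - mean) / std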
@@ -4812,24 +5209,50 @@ def pad_test():
@onnx_test()
+def pad_asym_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 4, 5])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 6, 4, 12])
+    node = onnx.helper.make_node('Pad',
+                                 inputs=['0'],
+                                 pads=[0, 1, 0, 3, 0, 2, 0, 4],
+                                 outputs=['1'])
+    return ([node], [x], [y])
+
+@onnx_test()
+def pad_asym_invalid_pads_error_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 4, 5])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 6, 4, 12])
+    node = onnx.helper.make_node('Pad',
+                                 inputs=['0'],
+                                 pads=[0, 1, 0, 3, 0, 2],
+                                 outputs=['1'])
+    return ([node], [x], [y])
+
+@onnx_test()
def pad_3arg_test():
    values = np.array([1])
    val_tensor = helper.make_tensor(name='val', data_type=TensorProto.FLOAT,
                                    dims=values.reshape(()).shape, vals=values.astype(float))
    arg_val = onnx.helper.make_node('Constant', inputs=[], outputs=['arg_val'],
                                    value=val_tensor)
    sizes = np.array([1, 1, 2, 2])
    pad_tensor = helper.make_tensor(name='pad_size', data_type=TensorProto.INT32,
                                    dims=sizes.shape, vals=sizes.astype(int))
    arg_pad = onnx.helper.make_node('Constant', inputs=[], outputs=['arg_pad'],
                                    value=pad_tensor)
@@ -4843,6 +5266,129 @@ def pad_3arg_test():
    return ([arg_val, arg_pad, node], [x], [y])

+@onnx_test()
+def pad_4arg_axes_test():
+    values = np.array([1])
+    val_tensor = helper.make_tensor(name='val', data_type=TensorProto.FLOAT,
+                                    dims=values.reshape(()).shape, vals=values.astype(float))
+    arg_val = onnx.helper.make_node('Constant', inputs=[], outputs=['arg_val'],
+                                    value=val_tensor)
+    sizes = np.array([1, 3, 2, 4])
+    pad_tensor = helper.make_tensor(name='pad_size', data_type=TensorProto.INT32,
+                                    dims=sizes.shape, vals=sizes.astype(int))
+    arg_pad = onnx.helper.make_node('Constant', inputs=[], outputs=['arg_pad'],
+                                    value=pad_tensor)
+    axes = np.array([1, 3])
+    axes_tensor = helper.make_tensor(name='pad_axes', data_type=TensorProto.INT32,
+                                     dims=axes.shape, vals=axes.astype(int))
+    arg_axes = onnx.helper.make_node('Constant', inputs=[], outputs=['arg_axes'],
+                                     value=axes_tensor)
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 4, 5])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 6, 4, 12])
+    node = onnx.helper.make_node('Pad',
+                                 inputs=['0', 'arg_pad', 'arg_val', 'arg_axes'],
+                                 outputs=['1'])
+    return ([arg_axes, arg_val, arg_pad, node], [x], [y])
+
+@onnx_test()
+def pad_4arg_invalid_axes_error_test():
+    values = np.array([1])
+    val_tensor = helper.make_tensor(name='val', data_type=TensorProto.FLOAT,
+                                    dims=values.reshape(()).shape, vals=values.astype(float))
+    arg_val = onnx.helper.make_node('Constant', inputs=[], outputs=['arg_val'],
+                                    value=val_tensor)
+    sizes = np.array([1, 3, 2, 4])
+    pad_tensor = helper.make_tensor(name='pad_size', data_type=TensorProto.INT32,
+                                    dims=sizes.shape, vals=sizes.astype(int))
+    arg_pad = onnx.helper.make_node('Constant', inputs=[], outputs=['arg_pad'],
+                                    value=pad_tensor)
+    axes = np.array([1, 2, 3])
+    axes_tensor = helper.make_tensor(name='pad_axes', data_type=TensorProto.INT32,
+                                     dims=axes.shape, vals=axes.astype(int))
+    arg_axes = onnx.helper.make_node('Constant', inputs=[], outputs=['arg_axes'],
+                                     value=axes_tensor)
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 4, 5])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 6, 4, 12])
+    node = onnx.helper.make_node('Pad',
+                                 inputs=['0', 'arg_pad', 'arg_val', 'arg_axes'],
+                                 outputs=['1'])
+    return ([arg_axes, arg_val, arg_pad, node], [x], [y])
+
+@onnx_test()
+def pad_4arg_neg_axes_test():
+    values = np.array([1])
+    val_tensor = helper.make_tensor(name='val', data_type=TensorProto.FLOAT,
+                                    dims=values.reshape(()).shape, vals=values.astype(float))
+    arg_val = onnx.helper.make_node('Constant', inputs=[], outputs=['arg_val'],
+                                    value=val_tensor)
+    sizes = np.array([1, 3, 2, 4])
+    pad_tensor = helper.make_tensor(name='pad_size', data_type=TensorProto.INT32,
+                                    dims=sizes.shape, vals=sizes.astype(int))
+    arg_pad = onnx.helper.make_node('Constant', inputs=[], outputs=['arg_pad'],
+                                    value=pad_tensor)
+    axes = np.array([-3, -1])
+    axes_tensor = helper.make_tensor(name='pad_axes', data_type=TensorProto.INT32,
+                                     dims=axes.shape, vals=axes.astype(int))
+    arg_axes = onnx.helper.make_node('Constant', inputs=[], outputs=['arg_axes'],
+                                     value=axes_tensor)
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 4, 5])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 6, 4, 12])
+    node = onnx.helper.make_node('Pad',
+                                 inputs=['0', 'arg_pad', 'arg_val', 'arg_axes'],
+                                 outputs=['1'])
+    return ([arg_axes, arg_val, arg_pad, node], [x], [y])
+
@onnx_test()
def pad_reflect_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2])
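In pad_4arg_axes_test above, pads=[1, 3, 2, 4] is interpreted against axes=[1, 3]: the pads list holds all begin values for the selected axes followed by all end values, so axis 1 is padded by (1, 2) and axis 3 by (3, 4), which is how the [1, 3, 4, 5] input becomes the declared [1, 6, 4, 12] output. A small NumPy check of that arithmetic (illustration only, not MIGraphX code):

import numpy as np

x = np.zeros((1, 3, 4, 5))
pads, axes = [1, 3, 2, 4], [1, 3]  # values from pad_4arg_axes_test
pad_width = [(0, 0)] * x.ndim
for i, axis in enumerate(axes):
    pad_width[axis] = (pads[i], pads[i + len(axes)])
y = np.pad(x, pad_width, constant_values=1)
assert y.shape == (1, 6, 4, 12)  # matches the declared output shape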
@@ -4866,6 +5412,39 @@ def pad_reflect_test():
    return ([arg_pad, node], [x], [y])

+@onnx_test()
+def pad_reflect_with_axes_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 5])
+    sizes = np.array([2, 1])
+    pad_tensor = helper.make_tensor(name='pad_size', data_type=TensorProto.INT32,
+                                    dims=sizes.shape, vals=sizes.astype(int))
+    arg_pad = onnx.helper.make_node('Constant', inputs=[], outputs=['arg_pad'],
+                                    value=pad_tensor)
+    axes = np.array([1])
+    axes_tensor = helper.make_tensor(name='pad_axes', data_type=TensorProto.INT32,
+                                     dims=axes.shape, vals=axes.astype(int))
+    arg_axes = onnx.helper.make_node('Constant', inputs=[], outputs=['arg_axes'],
+                                     value=axes_tensor)
+    node = onnx.helper.make_node('Pad',
+                                 mode='reflect',
+                                 inputs=['0', 'arg_pad', 'arg_axes'],
+                                 outputs=['1'])
+    return ([arg_axes, arg_pad, node], [x], [y])
+
@onnx_test()
def pad_reflect_multiaxis_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3])
@@ -5017,6 +5596,278 @@ def prelu_brcst_test():
    return ([node], [arg0, arg1], [arg_out])

+@onnx_test()
+def qlinearadd_test():
+    a = helper.make_tensor_value_info('A', TensorProto.UINT8, [64])
+    sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.05])
+    zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.UINT8, [], [0])
+    b = helper.make_tensor_value_info('B', TensorProto.UINT8, [64])
+    sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.05])
+    zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.UINT8, [], [128])
+    sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.05])
+    zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.UINT8, [], [64])
+    c = helper.make_tensor_value_info('C', TensorProto.UINT8, [64])
+    node = onnx.helper.make_node('QLinearAdd',
+                                 inputs=['A', 'A_scale', 'A_zero_point',
+                                         'B', 'B_scale', 'B_zero_point',
+                                         'C_scale', 'C_zero_point'],
+                                 outputs=['C'])
+    return ([node], [a, b], [c],
+            [sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
+
+@onnx_test()
+def qlinearadd_bcast_test():
+    a = helper.make_tensor_value_info('A', TensorProto.INT8, [64])
+    sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.05])
+    zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.INT8, [], [0])
+    b = helper.make_tensor_value_info('B', TensorProto.INT8, [1, 1, 64])
+    sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.05])
+    zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.INT8, [], [32])
+    sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.05])
+    zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.INT8, [], [-64])
+    c = helper.make_tensor_value_info('C', TensorProto.INT8, [1, 1, 64])
+    node = onnx.helper.make_node('QLinearAdd',
+                                 inputs=['A', 'A_scale', 'A_zero_point',
+                                         'B', 'B_scale', 'B_zero_point',
+                                         'C_scale', 'C_zero_point'],
+                                 outputs=['C'])
+    return ([node], [a, b], [c],
+            [sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
+
+@onnx_test()
+def qlinearconv_test():
+    # https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__QLinearConv.html
+    x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 7, 7])
+    sc_x = helper.make_tensor('1', TensorProto.FLOAT, [], [0.00369204697])
+    zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [132])
+    wt = helper.make_tensor('3', TensorProto.UINT8, [1, 1, 1, 1], [0])
+    sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [], [0.00172794575])
+    zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [], [255])
+    sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.00162681262])
+    zero_pt_y = helper.make_tensor('7', TensorProto.UINT8, [], [123])
+    out = helper.make_tensor_value_info('out', TensorProto.UINT8, [1, 1, 7, 7])
+    node = onnx.helper.make_node('QLinearConv',
+                                 inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
+                                 outputs=['out'])
+    return ([node], [x], [out],
+            [sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])
+
+@onnx_test()
+def qlinearconv_pad_1_test():
+    # https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
+    x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 5, 5])
+    sc_x = helper.make_tensor('1', TensorProto.FLOAT, [], [0.09411764705882353])
+    zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [0])
+    wt = helper.make_tensor('3', TensorProto.UINT8, [1, 1, 3, 3],
+                            [1, 1, 1, 1, 1, 1, 1, 1, 1])
+    sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [], [1.0])
+    zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [], [0])
+    sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.6352941176470588])
+    zero_pt_y = helper.make_tensor('7', TensorProto.UINT8, [], [0])
+    out = helper.make_tensor_value_info('out', TensorProto.UINT8, [1, 1, 5, 5])
+    node = onnx.helper.make_node('QLinearConv',
+                                 inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
+                                 outputs=['out'],
+                                 pads=[1, 1, 1, 1])
+    return ([node], [x], [out],
+            [sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])
+
+@onnx_test()
+def qlinearconv_pad_0_test():
+    # https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
+    x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 5, 5])
+    sc_x = helper.make_tensor('1', TensorProto.FLOAT, [], [0.09411764705882353])
+    zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [0])
+    wt = helper.make_tensor('3', TensorProto.UINT8, [1, 1, 3, 3],
+                            [1, 1, 1, 1, 1, 1, 1, 1, 1])
+    sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [], [1.0])
+    zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [], [0])
+    sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.6352941176470588])
+    zero_pt_y = helper.make_tensor('7', TensorProto.INT8, [], [-128])
+    out = helper.make_tensor_value_info('out', TensorProto.INT8, [1, 1, 3, 3])
+    node = onnx.helper.make_node('QLinearConv',
+                                 inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
+                                 outputs=['out'],
+                                 pads=[0, 0, 0, 0])
+    return ([node], [x], [out],
+            [sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])
+
+@onnx_test()
+def qlinearconv_scale_1D_test():
+    # https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
+    x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 5, 5])
+    sc_x = helper.make_tensor('1', TensorProto.FLOAT, [], [0.09411764705882353])
+    zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [0])
+    wt = helper.make_tensor('3', TensorProto.UINT8, [2, 1, 3, 3],
+                            [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2])
+    sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [2], [1.0, 0.5])
+    zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [2], [0, 0])
+    sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.6352941176470588])
+    zero_pt_y = helper.make_tensor('7', TensorProto.INT8, [], [-128])
+    out = helper.make_tensor_value_info('out', TensorProto.INT8, [1, 2, 3, 3])
+    node = onnx.helper.make_node('QLinearConv',
+                                 inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
+                                 outputs=['out'],
+                                 pads=[0, 0, 0, 0])
+    return ([node], [x], [out],
+            [sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])
+
+@onnx_test()
+def qlinearglobalavgpool_test():
+    x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 3, 4, 4])
+    sc_x = helper.make_tensor('X_scale', TensorProto.FLOAT, [], [0.05])
+    z_pt_x = helper.make_tensor('X_zero_point', TensorProto.UINT8, [], [128])
+    y = helper.make_tensor_value_info('Y', TensorProto.UINT8, [1, 3, 1, 1])
+    sc_y = helper.make_tensor('Y_scale', TensorProto.FLOAT, [], [0.025])
+    z_pt_y = helper.make_tensor('Y_zero_point', TensorProto.UINT8, [], [64])
+    n = onnx.helper.make_node('QLinearGlobalAveragePool',
+                              inputs=['X', 'X_scale', 'X_zero_point',
+                                      'Y_scale', 'Y_zero_point'],
+                              outputs=['Y'],
+                              channels_last=0)
+    return ([n], [x], [y], [sc_x, z_pt_x, sc_y, z_pt_y])
+
+def qlinearmatmul_1D_test():
+    a = helper.make_tensor_value_info('A', TensorProto.UINT8, [8])
+    sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.05])
+    zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.UINT8, [], [0])
+    b = helper.make_tensor_value_info('B', TensorProto.UINT8, [8])
+    sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.05])
+    zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.UINT8, [], [128])
+    sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.05])
+    zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.UINT8, [], [64])
+    c = helper.make_tensor_value_info('C', TensorProto.UINT8, [1])
+    node = onnx.helper.make_node('QLinearMatMul',
+                                 inputs=['A', 'A_scale', 'A_zero_point',
+                                         'B', 'B_scale', 'B_zero_point',
+                                         'C_scale', 'C_zero_point'],
+                                 outputs=['C'])
+    return ([node], [a, b], [c],
+            [sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
+
+@onnx_test()
+def qlinearmatmul_2D_test():
+    a = helper.make_tensor_value_info('A', TensorProto.UINT8, [1, 8])
+    sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.05])
+    zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.UINT8, [], [0])
+    b = helper.make_tensor_value_info('B', TensorProto.UINT8, [8, 1])
+    sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.05])
+    zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.UINT8, [], [128])
+    sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.05])
+    zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.UINT8, [], [64])
+    c = helper.make_tensor_value_info('C', TensorProto.UINT8, [1, 1])
+    node = onnx.helper.make_node('QLinearMatMul',
+                                 inputs=['A', 'A_scale', 'A_zero_point',
+                                         'B', 'B_scale', 'B_zero_point',
+                                         'C_scale', 'C_zero_point'],
+                                 outputs=['C'])
+    return ([node], [a, b], [c],
+            [sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
+
+@onnx_test()
+def qlinearmatmul_3D_test():
+    a = helper.make_tensor_value_info('A', TensorProto.UINT8, [2, 2, 4])
+    sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.0066])
+    zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.UINT8, [], [113])
+    b = helper.make_tensor_value_info('B', TensorProto.UINT8, [2, 4, 3])
+    sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.00705])
+    zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.UINT8, [], [114])
+    sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.0107])
+    zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.UINT8, [], [118])
+    c = helper.make_tensor_value_info('C', TensorProto.UINT8, [2, 2, 3])
+    node = onnx.helper.make_node('QLinearMatMul',
+                                 inputs=['A', 'A_scale', 'A_zero_point',
+                                         'B', 'B_scale', 'B_zero_point',
+                                         'C_scale', 'C_zero_point'],
+                                 outputs=['C'])
+    return ([node], [a, b], [c],
+            [sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
+
@onnx_test()
def quantizelinear_test():
    arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5])
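All of the QLinear* cases above share the same quantization convention: an int8/uint8 tensor q together with a scale and zero point represents the real values (q - zero_point) * scale, and the operator's real-valued result is re-quantized with the output scale and zero point. A short sketch of that arithmetic (illustrating the convention only, not the operator implementations):

import numpy as np


def dequantize(q, scale, zero_point):
    return (q.astype(np.float32) - zero_point) * scale


def quantize(x, scale, zero_point, dtype=np.uint8):
    info = np.iinfo(dtype)
    q = np.rint(x / scale) + zero_point
    return np.clip(q, info.min, info.max).astype(dtype)


# e.g. qlinearadd_test: C = quantize(dequantize(A, 0.05, 0) + dequantize(B, 0.05, 128), 0.05, 64)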
@@ -5714,6 +6565,24 @@ def reshape_non_standard_test():
    return ([trans, res], [x], [y])

+@onnx_test()
+def reshape_variable_input_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [4, 2, 3])
+    x_shape = helper.make_tensor_value_info('1', TensorProto.INT64, [2])
+    y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 8])
+    node = onnx.helper.make_node('Reshape', inputs=['0', '1'], outputs=['2'])
+    return ([node], [x, x_shape], [y])
+
+@onnx_test()
+def reshape_variable_input_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 2, 3])
+    x_shape = helper.make_tensor_value_info('1', TensorProto.INT64, [2])
+    y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [None, 6])
+    node = onnx.helper.make_node('Reshape', inputs=['0', '1'], outputs=['2'])
+    return ([node], [x, x_shape], [y])
+
@onnx_test()
def resize_downsample_f_test():
    scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
@@ -6367,6 +7236,101 @@ def shape_gather_test():
    return ([node_const, node_shape, node_gather], [x], [z])

+@onnx_test()
+def shrink_hard_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [5])
+    node = onnx.helper.make_node("Shrink", inputs=["x"], outputs=["y"], lambd=1.5)
+    return ([node], [x], [y])
+
+@onnx_test()
+def shrink_soft_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [5])
+    node = onnx.helper.make_node("Shrink", inputs=["x"], outputs=["y"], lambd=1.5, bias=1.5)
+    return ([node], [x], [y])
+
+@onnx_test()
+def shrink_verify_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [5])
+    node = onnx.helper.make_node("Shrink", inputs=["x"], outputs=["y"], lambd=-5.0, bias=1.0)
+    return ([node], [x], [y])
+
+@onnx_test()
+def shrink_verify2_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [5])
+    node = onnx.helper.make_node("Shrink", inputs=["x"], outputs=["y"], lambd=-6.0, bias=5.0)
+    return ([node], [x], [y])
+
+@onnx_test()
+def shrink_int8_test():
+    x = helper.make_tensor_value_info('x', TensorProto.INT8, [3, 3])
+    y = helper.make_tensor_value_info('y', TensorProto.INT8, [3, 3])
+    node = onnx.helper.make_node("Shrink", inputs=["x"], outputs=["y"], lambd=1.5, bias=1.5)
+    return ([node], [x], [y])
+
+@onnx_test()
+def shrink_uint8_test():
+    x = helper.make_tensor_value_info('x', TensorProto.UINT8, [3, 3])
+    y = helper.make_tensor_value_info('y', TensorProto.UINT8, [3, 3])
+    node = onnx.helper.make_node("Shrink", inputs=["x"], outputs=["y"], lambd=5.0, bias=-4.5)
+    return ([node], [x], [y])
+
@onnx_test()
def sign_test():
    x = helper.make_tensor_value_info('x', TensorProto.DOUBLE, [10, 5])
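For reading the shrink_* cases above: ONNX Shrink maps x to x - bias where x > lambd, to x + bias where x < -lambd, and to 0 otherwise, so the lambd/bias pairs chosen here (including negative values) pick out the different branches. A one-line NumPy sketch:

import numpy as np


def shrink_reference(x, lambd=0.5, bias=0.0):
    return np.where(x > lambd, x - bias, np.where(x < -lambd, x + bias, 0))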
@@ -7639,7 +8603,7 @@ def transpose_gather_test():
@onnx_test()
-def trilu_test():
+def triu_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
@@ -7652,7 +8616,7 @@ def trilu_test():
@onnx_test()
-def trilu_batch_diff_k_test():
+def triu_batch_diff_k_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 2, 3])
    k = np.array([2])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 3])
@@ -7670,7 +8634,24 @@ def trilu_batch_diff_k_test():
@onnx_test()
-def trilu_lower_test():
+def tril_batch_diff_k_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 2, 3])
+    k = np.array([2])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 3])
+    k_tensor = helper.make_tensor(name='k', data_type=TensorProto.INT64,
+                                  dims=k.shape, vals=k.astype(np.int64))
+    node = onnx.helper.make_node('Trilu', inputs=['x', 'k'], outputs=['y'], upper=0)
+    return ([node], [x], [y], [k_tensor])
+
+@onnx_test()
+def tril_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
@@ -7679,7 +8660,7 @@ def trilu_lower_test():
@onnx_test()
-def trilu_neg_k_test():
+def triu_neg_k_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
    k = np.array([-1])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
@@ -7693,7 +8674,23 @@ def trilu_neg_k_test():
@onnx_test()
-def trilu_out_k_test():
+def tril_neg_k_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
+    k = np.array([-1])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
+    k_tensor = helper.make_tensor(name='k', data_type=TensorProto.INT64,
+                                  dims=k.shape, vals=k.astype(np.int64))
+    node = onnx.helper.make_node('Trilu', inputs=['x', 'k'], outputs=['y'], upper=0)
+    return ([node], [x], [y], [k_tensor])
+
+@onnx_test()
+def triu_out_k_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
    k = np.array([5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
@@ -7707,7 +8704,23 @@ def trilu_out_k_test():
@onnx_test()
-def trilu_row_one_test():
+def tril_out_k_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
+    k = np.array([5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
+    k_tensor = helper.make_tensor(name='k', data_type=TensorProto.INT64,
+                                  dims=k.shape, vals=k.astype(np.int64))
+    node = onnx.helper.make_node('Trilu', inputs=['x', 'k'], outputs=['y'], upper=0)
+    return ([node], [x], [y], [k_tensor])
+
+@onnx_test()
+def triu_row_one_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4])
    k = np.array([1])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4])
@@ -7724,6 +8737,23 @@ def trilu_row_one_test():
    return ([node], [x], [y], [k_tensor])

+@onnx_test()
+def tril_row_one_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4])
+    k = np.array([1])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4])
+    k_tensor = helper.make_tensor(name='k', data_type=TensorProto.INT64,
+                                  dims=k.shape, vals=k.astype(np.int64))
+    node = onnx.helper.make_node('Trilu', inputs=['x', 'k'], outputs=['y'], upper=0)
+    return ([node], [x], [y], [k_tensor])
+
@onnx_test()
def undefined_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
The remaining 19 changed files are newly added binary ONNX models (new files, mode 100644), one per test case added in gen_onnx.py above. Each holds a small serialized graph with a single GroupNormalization or LayerNormalization node (typically inputs x, scale, bias and output y, with the num_groups/epsilon or axis attributes matching the generator functions); the *_half variants use float16 tensors. The binary protobuf content is not shown:

test/onnx/group_norm_3d_half_test.onnx
test/onnx/group_norm_3d_test.onnx
test/onnx/group_norm_4d_half_test.onnx
test/onnx/group_norm_4d_test.onnx
test/onnx/group_norm_5d_half_test.onnx
test/onnx/group_norm_5d_test.onnx
test/onnx/group_norm_invalid_bias_shape_test.onnx
test/onnx/group_norm_invalid_input_count_error_test.onnx
test/onnx/group_norm_invalid_input_shape_error_test.onnx
test/onnx/group_norm_invalid_num_groups_error_test.onnx
test/onnx/group_norm_invalid_scale_shape_test.onnx
test/onnx/group_norm_missing_attribute_error_test.onnx
test/onnx/group_norm_small_eps_half_test.onnx
test/onnx/layer_norm_2d_axis_minus_one_test.onnx
test/onnx/layer_norm_2d_axis_one_test.onnx
test/onnx/layer_norm_2d_axis_zero_test.onnx
test/onnx/layer_norm_3d_half_test.onnx
test/onnx/layer_norm_3d_test.onnx
test/onnx/layer_norm_4d_half_test.onnx