GitLab: gaoqiong / MIGraphX · Commit 6711780a

Merge branch 'develop' into uif2-initial

Authored Oct 24, 2023 by Artur Wojcik
Parents: c0563b9e, d1abf06f
Changes: showing 20 of 167 changed files, with 1277 additions and 98 deletions (+1277 -98)
src/targets/gpu/kernels/include/migraphx/kernels/index.hpp   +33 -30
src/targets/gpu/prefuse_ops.cpp                              +59 -2
src/targets/gpu/time_op.cpp                                  +9 -12
test/eliminate_allocation_test.cpp                           +1 -1
test/eliminate_concat_test.cpp                               +2 -2
test/gpu/fuse_mlir.cpp                                       +3 -3
test/memory_coloring_test.cpp                                +1 -1
test/normalize_ops_test.cpp                                  +1 -1
test/onnx/.onnxrt-commit                                     +1 -1
test/onnx/argmax_select_last_index_test.onnx                 +0 -0
test/onnx/argmin_select_last_index_test.onnx                 +0 -0
test/onnx/gen_onnx.py                                        +941 -45
test/onnx/group_norm_3d_half_test.onnx                       +30 -0
test/onnx/group_norm_3d_test.onnx                            +25 -0
test/onnx/group_norm_4d_half_test.onnx                       +32 -0
test/onnx/group_norm_4d_test.onnx                            +27 -0
test/onnx/group_norm_5d_half_test.onnx                       +34 -0
test/onnx/group_norm_5d_test.onnx                            +29 -0
test/onnx/group_norm_invalid_bias_shape_test.onnx            +27 -0
test/onnx/group_norm_invalid_input_count_error_test.onnx     +22 -0
src/targets/gpu/kernels/include/migraphx/kernels/index.hpp
@@ -31,6 +31,14 @@
 #include <migraphx/kernels/debug.hpp>
 #include <migraphx/kernels/functional.hpp>

+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wreserved-identifier"
+extern "C" __device__ size_t __ockl_get_enqueued_local_size(uint); // NOLINT
+extern "C" __device__ size_t __ockl_get_local_size(uint);          // NOLINT
+#pragma clang diagnostic pop
+#endif
+
 namespace migraphx {

 #if defined(MIGRAPHX_NGLOBAL) && defined(MIGRAPHX_NLOCAL)
@@ -45,43 +53,37 @@ inline __device__ __attribute__((const)) index_int compute_global_size()
+    // This actually works even when global is not divisible by local size.
+    // This doesn't actually do a multiplication. Instead it calls a device
+    // function to get the global size, which is why it works.
     return blockDim.x * gridDim.x; // NOLINT
 #endif
 }

 // We can't just use blockDim.x to get the local size since it's broken on hip
 // when global is not divisible by local size. In this case, we calculate the
 // size for the last group.
+#ifdef MIGRAPHX_NGROUP
+// If global is divisible by local then local can be a const
+#if(MIGRAPHX_NGLOBAL % MIGRAPHX_NLOCAL == 0) || (MIGRAPHX_NGROUP == 1)
+#define MIGRAPHX_HAS_CONST_LOCAL 1
+#endif
+#endif
 inline __device__ __attribute__((const)) index_int compute_local_size()
 {
-#ifdef MIGRAPHX_NLOCAL
-    const auto nlocal = MIGRAPHX_NLOCAL;
-#else
-    const auto nlocal = blockDim.x; // NOLINT
-#endif
-#ifdef MIGRAPHX_NGROUP
-    const auto ngroup = MIGRAPHX_NGROUP;
+#ifdef MIGRAPHX_HAS_CONST_LOCAL
+    return MIGRAPHX_NLOCAL;
 #else
-    const auto ngroup = gridDim.x; // NOLINT
+    // Returns block size. For the non-uniform block it returns the size of the non-uniform block.
+    return __ockl_get_local_size(0); // NOLINT
 #endif
-    const auto group_id = blockIdx.x; // NOLINT
-    const auto nglobal  = compute_global_size();
-    if(group_id == ngroup - 1)
-    {
-        return 1 + (nglobal - 1) % nlocal;
-    }
-    else
-    {
-        return nlocal; // NOLINT
-    }
 }

+inline __device__ __attribute__((const)) index_int compute_max_local_size()
+{
+#ifdef MIGRAPHX_NLOCAL
+    return MIGRAPHX_NLOCAL;
+#else
+    // Returns the block size. When the workgroup has a non-uniform block, this returns the size
+    // of the uniform block.
+    return __ockl_get_enqueued_local_size(0); // NOLINT
+#endif
+}
+
 struct index
 {
@@ -126,8 +128,8 @@ struct index
 #else
     __device__ index_int max_nlocal() const
     {
-        MIGRAPHX_ASSERT(blockDim.x > 0);
-        return blockDim.x;
+        MIGRAPHX_ASSERT(compute_max_local_size() > 0);
+        return compute_max_local_size();
     }
 #endif
@@ -249,7 +251,8 @@ struct index
 #endif

 inline __device__ __attribute__((const)) index make_index()
 {
-    return index{blockIdx.x * blockDim.x + threadIdx.x, threadIdx.x, blockIdx.x}; // NOLINT
+    return index{blockIdx.x * compute_max_local_size() + threadIdx.x,
+                 threadIdx.x,
+                 blockIdx.x}; // NOLINT
 }

 } // namespace migraphx
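For intuition, the branch removed from compute_local_size() derived the trailing workgroup's size arithmetically from the global size, local size, and group id; the new code asks the runtime via __ockl_get_local_size instead. A minimal Python sketch of that arithmetic (illustrative only, not part of the commit):

```python
# Sketch of the old compute_local_size() arithmetic: every workgroup is
# full except possibly the last, which holds the remainder of the global
# size. Names here are illustrative, not from the source.
def local_size(group_id, nglobal, nlocal, ngroup):
    if group_id == ngroup - 1:
        return 1 + (nglobal - 1) % nlocal  # size of the non-uniform last block
    return nlocal

# nglobal=1000 split into nlocal=256 gives groups of 256, 256, 256, 232.
assert [local_size(g, 1000, 256, 4) for g in range(4)] == [256, 256, 256, 232]
```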
src/targets/gpu/prefuse_ops.cpp
@@ -24,9 +24,8 @@
 #include <migraphx/permutation.hpp>
 #include <migraphx/gpu/prefuse_ops.hpp>
 #if !defined(_MSC_VER)
+#include <migraphx/gpu/gemm_softmax_gemm.hpp>
 #include <migraphx/match/layernorm.hpp>
-#include <migraphx/check_shapes.hpp>
-#include <migraphx/make_op.hpp>
 #include <migraphx/register_op.hpp>
 #endif
 #include <migraphx/pass_manager.hpp>
@@ -126,6 +125,60 @@ struct find_add_layernorm
         m.replace_instruction(ins, add_layernorm{op.epsilon}, add_ins->inputs());
     }
 };

+struct pre_gemm_softmax_gemm : gemm_softmax_gemm
+{
+    std::string name() const { return "gpu::pre_gemm_softmax_gemm"; }
+};
+MIGRAPHX_REGISTER_OP(pre_gemm_softmax_gemm);
+
+MIGRAPHX_PRED_MATCHER(is_ck_gemm, instruction_ref ins)
+{
+    if(ins->name() != "dot")
+        return false;
+    if(not pre_gemm_softmax_gemm::is_ck_supported_type(ins->get_shape().type()))
+        return false;
+    return true;
+}
+
+struct find_gemm_softmax_gemm
+{
+    auto matcher() const
+    {
+        auto gemm1 =
+            match::skip(match::name("contiguous"))(match::name("dot")(is_ck_gemm().bind("gemm1")));
+        auto mul = match::name("mul")(
+            match::nargs(2), match::either_arg(0, 1)(match::is_constant().bind("scale"), gemm1));
+        auto softmax = match::name("softmax")(match::arg(0)(mul)).bind("softmax");
+
+        return match::name("dot")(is_ck_gemm().bind("gemm2"))(match::arg(0)(softmax));
+    }
+
+    void apply(module_pass_manager& mpm, const match::matcher_result& r) const
+    {
+        auto ins       = r.result;
+        auto gemm2_ins = r.instructions["gemm2"];
+        auto gemm1_ins = r.instructions["gemm1"];
+        auto scale_lit = r.instructions["scale"];
+
+        float scale = 1.0;
+        scale_lit->eval().visit([&](const auto s) {
+            // CK only supports single-valued scale
+            if(std::all_of(
+                   s.begin() + 1, s.end(), [&](auto v) { return float_equal(v, s.front()); }))
+                scale = s.front();
+            else
+                return;
+        });
+
+        auto inputs = gemm1_ins->inputs();            // A, B
+        inputs.push_back(gemm2_ins->inputs().back()); // B1
+
+        mpm.get_module().replace_instruction(
+            ins, pre_gemm_softmax_gemm{gemm2_ins->get_operator(), scale}, inputs);
+    }
+};
+
 } // namespace
 #endif
@@ -135,6 +188,10 @@ void prefuse_ops::apply(module_pass_manager& mpm) const
     match::find_matches(mpm.get_module(), find_layernorm{});
     mpm.run_pass(dead_code_elimination{});
     match::find_matches(mpm.get_module(), find_add_layernorm{});
+    if(enabled(MIGRAPHX_ENABLE_CK{}))
+        match::find_matches(mpm, find_gemm_softmax_gemm{});
+#else
+    (void)mpm;
 #endif
 }
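The new find_gemm_softmax_gemm matcher captures a dot → constant-scale → softmax → dot chain and replaces it with a single pre_gemm_softmax_gemm op. A NumPy sketch of the fused computation (illustrative only, not part of the commit), assuming the single-valued scale the matcher requires:

```python
# NumPy sketch of gemm -> softmax -> gemm: a scaled first GEMM, a softmax
# over the last axis, then a second GEMM against B1.
import numpy as np

def gemm_softmax_gemm(a, b, b1, scale=1.0):
    x = scale * (a @ b)                       # first GEMM with a constant scale
    e = np.exp(x - x.max(axis=-1, keepdims=True))
    s = e / e.sum(axis=-1, keepdims=True)     # softmax over the last axis
    return s @ b1                             # second GEMM

a, b, b1 = np.ones((4, 8)), np.ones((8, 4)), np.ones((4, 8))
print(gemm_softmax_gemm(a, b, b1, scale=0.125).shape)  # (4, 8)
```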
src/targets/gpu/time_op.cpp
@@ -41,8 +41,7 @@ std::vector<argument> generate_arguments(const std::vector<shape>& shapes, unsigned
 }

-using milliseconds = std::chrono::duration<double, std::milli>;
-std::pair<double, double>
-time_op(context& ictx, operation op, const std::vector<shape>& inputs, int n)
+double time_op(context& ictx, operation op, const std::vector<shape>& inputs, int n)
 {
     // TODO: Use std::ref
@@ -51,21 +50,19 @@ time_op(context& ictx, operation op, const std::vector<shape>& inputs, int n)
     auto output = op.compute_shape(inputs);
     op.finalize(ctx, output, inputs);
     auto args = generate_arguments(inputs);
-    auto run  = [&] {
-        op.compute(ctx, output, args);
-        ctx.finish();
-    };
-    gctx.enable_perf_measurement();
+    auto start = context::create_event_for_timing();
+    auto stop  = context::create_event_for_timing();
+    auto run   = [&] { op.compute(ctx, output, args); };
     run();
-    double host_time   = 0.0;
-    double device_time = 0.0;
+    gctx.get_stream().record(start.get());
     for(auto i : range(n))
     {
         (void)i;
-        host_time += time<milliseconds>(run);
-        device_time += gctx.get_elapsed_ms();
+        run();
     }
-    return std::make_pair(host_time / n, device_time / n);
+    gctx.get_stream().record(stop.get());
+    gctx.finish();
+    return context::get_elapsed_ms(start.get(), stop.get()) / n;
 }

 } // namespace gpu
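time_op now brackets n back-to-back launches between two stream events and returns a single averaged figure, instead of the old per-iteration host/device pair. A rough host-side Python analogue (illustrative only; the real code records HIP events on the GPU stream):

```python
# One warm-up run, then a single start/stop pair around n launches,
# averaged. perf_counter stands in for the stream-event record calls.
import time

def time_op_average_ms(run, n):
    run()                         # warm-up, excluded from the measurement
    start = time.perf_counter()   # stands in for record(start)
    for _ in range(n):
        run()
    stop = time.perf_counter()    # stands in for record(stop) + finish()
    return (stop - start) * 1e3 / n
```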
test/eliminate_allocation_test.cpp
@@ -55,7 +55,7 @@ struct allocate
                                   const migraphx::shape& output_shape,
                                   const std::vector<migraphx::argument>&) const
     {
-        return {output_shape};
+        return migraphx::argument{output_shape};
     }
 };
test/eliminate_concat_test.cpp
@@ -60,7 +60,7 @@ struct concat
                                   const migraphx::shape& output_shape,
                                   const std::vector<migraphx::argument>&) const
     {
-        return {output_shape};
+        return migraphx::argument{output_shape};
     }
 };
@@ -104,7 +104,7 @@ struct allocate
                                   const migraphx::shape& output_shape,
                                   const std::vector<migraphx::argument>&) const
     {
-        return {output_shape};
+        return migraphx::argument{output_shape};
     }
 };
test/gpu/fuse_mlir.cpp
@@ -34,7 +34,8 @@
 void run_pass(migraphx::program& p)
 {
-    migraphx::run_passes(p, {migraphx::gpu::fuse_mlir{}, migraphx::dead_code_elimination{}});
+    migraphx::run_passes(
+        p, {migraphx::gpu::fuse_mlir{.enable_extra = true}, migraphx::dead_code_elimination{}});
 }

 template <class F>
@@ -151,7 +152,6 @@ TEST_CASE(int_quant_dot_tanh_fails)
 int main(int argc, const char* argv[])
 {
-    if(migraphx::gpu::mlir_enabled())
-        test::run(argc, argv);
+    test::run(argc, argv);
     return 0;
 }
test/memory_coloring_test.cpp
@@ -55,7 +55,7 @@ struct allocate
                                   const migraphx::shape& output_shape,
                                   const std::vector<migraphx::argument>&) const
     {
-        return {output_shape};
+        return migraphx::argument{output_shape};
     }
 };
test/normalize_ops_test.cpp
@@ -57,7 +57,7 @@ struct normalize_test_op
                                   const migraphx::shape& output_shape,
                                   const std::vector<migraphx::argument>&) const
     {
-        return {output_shape};
+        return migraphx::argument{output_shape};
     }
 };
test/onnx/.onnxrt-commit

-6d7bc2a097a1a08541cd0d4628831c79ab8092d5
+635d3faa3b3908d2806d009dc6872152cfcfcdda
test/onnx/argmax_select_last_index_test.onnx (new file, mode 100644) — binary file added
test/onnx/argmin_select_last_index_test.onnx (new file, mode 100644) — binary file added
test/onnx/gen_onnx.py
@@ -149,6 +149,21 @@ def argmax_test():
     return ([node], [x], [y])


+@onnx_test()
+def argmax_select_last_index_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])
+
+    node = onnx.helper.make_node('ArgMax',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 axis=2,
+                                 keepdims=0,
+                                 select_last_index=1)
+
+    return ([node], [x], [y])
+
+
 @onnx_test()
 def argmax_dyn_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 4, 5, 6])
@@ -177,6 +192,21 @@ def argmin_test():
     return ([node], [x], [y])


+@onnx_test()
+def argmin_select_last_index_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 5])
+
+    node = onnx.helper.make_node('ArgMin',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 axis=3,
+                                 keepdims=0,
+                                 select_last_index=1)
+
+    return ([node], [x], [y])
+
+
 @onnx_test()
 def asin_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
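Both new tests set select_last_index=1, which changes how ties are broken. A NumPy sketch of the semantics (illustrative only, not part of the commit):

```python
# With select_last_index=1, ties resolve to the highest index instead of
# the lowest; flipping the axis and adjusting the result reproduces this.
import numpy as np

def argmax_select_last_index(x, axis):
    n = x.shape[axis]
    return n - 1 - np.argmax(np.flip(x, axis=axis), axis=axis)

x = np.array([1, 3, 3, 2])
print(np.argmax(x), argmax_select_last_index(x, 0))  # 1 2
```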
@@ -2722,6 +2752,119 @@ def group_conv_test():
     return ([node], [x, y], [z])


+def group_norm_test(x_dims,
+                    scale_dims,
+                    bias_dims,
+                    y_dims,
+                    num_groups,
+                    eps_value=1e-5,
+                    dtype=TensorProto.FLOAT):
+    x = helper.make_tensor_value_info('x', dtype, x_dims)
+    scale = helper.make_tensor_value_info('scale', dtype, scale_dims)
+    bias = helper.make_tensor_value_info('bias', dtype, bias_dims)
+    y = helper.make_tensor_value_info('y', dtype, y_dims)
+
+    node = onnx.helper.make_node('GroupNormalization',
+                                 inputs=['x', 'scale', 'bias'],
+                                 outputs=['y'],
+                                 num_groups=num_groups,
+                                 epsilon=eps_value)
+
+    return ([node], [x, scale, bias], [y])
+
+
+@onnx_test()
+def group_norm_3d_test():
+    return group_norm_test([1, 4, 2], [2], [2], [1, 4, 2], 2)
+
+
+@onnx_test()
+def group_norm_3d_half_test():
+    return group_norm_test([1, 4, 2], [2], [2], [1, 4, 2],
+                           2,
+                           dtype=TensorProto.FLOAT16)
+
+
+@onnx_test()
+def group_norm_4d_test():
+    return group_norm_test([1, 4, 3, 3], [2], [2], [1, 4, 3, 3], 2)
+
+
+@onnx_test()
+def group_norm_4d_half_test():
+    return group_norm_test([1, 4, 3, 3], [2], [2], [1, 4, 3, 3],
+                           2,
+                           dtype=TensorProto.FLOAT16)
+
+
+@onnx_test()
+def group_norm_5d_test():
+    return group_norm_test([3, 3, 3, 3, 3], [1], [1], [3, 3, 3, 3, 3], 1)
+
+
+@onnx_test()
+def group_norm_5d_half_test():
+    return group_norm_test([3, 3, 3, 3, 3], [1], [1], [3, 3, 3, 3, 3],
+                           1,
+                           dtype=TensorProto.FLOAT16)
+
+
+@onnx_test()
+def group_norm_small_eps_half_test():
+    return group_norm_test([1, 4, 2], [2], [2], [1, 4, 2],
+                           2,
+                           eps_value=1e-12,
+                           dtype=TensorProto.FLOAT16)
+
+
+@onnx_test()
+def group_norm_invalid_num_groups_error_test():
+    return group_norm_test([1, 4, 3, 3], [2], [2], [1, 4, 3, 3], 3)
+
+
+@onnx_test()
+def group_norm_missing_attribute_error_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4])
+    scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [2])
+    bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [2])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4])
+
+    node = onnx.helper.make_node('GroupNormalization',
+                                 inputs=['x', 'scale', 'bias'],
+                                 outputs=['y'])
+
+    return ([node], [x, scale, bias], [y])
+
+
+@onnx_test()
+def group_norm_invalid_input_count_error_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4, 3, 3])
+    scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [2])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4, 3, 3])
+
+    node = onnx.helper.make_node('GroupNormalization',
+                                 inputs=['x', 'scale'],
+                                 outputs=['y'],
+                                 num_groups=2)
+
+    return ([node], [x, scale], [y])
+
+
+@onnx_test()
+def group_norm_invalid_input_shape_error_test():
+    return group_norm_test([1, 4], [2], [2], [1, 4], 2)
+
+
+@onnx_test()
+def group_norm_invalid_scale_shape_test():
+    return group_norm_test([1, 4, 3, 3], [1], [2], [1, 4, 3, 3], 2)
+
+
+@onnx_test()
+def group_norm_invalid_bias_shape_test():
+    return group_norm_test([1, 4, 3, 3], [2], [3], [1, 4, 3, 3], 2)
+
+
 @onnx_test()
 def hardsigmoid_default_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 3, 4, 5])
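The group_norm_test helper emits GroupNormalization nodes whose scale and bias carry one value per group, matching the [2]-shaped inputs used with num_groups=2 above. A NumPy reference sketch of what the valid models compute (illustrative only, not part of the commit):

```python
# Reference GroupNormalization: normalize within each channel group, then
# apply a per-group scale and bias.
import numpy as np

def group_norm(x, scale, bias, num_groups, eps=1e-5):
    n = x.shape[0]
    g = x.reshape(n, num_groups, -1)        # split channels into groups
    mean = g.mean(axis=-1, keepdims=True)
    var = g.var(axis=-1, keepdims=True)
    g = (g - mean) / np.sqrt(var + eps)     # normalize within each group
    g = g * scale.reshape(1, -1, 1) + bias.reshape(1, -1, 1)
    return g.reshape(x.shape)
```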
@@ -3804,6 +3947,110 @@ def layernorm_test():
         bias_add], [x, scale, bias], [y], [pow_tensor, epsilon_tensor])


+def make_layer_norm(shape, axis, dtype=TensorProto.FLOAT):
+    norm_axis = axis + len(shape) if axis < 0 else axis
+    x = helper.make_tensor_value_info('x', dtype, shape)
+    scale = helper.make_tensor_value_info('scale', dtype, shape[norm_axis:])
+    bias = helper.make_tensor_value_info('bias', dtype, shape[norm_axis:])
+    y = helper.make_tensor_value_info('y', dtype, shape)
+
+    node = onnx.helper.make_node('LayerNormalization',
+                                 inputs=['x', 'scale', 'bias'],
+                                 outputs=['y'],
+                                 axis=axis)
+
+    return ([node], [x, scale, bias], [y])
+
+
+@onnx_test()
+def layer_norm_invalid_shape_error_test():
+    return make_layer_norm([3], 0)
+
+
+@onnx_test()
+def layer_norm_2d_axis_zero_test():
+    return make_layer_norm([3, 4], 0)
+
+
+@onnx_test()
+def layer_norm_2d_axis_one_test():
+    return make_layer_norm([3, 4], 1)
+
+
+@onnx_test()
+def layer_norm_2d_axis_minus_one_test():
+    return make_layer_norm([3, 4], -1)
+
+
+@onnx_test()
+def layer_norm_3d_test():
+    return make_layer_norm([1, 4, 2], -1)
+
+
+@onnx_test()
+def layer_norm_3d_half_test():
+    return make_layer_norm([1, 4, 2], -1, TensorProto.FLOAT16)
+
+
+@onnx_test()
+def layer_norm_4d_test():
+    return make_layer_norm([3, 3, 3, 3], -1)
+
+
+@onnx_test()
+def layer_norm_4d_half_test():
+    return make_layer_norm([3, 3, 3, 3], -1, TensorProto.FLOAT16)
+
+
+@onnx_test()
+def layer_norm_invalid_axis_error_test():
+    return make_layer_norm([1, 4, 2], 1000)
+
+
+@onnx_test()
+def layer_norm_invalid_minus_axis_error_test():
+    return make_layer_norm([1, 4, 2], -1000)
+
+
+@onnx_test()
+def layer_norm_invalid_input_count_error_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 2])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2])
+
+    node = onnx.helper.make_node('LayerNormalization',
+                                 inputs=['x'],
+                                 outputs=['y'])
+
+    return ([node], [x], [y])
+
+
+@onnx_test()
+def layer_norm_without_bias_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 2])
+    scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [2])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2])
+
+    node = onnx.helper.make_node('LayerNormalization',
+                                 inputs=['x', 'scale'],
+                                 outputs=['y'])
+
+    return ([node], [x, scale], [y])
+
+
+@onnx_test()
+def layer_norm_small_eps_half_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [1, 2])
+    scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT16, [2])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [1, 2])
+
+    node = onnx.helper.make_node('LayerNormalization',
+                                 inputs=['x', 'scale'],
+                                 outputs=['y'],
+                                 epsilon=1e-12)
+
+    return ([node], [x, scale], [y])
+
+
 @onnx_test()
 def leaky_relu_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
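make_layer_norm normalizes over the axes from `axis` through the last one, with scale and bias shaped like the normalized suffix of the input. A NumPy reference sketch (illustrative only, not part of the commit):

```python
# Reference LayerNormalization: reduce over the trailing axes selected by
# `axis`, then apply scale and bias, which broadcast over the leading axes.
import numpy as np

def layer_norm(x, scale, bias, axis=-1, eps=1e-5):
    axes = tuple(range(axis % x.ndim, x.ndim))
    mean = x.mean(axis=axes, keepdims=True)
    var = x.var(axis=axes, keepdims=True)
    return (x - mean) / np.sqrt(var + eps) * scale + bias
```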
@@ -4464,6 +4711,77 @@ def mean_integral_test():
     return ([node], data, [mean])


+def mvn_default_axes_test_base(dims, type=TensorProto.FLOAT):
+    data = helper.make_tensor_value_info("data", type, dims)
+    out = helper.make_tensor_value_info("out", type, dims)
+    node = helper.make_node("MeanVarianceNormalization",
+                            inputs=["data"],
+                            outputs=["out"])
+    return ([node], [data], [out])
+
+
+@onnx_test()
+def mvn_default_axes_test():
+    return mvn_default_axes_test_base([2, 2, 2, 2])
+
+
+@onnx_test()
+def mvn_default_axes_fp16_test():
+    return mvn_default_axes_test_base([2, 2, 2, 2], TensorProto.FLOAT16)
+
+
+@onnx_test()
+def mvn_default_axes_rank_too_small_test():
+    return mvn_default_axes_test_base([2, 2, 2])
+
+
+@onnx_test()
+def mvn_default_axes_rank_too_big_test():
+    return mvn_default_axes_test_base([2, 2, 2, 2, 2])
+
+
+def mvn_n_rank_test_base(axes, dims, type=TensorProto.FLOAT):
+    data = helper.make_tensor_value_info("data", type, dims)
+    out = helper.make_tensor_value_info("out", type, dims)
+    node = helper.make_node("MeanVarianceNormalization",
+                            inputs=["data"],
+                            outputs=["out"],
+                            axes=axes)
+    return ([node], [data], [out])
+
+
+@onnx_test()
+def mvn_rank_2_test():
+    return mvn_n_rank_test_base([1], [2, 2])
+
+
+@onnx_test()
+def mvn_rank_2_fp16_test():
+    return mvn_n_rank_test_base([1], [2, 2], TensorProto.FLOAT16)
+
+
+@onnx_test()
+def mvn_rank_3_test():
+    return mvn_n_rank_test_base([0, 1], [2, 2, 2])
+
+
+@onnx_test()
+def mvn_rank_3_fp16_test():
+    return mvn_n_rank_test_base([0, 1], [2, 2, 2], TensorProto.FLOAT16)
+
+
+@onnx_test()
+def mvn_axes_rank_too_small_test():
+    return mvn_n_rank_test_base([0, 1, 2], [2, 2, 2])
+
+
+@onnx_test()
+def mvn_axes_rank_too_big_test():
+    return mvn_n_rank_test_base([0], [2, 2, 2])
+
+
 @onnx_test()
 def min_test():
     a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
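When no axes attribute is given, ONNX MeanVarianceNormalization defaults to axes [0, 2, 3], which assumes a rank-4 input; that is what makes the default-axes rank_too_small and rank_too_big cases above error tests. A NumPy sketch (illustrative only, not part of the commit):

```python
# Reference MVN: subtract the mean and divide by the standard deviation
# over the chosen axes.
import numpy as np

def mvn(x, axes=(0, 2, 3)):
    mean = x.mean(axis=axes, keepdims=True)
    std = x.std(axis=axes, keepdims=True)
    return (x - mean) / std
```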
@@ -4890,6 +5208,32 @@ def pad_test():
     return ([node], [x], [y])


+@onnx_test()
+def pad_asym_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 4, 5])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 6, 4, 12])
+    node = onnx.helper.make_node('Pad',
+                                 inputs=['0'],
+                                 pads=[0, 1, 0, 3, 0, 2, 0, 4],
+                                 outputs=['1'])
+    return ([node], [x], [y])
+
+
+@onnx_test()
+def pad_asym_invalid_pads_error_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 4, 5])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 6, 4, 12])
+    node = onnx.helper.make_node('Pad',
+                                 inputs=['0'],
+                                 pads=[0, 1, 0, 3, 0, 2],
+                                 outputs=['1'])
+    return ([node], [x], [y])
+
+
 @onnx_test()
 def pad_3arg_test():
     values = np.array([1])
@@ -4923,11 +5267,18 @@ def pad_3arg_test():

 @onnx_test()
-def pad_reflect_test():
-    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2])
-    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 5])
+def pad_4arg_axes_test():
+    values = np.array([1])
+    val_tensor = helper.make_tensor(name='val',
+                                    data_type=TensorProto.FLOAT,
+                                    dims=values.reshape(()).shape,
+                                    vals=values.astype(float))
+    arg_val = onnx.helper.make_node('Constant',
+                                    inputs=[],
+                                    outputs=['arg_val'],
+                                    value=val_tensor)

-    sizes = np.array([0, 2, 0, 1])
+    sizes = np.array([1, 3, 2, 4])
     pad_tensor = helper.make_tensor(name='pad_size',
                                     data_type=TensorProto.INT32,
                                     dims=sizes.shape,
@@ -4937,20 +5288,38 @@ def pad_reflect_test():
                                     outputs=['arg_pad'],
                                     value=pad_tensor)
-    node = onnx.helper.make_node('Pad',
-                                 mode='reflect',
-                                 inputs=['0', 'arg_pad'],
-                                 outputs=['1'])

-    return ([arg_pad, node], [x], [y])
+    axes = np.array([1, 3])
+    axes_tensor = helper.make_tensor(name='pad_axes',
+                                     data_type=TensorProto.INT32,
+                                     dims=axes.shape,
+                                     vals=axes.astype(int))
+    arg_axes = onnx.helper.make_node('Constant',
+                                     inputs=[],
+                                     outputs=['arg_axes'],
+                                     value=axes_tensor)
+
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 4, 5])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 6, 4, 12])
+    node = onnx.helper.make_node('Pad',
+                                 inputs=['0', 'arg_pad', 'arg_val', 'arg_axes'],
+                                 outputs=['1'])
+
+    return ([arg_axes, arg_val, arg_pad, node], [x], [y])


 @onnx_test()
-def pad_reflect_multiaxis_test():
-    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3])
-    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 5])
+def pad_4arg_invalid_axes_error_test():
+    values = np.array([1])
+    val_tensor = helper.make_tensor(name='val',
+                                    data_type=TensorProto.FLOAT,
+                                    dims=values.reshape(()).shape,
+                                    vals=values.astype(float))
+    arg_val = onnx.helper.make_node('Constant',
+                                    inputs=[],
+                                    outputs=['arg_val'],
+                                    value=val_tensor)

-    sizes = np.array([0, 2, 2, 0])
+    sizes = np.array([1, 3, 2, 4])
     pad_tensor = helper.make_tensor(name='pad_size',
                                     data_type=TensorProto.INT32,
                                     dims=sizes.shape,
@@ -4960,38 +5329,169 @@ def pad_reflect_multiaxis_test():
                                     outputs=['arg_pad'],
                                     value=pad_tensor)
-    node = onnx.helper.make_node('Pad',
-                                 mode='reflect',
-                                 inputs=['0', 'arg_pad'],
-                                 outputs=['1'])
-
-    return ([arg_pad, node], [x], [y])
-
-
-@onnx_test()
-def pad_attr_dyn_test():
-    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, None])
-    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, None])
-
-    node = onnx.helper.make_node('Pad',
-                                 inputs=['0'],
-                                 pads=[1, 1, 1, 1],
-                                 outputs=['1'])
-
-    return ([node], [x], [y])
-
-
-@onnx_test()
-def pad_cnst_dyn_test():
-    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, None])
-    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, None])
-
-    sizes = np.array([0, 2, 0, 1])
-    pad_tensor = helper.make_tensor(name='pad_size',
-                                    data_type=TensorProto.INT32,
-                                    dims=sizes.shape,
-                                    vals=sizes.astype(int))
-    arg_pad = onnx.helper.make_node('Constant',
+    axes = np.array([1, 2, 3])
+    axes_tensor = helper.make_tensor(name='pad_axes',
+                                     data_type=TensorProto.INT32,
+                                     dims=axes.shape,
+                                     vals=axes.astype(int))
+    arg_axes = onnx.helper.make_node('Constant',
+                                     inputs=[],
+                                     outputs=['arg_axes'],
+                                     value=axes_tensor)
+
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 4, 5])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 6, 4, 12])
+    node = onnx.helper.make_node('Pad',
+                                 inputs=['0', 'arg_pad', 'arg_val', 'arg_axes'],
+                                 outputs=['1'])
+
+    return ([arg_axes, arg_val, arg_pad, node], [x], [y])
+
+
+@onnx_test()
+def pad_4arg_neg_axes_test():
+    values = np.array([1])
+    val_tensor = helper.make_tensor(name='val',
+                                    data_type=TensorProto.FLOAT,
+                                    dims=values.reshape(()).shape,
+                                    vals=values.astype(float))
+    arg_val = onnx.helper.make_node('Constant',
+                                    inputs=[],
+                                    outputs=['arg_val'],
+                                    value=val_tensor)
+
+    sizes = np.array([1, 3, 2, 4])
+    pad_tensor = helper.make_tensor(name='pad_size',
+                                    data_type=TensorProto.INT32,
+                                    dims=sizes.shape,
+                                    vals=sizes.astype(int))
+    arg_pad = onnx.helper.make_node('Constant',
+                                    inputs=[],
+                                    outputs=['arg_pad'],
+                                    value=pad_tensor)
+
+    axes = np.array([-3, -1])
+    axes_tensor = helper.make_tensor(name='pad_axes',
+                                     data_type=TensorProto.INT32,
+                                     dims=axes.shape,
+                                     vals=axes.astype(int))
+    arg_axes = onnx.helper.make_node('Constant',
+                                     inputs=[],
+                                     outputs=['arg_axes'],
+                                     value=axes_tensor)
+
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 4, 5])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 6, 4, 12])
+    node = onnx.helper.make_node('Pad',
+                                 inputs=['0', 'arg_pad', 'arg_val', 'arg_axes'],
+                                 outputs=['1'])
+
+    return ([arg_axes, arg_val, arg_pad, node], [x], [y])
+
+
+@onnx_test()
+def pad_reflect_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 5])
+
+    sizes = np.array([0, 2, 0, 1])
+    pad_tensor = helper.make_tensor(name='pad_size',
+                                    data_type=TensorProto.INT32,
+                                    dims=sizes.shape,
+                                    vals=sizes.astype(int))
+    arg_pad = onnx.helper.make_node('Constant',
+                                    inputs=[],
+                                    outputs=['arg_pad'],
+                                    value=pad_tensor)
+
+    node = onnx.helper.make_node('Pad',
+                                 mode='reflect',
+                                 inputs=['0', 'arg_pad'],
+                                 outputs=['1'])
+
+    return ([arg_pad, node], [x], [y])
+
+
+@onnx_test()
+def pad_reflect_with_axes_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 5])
+
+    sizes = np.array([2, 1])
+    pad_tensor = helper.make_tensor(name='pad_size',
+                                    data_type=TensorProto.INT32,
+                                    dims=sizes.shape,
+                                    vals=sizes.astype(int))
+    arg_pad = onnx.helper.make_node('Constant',
+                                    inputs=[],
+                                    outputs=['arg_pad'],
+                                    value=pad_tensor)
+
+    axes = np.array([1])
+    axes_tensor = helper.make_tensor(name='pad_axes',
+                                     data_type=TensorProto.INT32,
+                                     dims=axes.shape,
+                                     vals=axes.astype(int))
+    arg_axes = onnx.helper.make_node('Constant',
+                                     inputs=[],
+                                     outputs=['arg_axes'],
+                                     value=axes_tensor)
+
+    node = onnx.helper.make_node('Pad',
+                                 mode='reflect',
+                                 inputs=['0', 'arg_pad', 'arg_axes'],
+                                 outputs=['1'])
+
+    return ([arg_axes, arg_pad, node], [x], [y])
+
+
+@onnx_test()
+def pad_reflect_multiaxis_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 5])
+
+    sizes = np.array([0, 2, 2, 0])
+    pad_tensor = helper.make_tensor(name='pad_size',
+                                    data_type=TensorProto.INT32,
+                                    dims=sizes.shape,
+                                    vals=sizes.astype(int))
+    arg_pad = onnx.helper.make_node('Constant',
+                                    inputs=[],
+                                    outputs=['arg_pad'],
+                                    value=pad_tensor)
+
+    node = onnx.helper.make_node('Pad',
+                                 mode='reflect',
+                                 inputs=['0', 'arg_pad'],
+                                 outputs=['1'])
+
+    return ([arg_pad, node], [x], [y])
+
+
+@onnx_test()
+def pad_attr_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, None])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, None])
+
+    node = onnx.helper.make_node('Pad',
+                                 inputs=['0'],
+                                 pads=[1, 1, 1, 1],
+                                 outputs=['1'])
+
+    return ([node], [x], [y])
+
+
+@onnx_test()
+def pad_cnst_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, None])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, None])
+
+    sizes = np.array([0, 2, 0, 1])
+    pad_tensor = helper.make_tensor(name='pad_size',
+                                    data_type=TensorProto.INT32,
+                                    dims=sizes.shape,
+                                    vals=sizes.astype(int))
+    arg_pad = onnx.helper.make_node('Constant',
+                                    inputs=[],
+                                    outputs=['arg_pad'],
+                                    value=pad_tensor)
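The new 4-argument Pad tests pair a short pads list with an axes input. A sketch of how such pads map onto full per-dimension pads (illustrative only, not part of the commit; expand_pads is a hypothetical helper):

```python
# Expand a short pads list given `axes` into full 2*rank pads.
# Negative axes count from the back.
def expand_pads(pads, axes, rank):
    full = [0] * (2 * rank)
    n = len(axes)
    for i, ax in enumerate(axes):
        ax %= rank
        full[ax] = pads[i]             # leading pad for this axis
        full[ax + rank] = pads[i + n]  # trailing pad for this axis
    return full

# pads=[1, 3, 2, 4] on axes [1, 3] of a rank-4 input reproduces the full
# pads list used by pad_asym_test above.
print(expand_pads([1, 3, 2, 4], [1, 3], 4))  # [0, 1, 0, 3, 0, 2, 0, 4]
```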
@@ -5151,6 +5651,223 @@ def qlinearadd_bcast_test():
         [sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])


+@onnx_test()
+def qlinearconv_test():
+    # https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__QLinearConv.html
+    x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 7, 7])
+    sc_x = helper.make_tensor('1', TensorProto.FLOAT, [], [0.00369204697])
+    zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [132])
+
+    wt = helper.make_tensor('3', TensorProto.UINT8, [1, 1, 1, 1], [0])
+    sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [], [0.00172794575])
+    zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [], [255])
+
+    sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.00162681262])
+    zero_pt_y = helper.make_tensor('7', TensorProto.UINT8, [], [123])
+
+    out = helper.make_tensor_value_info('out', TensorProto.UINT8, [1, 1, 7, 7])
+
+    node = onnx.helper.make_node(
+        'QLinearConv',
+        inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
+        outputs=['out'],
+    )
+
+    return ([node], [x], [out],
+            [sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])
+
+
+@onnx_test()
+def qlinearconv_pad_1_test():
+    # https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
+    x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 5, 5])
+    sc_x = helper.make_tensor('1', TensorProto.FLOAT, [], [0.09411764705882353])
+    zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [0])
+
+    wt = helper.make_tensor('3', TensorProto.UINT8, [1, 1, 3, 3],
+                            [1, 1, 1, 1, 1, 1, 1, 1, 1])
+    sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [], [1.0])
+    zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [], [0])
+
+    sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.6352941176470588])
+    zero_pt_y = helper.make_tensor('7', TensorProto.UINT8, [], [0])
+
+    out = helper.make_tensor_value_info('out', TensorProto.UINT8, [1, 1, 5, 5])
+
+    node = onnx.helper.make_node(
+        'QLinearConv',
+        inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
+        outputs=['out'],
+        pads=[1, 1, 1, 1],
+    )
+
+    return ([node], [x], [out],
+            [sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])
+
+
+@onnx_test()
+def qlinearconv_pad_0_test():
+    # https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
+    x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 5, 5])
+    sc_x = helper.make_tensor('1', TensorProto.FLOAT, [], [0.09411764705882353])
+    zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [0])
+
+    wt = helper.make_tensor('3', TensorProto.UINT8, [1, 1, 3, 3],
+                            [1, 1, 1, 1, 1, 1, 1, 1, 1])
+    sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [], [1.0])
+    zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [], [0])
+
+    sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.6352941176470588])
+    zero_pt_y = helper.make_tensor('7', TensorProto.INT8, [], [-128])
+
+    out = helper.make_tensor_value_info('out', TensorProto.INT8, [1, 1, 3, 3])
+
+    node = onnx.helper.make_node(
+        'QLinearConv',
+        inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
+        outputs=['out'],
+        pads=[0, 0, 0, 0],
+    )
+
+    return ([node], [x], [out],
+            [sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])
+
+
+@onnx_test()
+def qlinearconv_scale_1D_test():
+    # https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
+    x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 5, 5])
+    sc_x = helper.make_tensor('1', TensorProto.FLOAT, [], [0.09411764705882353])
+    zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [0])
+
+    wt = helper.make_tensor('3', TensorProto.UINT8, [2, 1, 3, 3],
+                            [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2])
+    sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [2], [1.0, 0.5])
+    zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [2], [0, 0])
+
+    sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.6352941176470588])
+    zero_pt_y = helper.make_tensor('7', TensorProto.INT8, [], [-128])
+
+    out = helper.make_tensor_value_info('out', TensorProto.INT8, [1, 2, 3, 3])
+
+    node = onnx.helper.make_node(
+        'QLinearConv',
+        inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
+        outputs=['out'],
+        pads=[0, 0, 0, 0],
+    )
+
+    return ([node], [x], [out],
+            [sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])
+
+
+@onnx_test()
+def qlinearglobalavgpool_test():
+    x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 3, 4, 4])
+    sc_x = helper.make_tensor('X_scale', TensorProto.FLOAT, [], [0.05])
+    z_pt_x = helper.make_tensor('X_zero_point', TensorProto.UINT8, [], [128])
+
+    y = helper.make_tensor_value_info('Y', TensorProto.UINT8, [1, 3, 1, 1])
+    sc_y = helper.make_tensor('Y_scale', TensorProto.FLOAT, [], [0.025])
+    z_pt_y = helper.make_tensor('Y_zero_point', TensorProto.UINT8, [], [64])
+
+    n = onnx.helper.make_node(
+        'QLinearGlobalAveragePool',
+        inputs=['X', 'X_scale', 'X_zero_point', 'Y_scale', 'Y_zero_point'],
+        outputs=['Y'],
+        channels_last=0,
+    )
+
+    return ([n], [x], [y], [sc_x, z_pt_x, sc_y, z_pt_y])
+
+
+@onnx_test()
+def qlinearmatmul_1D_test():
+    a = helper.make_tensor_value_info('A', TensorProto.UINT8, [8])
+    sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.05])
+    zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.UINT8, [], [0])
+
+    b = helper.make_tensor_value_info('B', TensorProto.UINT8, [8])
+    sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.05])
+    zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.UINT8, [], [128])
+
+    sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.05])
+    zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.UINT8, [], [64])
+
+    c = helper.make_tensor_value_info('C', TensorProto.UINT8, [1])
+
+    node = onnx.helper.make_node(
+        'QLinearMatMul',
+        inputs=[
+            'A', 'A_scale', 'A_zero_point', 'B', 'B_scale', 'B_zero_point',
+            'C_scale', 'C_zero_point'
+        ],
+        outputs=['C'],
+    )
+
+    return ([node], [a, b], [c],
+            [sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
+
+
+@onnx_test()
+def qlinearmatmul_2D_test():
+    a = helper.make_tensor_value_info('A', TensorProto.UINT8, [1, 8])
+    sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.05])
+    zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.UINT8, [], [0])
+
+    b = helper.make_tensor_value_info('B', TensorProto.UINT8, [8, 1])
+    sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.05])
+    zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.UINT8, [], [128])
+
+    sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.05])
+    zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.UINT8, [], [64])
+
+    c = helper.make_tensor_value_info('C', TensorProto.UINT8, [1, 1])
+
+    node = onnx.helper.make_node(
+        'QLinearMatMul',
+        inputs=[
+            'A', 'A_scale', 'A_zero_point', 'B', 'B_scale', 'B_zero_point',
+            'C_scale', 'C_zero_point'
+        ],
+        outputs=['C'],
+    )
+
+    return ([node], [a, b], [c],
+            [sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
+
+
+@onnx_test()
+def qlinearmatmul_3D_test():
+    a = helper.make_tensor_value_info('A', TensorProto.UINT8, [2, 2, 4])
+    sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.0066])
+    zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.UINT8, [], [113])
+
+    b = helper.make_tensor_value_info('B', TensorProto.UINT8, [2, 4, 3])
+    sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.00705])
+    zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.UINT8, [], [114])
+
+    sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.0107])
+    zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.UINT8, [], [118])
+
+    c = helper.make_tensor_value_info('C', TensorProto.UINT8, [2, 2, 3])
+
+    node = onnx.helper.make_node(
+        'QLinearMatMul',
+        inputs=[
+            'A', 'A_scale', 'A_zero_point', 'B', 'B_scale', 'B_zero_point',
+            'C_scale', 'C_zero_point'
+        ],
+        outputs=['C'],
+    )
+
+    return ([node], [a, b], [c],
+            [sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
+
+
 @onnx_test()
 def quantizelinear_test():
     arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5])
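All QLinear* operators share the same arithmetic: dequantize the integer inputs with their scale/zero-point pairs, perform the floating-point op, then requantize with the output pair. A NumPy sketch for the matmul case with uint8 tensors (illustrative only, not part of the commit; qlinear_matmul is a hypothetical helper):

```python
# Dequantize -> real-valued matmul -> requantize, saturating to uint8.
import numpy as np

def qlinear_matmul(a, sc_a, zp_a, b, sc_b, zp_b, sc_c, zp_c):
    real = ((a.astype(np.int32) - zp_a) * sc_a) @ ((b.astype(np.int32) - zp_b) * sc_b)
    q = np.rint(real / sc_c) + zp_c
    return np.clip(q, 0, 255).astype(np.uint8)
```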
@@ -5848,6 +6565,24 @@ def reshape_non_standard_test():
     return ([trans, res], [x], [y])


+@onnx_test()
+def reshape_variable_input_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [4, 2, 3])
+    x_shape = helper.make_tensor_value_info('1', TensorProto.INT64, [2])
+    y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 8])
+    node = onnx.helper.make_node('Reshape', inputs=['0', '1'], outputs=['2'])
+    return ([node], [x, x_shape], [y])
+
+
+@onnx_test()
+def reshape_variable_input_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 2, 3])
+    x_shape = helper.make_tensor_value_info('1', TensorProto.INT64, [2])
+    y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [None, 6])
+    node = onnx.helper.make_node('Reshape', inputs=['0', '1'], outputs=['2'])
+    return ([node], [x, x_shape], [y])
+
+
 @onnx_test()
 def resize_downsample_f_test():
     scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
@@ -6501,6 +7236,101 @@ def shape_gather_test():
     return ([node_const, node_shape, node_gather], [x], [z])


+@onnx_test()
+def shrink_hard_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [5])
+    node = onnx.helper.make_node(
+        "Shrink",
+        inputs=["x"],
+        outputs=["y"],
+        lambd=1.5,
+    )
+    return ([node], [x], [y])
+
+
+@onnx_test()
+def shrink_soft_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [5])
+    node = onnx.helper.make_node(
+        "Shrink",
+        inputs=["x"],
+        outputs=["y"],
+        lambd=1.5,
+        bias=1.5,
+    )
+    return ([node], [x], [y])
+
+
+@onnx_test()
+def shrink_verify_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [5])
+    node = onnx.helper.make_node(
+        "Shrink",
+        inputs=["x"],
+        outputs=["y"],
+        lambd=-5.0,
+        bias=1.0,
+    )
+    return ([node], [x], [y])
+
+
+@onnx_test()
+def shrink_verify2_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [5])
+    node = onnx.helper.make_node(
+        "Shrink",
+        inputs=["x"],
+        outputs=["y"],
+        lambd=-6.0,
+        bias=5.0,
+    )
+    return ([node], [x], [y])
+
+
+@onnx_test()
+def shrink_int8_test():
+    x = helper.make_tensor_value_info('x', TensorProto.INT8, [3, 3])
+    y = helper.make_tensor_value_info('y', TensorProto.INT8, [3, 3])
+    node = onnx.helper.make_node(
+        "Shrink",
+        inputs=["x"],
+        outputs=["y"],
+        lambd=1.5,
+        bias=1.5,
+    )
+    return ([node], [x], [y])
+
+
+@onnx_test()
+def shrink_uint8_test():
+    x = helper.make_tensor_value_info('x', TensorProto.UINT8, [3, 3])
+    y = helper.make_tensor_value_info('y', TensorProto.UINT8, [3, 3])
+    node = onnx.helper.make_node(
+        "Shrink",
+        inputs=["x"],
+        outputs=["y"],
+        lambd=5.0,
+        bias=-4.5,
+    )
+    return ([node], [x], [y])
+
+
 @onnx_test()
 def sign_test():
     x = helper.make_tensor_value_info('x', TensorProto.DOUBLE, [10, 5])
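These tests exercise the piecewise Shrink definition: y = x - bias when x > lambd, y = x + bias when x < -lambd, and 0 otherwise. A NumPy sketch (illustrative only, not part of the commit):

```python
# Piecewise Shrink, vectorized with nested np.where.
import numpy as np

def shrink(x, lambd=0.5, bias=0.0):
    return np.where(x > lambd, x - bias, np.where(x < -lambd, x + bias, 0))
```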
@@ -7773,7 +8603,7 @@ def transpose_gather_test():

 @onnx_test()
-def trilu_test():
+def triu_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
@@ -7786,7 +8616,7 @@ def trilu_test():

 @onnx_test()
-def trilu_batch_diff_k_test():
+def triu_batch_diff_k_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 2, 3])
     k = np.array([2])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 3])
@@ -7804,7 +8634,24 @@ def trilu_batch_diff_k_test():

 @onnx_test()
-def trilu_lower_test():
+def tril_batch_diff_k_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 2, 3])
+    k = np.array([2])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 3])
+    k_tensor = helper.make_tensor(name='k',
+                                  data_type=TensorProto.INT64,
+                                  dims=k.shape,
+                                  vals=k.astype(np.int64))
+
+    node = onnx.helper.make_node('Trilu',
+                                 inputs=['x', 'k'],
+                                 outputs=['y'],
+                                 upper=0)
+
+    return ([node], [x], [y], [k_tensor])
+
+
+@onnx_test()
+def tril_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
@@ -7813,7 +8660,7 @@ def trilu_lower_test():

 @onnx_test()
-def trilu_neg_k_test():
+def triu_neg_k_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
     k = np.array([-1])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
@@ -7827,7 +8674,23 @@ def trilu_neg_k_test():

 @onnx_test()
-def trilu_out_k_test():
+def tril_neg_k_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
+    k = np.array([-1])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
+    k_tensor = helper.make_tensor(name='k',
+                                  data_type=TensorProto.INT64,
+                                  dims=k.shape,
+                                  vals=k.astype(np.int64))
+
+    node = onnx.helper.make_node('Trilu',
+                                 inputs=['x', 'k'],
+                                 outputs=['y'],
+                                 upper=0)
+
+    return ([node], [x], [y], [k_tensor])
+
+
+@onnx_test()
+def triu_out_k_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
     k = np.array([5])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
@@ -7841,7 +8704,23 @@ def trilu_out_k_test():

 @onnx_test()
-def trilu_row_one_test():
+def tril_out_k_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
+    k = np.array([5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
+    k_tensor = helper.make_tensor(name='k',
+                                  data_type=TensorProto.INT64,
+                                  dims=k.shape,
+                                  vals=k.astype(np.int64))
+
+    node = onnx.helper.make_node('Trilu',
+                                 inputs=['x', 'k'],
+                                 outputs=['y'],
+                                 upper=0)
+
+    return ([node], [x], [y], [k_tensor])
+
+
+@onnx_test()
+def triu_row_one_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4])
     k = np.array([1])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4])
@@ -7858,6 +8737,23 @@ def trilu_row_one_test():
     return ([node], [x], [y], [k_tensor])


+@onnx_test()
+def tril_row_one_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4])
+    k = np.array([1])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4])
+    k_tensor = helper.make_tensor(name='k',
+                                  data_type=TensorProto.INT64,
+                                  dims=k.shape,
+                                  vals=k.astype(np.int64))
+
+    node = onnx.helper.make_node('Trilu',
+                                 inputs=['x', 'k'],
+                                 outputs=['y'],
+                                 upper=0)
+
+    return ([node], [x], [y], [k_tensor])
+
+
 @onnx_test()
 def undefined_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
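The renames split the old trilu_* tests into triu_* (upper=1, the default) and tril_* (upper=0) variants, mirroring NumPy's triu/tril with a diagonal offset k. A short illustrative check (not part of the commit):

```python
# Trilu with upper=1 behaves like np.triu; upper=0 like np.tril.
import numpy as np

x = np.arange(12, dtype=float).reshape(3, 4)
print(np.triu(x, k=-1))  # upper=1 with k=-1, as in triu_neg_k_test
print(np.tril(x, k=2))   # upper=0 with k=2, as in tril_batch_diff_k_test
```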
The remaining files in this page are newly added binary ONNX protobufs generated by the gen_onnx.py tests above (binary content not shown):

test/onnx/group_norm_3d_half_test.onnx (new file, mode 100644)
test/onnx/group_norm_3d_test.onnx (new file, mode 100644)
test/onnx/group_norm_4d_half_test.onnx (new file, mode 100644)
test/onnx/group_norm_4d_test.onnx (new file, mode 100644)
test/onnx/group_norm_5d_half_test.onnx (new file, mode 100644)
test/onnx/group_norm_5d_test.onnx (new file, mode 100644)
test/onnx/group_norm_invalid_bias_shape_test.onnx (new file, mode 100644)
test/onnx/group_norm_invalid_input_count_error_test.onnx (new file, mode 100644)