Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
gaoqiong
MIGraphX
Commits
870a396b
Commit
870a396b
authored
Jan 23, 2023
by
Khalique Ahmed
Browse files
manual merge
parents
228b665c
d309e02f
Changes
473
Show whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
2192 additions
and
232 deletions
+2192
-232
test/onnx/onnx_test.cpp
test/onnx/onnx_test.cpp
+843
-74
test/onnx/pad_attr_dyn_test.onnx
test/onnx/pad_attr_dyn_test.onnx
+0
-0
test/onnx/pad_cnst_dyn_test.onnx
test/onnx/pad_cnst_dyn_test.onnx
+0
-0
test/onnx/pad_dyn_reflect_error.onnx
test/onnx/pad_dyn_reflect_error.onnx
+0
-0
test/onnx/reducel1_dyn_noaxes_test.onnx
test/onnx/reducel1_dyn_noaxes_test.onnx
+0
-0
test/onnx/reducel1_dyn_test.onnx
test/onnx/reducel1_dyn_test.onnx
+0
-0
test/onnx/reducemax_dyn_test.onnx
test/onnx/reducemax_dyn_test.onnx
+0
-0
test/onnx/sinh_dynamic_test.onnx
test/onnx/sinh_dynamic_test.onnx
+0
-0
test/onnx/softmax_dyn_test.onnx
test/onnx/softmax_dyn_test.onnx
+0
-0
test/onnx/split_test_invalid_split.onnx
test/onnx/split_test_invalid_split.onnx
+25
-0
test/onnx/split_test_no_attribute.onnx
test/onnx/split_test_no_attribute.onnx
+26
-0
test/onnx/split_test_no_attribute_invalid_input_split.onnx
test/onnx/split_test_no_attribute_invalid_input_split.onnx
+26
-0
test/onnx/split_test_no_attribute_invalid_split.onnx
test/onnx/split_test_no_attribute_invalid_split.onnx
+26
-0
test/onnx/squeeze_unsqueeze_dyn_test.onnx
test/onnx/squeeze_unsqueeze_dyn_test.onnx
+0
-0
test/onnx/transpose_dyn_test.onnx
test/onnx/transpose_dyn_test.onnx
+0
-0
test/onnx/verify_onnx.cpp
test/onnx/verify_onnx.cpp
+316
-0
test/op_shape_test.cpp
test/op_shape_test.cpp
+913
-157
test/operators.cpp
test/operators.cpp
+0
-1
test/py/CMakeLists.txt
test/py/CMakeLists.txt
+1
-0
test/py/onnx_backend_test.py
test/py/onnx_backend_test.py
+16
-0
No files found.
test/onnx/onnx_test.cpp
View file @
870a396b
...
...
@@ -42,7 +42,6 @@
#include <migraphx/op/lrn.hpp>
#include <migraphx/op/reshape.hpp>
#include <migraphx/op/unknown.hpp>
#include <random>
#include <migraphx/serialize.hpp>
...
...
@@ -182,6 +181,24 @@ TEST_CASE(argmax_test)
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
argmax_dyn_test
)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"x"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
,
0
},
{
4
,
4
,
0
},
{
5
,
5
,
0
},
{
6
,
6
,
0
}}});
auto
ins
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"argmax"
,
{{
"axis"
,
2
}}),
l0
);
auto
ret
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"squeeze"
,
{{
"axes"
,
{
2
}}}),
ins
);
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
auto
prog
=
parse_onnx
(
"argmax_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
argmin_test
)
{
migraphx
::
program
p
;
...
...
@@ -274,6 +291,51 @@ TEST_CASE(averagepool_3d_test)
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
averagepool_dyn_test
)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
,
0
},
{
3
,
3
,
0
},
{
5
,
5
,
0
},
{
5
,
5
,
0
},
{
5
,
5
,
0
}}});
auto
ret
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"pooling"
,
{{
"mode"
,
migraphx
::
op
::
pooling_mode
::
average
},
{
"padding"
,
{
0
,
0
,
0
,
0
,
0
,
0
}},
{
"stride"
,
{
1
,
1
,
1
}},
{
"lengths"
,
{
3
,
3
,
3
}}}),
l0
);
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
auto
prog
=
migraphx
::
parse_onnx
(
"averagepool_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
averagepool_dyn_autopad_error_test
)
{
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
EXPECT
(
test
::
throws
(
[
&
]
{
migraphx
::
parse_onnx
(
"averagepool_dyn_autopad_error_test.onnx"
,
options
);
}));
}
TEST_CASE
(
averagepool_dyn_asym_padding_error_test
)
{
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
EXPECT
(
test
::
throws
(
[
&
]
{
migraphx
::
parse_onnx
(
"averagepool_dyn_asym_padding_error_test.onnx"
,
options
);
}));
}
TEST_CASE
(
averagepool_dyn_cip_error_test
)
{
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
EXPECT
(
test
::
throws
(
[
&
]
{
migraphx
::
parse_onnx
(
"averagepool_dyn_cip_error_test.onnx"
,
options
);
}));
}
TEST_CASE
(
averagepool_notset_test
)
{
migraphx
::
program
p
;
...
...
@@ -369,33 +431,226 @@ TEST_CASE(averagepool_same_upper_test)
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
batchnorm_
1d
_test
)
TEST_CASE
(
batch
_
norm_
flat
_test
)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
{
migraphx
::
shape
::
float_type
,
{
1
,
3
,
5
}});
auto
l1
=
mm
->
add_parameter
(
"1"
,
{
migraphx
::
shape
::
float_type
,
{
3
}});
auto
l2
=
mm
->
add_parameter
(
"2"
,
{
migraphx
::
shape
::
float_type
,
{
3
}});
auto
l3
=
mm
->
add_parameter
(
"3"
,
{
migraphx
::
shape
::
float_type
,
{
3
}});
auto
l4
=
mm
->
add_parameter
(
"4"
,
{
migraphx
::
shape
::
float_type
,
{
3
}});
mm
->
add_instruction
(
migraphx
::
make_op
(
"batch_norm_inference"
),
l0
,
l1
,
l2
,
l3
,
l4
);
auto
prog
=
optimize_onnx
(
"batchnorm_1d_test.onnx"
);
auto
x
=
mm
->
add_parameter
(
"x"
,
{
migraphx
::
shape
::
float_type
,
{
10
}});
auto
scale
=
mm
->
add_parameter
(
"scale"
,
{
migraphx
::
shape
::
float_type
,
{
1
}});
auto
bias
=
mm
->
add_parameter
(
"bias"
,
{
migraphx
::
shape
::
float_type
,
{
1
}});
auto
mean
=
mm
->
add_parameter
(
"mean"
,
{
migraphx
::
shape
::
float_type
,
{
1
}});
auto
var
=
mm
->
add_parameter
(
"variance"
,
{
migraphx
::
shape
::
float_type
,
{
1
}});
auto
rt
=
mm
->
add_literal
(
migraphx
::
literal
{
migraphx
::
shape
::
float_type
,
{
0.5
}});
auto
eps
=
mm
->
add_literal
(
migraphx
::
literal
{
migraphx
::
shape
::
float_type
,
{
1e-6
f
}});
auto
numer
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"sub"
),
{
x
,
mean
});
auto
var_eps
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"add"
),
{
var
,
eps
});
auto
denom
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"pow"
),
{
var_eps
,
rt
});
auto
div0
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"div"
),
{
numer
,
denom
});
auto
r0
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"mul"
),
{
div0
,
scale
});
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"add"
),
{
r0
,
bias
});
auto
prog
=
optimize_onnx
(
"batch_norm_flat_test.onnx"
);
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
batchnorm_
3d
_test
)
TEST_CASE
(
batch
_
norm_
rank_2
_test
)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
{
migraphx
::
shape
::
float_type
,
{
1
,
3
,
5
,
5
,
5
}});
auto
l1
=
mm
->
add_parameter
(
"1"
,
{
migraphx
::
shape
::
float_type
,
{
3
}});
auto
l2
=
mm
->
add_parameter
(
"2"
,
{
migraphx
::
shape
::
float_type
,
{
3
}});
auto
l3
=
mm
->
add_parameter
(
"3"
,
{
migraphx
::
shape
::
float_type
,
{
3
}});
auto
l4
=
mm
->
add_parameter
(
"4"
,
{
migraphx
::
shape
::
float_type
,
{
3
}});
mm
->
add_instruction
(
migraphx
::
make_op
(
"batch_norm_inference"
),
l0
,
l1
,
l2
,
l3
,
l4
);
auto
prog
=
optimize_onnx
(
"batchnorm_3d_test.onnx"
);
auto
x
=
mm
->
add_parameter
(
"x"
,
{
migraphx
::
shape
::
float_type
,
{
2
,
5
}});
auto
scale
=
mm
->
add_parameter
(
"scale"
,
{
migraphx
::
shape
::
float_type
,
{
5
}});
auto
bias
=
mm
->
add_parameter
(
"bias"
,
{
migraphx
::
shape
::
float_type
,
{
5
}});
auto
mean
=
mm
->
add_parameter
(
"mean"
,
{
migraphx
::
shape
::
float_type
,
{
5
}});
auto
var
=
mm
->
add_parameter
(
"variance"
,
{
migraphx
::
shape
::
float_type
,
{
5
}});
auto
rt
=
mm
->
add_literal
(
migraphx
::
literal
{
migraphx
::
shape
::
float_type
,
{
0.5
}});
auto
eps
=
mm
->
add_literal
(
migraphx
::
literal
{
migraphx
::
shape
::
float_type
,
{
1e-6
f
}});
auto
numer
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"sub"
),
{
x
,
mean
});
auto
var_eps
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"add"
),
{
var
,
eps
});
auto
denom
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"pow"
),
{
var_eps
,
rt
});
auto
div0
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"div"
),
{
numer
,
denom
});
auto
r0
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"mul"
),
{
div0
,
scale
});
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"add"
),
{
r0
,
bias
});
auto
prog
=
optimize_onnx
(
"batch_norm_rank_2_test.onnx"
);
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
batch_norm_1d_test
)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
x
=
mm
->
add_parameter
(
"x"
,
{
migraphx
::
shape
::
half_type
,
{
2
,
3
,
4
}});
auto
scale
=
mm
->
add_parameter
(
"scale"
,
{
migraphx
::
shape
::
float_type
,
{
3
}});
auto
bias
=
mm
->
add_parameter
(
"bias"
,
{
migraphx
::
shape
::
float_type
,
{
3
}});
auto
mean
=
mm
->
add_parameter
(
"mean"
,
{
migraphx
::
shape
::
float_type
,
{
3
}});
auto
var
=
mm
->
add_parameter
(
"variance"
,
{
migraphx
::
shape
::
float_type
,
{
3
}});
auto
rt
=
mm
->
add_literal
(
migraphx
::
literal
{
migraphx
::
shape
::
half_type
,
{
0.5
}});
auto
eps
=
mm
->
add_literal
(
migraphx
::
literal
{
migraphx
::
shape
::
half_type
,
{
1e-5
f
}});
auto
usq_scale
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
1
}}}),
scale
);
auto
usq_bias
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
1
}}}),
bias
);
auto
usq_mean
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
1
}}}),
mean
);
auto
usq_var
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
1
}}}),
var
);
auto
numer
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"sub"
),
{
x
,
usq_mean
});
auto
var_eps
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"add"
),
{
usq_var
,
eps
});
auto
denom
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"pow"
),
{
var_eps
,
rt
});
auto
div0
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"div"
),
{
numer
,
denom
});
auto
r0
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"mul"
),
{
div0
,
usq_scale
});
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"add"
),
{
r0
,
usq_bias
});
auto
prog
=
optimize_onnx
(
"batch_norm_1d_test.onnx"
);
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
batch_norm_2d_test
)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
x
=
mm
->
add_parameter
(
"x"
,
{
migraphx
::
shape
::
float_type
,
{
2
,
3
,
4
,
4
}});
auto
scale
=
mm
->
add_parameter
(
"scale"
,
{
migraphx
::
shape
::
float_type
,
{
3
}});
auto
bias
=
mm
->
add_parameter
(
"bias"
,
{
migraphx
::
shape
::
float_type
,
{
3
}});
auto
mean
=
mm
->
add_parameter
(
"mean"
,
{
migraphx
::
shape
::
float_type
,
{
3
}});
auto
var
=
mm
->
add_parameter
(
"variance"
,
{
migraphx
::
shape
::
float_type
,
{
3
}});
auto
rt
=
mm
->
add_literal
(
migraphx
::
literal
{
migraphx
::
shape
::
float_type
,
{
0.5
}});
auto
eps
=
mm
->
add_literal
(
migraphx
::
literal
{
migraphx
::
shape
::
float_type
,
{
1e-5
f
}});
auto
usq_scale
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
1
,
2
}}}),
scale
);
auto
usq_bias
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
1
,
2
}}}),
bias
);
auto
usq_mean
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
1
,
2
}}}),
mean
);
auto
usq_var
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
1
,
2
}}}),
var
);
auto
numer
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"sub"
),
{
x
,
usq_mean
});
auto
var_eps
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"add"
),
{
usq_var
,
eps
});
auto
denom
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"pow"
),
{
var_eps
,
rt
});
auto
div0
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"div"
),
{
numer
,
denom
});
auto
r0
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"mul"
),
{
div0
,
usq_scale
});
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"add"
),
{
r0
,
usq_bias
});
auto
prog
=
optimize_onnx
(
"batch_norm_2d_test.onnx"
);
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
batch_norm_3d_test
)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
x
=
mm
->
add_parameter
(
"x"
,
{
migraphx
::
shape
::
half_type
,
{
2
,
2
,
2
,
2
,
2
}});
auto
scale
=
mm
->
add_parameter
(
"scale"
,
{
migraphx
::
shape
::
half_type
,
{
2
}});
auto
bias
=
mm
->
add_parameter
(
"bias"
,
{
migraphx
::
shape
::
half_type
,
{
2
}});
auto
mean
=
mm
->
add_parameter
(
"mean"
,
{
migraphx
::
shape
::
half_type
,
{
2
}});
auto
var
=
mm
->
add_parameter
(
"variance"
,
{
migraphx
::
shape
::
half_type
,
{
2
}});
auto
rt
=
mm
->
add_literal
(
migraphx
::
literal
{
migraphx
::
shape
::
half_type
,
{
0.5
}});
auto
eps
=
mm
->
add_literal
(
migraphx
::
literal
{
migraphx
::
shape
::
half_type
,
{
1e-6
f
}});
auto
usq_scale
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
1
,
2
,
3
}}}),
scale
);
auto
usq_bias
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
1
,
2
,
3
}}}),
bias
);
auto
usq_mean
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
1
,
2
,
3
}}}),
mean
);
auto
usq_var
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
1
,
2
,
3
}}}),
var
);
auto
numer
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"sub"
),
{
x
,
usq_mean
});
auto
var_eps
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"add"
),
{
usq_var
,
eps
});
auto
denom
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"pow"
),
{
var_eps
,
rt
});
auto
div0
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"div"
),
{
numer
,
denom
});
auto
r0
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"mul"
),
{
div0
,
usq_scale
});
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"add"
),
{
r0
,
usq_bias
});
auto
prog
=
optimize_onnx
(
"batch_norm_3d_test.onnx"
);
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
batch_norm_invalid_rank
)
{
EXPECT
(
test
::
throws
([
&
]
{
migraphx
::
parse_onnx
(
"batch_norm_invalid_rank.onnx"
);
}));
}
TEST_CASE
(
batch_norm_invalid_bias_rank
)
{
EXPECT
(
test
::
throws
([
&
]
{
migraphx
::
parse_onnx
(
"batch_norm_invalid_bias_rank.onnx"
);
}));
}
TEST_CASE
(
binary_dyn_brcst_prelu_test
)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
,
0
},
{
3
,
3
,
0
},
{
4
,
4
,
0
},
{
5
,
5
,
0
}}});
auto
l1
=
mm
->
add_parameter
(
"1"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
4
,
5
}});
auto
ret
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"prelu"
),
{
l0
,
l1
});
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
auto
prog
=
migraphx
::
parse_onnx
(
"binary_dyn_brcst_prelu_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
binary_dyn_brcst_add_test
)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
half_type
,
{
4
,
5
}});
auto
l1
=
mm
->
add_parameter
(
"1"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
,
0
},
{
3
,
3
,
0
},
{
4
,
4
,
0
},
{
5
,
5
,
0
}}});
auto
ret
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"add"
),
{
l0
,
l1
});
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
auto
prog
=
migraphx
::
parse_onnx
(
"binary_dyn_brcst_add_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
binary_dyn_brcst_attr_error_test
)
{
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
EXPECT
(
test
::
throws
(
[
&
]
{
migraphx
::
parse_onnx
(
"binary_dyn_brcst_attr_error_test.onnx"
,
options
);
}));
}
TEST_CASE
(
binary_dyn_brcst_mul_test
)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
,
0
},
{
3
,
3
,
0
},
{
4
,
4
,
0
},
{
5
,
5
,
0
}}});
auto
l1
=
mm
->
add_parameter
(
"1"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
4
,
1
}});
auto
bl1
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"multibroadcast"
,
{{
"out_dyn_dims"
,
to_value
(
l0
->
get_shape
().
dyn_dims
())}}),
l1
,
l0
);
auto
ret
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"mul"
),
l0
,
bl1
);
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
auto
prog
=
migraphx
::
parse_onnx
(
"binary_dyn_brcst_mul_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
...
...
@@ -759,7 +1014,6 @@ TEST_CASE(conv_autopad_same_test)
auto
l1
=
mm
->
add_parameter
(
"1"
,
{
migraphx
::
shape
::
float_type
,
{
1
,
3
,
3
,
3
}});
migraphx
::
op
::
convolution
op
;
op
.
padding
=
{
1
,
1
,
1
,
1
};
op
.
padding_mode
=
migraphx
::
op
::
padding_mode_t
::
same
;
mm
->
add_instruction
(
op
,
l0
,
l1
);
auto
prog
=
optimize_onnx
(
"conv_autopad_same_test.onnx"
);
...
...
@@ -795,14 +1049,42 @@ TEST_CASE(conv_bn_relu_maxpool_test)
auto
p4
=
mm
->
add_parameter
(
"4"
,
{
migraphx
::
shape
::
float_type
,
{
1
}});
auto
p5
=
mm
->
add_parameter
(
"5"
,
{
migraphx
::
shape
::
float_type
,
{
1
}});
auto
p6
=
mm
->
add_parameter
(
"6"
,
{
migraphx
::
shape
::
float_type
,
{
1
}});
auto
rt
=
mm
->
add_literal
(
migraphx
::
literal
{
migraphx
::
shape
::
float_type
,
{
0.5
}});
auto
eps
=
mm
->
add_literal
(
migraphx
::
literal
{
migraphx
::
shape
::
float_type
,
{
1e-5
f
}});
uint64_t
axis
=
1
;
auto
l3
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"convolution"
,
{{
"padding"
,
{
0
,
0
,
0
,
0
}}}),
l0
,
l1
);
auto
l4
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"broadcast"
,
{{
"axis"
,
axis
},
{
"out_lens"
,
l3
->
get_shape
().
lens
()}}),
l2
);
auto
l5
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"add"
),
l3
,
l4
);
auto
l6
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"batch_norm_inference"
,
{{
"epsilon"
,
1.0e-5
f
}}),
l5
,
p3
,
p4
,
p5
,
p6
);
auto
usq_scale
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
1
,
2
}}}),
p3
);
auto
usq_bias
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
1
,
2
}}}),
p4
);
auto
usq_mean
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
1
,
2
}}}),
p5
);
auto
usq_var
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
1
,
2
}}}),
p6
);
auto
mb_mean
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"multibroadcast"
,
{{
"out_lens"
,
{
1
,
1
,
28
,
28
}}}),
usq_mean
);
auto
numer
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"sub"
),
l5
,
mb_mean
);
auto
mb_eps
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"multibroadcast"
,
{{
"out_lens"
,
{
1
,
1
,
1
}}}),
eps
);
auto
var_eps
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"add"
),
usq_var
,
mb_eps
);
auto
mb_rt
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"multibroadcast"
,
{{
"out_lens"
,
{
1
,
1
,
1
}}}),
rt
);
auto
denom
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"pow"
),
var_eps
,
mb_rt
);
auto
mb_denom
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"multibroadcast"
,
{{
"out_lens"
,
{
1
,
1
,
28
,
28
}}}),
denom
);
auto
div0
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"div"
),
numer
,
mb_denom
);
auto
mb_scale
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"multibroadcast"
,
{{
"out_lens"
,
{
1
,
1
,
28
,
28
}}}),
usq_scale
);
auto
r0
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"mul"
),
div0
,
mb_scale
);
auto
mb_bias
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"multibroadcast"
,
{{
"out_lens"
,
{
1
,
1
,
28
,
28
}}}),
usq_bias
);
auto
l6
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"add"
),
r0
,
mb_bias
);
auto
l7
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"relu"
),
l6
);
mm
->
add_instruction
(
migraphx
::
make_op
(
"pooling"
,
{{
"mode"
,
migraphx
::
op
::
pooling_mode
::
max
},
...
...
@@ -836,6 +1118,25 @@ TEST_CASE(conv_dynamic_batch_test)
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
conv_dynamic_bias_test
)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
x0
=
mm
->
add_parameter
(
"0"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
6
,
0
},
{
3
,
3
,
0
},
{
32
,
32
,
0
},
{
32
,
32
,
0
}}});
auto
x1
=
mm
->
add_parameter
(
"1"
,
{
migraphx
::
shape
::
float_type
,
{
1
,
3
,
5
,
5
}});
auto
x2
=
mm
->
add_parameter
(
"2"
,
{
migraphx
::
shape
::
float_type
,
{
1
}});
auto
x3
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"convolution"
),
x0
,
x1
);
auto
x4
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"broadcast"
,
{{
"axis"
,
1
}}),
x2
,
x3
);
auto
x5
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"add"
),
x3
,
x4
);
mm
->
add_return
({
x5
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
6
,
0
};
auto
prog
=
migraphx
::
parse_onnx
(
"conv_dynamic_bias_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
conv_dynamic_img_test
)
{
migraphx
::
program
p
;
...
...
@@ -908,13 +1209,9 @@ TEST_CASE(conv_dynamic_batch_same_upper)
auto
l0
=
mm
->
add_parameter
(
"0"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
10
,
0
},
{
3
,
3
,
0
},
{
5
,
5
,
0
},
{
5
,
5
,
0
}}});
auto
l1
=
mm
->
add_parameter
(
"1"
,
{
migraphx
::
shape
::
float_type
,
{
1
,
3
,
3
,
3
}});
auto
c0
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"convolution"
,
{{
"padding"
,
{
1
,
1
,
1
,
1
}},
{
"stride"
,
{
1
,
1
}},
{
"dilation"
,
{
1
,
1
}},
{
"padding_mode"
,
migraphx
::
op
::
padding_mode_t
::
same
},
{
"use_dynamic_same_auto_pad"
,
false
}}),
auto
c0
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"convolution"
,
{{
"padding"
,
{
1
,
1
,
1
,
1
}},
{
"stride"
,
{
1
,
1
}},
{
"dilation"
,
{
1
,
1
}}}),
l0
,
l1
);
mm
->
add_return
({
c0
});
...
...
@@ -938,8 +1235,7 @@ TEST_CASE(conv_dynamic_img_same_upper)
{{
"padding"
,
{
0
,
0
}},
{
"stride"
,
{
1
,
1
}},
{
"dilation"
,
{
1
,
1
}},
{
"padding_mode"
,
migraphx
::
op
::
padding_mode_t
::
same_upper
},
{
"use_dynamic_same_auto_pad"
,
true
}}),
{
"padding_mode"
,
migraphx
::
op
::
padding_mode_t
::
same_upper
}}),
l0
,
l1
);
mm
->
add_return
({
c0
});
...
...
@@ -963,8 +1259,7 @@ TEST_CASE(conv_dynamic_kernel_same_lower)
{{
"padding"
,
{
0
,
0
}},
{
"stride"
,
{
1
,
1
}},
{
"dilation"
,
{
1
,
1
}},
{
"padding_mode"
,
migraphx
::
op
::
padding_mode_t
::
same_lower
},
{
"use_dynamic_same_auto_pad"
,
true
}}),
{
"padding_mode"
,
migraphx
::
op
::
padding_mode_t
::
same_lower
}}),
l0
,
l1
);
mm
->
add_return
({
c0
});
...
...
@@ -1542,6 +1837,16 @@ migraphx::program create_external_data_prog()
return
p
;
}
TEST_CASE
(
external_constant_test
)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
mm
->
add_literal
(
migraphx
::
literal
{{
migraphx
::
shape
::
int64_type
,
{
3
}},
{
0
,
1
,
2
}});
auto
prog
=
optimize_onnx
(
"external_constant_test.onnx"
);
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
external_data_test
)
{
migraphx
::
program
p
=
create_external_data_prog
();
...
...
@@ -1701,6 +2006,23 @@ TEST_CASE(flatten_nonstd_test)
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
flatten_dyn_test
)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
,
0
},
{
3
,
3
,
0
},
{
4
,
4
,
0
},
{
5
,
5
,
0
}}});
auto
c0
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"contiguous"
),
l0
);
auto
ret
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"flatten"
,
{{
"axis"
,
2
}}),
c0
);
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
auto
prog
=
parse_onnx
(
"flatten_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
floor_test
)
{
migraphx
::
program
p
;
...
...
@@ -1813,64 +2135,64 @@ TEST_CASE(gemm_test)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"
0
"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
5
,
7
}});
auto
l1
=
mm
->
add_parameter
(
"
1
"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
11
,
5
}});
auto
l2
=
mm
->
add_parameter
(
"
2
"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
});
auto
alpha
=
2.
f
;
auto
beta
=
2.0
f
;
auto
l0
=
mm
->
add_parameter
(
"
A
"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
8
,
6
}});
auto
l1
=
mm
->
add_parameter
(
"
B
"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
8
,
7
}});
auto
l2
=
mm
->
add_parameter
(
"
C
"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
6
,
7
}
});
auto
alpha
=
0.5
f
;
auto
beta
=
0.8
f
;
auto
a_l
=
mm
->
add_literal
(
alpha
);
auto
t_a
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"mul"
),
{
a_l
,
l0
});
t_a
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
{
1
,
0
}}}),
t_a
);
auto
t1
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
{
1
,
0
}}}),
l1
);
auto
dot
=
migraphx
::
add_apply_alpha_beta
(
*
mm
,
{
t_a
,
t1
},
migraphx
::
make_op
(
"dot"
),
1.0
f
,
0.0
f
);
auto
dot
=
migraphx
::
add_apply_alpha_beta
(
*
mm
,
{
t_a
,
l1
},
migraphx
::
make_op
(
"dot"
),
1.0
f
,
0.0
f
);
auto
b_l
=
mm
->
add_literal
(
beta
);
auto
l2_b
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"multibroadcast"
,
{{
"out_lens"
,
{
7
,
11
}}}),
l2
);
auto
b_b
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"multibroadcast"
,
{{
"out_lens"
,
l2
_b
->
get_shape
().
lens
()}}),
b_l
);
auto
l2_b
b
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"mul"
),
l2
_b
,
b_b
);
mm
->
add_instruction
(
migraphx
::
make_op
(
"add"
),
dot
,
l2_b
b
);
migraphx
::
make_op
(
"multibroadcast"
,
{{
"out_lens"
,
l2
->
get_shape
().
lens
()}}),
b_l
);
auto
l2_b
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"mul"
),
l2
,
b_b
);
mm
->
add_instruction
(
migraphx
::
make_op
(
"add"
),
dot
,
l2_b
);
auto
prog
=
optimize_onnx
(
"gemm_test.onnx"
);
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
gemm_
ex
_test
)
TEST_CASE
(
gemm_
no_C
_test
)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"
1
"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
1
,
1
,
8
,
6
}});
auto
l1
=
mm
->
add_parameter
(
"
2
"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
1
,
1
,
8
,
7
}});
auto
l2
=
mm
->
add_parameter
(
"
3
"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
1
,
1
,
6
,
7
}
});
auto
alpha
=
0.5
f
;
auto
beta
=
0.8
f
;
auto
l0
=
mm
->
add_parameter
(
"
A
"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
5
,
7
}});
auto
l1
=
mm
->
add_parameter
(
"
B
"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
11
,
5
}});
auto
l2
=
mm
->
add_parameter
(
"
C
"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
});
auto
alpha
=
2.
f
;
auto
beta
=
2.0
f
;
auto
a_l
=
mm
->
add_literal
(
alpha
);
auto
t_a
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"mul"
),
{
a_l
,
l0
});
t_a
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
{
0
,
1
,
3
,
2
}}}),
t_a
);
auto
dot
=
migraphx
::
add_apply_alpha_beta
(
*
mm
,
{
t_a
,
l1
},
migraphx
::
make_op
(
"dot"
),
1.0
f
,
0.0
f
);
t_a
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
{
1
,
0
}}}),
t_a
);
auto
t1
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
{
1
,
0
}}}),
l1
);
auto
dot
=
migraphx
::
add_apply_alpha_beta
(
*
mm
,
{
t_a
,
t1
},
migraphx
::
make_op
(
"dot"
),
1.0
f
,
0.0
f
);
auto
b_l
=
mm
->
add_literal
(
beta
);
auto
l2_b
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"multibroadcast"
,
{{
"out_lens"
,
{
7
,
11
}}}),
l2
);
auto
b_b
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"multibroadcast"
,
{{
"out_lens"
,
l2
->
get_shape
().
lens
()}}),
b_l
);
auto
l2_b
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"mul"
),
l2
,
b_b
);
mm
->
add_instruction
(
migraphx
::
make_op
(
"add"
),
dot
,
l2_b
);
migraphx
::
make_op
(
"multibroadcast"
,
{{
"out_lens"
,
l2
_b
->
get_shape
().
lens
()}}),
b_l
);
auto
l2_b
b
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"mul"
),
l2
_b
,
b_b
);
mm
->
add_instruction
(
migraphx
::
make_op
(
"add"
),
dot
,
l2_b
b
);
auto
prog
=
optimize_onnx
(
"gemm_
ex
_test.onnx"
);
auto
prog
=
optimize_onnx
(
"gemm_
no_C
_test.onnx"
);
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
gemm_
ex_
brcst_test
)
TEST_CASE
(
gemm_brcst_
C_
test
)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"
1
"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
1
,
1
,
5
,
6
}});
auto
l1
=
mm
->
add_parameter
(
"
2
"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
1
,
1
,
5
,
7
}});
auto
l2
=
mm
->
add_parameter
(
"
3
"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
1
,
1
,
6
,
1
}});
std
::
vector
<
std
::
size_t
>
out_lens
{
1
,
1
,
6
,
7
};
auto
l0
=
mm
->
add_parameter
(
"
A
"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
5
,
6
}});
auto
l1
=
mm
->
add_parameter
(
"
B
"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
5
,
7
}});
auto
l2
=
mm
->
add_parameter
(
"
C
"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
6
,
1
}});
std
::
vector
<
std
::
size_t
>
out_lens
{
6
,
7
};
auto
alpha
=
0.5
f
;
auto
beta
=
0.8
f
;
auto
a_l
=
mm
->
add_literal
(
alpha
);
auto
t_a
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"mul"
),
{
a_l
,
l0
});
t_a
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
{
0
,
1
,
3
,
2
}}}),
t_a
);
t_a
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
{
1
,
0
}}}),
t_a
);
auto
dot
=
migraphx
::
add_apply_alpha_beta
(
*
mm
,
{
t_a
,
l1
},
migraphx
::
make_op
(
"dot"
),
1.0
f
,
0.0
f
);
auto
b_l
=
mm
->
add_literal
(
beta
);
auto
l2_b
=
...
...
@@ -1880,7 +2202,7 @@ TEST_CASE(gemm_ex_brcst_test)
auto
l2_bb
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"mul"
),
l2_b
,
b_b
);
mm
->
add_instruction
(
migraphx
::
make_op
(
"add"
),
dot
,
l2_bb
);
auto
prog
=
optimize_onnx
(
"gemm_
ex_
brcst_test.onnx"
);
auto
prog
=
optimize_onnx
(
"gemm_brcst_
C_
test.onnx"
);
EXPECT
(
p
==
prog
);
}
...
...
@@ -1888,17 +2210,17 @@ TEST_CASE(gemm_half_test)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"
1
"
,
migraphx
::
shape
{
migraphx
::
shape
::
half_type
,
{
1
,
1
,
8
,
6
}});
auto
l1
=
mm
->
add_parameter
(
"
2
"
,
migraphx
::
shape
{
migraphx
::
shape
::
half_type
,
{
1
,
1
,
8
,
7
}});
auto
l2
=
mm
->
add_parameter
(
"
3
"
,
migraphx
::
shape
{
migraphx
::
shape
::
half_type
,
{
1
,
1
,
6
,
1
}});
auto
l0
=
mm
->
add_parameter
(
"
A
"
,
migraphx
::
shape
{
migraphx
::
shape
::
half_type
,
{
8
,
6
}});
auto
l1
=
mm
->
add_parameter
(
"
B
"
,
migraphx
::
shape
{
migraphx
::
shape
::
half_type
,
{
8
,
7
}});
auto
l2
=
mm
->
add_parameter
(
"
C
"
,
migraphx
::
shape
{
migraphx
::
shape
::
half_type
,
{
6
,
1
}});
auto
alpha
=
0.5
f
;
auto
beta
=
0.8
f
;
auto
a_l
=
mm
->
add_literal
(
alpha
);
auto
t_a
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"mul"
),
{
a_l
,
l0
});
t_a
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"convert"
,
{{
"target_type"
,
migraphx
::
shape
::
half_type
}}),
t_a
);
t_a
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
{
0
,
1
,
3
,
2
}}}),
t_a
);
std
::
vector
<
std
::
size_t
>
lens
=
{
1
,
1
,
6
,
7
};
t_a
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
{
1
,
0
}}}),
t_a
);
std
::
vector
<
std
::
size_t
>
lens
=
{
6
,
7
};
auto
dot
=
migraphx
::
add_apply_alpha_beta
(
*
mm
,
{
t_a
,
l1
},
migraphx
::
make_op
(
"dot"
),
1.0
f
,
0.0
f
);
l2
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"multibroadcast"
,
{{
"out_lens"
,
lens
}}),
l2
);
l2
=
mm
->
add_instruction
(
...
...
@@ -1914,6 +2236,60 @@ TEST_CASE(gemm_half_test)
EXPECT
(
p
==
prog
);
}
// Gemm with transA and a dynamic inner (contraction) dimension:
// alpha is folded into A via mul before the transpose + dot.
TEST_CASE(gemm_dyn_inner_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0  = mm->add_parameter(
        "A", migraphx::shape{migraphx::shape::float_type, {{1, 10, 8}, {6, 6, 0}}});
    auto l1 = mm->add_parameter(
        "B", migraphx::shape{migraphx::shape::float_type, {{1, 10, 8}, {7, 7, 0}}});
    auto alpha = 0.5f;
    auto a_l   = mm->add_literal(alpha);
    auto t_a   = add_common_op(*mm, migraphx::make_op("mul"), {a_l, l0});
    t_a = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {1, 0}}}), t_a);
    auto dot =
        migraphx::add_apply_alpha_beta(*mm, {t_a, l1}, migraphx::make_op("dot"), 1.0f, 0.0f);
    mm->add_return({dot});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 10, 8};
    auto prog                     = migraphx::parse_onnx("gemm_dyn_inner_test.onnx", options);
    EXPECT(p == prog);
}
// Gemm where the outer (row) dimension of A is dynamic; both inputs are
// transposed and alpha=2 is pre-multiplied into A.
TEST_CASE(gemm_dyn_outer_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0  = mm->add_parameter(
        "A", migraphx::shape{migraphx::shape::float_type, {{5, 5, 0}, {5, 10, 7}}});
    auto l1 =
        mm->add_parameter("B", migraphx::shape{migraphx::shape::float_type, {11, 5}});
    auto alpha = 2.f;
    auto a_l   = mm->add_literal(alpha);
    auto t_a   = add_common_op(*mm, migraphx::make_op("mul"), {a_l, l0});
    t_a = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {1, 0}}}), t_a);
    auto t1 =
        mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {1, 0}}}), l1);
    auto dot =
        migraphx::add_apply_alpha_beta(*mm, {t_a, t1}, migraphx::make_op("dot"), 1.0f, 0.0f);
    mm->add_return({dot});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {5, 10, 7};
    auto prog                     = migraphx::parse_onnx("gemm_dyn_outer_test.onnx", options);
    EXPECT(p == prog);
}
// A dynamic C input to Gemm is unsupported: parsing must throw.
TEST_CASE(gemm_dyn_C_error)
{
    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 4, 0};
    EXPECT(test::throws([&] { migraphx::parse_onnx("gemm_dyn_C_error.onnx", options); }));
}
// Gemm inputs with invalid rank must be rejected by the parser.
TEST_CASE(gemm_rank_error)
{
    EXPECT(test::throws([&] { migraphx::parse_onnx("gemm_rank_error.onnx"); }));
}
TEST_CASE
(
globalavgpool_test
)
{
migraphx
::
program
p
;
...
...
@@ -1931,6 +2307,28 @@ TEST_CASE(globalavgpool_test)
EXPECT
(
p
==
prog
);
}
// GlobalAveragePool with a dynamic batch dimension; fixed spatial dims
// become the pooling window lengths.
TEST_CASE(globalavgpool_dyn_test)
{
    migraphx::program p;
    auto* mm   = p.get_main_module();
    auto input = mm->add_parameter(
        "0",
        migraphx::shape{migraphx::shape::float_type,
                        {{1, 4, 0}, {3, 3, 0}, {16, 16, 0}, {16, 16, 0}}});
    auto ret = mm->add_instruction(
        migraphx::make_op("pooling",
                          {{"mode", migraphx::op::pooling_mode::average},
                           {"lengths", {16, 16}},
                           {"padding", {0, 0, 0, 0}}}),
        input);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 4, 0};
    auto prog                     = parse_onnx("globalavgpool_dyn_test.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE
(
globallppool_test
)
{
migraphx
::
program
p
;
...
...
@@ -1948,6 +2346,29 @@ TEST_CASE(globallppool_test)
EXPECT
(
p
==
prog
);
}
// GlobalLpPool over dynamic spatial dimensions: pooling uses the
// dyn_global flag instead of fixed window lengths.
TEST_CASE(globallppool_dyn_test)
{
    migraphx::program p;
    auto* mm   = p.get_main_module();
    auto input = mm->add_parameter(
        "0",
        migraphx::shape{migraphx::shape::float_type,
                        {{1, 1, 0}, {3, 3, 0}, {16, 32, 0}, {16, 32, 0}}});
    auto ret = mm->add_instruction(
        migraphx::make_op("pooling",
                          {{"mode", migraphx::op::pooling_mode::lpnorm},
                           {"dyn_global", true},
                           {"padding", {0, 0, 0, 0}},
                           {"lengths", {}}}),
        input);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {16, 32, 0};
    auto prog                     = migraphx::parse_onnx("globallppool_dyn_test.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE
(
globalmaxpool_test
)
{
migraphx
::
program
p
;
...
...
@@ -1965,6 +2386,28 @@ TEST_CASE(globalmaxpool_test)
EXPECT
(
p
==
prog
);
}
// GlobalMaxPool with a dynamic batch dimension and fixed 32x32 spatial dims.
TEST_CASE(globalmaxpool_dyn_test)
{
    migraphx::program p;
    auto* mm   = p.get_main_module();
    auto input = mm->add_parameter(
        "0",
        migraphx::shape{migraphx::shape::float_type,
                        {{1, 4, 0}, {3, 3, 0}, {32, 32, 0}, {32, 32, 0}}});
    auto ret = mm->add_instruction(
        migraphx::make_op("pooling",
                          {{"mode", migraphx::op::pooling_mode::max},
                           {"lengths", {32, 32}},
                           {"padding", {0, 0, 0, 0}}}),
        input);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 4, 0};
    auto prog                     = parse_onnx("globalmaxpool_dyn_test.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE
(
greater_test
)
{
migraphx
::
program
p
;
...
...
@@ -3043,6 +3486,92 @@ TEST_CASE(matmul_vv_test)
EXPECT
(
p
==
prog
);
}
// MatMul of two dynamic-shaped matrices; per-input dynamic dims are
// supplied through map_dyn_input_dims.
TEST_CASE(matmul_dyn_mm_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0  = mm->add_parameter(
        "1", migraphx::shape{migraphx::shape::float_type, {{4, 8, 6}, {7, 7, 0}}});
    auto l1 = mm->add_parameter(
        "2", migraphx::shape{migraphx::shape::float_type, {{7, 7, 0}, {1, 5, 3}}});
    auto ret =
        migraphx::add_apply_alpha_beta(*mm, {l0, l1}, migraphx::make_op("dot"), 1.0f, 0.0f);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.map_dyn_input_dims["1"] = {{4, 8, 6}, {7, 7, 0}};
    options.map_dyn_input_dims["2"] = {{7, 7, 0}, {1, 5, 3}};
    auto prog                       = parse_onnx("matmul_dyn_mm_test.onnx", options);
    EXPECT(p == prog);
}
// Dynamic matrix x vector MatMul: the vector is unsqueezed to a column,
// multiplied, then the result is squeezed back.
TEST_CASE(matmul_dyn_mv_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0  = mm->add_parameter(
        "1", migraphx::shape{migraphx::shape::float_type, {{4, 8, 6}, {7, 7, 0}}});
    auto l1  = mm->add_parameter("2", migraphx::shape{migraphx::shape::float_type, {7}});
    auto sl1 = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), l1);
    auto res =
        migraphx::add_apply_alpha_beta(*mm, {l0, sl1}, migraphx::make_op("dot"), 1.0f, 0.0f);
    auto ret = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {1}}}), res);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.map_dyn_input_dims["1"] = {{4, 8, 6}, {7, 7, 0}};
    auto prog                       = parse_onnx("matmul_dyn_mv_test.onnx", options);
    EXPECT(p == prog);
}
// Dynamic vector x matrix MatMul: the vector is unsqueezed to a row,
// multiplied, then the leading axis is squeezed away.
TEST_CASE(matmul_dyn_vm_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0  = mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {7}});
    auto l1  = mm->add_parameter(
        "2", migraphx::shape{migraphx::shape::float_type, {{7, 7, 0}, {4, 10, 8}}});
    auto sl0 = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {0}}}), l0);
    auto res =
        migraphx::add_apply_alpha_beta(*mm, {sl0, l1}, migraphx::make_op("dot"), 1.0f, 0.0f);
    auto ret = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {0}}}), res);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.map_dyn_input_dims["2"] = {{7, 7, 0}, {4, 10, 8}};
    auto prog                       = parse_onnx("matmul_dyn_vm_test.onnx", options);
    EXPECT(p == prog);
}
// Dynamic vector x vector MatMul (inner product): both sides are
// unsqueezed for the dot and the two singleton axes are squeezed off.
TEST_CASE(matmul_dyn_vv_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape::dynamic_dimension dd{5, 8, 7};
    auto l0  = mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {dd}});
    auto l1  = mm->add_parameter("2", migraphx::shape{migraphx::shape::float_type, {dd}});
    auto sl0 = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {0}}}), l0);
    auto sl1 = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), l1);
    auto res =
        migraphx::add_apply_alpha_beta(*mm, {sl0, sl1}, migraphx::make_op("dot"), 1.0f, 0.0f);
    auto sr0 = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {0}}}), res);
    auto ret = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {0}}}), sr0);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = dd;
    auto prog                     = parse_onnx("matmul_dyn_vv_test.onnx", options);
    EXPECT(p == prog);
}
// Broadcasting dynamic-shaped MatMul inputs is unsupported: must throw.
TEST_CASE(matmul_dyn_broadcast_error)
{
    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 4, 0};
    EXPECT(test::throws(
        [&] { migraphx::parse_onnx("matmul_dyn_broadcast_error.onnx", options); }));
}
TEST_CASE
(
matmulinteger_test
)
{
migraphx
::
program
p
;
...
...
@@ -3056,6 +3585,13 @@ TEST_CASE(matmulinteger_test)
EXPECT
(
p
==
prog
);
}
// MatMulInteger does not support dynamic input shapes: must throw.
TEST_CASE(matmulinteger_dyn_error)
{
    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 4, 0};
    EXPECT(
        test::throws([&] { migraphx::parse_onnx("matmulinteger_dyn_error.onnx", options); }));
}
TEST_CASE
(
max_test
)
{
migraphx
::
program
p
;
...
...
@@ -3357,6 +3893,21 @@ TEST_CASE(neg_test)
EXPECT
(
p
==
prog
);
}
// Unary Neg on a dynamic-shaped int64 input.
TEST_CASE(neg_dynamic_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::int64_type, {{1, 10, 0}, {3, 3, 0}}};
    auto input = mm->add_parameter("0", s);
    auto ret   = mm->add_instruction(migraphx::make_op("neg"), input);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 10, 0};
    auto prog                     = migraphx::parse_onnx("neg_dynamic_test.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE
(
nms_test
)
{
migraphx
::
program
p
;
...
...
@@ -3631,6 +4182,44 @@ TEST_CASE(pad_3arg_test)
EXPECT
(
p
==
prog
);
}
// Pad with pads supplied as an attribute, applied to a dynamic input.
TEST_CASE(pad_attr_dyn_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto x   = mm->add_parameter(
        "0", migraphx::shape{migraphx::shape::float_type, {{2, 4, 2}, {2, 4, 2}}});
    auto ret =
        mm->add_instruction(migraphx::make_op("pad", {{"pads", {1, 1, 1, 1}}}), x);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.map_dyn_input_dims["0"] = {{2, 4, 2}, {2, 4, 2}};
    auto prog                       = parse_onnx("pad_attr_dyn_test.onnx", options);
    EXPECT(p == prog);
}
// Pad whose pads come from a constant input tensor; the literal is kept
// in the module and the values are folded into the pad attribute.
TEST_CASE(pad_cnst_dyn_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto x   = mm->add_parameter(
        "0", migraphx::shape{migraphx::shape::float_type, {{2, 4, 2}, {2, 4, 2}}});
    mm->add_literal({migraphx::shape{migraphx::shape::int32_type, {4}}, {0, 2, 0, 1}});
    auto ret =
        mm->add_instruction(migraphx::make_op("pad", {{"pads", {0, 2, 0, 1}}}), x);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.map_dyn_input_dims["0"] = {{2, 4, 2}, {2, 4, 2}};
    auto prog                       = parse_onnx("pad_cnst_dyn_test.onnx", options);
    EXPECT(p == prog);
}
// Reflect-mode Pad on a dynamic input is unsupported: must throw.
TEST_CASE(pad_dyn_reflect_error)
{
    migraphx::onnx_options options;
    options.default_dyn_dim_value = {2, 4, 2};
    EXPECT(
        test::throws([&] { migraphx::parse_onnx("pad_dyn_reflect_error.onnx", options); }));
}
TEST_CASE
(
pad_reflect_test
)
{
migraphx
::
program
p
;
...
...
@@ -4083,6 +4672,50 @@ TEST_CASE(reducel1_test)
EXPECT
(
p
==
prog
);
}
// ReduceL1 (abs + reduce_sum + squeeze) over dynamic shapes:
// first with an explicit axis, then with no axes (defaults to all axes).
TEST_CASE(reducel1_dyn_test)
{
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        // a shape with 4 dynamic dimensions
        auto l0 = mm->add_parameter(
            "x",
            migraphx::shape{migraphx::shape::float_type,
                            {{3, 3, 0}, {3, 5, 0}, {4, 6, 5}, {5, 7, 6}}});
        auto abs_ins = mm->add_instruction(migraphx::make_op("abs"), l0);
        auto sum_ins =
            mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {-2}}}), abs_ins);
        auto sq_ins =
            mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {-2}}}), sum_ins);
        mm->add_return({sq_ins});

        migraphx::onnx_options options;
        options.map_dyn_input_dims["x"] = {{3, 3}, {3, 5}, {4, 6, 5}, {5, 7, 6}};
        auto prog = migraphx::parse_onnx("reducel1_dyn_test.onnx", options);
        EXPECT(p == prog);
    }

    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        // No axes given in the onnx file. Parser should default to all axes.
        auto l0 = mm->add_parameter(
            "x",
            migraphx::shape{migraphx::shape::float_type,
                            {{3, 3, 0}, {3, 5, 0}, {4, 6, 5}, {5, 7, 6}}});
        auto abs_ins = mm->add_instruction(migraphx::make_op("abs"), l0);
        auto sum_ins = mm->add_instruction(
            migraphx::make_op("reduce_sum", {{"axes", {0, 1, 2, 3}}}), abs_ins);
        auto sq_ins = mm->add_instruction(
            migraphx::make_op("squeeze", {{"axes", {0, 1, 2, 3}}}), sum_ins);
        mm->add_return({sq_ins});

        migraphx::onnx_options options;
        options.map_dyn_input_dims["x"] = {{3, 3}, {3, 5}, {4, 6, 5}, {5, 7, 6}};
        auto prog = migraphx::parse_onnx("reducel1_dyn_noaxes_test.onnx", options);
        EXPECT(p == prog);
    }
}
TEST_CASE
(
reducel2_test
)
{
migraphx
::
program
p
;
...
...
@@ -4133,6 +4766,24 @@ TEST_CASE(reducemax_test)
EXPECT
(
p
==
prog
);
}
// ReduceMax along axis 2 of a dynamic-shaped input, followed by squeeze.
TEST_CASE(reducemax_dyn_test)
{
    // input shape with 4 dynamic dimensions
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0  = mm->add_parameter(
        "x",
        migraphx::shape{migraphx::shape::float_type, {{3, 3}, {4, 4}, {5, 5}, {6, 6}}});
    auto r0 = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {2}}}), l0);
    auto r1 = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {2}}}), r0);
    mm->add_return({r1});

    migraphx::onnx_options options;
    options.map_dyn_input_dims["x"] = {{3, 3}, {4, 4}, {5, 5}, {6, 6}};
    auto prog                       = migraphx::parse_onnx("reducemax_dyn_test.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE
(
reducemean_test
)
{
migraphx
::
program
p
;
...
...
@@ -5080,6 +5731,24 @@ TEST_CASE(sinh_test)
EXPECT
(
p
==
prog
);
}
// Unary sinh on a rank-1 dynamic input.
TEST_CASE(sinh_dynamic_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape::dynamic_dimension dd{1, 10, 0};
    std::vector<migraphx::shape::dynamic_dimension> dyn_dims;
    dyn_dims.push_back(dd);
    auto input =
        mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, dyn_dims});
    auto ret = mm->add_instruction(migraphx::make_op("sinh"), input);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = dd;
    auto prog                     = parse_onnx("sinh_dynamic_test.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE
(
size_float_test
)
{
migraphx
::
program
p
;
...
...
@@ -5246,6 +5915,23 @@ TEST_CASE(softmax_nonstd_input_test)
EXPECT
(
p
==
prog
);
}
// Softmax along the last axis of a dynamic-shaped 4D input.
TEST_CASE(softmax_dyn_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0  = mm->add_parameter(
        "0",
        migraphx::shape{migraphx::shape::float_type,
                        {{1, 4, 0}, {3, 3, 0}, {4, 4, 0}, {4, 4, 0}}});
    auto ret = mm->add_instruction(migraphx::make_op("softmax", {{"axis", -1}}), l0);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 4, 0};
    auto prog                     = migraphx::parse_onnx("softmax_dyn_test.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE
(
softplus_test
)
{
migraphx
::
program
p
;
...
...
@@ -5361,6 +6047,31 @@ TEST_CASE(split_test)
EXPECT
(
p
==
prog
);
}
// Split with the split sizes given as a constant input tensor instead of
// an attribute: 300 rows split into four equal 75-row slices along axis 0.
TEST_CASE(split_test_no_attribute)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape si{migraphx::shape::int64_type, {4}, {1}};
    std::vector<int> ind = {75, 75, 75, 75};

    auto input =
        mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {300, 15}});
    mm->add_literal(migraphx::literal(si, ind));
    auto r1 = mm->add_instruction(
        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {0}}, {"ends", {75}}}), input);
    auto r2 = mm->add_instruction(
        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {75}}, {"ends", {150}}}),
        input);
    auto r3 = mm->add_instruction(
        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {150}}, {"ends", {225}}}),
        input);
    auto r4 = mm->add_instruction(
        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {225}}, {"ends", {300}}}),
        input);
    mm->add_return({r1, r2, r3, r4});

    auto prog = migraphx::parse_onnx("split_test_no_attribute.onnx");
    EXPECT(p == prog);
}
TEST_CASE
(
split_test_default
)
{
migraphx
::
program
p
;
...
...
@@ -5376,6 +6087,23 @@ TEST_CASE(split_test_default)
EXPECT
(
p
==
prog
);
}
// Split input tensor with sizes that do not sum to the axis length: must throw.
TEST_CASE(split_test_no_attribute_invalid_split)
{
    EXPECT(test::throws(
        [&] { migraphx::parse_onnx("split_test_no_attribute_invalid_split.onnx"); }));
}
// Split attribute with invalid sizes: must throw.
TEST_CASE(split_test_invalid_split)
{
    EXPECT(test::throws([&] { migraphx::parse_onnx("split_test_invalid_split.onnx"); }));
}
// Split with an invalid (non-constant/malformed) split input: must throw.
TEST_CASE(split_test_no_attribute_invalid_input_split)
{
    EXPECT(test::throws(
        [&] { migraphx::parse_onnx("split_test_no_attribute_invalid_input_split.onnx"); }));
}
TEST_CASE
(
sqrt_test
)
{
migraphx
::
program
p
;
...
...
@@ -5402,6 +6130,29 @@ TEST_CASE(squeeze_unsqueeze_test)
EXPECT
(
p
==
prog
);
}
// Squeeze then Unsqueeze on a 6D dynamic input; contiguous ops are
// inserted around each reshape-like op.
TEST_CASE(squeeze_unsqueeze_dyn_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    std::vector<int64_t> squeeze_axes{0, 2, 3, 5};
    std::vector<int64_t> unsqueeze_axes{0, 1, 3, 5};
    auto l0 = mm->add_parameter(
        "0",
        migraphx::shape{migraphx::shape::float_type,
                        {{1, 1, 0}, {1, 4, 0}, {1, 1, 0}, {1, 1, 0}, {1, 4, 0}, {1, 1, 0}}});
    auto c0 = mm->add_instruction(migraphx::make_op("contiguous"), l0);
    auto l1 =
        mm->add_instruction(migraphx::make_op("squeeze", {{"axes", squeeze_axes}}), c0);
    auto c1  = mm->add_instruction(migraphx::make_op("contiguous"), l1);
    auto ret =
        mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", unsqueeze_axes}}), c1);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 4, 0};
    auto prog                     = parse_onnx("squeeze_unsqueeze_dyn_test.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE
(
squeeze_axes_input_test
)
{
migraphx
::
program
p
;
...
...
@@ -5685,6 +6436,24 @@ TEST_CASE(transpose_test)
EXPECT
(
p
==
prog
);
}
// Transpose with permutation {0,3,1,2} on a dynamic-shaped 4D input.
TEST_CASE(transpose_dyn_test)
{
    migraphx::program p;
    auto* mm   = p.get_main_module();
    auto input = mm->add_parameter(
        "0",
        migraphx::shape{migraphx::shape::float_type,
                        {{1, 4, 0}, {2, 2, 0}, {2, 2, 0}, {3, 3, 0}}});
    std::vector<int64_t> perm{0, 3, 1, 2};
    auto t0 =
        mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), input);
    mm->add_return({t0});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 4, 0};
    auto prog                     = migraphx::parse_onnx("transpose_dyn_test.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE
(
topk_attrk_test
)
{
migraphx
::
program
p
;
...
...
test/onnx/pad_attr_dyn_test.onnx
0 → 100644
View file @
870a396b
File added
test/onnx/pad_cnst_dyn_test.onnx
0 → 100644
View file @
870a396b
File added
test/onnx/pad_dyn_reflect_error.onnx
0 → 100644
View file @
870a396b
File added
test/onnx/reducel1_dyn_noaxes_test.onnx
0 → 100644
View file @
870a396b
File added
test/onnx/reducel1_dyn_test.onnx
0 → 100644
View file @
870a396b
File added
test/onnx/reducemax_dyn_test.onnx
0 → 100644
View file @
870a396b
File added
test/onnx/sinh_dynamic_test.onnx
0 → 100644
View file @
870a396b
File added
test/onnx/softmax_dyn_test.onnx
0 → 100644
View file @
870a396b
File added
test/onnx/split_test_invalid_split.onnx
0 → 100644
View file @
870a396b
split_test_invalid_split:
5
xy1y2y3"Split*
axis*
split@@@split_test_invalid_splitZ
x
b
y1
b
y2
b
y3
B
\ No newline at end of file
test/onnx/split_test_no_attribute.onnx
0 → 100644
View file @
870a396b
split_test_no_attribute:
0split"Constant*
value*:KKKKBsplit
!
x
splity1y2y3y4"Splitsplit_test_no_attributeZ
x
b
y1
K
b
y2
K
b
y3
K
b
y4
K
B
\ No newline at end of file
test/onnx/split_test_no_attribute_invalid_input_split.onnx
0 → 100644
View file @
870a396b
+split_test_no_attribute_invalid_input_split:
/
xy1y2y3"Split*
axis*
split+split_test_no_attribute_invalid_input_splitZ
x
b
y1
b
y2
b
y3
B
\ No newline at end of file
test/onnx/split_test_no_attribute_invalid_split.onnx
0 → 100644
View file @
870a396b
%split_test_no_attribute_invalid_split:
0split"Constant*
value*:Bsplit
!
x
splity1y2y3y4"Split%split_test_no_attribute_invalid_splitZ
x
b
y1
K
b
y2
K
b
y3
K
b
y4
K
B
\ No newline at end of file
test/onnx/squeeze_unsqueeze_dyn_test.onnx
0 → 100644
View file @
870a396b
File added
test/onnx/transpose_dyn_test.onnx
0 → 100644
View file @
870a396b
File added
test/onnx/verify_onnx.cpp
View file @
870a396b
...
...
@@ -30,6 +30,7 @@
#include <migraphx/pass_manager.hpp>
#include <migraphx/verify.hpp>
#include <migraphx/onnx.hpp>
#include <migraphx/half.hpp>
#include "test.hpp"
TEST_CASE
(
averagepool_notset_test
)
...
...
@@ -68,6 +69,233 @@ TEST_CASE(averagepool_nt_cip_test)
EXPECT
(
migraphx
::
verify_range
(
result_vector
,
gold
));
}
// BatchNormalization on a flat (rank-1) input with scalar-per-channel
// stats; evaluated on the ref target and compared against precomputed gold.
TEST_CASE(batch_norm_flat_test)
{
    migraphx::program p = migraphx::parse_onnx("batch_norm_flat_test.onnx");
    p.compile(migraphx::ref::target{});

    migraphx::shape x_shape{migraphx::shape::float_type, {10}};
    migraphx::shape c_shape(migraphx::shape::float_type, {1});
    std::vector<float> x_data = {1.6524342,
                                 -0.51048076,
                                 0.32543048,
                                 2.4410043,
                                 2.0833702,
                                 0.44981122,
                                 1.0044622,
                                 -0.24006313,
                                 -0.43065986,
                                 0.07626268};
    std::vector<float> scale_data    = {-0.02927135};
    std::vector<float> bias_data     = {0.42347777};
    std::vector<float> mean_data     = {-0.00449735};
    std::vector<float> variance_data = {0.5184545};

    migraphx::parameter_map params;
    params["x"]        = migraphx::argument(x_shape, x_data.data());
    params["scale"]    = migraphx::argument(c_shape, scale_data.data());
    params["bias"]     = migraphx::argument(c_shape, bias_data.data());
    params["mean"]     = migraphx::argument(c_shape, mean_data.data());
    params["variance"] = migraphx::argument(c_shape, variance_data.data());

    auto result = p.eval(params).back();
    std::vector<float> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<float> gold = {0.35612,
                               0.44404706,
                               0.4100655,
                               0.32406294,
                               0.33860153,
                               0.40500915,
                               0.38246143,
                               0.43305403,
                               0.4408022,
                               0.42019472};
    EXPECT(migraphx::verify_range(result_vector, gold));
}
// BatchNormalization on a rank-2 (N, C) input with per-channel stats;
// evaluated on the ref target and compared against precomputed gold.
TEST_CASE(batch_norm_rank_2_test)
{
    migraphx::program p = migraphx::parse_onnx("batch_norm_rank_2_test.onnx");
    p.compile(migraphx::ref::target{});

    migraphx::shape x_shape{migraphx::shape::float_type, {2, 5}};
    migraphx::shape c_shape(migraphx::shape::float_type, {5});
    std::vector<float> x_data = {1., 2., 3., 4., 5., 6., 7., 8., 9., 10.};
    std::vector<float> scale_data(5, 1.);
    std::vector<float> bias_data(5, 0.);
    std::vector<float> mean_data = {1., 2., 1., 2., 1.};
    std::vector<float> variance_data(5, 0.5);

    migraphx::parameter_map params;
    params["x"]        = migraphx::argument(x_shape, x_data.data());
    params["scale"]    = migraphx::argument(c_shape, scale_data.data());
    params["bias"]     = migraphx::argument(c_shape, bias_data.data());
    params["mean"]     = migraphx::argument(c_shape, mean_data.data());
    params["variance"] = migraphx::argument(c_shape, variance_data.data());

    auto result = p.eval(params).back();
    std::vector<float> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<float> gold = {0.,
                               0.,
                               2.8284243,
                               2.8284243,
                               5.65684859,
                               7.07106074,
                               7.07106074,
                               9.89948504,
                               9.89948504,
                               12.72790933};
    EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE
(
batch_norm_1d_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"batch_norm_1d_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{});
migraphx
::
shape
x_shape
{
migraphx
::
shape
::
half_type
,
{
2
,
3
,
4
}};
migraphx
::
shape
c_shape
(
migraphx
::
shape
::
float_type
,
{
3
});
std
::
vector
<
float
>
tmp
=
{
1.652
,
-
0.5103
,
0.3254
,
2.441
,
2.084
,
0.4497
,
1.005
,
-
0.2401
,
-
0.4307
,
0.07623
,
-
0.02927
,
0.4236
,
-
0.004498
,
-
0.4282
,
-
0.5527
,
0.02205
,
-
1.472
,
-
1.7295
,
0.796
,
0.9507
,
0.2312
,
0.664
,
-
0.06964
,
1.035
};
std
::
vector
<
migraphx
::
half
>
x_data
{
tmp
.
cbegin
(),
tmp
.
cend
()};
std
::
vector
<
float
>
scale_data
=
{
-
1.336926
,
-
1.0679098
,
0.10368501
};
std
::
vector
<
float
>
bias_data
=
{
0.20240043
,
-
0.70175606
,
-
0.8859727
};
std
::
vector
<
float
>
mean_data
=
{
0.30854642
,
-
0.36574763
,
-
0.9463552
};
std
::
vector
<
float
>
variance_data
=
{
0.43428132
,
0.97773486
,
0.30332062
};
migraphx
::
parameter_map
params
;
params
[
"x"
]
=
migraphx
::
argument
(
x_shape
,
x_data
.
data
());
params
[
"scale"
]
=
migraphx
::
argument
(
c_shape
,
scale_data
.
data
());
params
[
"bias"
]
=
migraphx
::
argument
(
c_shape
,
bias_data
.
data
());
params
[
"mean"
]
=
migraphx
::
argument
(
c_shape
,
mean_data
.
data
());
params
[
"variance"
]
=
migraphx
::
argument
(
c_shape
,
variance_data
.
data
());
auto
result
=
p
.
eval
(
params
).
back
();
std
::
vector
<
migraphx
::
half
>
result_vector
;
result
.
visit
([
&
](
auto
output
)
{
result_vector
.
assign
(
output
.
begin
(),
output
.
end
());
});
tmp
=
{
-
2.523
,
1.863
,
0.1681
,
-
4.125
,
-
3.348
,
-
1.582
,
-
2.182
,
-
0.8374
,
-
0.789
,
-
0.6934
,
-
0.7134
,
-
0.628
,
0.8374
,
1.697
,
1.949
,
0.7837
,
0.4927
,
0.771
,
-
1.956
,
-
2.123
,
-
0.664
,
-
0.583
,
-
0.7207
,
-
0.5127
};
std
::
vector
<
migraphx
::
half
>
gold
{
tmp
.
cbegin
(),
tmp
.
cend
()};
EXPECT
(
migraphx
::
verify_range
(
result_vector
,
gold
));
}
TEST_CASE
(
batch_norm_2d_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"batch_norm_2d_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{});
migraphx
::
shape
x_shape
{
migraphx
::
shape
::
float_type
,
{
2
,
3
,
4
,
4
}};
migraphx
::
shape
c_shape
(
migraphx
::
shape
::
float_type
,
{
3
});
std
::
vector
<
float
>
x_data
=
{
1.6524342
,
-
0.51048076
,
0.32543048
,
2.4410043
,
2.0833702
,
0.44981122
,
1.0044622
,
-
0.24006313
,
-
0.43065986
,
0.07626268
,
-
0.02927135
,
0.42347777
,
-
0.00449735
,
-
0.4281568
,
-
0.5527635
,
0.02204161
,
-
1.4719028
,
-
1.7298799
,
0.79596406
,
0.9505461
,
0.23115851
,
0.6639593
,
-
0.06963254
,
1.0348768
,
-
1.336926
,
-
1.0679098
,
0.10368501
,
0.20240043
,
-
0.70175606
,
-
0.8859727
,
0.30854642
,
-
0.36574763
,
-
0.9463552
,
0.9476916
,
0.37686515
,
-
0.05184272
,
-
0.7151244
,
-
0.37341377
,
0.59440356
,
0.10051094
,
-
0.20755945
,
0.9098465
,
1.1664004
,
1.4075205
,
-
1.1522529
,
-
0.34607422
,
0.32027543
,
-
0.6885485
,
0.5404544
,
0.10012514
,
0.8767704
,
1.0032021
,
-
1.2755303
,
0.23577735
,
0.74239916
,
1.0146079
,
0.60875916
,
-
0.29163074
,
1.4872868
,
0.20466477
,
-
0.26367408
,
-
0.56394804
,
-
0.56043875
,
0.7763664
,
-
0.9626441
,
0.29653943
,
-
3.2231965
,
0.03322164
,
0.03402911
,
0.77308357
,
-
0.0654009
,
-
0.30463725
,
0.22182712
,
-
0.22594836
,
-
0.5807543
,
-
0.22390617
,
-
0.24484141
,
-
2.0761833
,
1.8459716
,
0.2455878
,
0.99913245
,
-
0.9266217
,
-
0.1938893
,
0.6417983
,
-
1.0880078
,
0.49565446
,
2.1584804
,
1.2276239
,
3.3091128
,
0.14217089
,
0.9425477
,
0.07578196
,
0.4067431
,
0.71984154
,
-
0.20796849
,
0.90003085
};
std
::
vector
<
float
>
scale_data
=
{
0.658487
,
0.03700604
,
2.463201
};
std
::
vector
<
float
>
bias_data
=
{
0.03497279
,
0.17080553
,
0.5636415
};
std
::
vector
<
float
>
mean_data
=
{
0.1954783
,
0.6203974
,
0.8116831
};
std
::
vector
<
float
>
variance_data
=
{
0.30558077
,
0.04536599
,
0.05461315
};
migraphx
::
parameter_map
params
;
params
[
"x"
]
=
migraphx
::
argument
(
x_shape
,
x_data
.
data
());
params
[
"scale"
]
=
migraphx
::
argument
(
c_shape
,
scale_data
.
data
());
params
[
"bias"
]
=
migraphx
::
argument
(
c_shape
,
bias_data
.
data
());
params
[
"mean"
]
=
migraphx
::
argument
(
c_shape
,
mean_data
.
data
());
params
[
"variance"
]
=
migraphx
::
argument
(
c_shape
,
variance_data
.
data
());
auto
result
=
p
.
eval
(
params
).
back
();
std
::
vector
<
float
>
result_vector
;
result
.
visit
([
&
](
auto
output
)
{
result_vector
.
assign
(
output
.
begin
(),
output
.
end
());
});
std
::
vector
<
float
>
gold
=
{
1.77046824e+00
,
-
8.05950999e-01
,
1.89769119e-01
,
2.70979643e+00
,
2.28379035e+00
,
3.37928861e-01
,
9.98617530e-01
,
-
4.83835101e-01
,
-
7.10869908e-01
,
-
1.07034385e-01
,
-
2.32744321e-01
,
3.06560963e-01
,
-
2.03234047e-01
,
-
7.07888365e-01
,
-
8.56317282e-01
,
-
1.71621382e-01
,
-
1.92677066e-01
,
-
2.37493858e-01
,
2.01305658e-01
,
2.28160262e-01
,
1.03185430e-01
,
1.78373277e-01
,
5.09308279e-02
,
2.42810518e-01
,
-
1.69228360e-01
,
-
1.22493818e-01
,
8.10402334e-02
,
9.81894583e-02
,
-
5.88841513e-02
,
-
9.08869803e-02
,
1.16629556e-01
,
-
5.11445105e-04
,
-
1.79648399e+01
,
1.99707508e+00
,
-
4.01903248e+00
,
-
8.53731060e+00
,
-
1.55278311e+01
,
-
1.19264421e+01
,
-
1.72633123e+00
,
-
6.93161058e+00
,
-
1.01784554e+01
,
1.59821415e+00
,
4.30211163e+00
,
6.84334660e+00
,
-
2.01348572e+01
,
-
1.16383028e+01
,
-
4.61544800e+00
,
-
1.52477398e+01
,
4.45901126e-01
,
-
7.86099210e-02
,
8.46513629e-01
,
9.97116446e-01
,
-
1.71726203e+00
,
8.29761624e-02
,
6.86453462e-01
,
1.01070285e+00
,
5.27264357e-01
,
-
5.45261383e-01
,
1.57374811e+00
,
4.59154993e-02
,
-
5.11959970e-01
,
-
8.69639993e-01
,
-
8.65459919e-01
,
7.26914644e-01
,
-
1.04206637e-01
,
1.14543661e-01
,
-
4.96918678e-01
,
6.87990561e-02
,
6.89393356e-02
,
1.97330773e-01
,
5.16659655e-02
,
1.01048872e-02
,
1.01564340e-01
,
2.37750299e-02
,
-
3.78632471e-02
,
2.41298079e-02
,
2.04928555e-02
,
-
2.97655046e-01
,
3.83717060e-01
,
1.05692141e-01
,
2.53922558e+00
,
-
1.77568626e+01
,
-
1.00343809e+01
,
-
1.22682428e+00
,
-
1.94577579e+01
,
-
2.76707697e+00
,
1.47579327e+01
,
4.94736385e+00
,
2.68847847e+01
,
-
6.49254417e+00
,
1.94286156e+00
,
-
7.19223642e+00
,
-
3.70413971e+00
,
-
4.04303551e-01
,
-
1.01827660e+01
,
1.49476433e+00
};
EXPECT
(
migraphx
::
verify_range
(
result_vector
,
gold
));
}
TEST_CASE
(
batch_norm_3d_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"batch_norm_3d_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{});
migraphx
::
shape
x_shape
{
migraphx
::
shape
::
half_type
,
{
2
,
2
,
2
,
2
,
2
}};
migraphx
::
shape
c_shape
(
migraphx
::
shape
::
half_type
,
{
2
});
// using migraphx::half copy conversion since it doesn't have initializer_list constructor
std
::
vector
<
float
>
tmp
=
{
5.
,
5.
,
8.
,
7.
,
3.
,
4.
,
1.
,
7.
,
5.
,
5.
,
9.
,
4.
,
7.
,
2.
,
2.
,
2.
,
6.
,
1.
,
4.
,
9.
,
2.
,
8.
,
0.
,
2.
,
1.
,
4.
,
8.
,
8.
,
3.
,
3.
,
0.
,
8.
};
std
::
vector
<
migraphx
::
half
>
x_data
{
tmp
.
cbegin
(),
tmp
.
cend
()};
tmp
=
{
1.
,
1.
};
std
::
vector
<
migraphx
::
half
>
scale_data
{
tmp
.
cbegin
(),
tmp
.
cend
()};
tmp
=
{
0.
,
0.
,
};
std
::
vector
<
migraphx
::
half
>
bias_data
{
tmp
.
cbegin
(),
tmp
.
cend
()};
tmp
=
{
-
0.75
,
0.29
};
std
::
vector
<
migraphx
::
half
>
mean_data
{
tmp
.
cbegin
(),
tmp
.
cend
()};
tmp
=
{
0.31
,
0.37
};
std
::
vector
<
migraphx
::
half
>
variance_data
{
tmp
.
cbegin
(),
tmp
.
cend
()};
migraphx
::
parameter_map
params
;
params
[
"x"
]
=
migraphx
::
argument
(
x_shape
,
x_data
.
data
());
params
[
"scale"
]
=
migraphx
::
argument
(
c_shape
,
scale_data
.
data
());
params
[
"bias"
]
=
migraphx
::
argument
(
c_shape
,
bias_data
.
data
());
params
[
"mean"
]
=
migraphx
::
argument
(
c_shape
,
mean_data
.
data
());
params
[
"variance"
]
=
migraphx
::
argument
(
c_shape
,
variance_data
.
data
());
auto
result
=
p
.
eval
(
params
).
back
();
std
::
vector
<
migraphx
::
half
>
result_vector
;
result
.
visit
([
&
](
auto
output
)
{
result_vector
.
assign
(
output
.
begin
(),
output
.
end
());
});
tmp
=
{
10.33
,
10.33
,
15.71
,
13.914
,
6.734
,
8.53
,
3.143
,
13.914
,
7.742
,
7.742
,
14.32
,
6.098
,
11.03
,
2.81
,
2.81
,
2.81
,
12.125
,
3.143
,
8.53
,
17.52
,
4.938
,
15.71
,
1.347
,
4.938
,
1.167
,
6.098
,
12.67
,
12.67
,
4.453
,
4.453
,
-
0.4768
,
12.67
};
std
::
vector
<
migraphx
::
half
>
gold
{
tmp
.
cbegin
(),
tmp
.
cend
()};
EXPECT
(
migraphx
::
verify_range
(
result_vector
,
gold
));
}
TEST_CASE
(
celu_verify_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"celu_verify_test.onnx"
);
...
...
@@ -223,6 +451,94 @@ TEST_CASE(gather_elements)
EXPECT
(
migraphx
::
verify_range
(
result_vector
,
gold
));
}
TEST_CASE(gemm_test)
{
    // Runs gemm_brcst_C_test.onnx (Gemm whose C operand is broadcast to the
    // output shape) on the reference target and compares against gold values.
    migraphx::program p = migraphx::parse_onnx("gemm_brcst_C_test.onnx");
    p.compile(migraphx::ref::target{});

    migraphx::shape a_shape{migraphx::shape::float_type, {5, 6}};
    std::vector<float> a_data = {
        0.26472837, 0.8525864,   0.41929847, 0.14151508, 0.43216065,  0.67468566,
        0.42488748, 0.82021785,  0.9782456,  0.5794279,  0.6627283,   0.4790396,
        0.9237051,  0.7340607,   0.67379653, 0.87168175, 0.37324256,  0.33278653,
        0.42736676, 0.024699844, 0.75851107, 0.48719302, 0.5834426,   0.6938476,
        0.43747696, 0.24054702,  0.26912406, 0.6760658,  0.5419149,   0.89949054};

    migraphx::shape b_shape{migraphx::shape::float_type, {5, 7}};
    std::vector<float> b_data = {
        0.65727437,  0.54262096, 0.14126152, 0.8994123,  0.21831702,  0.81191784, 0.9371278,
        0.3438551,   0.7121373,  0.90316695, 0.26614252, 0.80144906,  0.80301756, 0.49930334,
        0.0719704,   0.63484156, 0.7343097,  0.32130218, 0.7094916,   0.6116475,  0.74144083,
        0.021210382, 0.38724765, 0.44830495, 0.62347615, 0.022489505, 0.23316588, 0.76540905,
        0.895689,    0.81540287, 0.223875,   0.9275573,  0.4621397,   0.70785195, 0.5658555};

    migraphx::shape c_shape{migraphx::shape::float_type, {6, 1}};
    std::vector<float> c_data = {
        0.07358502, 0.13792239, 0.8574055, 0.40553397, 0.38205826, 0.62062204};

    migraphx::parameter_map params;
    params["A"] = migraphx::argument(a_shape, a_data.data());
    params["B"] = migraphx::argument(b_shape, b_data.data());
    params["C"] = migraphx::argument(c_shape, c_data.data());

    auto result = p.eval(params).back();
    std::vector<float> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<float> gold = {
        0.45261115, 0.83629227, 0.7533463,  0.7189715,  0.69160205, 0.824082,  0.9187499,
        0.6659525,  0.96956736, 0.84293026, 0.8400868,  0.84835225, 1.0982862, 1.0642393,
        1.1447254,  1.6184721,  1.6048342,  1.4741788,  1.4334437,  1.638659,  1.7428316,
        0.8098607,  1.2157929,  1.1010075,  1.0706307,  1.0429881,  1.1771785, 1.2362702,
        0.8239243,  1.1112559,  0.9639262,  1.0813537,  0.8825792,  1.121141,  1.1885703,
        1.2227502,  1.4568202,  1.1388762,  1.55058,    1.0958102,  1.4637487, 1.5756242};
    EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(gemm_half_test)
{
    // Half-precision Gemm: values are staged in a double vector and converted,
    // since migraphx::half has no initializer_list constructor.
    migraphx::program p = migraphx::parse_onnx("gemm_half_test.onnx");
    p.compile(migraphx::ref::target{});

    migraphx::shape a_shape{migraphx::shape::half_type, {8, 6}};
    std::vector<double> tmp = {
        0.2646, 0.8525, 0.4192, 0.1415, 0.4321, 0.675,  0.4248, 0.8203,
        0.978,  0.5796, 0.6626, 0.479,  0.924,  0.734,  0.674,  0.8716,
        0.3733, 0.3328, 0.4272, 0.0247, 0.7583, 0.4873, 0.5835, 0.694,
        0.4375, 0.2406, 0.269,  0.6763, 0.542,  0.8994, 0.657,  0.5425,
        0.1412, 0.8994, 0.2183, 0.812,  0.937,  0.3438, 0.712,  0.9033,
        0.266,  0.8013, 0.803,  0.4993, 0.07196, 0.635, 0.7344, 0.3213};
    std::vector<migraphx::half> a_data{tmp.cbegin(), tmp.cend()};

    migraphx::shape b_shape{migraphx::shape::half_type, {8, 7}};
    tmp = {0.7095,  0.612,   0.741,  0.02121, 0.3872,  0.4482, 0.6235,
           0.02249, 0.2332,  0.7656, 0.8955,  0.8154,  0.2239, 0.9277,
           0.4622,  0.708,   0.566,  0.0736,  0.138,   0.8574, 0.4055,
           0.382,   0.6206,  0.424,  0.3674,  0.435,   0.998,  0.3594,
           0.701,   0.6216,  0.01826, 0.6313, 0.514,   0.1095, 0.3203,
           0.01636, 0.537,   0.01952, 0.4502, 0.8965,  0.5415, 0.7456,
           0.793,   0.756,   0.9,    0.5264,  0.05368, 0.4214, 0.276,
           0.1517,  0.08453, 0.83,   0.417,   0.1682,  0.845,  0.1729};
    std::vector<migraphx::half> b_data{tmp.cbegin(), tmp.cend()};

    migraphx::shape c_shape{migraphx::shape::half_type, {6, 1}};
    tmp = {0.10846, 0.672, 0.527, 0.94, 0.429, 0.2291};
    std::vector<migraphx::half> c_data{tmp.cbegin(), tmp.cend()};

    migraphx::parameter_map params;
    params["A"] = migraphx::argument(a_shape, a_data.data());
    params["B"] = migraphx::argument(b_shape, b_data.data());
    params["C"] = migraphx::argument(c_shape, c_data.data());

    auto result = p.eval(params).back();
    std::vector<migraphx::half> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    tmp = {1.071,  1.378, 1.465, 1.093, 0.968,  1.542, 1.145, 1.287, 1.533, 1.75,
           1.338,  1.449, 1.592, 1.668, 1.265,  1.531, 1.656, 1.348, 1.2705, 1.525,
           1.479,  1.754, 2.143, 2.062, 1.921,  1.836, 2.203, 1.952, 1.055, 1.225,
           1.418,  1.209, 1.155, 1.42,  1.234,  1.302, 1.593, 1.368, 1.289, 1.327,
           1.451,  1.394};
    std::vector<migraphx::half> gold{tmp.cbegin(), tmp.cend()};
    EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE
(
greaterorequal_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"greaterorequal_test.onnx"
);
...
...
test/op_shape_test.cpp
View file @
870a396b
...
...
@@ -81,14 +81,70 @@ void throws_shape(const migraphx::shape&, Ts...)
"An expected shape should not be passed to throws_shape function"
);
}
TEST_CASE
(
batch_norm_inference_shape
)
TEST_CASE
(
argmax_axis0
)
{
const
size_t
channels
=
3
;
migraphx
::
shape
s
{
migraphx
::
shape
::
float_type
,
{
4
,
channels
,
3
,
3
}};
migraphx
::
shape
vars
{
migraphx
::
shape
::
float_type
,
{
channels
}};
expect_shape
(
s
,
migraphx
::
make_op
(
"batch_norm_inference"
),
s
,
vars
,
vars
,
vars
,
vars
);
throws_shape
(
migraphx
::
make_op
(
"batch_norm_inference"
),
s
);
throws_shape
(
migraphx
::
make_op
(
"batch_norm_inference"
),
s
,
vars
,
vars
,
vars
,
vars
,
vars
);
migraphx
::
shape
input
{
migraphx
::
shape
::
half_type
,
{
2
,
3
,
4
,
5
}};
expect_shape
(
migraphx
::
shape
{
migraphx
::
shape
::
int64_type
,
{
1
,
3
,
4
,
5
}},
migraphx
::
make_op
(
"argmax"
,
{{
"axis"
,
0
}}),
input
);
}
TEST_CASE(argmax_axis1)
{
    // argmax collapses the selected axis to 1 and produces int64 indices.
    migraphx::shape input{migraphx::shape::half_type, {2, 3, 4, 5}};
    expect_shape(migraphx::shape{migraphx::shape::int64_type, {2, 1, 4, 5}},
                 migraphx::make_op("argmax", {{"axis", 1}}),
                 input);
}
TEST_CASE(argmax_axis2)
{
    // Reducing along axis 2 of a 4-D float input.
    migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
    expect_shape(migraphx::shape{migraphx::shape::int64_type, {2, 3, 1, 5}},
                 migraphx::make_op("argmax", {{"axis", 2}}),
                 input);
}
TEST_CASE(argmax_axis_neg)
{
    // A negative axis counts from the end: -1 reduces the last dimension.
    migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
    expect_shape(migraphx::shape{migraphx::shape::int64_type, {2, 3, 4, 1}},
                 migraphx::make_op("argmax", {{"axis", -1}}),
                 input);
}
TEST_CASE(argmax_axis_outofbounds)
{
    // axis == rank is out of range for a 4-D input and must throw.
    migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
    throws_shape(migraphx::make_op("argmax", {{"axis", 4}}), input);
}
TEST_CASE(argmax_dyn0)
{
    // Dynamic input: the reduced axis becomes {1, 1}; other dims pass through.
    migraphx::shape input{migraphx::shape::float_type,
                          {{1, 4, 0}, {3, 3, 0}, {4, 4, 0}, {5, 5, 0}}};
    expect_shape(
        migraphx::shape{migraphx::shape::int64_type, {{1, 4, 0}, {1, 1, 0}, {4, 4, 0}, {5, 5, 0}}},
        migraphx::make_op("argmax", {{"axis", 1}}),
        input);
}
TEST_CASE(argmax_dyn1)
{
    // Dynamic input with ranged dims; only the reduced axis collapses.
    migraphx::shape input{migraphx::shape::float_type,
                          {{1, 4, 0}, {3, 3, 0}, {4, 6, 0}, {4, 6, 0}}};
    expect_shape(
        migraphx::shape{migraphx::shape::int64_type, {{1, 4, 0}, {3, 3, 0}, {1, 1, 0}, {4, 6, 0}}},
        migraphx::make_op("argmax", {{"axis", 2}}),
        input);
}
TEST_CASE(binary_dyn_static_error)
{
    // A binary op given one static and one dynamic input shape must throw.
    migraphx::shape a_shape{migraphx::shape::float_type, {1, 4, 4}};
    std::vector<migraphx::shape::dynamic_dimension> b{{1, 1, 0}, {4, 4, 4}, {4, 4, 0}};
    migraphx::shape b_shape{migraphx::shape::float_type, b};
    throws_shape(migraphx::make_op("add"), a_shape, b_shape);
}
TEST_CASE
(
broadcast
)
...
...
@@ -128,6 +184,69 @@ TEST_CASE(broadcast)
}
}
TEST_CASE(broadcast_axis_out_of_range_error)
{
    // axis=4 does not exist in the 2-D output described by out_lens.
    std::vector<std::size_t> lens{1, 1};
    migraphx::shape input{migraphx::shape::float_type, {1}, {0}};
    throws_shape(migraphx::make_op("broadcast", {{"axis", 4}, {"out_lens", lens}}), input);
}
TEST_CASE(broadcast_2in_static_static)
{
    // Two static inputs: the 1-D input is placed along the given axis of the
    // 2-D input (stride 0 on the broadcast dim); axis past the rank throws.
    migraphx::shape a_input{migraphx::shape::float_type, {4}, {1}};
    migraphx::shape b_input{migraphx::shape::float_type, {4, 4}, {4, 1}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {4, 4}, {1, 0}},
                 migraphx::make_op("broadcast", {{"axis", 0}}),
                 a_input,
                 b_input);
    expect_shape(migraphx::shape{migraphx::shape::float_type, {4, 4}, {0, 1}},
                 migraphx::make_op("broadcast", {{"axis", 1}}),
                 a_input,
                 b_input);
    throws_shape(migraphx::make_op("broadcast", {{"axis", 2}}), a_input, b_input);
}
TEST_CASE(broadcast_2in_not_matching_error)
{
    // Length 4 cannot be broadcast into a dimension of length 2.
    migraphx::shape a_input{migraphx::shape::float_type, {4}, {1}};
    migraphx::shape b_input{migraphx::shape::float_type, {2, 2}, {2, 1}};
    throws_shape(migraphx::make_op("broadcast", {{"axis", 1}}), a_input, b_input);
}
TEST_CASE(broadcast_2in_dynamic_s0_error1)
{
    // First input dynamic with ndim > 1 is rejected (note the argument order).
    migraphx::shape a_input{migraphx::shape::float_type, {4, 2}, {2, 1}};
    migraphx::shape b_input{migraphx::shape::float_type, {{1, 4, 0}, {4, 4, 0}, {2, 2, 0}}};
    throws_shape(migraphx::make_op("broadcast", {{"axis", 0}}), b_input, a_input);
}
TEST_CASE(broadcast_2in_dynamic_s0_error2)
{
    // A dynamic 1-D first input against a static 2-D second input must throw.
    std::vector<migraphx::shape::dynamic_dimension> dd{{4, 4, 0}};
    migraphx::shape a_input{migraphx::shape::float_type, dd};
    migraphx::shape b_input{migraphx::shape::float_type, {4, 4}, {4, 1}};
    throws_shape(migraphx::make_op("broadcast", {{"axis", 0}}), a_input, b_input);
}
TEST_CASE(broadcast_2in_static_dyn)
{
    // Static 1-D input against a dynamic target: only axis 1 (where the
    // lengths agree) succeeds, and the result keeps the dynamic dims.
    migraphx::shape a_input{migraphx::shape::float_type, {4}, {1}};
    migraphx::shape b_input{migraphx::shape::float_type, {{1, 4, 0}, {4, 4, 0}, {2, 2, 0}}};
    throws_shape(migraphx::make_op("broadcast", {{"axis", 0}}), a_input, b_input);
    expect_shape(
        migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {4, 4, 0}, {2, 2, 0}}},
        migraphx::make_op("broadcast", {{"axis", 1}}),
        a_input,
        b_input);
    throws_shape(migraphx::make_op("broadcast", {{"axis", 2}}), a_input, b_input);
}
TEST_CASE(broadcast_2in_dyn_s0_ndim_greater_than_1_error)
{
    // A 2-D first input cannot be axis-broadcast against a dynamic target.
    migraphx::shape a_input{migraphx::shape::float_type, {4, 2}};
    migraphx::shape b_input{migraphx::shape::float_type, {{1, 4, 0}, {4, 4, 0}, {2, 2, 0}}};
    throws_shape(migraphx::make_op("broadcast", {{"axis", 0}}), a_input, b_input);
}
TEST_CASE
(
convolution_shape
)
{
migraphx
::
shape
output
{
migraphx
::
shape
::
float_type
,
{
4
,
4
,
1
,
1
}};
...
...
@@ -261,8 +380,7 @@ TEST_CASE(convolution_shape)
migraphx
::
make_op
(
"convolution"
,
{{
"stride"
,
{
1
,
1
}},
{
"dilation"
,
{
1
,
1
}},
{
"padding_mode"
,
migraphx
::
op
::
padding_mode_t
::
same_upper
},
{
"use_dynamic_same_auto_pad"
,
true
}}),
{
"padding_mode"
,
migraphx
::
op
::
padding_mode_t
::
same_upper
}}),
input_dyn_shape
,
weights_shape
);
...
...
@@ -275,8 +393,7 @@ TEST_CASE(convolution_shape)
migraphx
::
make_op
(
"convolution"
,
{{
"stride"
,
{
1
,
1
}},
{
"dilation"
,
{
1
,
1
}},
{
"padding_mode"
,
migraphx
::
op
::
padding_mode_t
::
same_upper
},
{
"use_dynamic_same_auto_pad"
,
true
}}),
{
"padding_mode"
,
migraphx
::
op
::
padding_mode_t
::
same_upper
}}),
input_dyn_shape
,
weights_shape
);
...
...
@@ -290,8 +407,7 @@ TEST_CASE(convolution_shape)
migraphx
::
make_op
(
"convolution"
,
{{
"stride"
,
{
1
,
1
}},
{
"dilation"
,
{
1
,
1
}},
{
"padding_mode"
,
migraphx
::
op
::
padding_mode_t
::
same_lower
},
{
"use_dynamic_same_auto_pad"
,
true
}}),
{
"padding_mode"
,
migraphx
::
op
::
padding_mode_t
::
same_lower
}}),
input_dyn_shape
,
weights_shape
);
}
...
...
@@ -307,6 +423,12 @@ TEST_CASE(contiguous_shape)
expect_shape
(
single
,
migraphx
::
make_op
(
"contiguous"
),
single
);
}
TEST_CASE(contiguous_dyn_shape)
{
    // contiguous is shape-preserving, dynamic input included.
    migraphx::shape s0{migraphx::shape::float_type, {{1, 4, 0}, {2, 2, 2}}};
    expect_shape(s0, migraphx::make_op("contiguous"), s0);
}
TEST_CASE
(
contiguous_shape_scalar
)
{
migraphx
::
shape
output
{
migraphx
::
shape
::
float_type
};
...
...
@@ -345,6 +467,199 @@ TEST_CASE(deconvolution_shape)
weights_3d
);
}
TEST_CASE(dot_ndim_error0)
{
    // Two 1-D operands are not valid for dot.
    migraphx::shape s_m1{migraphx::shape::float_type, {5}};
    migraphx::shape s_m2{migraphx::shape::float_type, {5}};
    throws_shape(migraphx::make_op("dot"), s_m1, s_m2);
}
TEST_CASE(dot_ndim_error1)
{
    // 1-D by 2-D is rejected.
    migraphx::shape s_m1{migraphx::shape::float_type, {5}};
    migraphx::shape s_m2{migraphx::shape::float_type, {5, 2}};
    throws_shape(migraphx::make_op("dot"), s_m1, s_m2);
}
TEST_CASE(dot_ndim_error2)
{
    // 2-D by 1-D is rejected.
    migraphx::shape s_m1{migraphx::shape::float_type, {1, 5}};
    migraphx::shape s_m2{migraphx::shape::float_type, {5}};
    throws_shape(migraphx::make_op("dot"), s_m1, s_m2);
}
TEST_CASE(dot_ndim_error3)
{
    // Mismatched ranks (2-D vs 3-D) are rejected.
    migraphx::shape s_m1{migraphx::shape::float_type, {1, 5}};
    migraphx::shape s_m2{migraphx::shape::float_type, {6, 5, 4}};
    throws_shape(migraphx::make_op("dot"), s_m1, s_m2);
}
TEST_CASE(dot_ndim_error4)
{
    // Mismatched ranks (2-D vs 4-D) are rejected.
    migraphx::shape s_m1{migraphx::shape::float_type, {4, 5}};
    migraphx::shape s_m2{migraphx::shape::float_type, {1, 1, 5, 7}};
    throws_shape(migraphx::make_op("dot"), s_m1, s_m2);
}
TEST_CASE(dot_mismatch_inner_error0)
{
    // Inner dimensions 5 and 10 do not agree.
    migraphx::shape s_m1{migraphx::shape::float_type, {4, 5}};
    migraphx::shape s_m2{migraphx::shape::float_type, {10, 8}};
    throws_shape(migraphx::make_op("dot"), s_m1, s_m2);
}
TEST_CASE(dot_mismatch_inner_error1)
{
    // Inner dimensions 6 and 5 do not agree.
    migraphx::shape s_m1{migraphx::shape::float_type, {4, 6}};
    migraphx::shape s_m2{migraphx::shape::float_type, {5, 8}};
    throws_shape(migraphx::make_op("dot"), s_m1, s_m2);
}
TEST_CASE(dot_mismatch_inner_error2)
{
    // Inner dimensions 5 and 4 do not agree.
    migraphx::shape s_m1{migraphx::shape::float_type, {1, 5}};
    migraphx::shape s_m2{migraphx::shape::float_type, {4, 4}};
    throws_shape(migraphx::make_op("dot"), s_m1, s_m2);
}
TEST_CASE(dot_mismatch_inner_error3)
{
    // 4-D operands with mismatching batch dims (1 vs 2) are rejected.
    migraphx::shape s_m1{migraphx::shape::float_type, {1, 1, 4, 5}};
    migraphx::shape s_m2{migraphx::shape::float_type, {1, 2, 5, 7}};
    throws_shape(migraphx::make_op("dot"), s_m1, s_m2);
}
TEST_CASE(dot_mismatch_outer_error)
{
    // Leading (batch) dimensions 1 vs 2 do not agree.
    migraphx::shape s_m1{migraphx::shape::float_type, {1, 4, 6}};
    migraphx::shape s_m2{migraphx::shape::float_type, {2, 5, 8}};
    throws_shape(migraphx::make_op("dot"), s_m1, s_m2);
}
TEST_CASE(dot_2D_test0)
{
    // (4x5) . (5x8) -> (4x8)
    migraphx::shape s_m1{migraphx::shape::float_type, {4, 5}};
    migraphx::shape s_m2{migraphx::shape::float_type, {5, 8}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {4, 8}},
                 migraphx::make_op("dot"),
                 s_m1,
                 s_m2);
}
TEST_CASE(dot_2D_test1)
{
    // (1x5) . (5x4) -> (1x4)
    migraphx::shape s_m1{migraphx::shape::float_type, {1, 5}};
    migraphx::shape s_m2{migraphx::shape::float_type, {5, 4}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {1, 4}},
                 migraphx::make_op("dot"),
                 s_m1,
                 s_m2);
}
TEST_CASE(dot_2D_test2)
{
    // (4x5) . (5x8) -> (4x8)
    migraphx::shape s_m1{migraphx::shape::float_type, {4, 5}};
    migraphx::shape s_m2{migraphx::shape::float_type, {5, 8}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {4, 8}},
                 migraphx::make_op("dot"),
                 s_m1,
                 s_m2);
}
TEST_CASE(dot_2D_test3)
{
    // Degenerate (1x1) . (1x1) -> (1x1)
    migraphx::shape s_m1{migraphx::shape::float_type, {1, 1}};
    migraphx::shape s_m2{migraphx::shape::float_type, {1, 1}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {1, 1}},
                 migraphx::make_op("dot"),
                 s_m1,
                 s_m2);
}
TEST_CASE(dot_3D_test0)
{
    // Batched: (1x4x5) . (1x5x8) -> (1x4x8)
    migraphx::shape s_m1{migraphx::shape::float_type, {1, 4, 5}};
    migraphx::shape s_m2{migraphx::shape::float_type, {1, 5, 8}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {1, 4, 8}},
                 migraphx::make_op("dot"),
                 s_m1,
                 s_m2);
}
TEST_CASE(dot_3D_test_1)
{
    // Batched: (6x1x5) . (6x5x4) -> (6x1x4)
    migraphx::shape s_m1{migraphx::shape::float_type, {6, 1, 5}};
    migraphx::shape s_m2{migraphx::shape::float_type, {6, 5, 4}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {6, 1, 4}},
                 migraphx::make_op("dot"),
                 s_m1,
                 s_m2);
}
TEST_CASE(dot_3D_test2)
{
    // Batched: (1x4x5) . (1x5x7) -> (1x4x7)
    migraphx::shape s_m1{migraphx::shape::float_type, {1, 4, 5}};
    migraphx::shape s_m2{migraphx::shape::float_type, {1, 5, 7}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {1, 4, 7}},
                 migraphx::make_op("dot"),
                 s_m1,
                 s_m2);
}
TEST_CASE(dot_4D_test)
{
    // Batched 4-D: (1x6x1x5) . (1x6x5x4) -> (1x6x1x4)
    migraphx::shape s_m1{migraphx::shape::float_type, {1, 6, 1, 5}};
    migraphx::shape s_m2{migraphx::shape::float_type, {1, 6, 5, 4}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {1, 6, 1, 4}},
                 migraphx::make_op("dot"),
                 s_m1,
                 s_m2);
}
TEST_CASE(dot_dyn_static_test0)
{
    // Dynamic rows, static inner/outer: ({1..4}x5) . (5x8) -> ({1..4}x8)
    migraphx::shape s_m1{migraphx::shape::float_type, {{1, 4, 0}, {5, 5, 0}}};
    migraphx::shape s_m2{migraphx::shape::float_type, {5, 8}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {8, 8, 0}}},
                 migraphx::make_op("dot"),
                 s_m1,
                 s_m2);
}
TEST_CASE(dot_dyn_static_mismatch_error)
{
    // A dynamic 4-D operand against a static 2-D operand must throw.
    migraphx::shape s_m1{migraphx::shape::float_type,
                         {{1, 4, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}};
    migraphx::shape s_m2{migraphx::shape::float_type, {5, 8}};
    throws_shape(migraphx::make_op("dot"), s_m1, s_m2);
}
TEST_CASE(dot_dyn_dyn_test0)
{
    // Both operands dynamic with fixed, matching inner dimension.
    migraphx::shape s_m1{migraphx::shape::float_type, {{1, 4, 0}, {5, 5, 0}}};
    migraphx::shape s_m2{migraphx::shape::float_type, {{5, 5, 0}, {6, 8, 8}}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {6, 8, 8}}},
                 migraphx::make_op("dot"),
                 s_m1,
                 s_m2);
}
TEST_CASE(dot_dyn_dyn_test1)
{
    // Both operands dynamic; ranged inner dimensions {4..5} agree.
    migraphx::shape s_m1{migraphx::shape::float_type, {{1, 4, 0}, {4, 5, 5}}};
    migraphx::shape s_m2{migraphx::shape::float_type, {{4, 5, 5}, {6, 8, 8}}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {6, 8, 8}}},
                 migraphx::make_op("dot"),
                 s_m1,
                 s_m2);
}
TEST_CASE(dot_dyn_mismatch_test0)
{
    // Dynamic 3-D against static 3-D must throw.
    migraphx::shape s_m1{migraphx::shape::float_type, {{1, 4, 0}, {5, 5, 0}, {5, 5, 0}}};
    migraphx::shape s_m2{migraphx::shape::float_type, {1, 5, 8}};
    throws_shape(migraphx::make_op("dot"), s_m1, s_m2);
}
TEST_CASE(dot_dyn_mismatch_test1)
{
    // Dynamic inner dim {2..5} against static 3-D must throw.
    migraphx::shape s_m1{migraphx::shape::float_type, {{4, 4, 0}, {5, 5, 0}, {2, 5, 0}}};
    migraphx::shape s_m2{migraphx::shape::float_type, {4, 5, 8}};
    throws_shape(migraphx::make_op("dot"), s_m1, s_m2);
}
TEST_CASE
(
flatten_shape
)
{
migraphx
::
shape
input
{
migraphx
::
shape
::
float_type
,
{
2
,
4
,
6
,
8
}};
...
...
@@ -373,6 +688,62 @@ TEST_CASE(flatten_shape)
throws_shape
(
migraphx
::
make_op
(
"flatten"
,
{{
"axis"
,
-
5
}}),
input
);
}
TEST_CASE(flatten_dyn_axis0)
{
    // axis 0 (equivalently -4): all dims are flattened into the trailing dim.
    migraphx::shape input{migraphx::shape::float_type,
                          {{1, 4, 0}, {4, 4, 0}, {6, 6, 0}, {8, 8, 0}}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{1, 1, 0}, {192, 768, 0}}},
                 migraphx::make_op("flatten", {{"axis", 0}}),
                 input);
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{1, 1, 0}, {192, 768, 0}}},
                 migraphx::make_op("flatten", {{"axis", -4}}),
                 input);
}
TEST_CASE(flatten_dyn_axis1)
{
    // axis 1 (equivalently -3): dims after the first multiply together.
    migraphx::shape input{migraphx::shape::float_type,
                          {{2, 2, 2}, {4, 4, 0}, {4, 6, 5}, {4, 6, 5}}};
    expect_shape(
        migraphx::shape{migraphx::shape::float_type, {{2, 2, 2}, {4 * 4 * 4, 4 * 6 * 6, 0}}},
        migraphx::make_op("flatten", {{"axis", 1}}),
        input);
    expect_shape(
        migraphx::shape{migraphx::shape::float_type, {{2, 2, 2}, {4 * 4 * 4, 4 * 6 * 6, 0}}},
        migraphx::make_op("flatten", {{"axis", -3}}),
        input);
}
TEST_CASE(flatten_dyn_axis2)
{
    // axis 2 splits the four dims two-and-two; each half is multiplied out.
    migraphx::shape input{migraphx::shape::float_type,
                          {{2, 2, 2}, {4, 4, 0}, {4, 6, 5}, {4, 6, 5}}};
    expect_shape(
        migraphx::shape{migraphx::shape::float_type,
                        {{2 * 4, 2 * 4, 0}, {4 * 4, 6 * 6, 5 * 5}}},
        migraphx::make_op("flatten", {{"axis", 2}}),
        input);
}
TEST_CASE(flatten_dyn_axis3)
{
    // axis 3: the first three dims collapse; the last is kept.
    migraphx::shape input{migraphx::shape::float_type,
                          {{1, 4, 0}, {4, 4, 0}, {6, 6, 0}, {8, 8, 0}}};
    expect_shape(
        migraphx::shape{migraphx::shape::float_type,
                        {{1 * 4 * 6, 4 * 4 * 6, 0}, {8, 8, 0}}},
        migraphx::make_op("flatten", {{"axis", 3}}),
        input);
}
TEST_CASE(flatten_dyn_axis4)
{
    // axis == rank: everything collapses into the leading dim; trailing is 1.
    migraphx::shape input{migraphx::shape::float_type,
                          {{1, 4, 0}, {4, 4, 0}, {6, 6, 0}, {8, 8, 0}}};
    expect_shape(
        migraphx::shape{migraphx::shape::float_type,
                        {{1 * 4 * 6 * 8, 4 * 4 * 6 * 8, 0}, {1, 1, 0}}},
        migraphx::make_op("flatten", {{"axis", 4}}),
        input);
}
TEST_CASE
(
gather
)
{
{
...
...
@@ -460,46 +831,6 @@ TEST_CASE(gather)
}
}
// 3 input arguments
TEST_CASE(gemm)
{
    // Grouped dot shape checks: inner-dimension mismatches throw, matching
    // 2-D and batched 3-D cases produce the expected output shapes.
    {
        migraphx::shape s_m1{migraphx::shape::float_type, {4, 5}};
        migraphx::shape s_m2{migraphx::shape::float_type, {10, 8}};
        throws_shape(migraphx::make_op("dot"), s_m1, s_m2);
    }
    {
        migraphx::shape s_m1{migraphx::shape::float_type, {4, 6}};
        migraphx::shape s_m2{migraphx::shape::float_type, {5, 8}};
        throws_shape(migraphx::make_op("dot"), s_m1, s_m2);
    }
    {
        migraphx::shape s_m1{migraphx::shape::float_type, {4, 5}};
        migraphx::shape s_m2{migraphx::shape::float_type, {5, 8}};
        expect_shape(migraphx::shape{migraphx::shape::float_type, {4, 8}},
                     migraphx::make_op("dot"),
                     s_m1,
                     s_m2);
    }
    {
        migraphx::shape s_m1{migraphx::shape::float_type, {1, 4, 5}};
        migraphx::shape s_m2{migraphx::shape::float_type, {1, 5, 8}};
        expect_shape(migraphx::shape{migraphx::shape::float_type, {1, 4, 8}},
                     migraphx::make_op("dot"),
                     s_m1,
                     s_m2);
    }
    {
        migraphx::shape s_m1{migraphx::shape::float_type, {1, 4, 6}};
        migraphx::shape s_m2{migraphx::shape::float_type, {2, 5, 8}};
        throws_shape(migraphx::make_op("dot"), s_m1, s_m2);
    }
}
TEST_CASE
(
get_tuple_elem_test
)
{
migraphx
::
shape
s0
{
migraphx
::
shape
::
bool_type
,
{
1
,
1
}};
...
...
@@ -926,130 +1257,30 @@ TEST_CASE(lstm)
std
::
size_t
seq_len
=
2
;
std
::
size_t
hidden_size
=
4
;
std
::
size_t
input_size
=
3
;
std
::
size_t
num_dirct
=
2
;
float
clip
=
0.0
f
;
migraphx
::
shape
in_shape
{
migraphx
::
shape
::
float_type
,
{
seq_len
,
batch_size
,
input_size
}};
migraphx
::
shape
w_shape
{
migraphx
::
shape
::
float_type
,
{
num_dirct
,
3
*
hidden_size
,
input_size
}};
migraphx
::
shape
r_shape
{
migraphx
::
shape
::
float_type
,
{
num_dirct
,
3
*
hidden_size
,
hidden_size
}};
migraphx
::
shape
b_shape
{
migraphx
::
shape
::
float_type
,
{
num_dirct
,
6
*
hidden_size
}};
migraphx
::
shape
ih_shape
{
migraphx
::
shape
::
float_type
,
{
num_dirct
,
batch_size
,
hidden_size
}};
throws_shape
(
migraphx
::
make_op
(
"lstm"
,
{{
"hidden_size"
,
hidden_size
},
{
"actv_func"
,
migraphx
::
to_value
(
std
::
vector
<
migraphx
::
operation
>
{
migraphx
::
make_op
(
"tanh"
)})},
{
"direction"
,
migraphx
::
to_value
(
migraphx
::
op
::
rnn_direction
::
forward
)},
{
"clip"
,
clip
}}),
in_shape
,
w_shape
,
r_shape
,
b_shape
,
ih_shape
);
}
}
// 2 inputs arguments
TEST_CASE
(
matmul
)
{
{
migraphx
::
shape
s_m1
{
migraphx
::
shape
::
float_type
,
{
5
}};
migraphx
::
shape
s_m2
{
migraphx
::
shape
::
float_type
,
{
5
}};
throws_shape
(
migraphx
::
make_op
(
"dot"
),
s_m1
,
s_m2
);
}
{
migraphx
::
shape
s_m1
{
migraphx
::
shape
::
float_type
,
{
5
}};
migraphx
::
shape
s_m2
{
migraphx
::
shape
::
float_type
,
{
5
,
2
}};
throws_shape
(
migraphx
::
make_op
(
"dot"
),
s_m1
,
s_m2
);
}
{
migraphx
::
shape
s_m1
{
migraphx
::
shape
::
float_type
,
{
1
,
5
}};
migraphx
::
shape
s_m2
{
migraphx
::
shape
::
float_type
,
{
5
}};
throws_shape
(
migraphx
::
make_op
(
"dot"
),
s_m1
,
s_m2
);
}
{
migraphx
::
shape
s_m1
{
migraphx
::
shape
::
float_type
,
{
1
,
5
}};
migraphx
::
shape
s_m2
{
migraphx
::
shape
::
float_type
,
{
5
,
4
}};
expect_shape
(
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
1
,
4
}},
migraphx
::
make_op
(
"dot"
),
s_m1
,
s_m2
);
}
{
migraphx
::
shape
s_m1
{
migraphx
::
shape
::
float_type
,
{
1
,
5
}};
migraphx
::
shape
s_m2
{
migraphx
::
shape
::
float_type
,
{
4
,
4
}};
throws_shape
(
migraphx
::
make_op
(
"dot"
),
s_m1
,
s_m2
);
}
{
migraphx
::
shape
s_m1
{
migraphx
::
shape
::
float_type
,
{
1
,
5
}};
migraphx
::
shape
s_m2
{
migraphx
::
shape
::
float_type
,
{
6
,
5
,
4
}};
throws_shape
(
migraphx
::
make_op
(
"dot"
),
s_m1
,
s_m2
);
}
{
migraphx
::
shape
s_m1
{
migraphx
::
shape
::
float_type
,
{
6
,
1
,
5
}};
migraphx
::
shape
s_m2
{
migraphx
::
shape
::
float_type
,
{
6
,
5
,
4
}};
expect_shape
(
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
6
,
1
,
4
}},
migraphx
::
make_op
(
"dot"
),
s_m1
,
s_m2
);
}
{
migraphx
::
shape
s_m1
{
migraphx
::
shape
::
float_type
,
{
1
,
6
,
1
,
5
}};
migraphx
::
shape
s_m2
{
migraphx
::
shape
::
float_type
,
{
1
,
6
,
5
,
4
}};
expect_shape
(
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
1
,
6
,
1
,
4
}},
migraphx
::
make_op
(
"dot"
),
s_m1
,
s_m2
);
}
{
migraphx
::
shape
s_m1
{
migraphx
::
shape
::
float_type
,
{
4
,
5
}};
migraphx
::
shape
s_m2
{
migraphx
::
shape
::
float_type
,
{
5
,
8
}};
expect_shape
(
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
4
,
8
}},
migraphx
::
make_op
(
"dot"
),
s_m1
,
s_m2
);
}
{
migraphx
::
shape
s_m1
{
migraphx
::
shape
::
float_type
,
{
1
,
1
}};
migraphx
::
shape
s_m2
{
migraphx
::
shape
::
float_type
,
{
1
,
1
}};
expect_shape
(
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
1
,
1
}},
migraphx
::
make_op
(
"dot"
),
s_m1
,
s_m2
);
}
{
migraphx
::
shape
s_m1
{
migraphx
::
shape
::
float_type
,
{
1
,
4
,
5
}};
migraphx
::
shape
s_m2
{
migraphx
::
shape
::
float_type
,
{
1
,
5
,
7
}};
expect_shape
(
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
1
,
4
,
7
}},
migraphx
::
make_op
(
"dot"
),
s_m1
,
s_m2
);
}
{
migraphx
::
shape
s_m1
{
migraphx
::
shape
::
float_type
,
{
4
,
5
}};
migraphx
::
shape
s_m2
{
migraphx
::
shape
::
float_type
,
{
1
,
1
,
5
,
7
}};
throws_shape
(
migraphx
::
make_op
(
"dot"
),
s_m1
,
s_m2
);
}
std
::
size_t
num_dirct
=
2
;
float
clip
=
0.0
f
;
{
migraphx
::
shape
s_m1
{
migraphx
::
shape
::
float_type
,
{
1
,
1
,
4
,
5
}};
migraphx
::
shape
s_m2
{
migraphx
::
shape
::
float_type
,
{
1
,
2
,
5
,
7
}};
throws_shape
(
migraphx
::
make_op
(
"dot"
),
s_m1
,
s_m2
);
migraphx
::
shape
in_shape
{
migraphx
::
shape
::
float_type
,
{
seq_len
,
batch_size
,
input_size
}};
migraphx
::
shape
w_shape
{
migraphx
::
shape
::
float_type
,
{
num_dirct
,
3
*
hidden_size
,
input_size
}};
migraphx
::
shape
r_shape
{
migraphx
::
shape
::
float_type
,
{
num_dirct
,
3
*
hidden_size
,
hidden_size
}};
migraphx
::
shape
b_shape
{
migraphx
::
shape
::
float_type
,
{
num_dirct
,
6
*
hidden_size
}};
migraphx
::
shape
ih_shape
{
migraphx
::
shape
::
float_type
,
{
num_dirct
,
batch_size
,
hidden_size
}};
throws_shape
(
migraphx
::
make_op
(
"lstm"
,
{{
"hidden_size"
,
hidden_size
},
{
"actv_func"
,
migraphx
::
to_value
(
std
::
vector
<
migraphx
::
operation
>
{
migraphx
::
make_op
(
"tanh"
)})},
{
"direction"
,
migraphx
::
to_value
(
migraphx
::
op
::
rnn_direction
::
forward
)},
{
"clip"
,
clip
}}),
in_shape
,
w_shape
,
r_shape
,
b_shape
,
ih_shape
);
}
}
...
...
@@ -1127,6 +1358,213 @@ TEST_CASE(multibroadcast)
}
}
TEST_CASE(multibroadcast_2in_static_dyn0)
{
    // Static (4,4) against dynamic 3-D: both argument orders give the same
    // dynamic output.
    migraphx::shape a_shape{migraphx::shape::float_type, {4, 4}};
    std::vector<migraphx::shape::dynamic_dimension> b{{1, 4, 0}, {4, 4, 4}, {4, 4, 0}};
    migraphx::shape b_shape{migraphx::shape::float_type, b};
    expect_shape(
        migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {4, 4, 0}, {4, 4, 0}}},
        migraphx::make_op("multibroadcast"),
        a_shape,
        b_shape);
    expect_shape(
        migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {4, 4, 0}, {4, 4, 0}}},
        migraphx::make_op("multibroadcast"),
        b_shape,
        a_shape);
}
TEST_CASE(multibroadcast_2in_static_dyn1)
{
    // Static (1,6) broadcasts into a dynamic ({8},{6}) shape, in both orders.
    migraphx::shape a_shape{migraphx::shape::float_type, {1, 6}};
    std::vector<migraphx::shape::dynamic_dimension> b{{8, 8, 0}, {6, 6, 0}};
    migraphx::shape b_shape{migraphx::shape::float_type, b};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{8, 8, 0}, {6, 6, 0}}},
                 migraphx::make_op("multibroadcast"),
                 a_shape,
                 b_shape);
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{8, 8, 0}, {6, 6, 0}}},
                 migraphx::make_op("multibroadcast"),
                 b_shape,
                 a_shape);
}
TEST_CASE(multibroadcast_2in_static_dyn2)
{
    // Same as static_dyn1, but with explicit out_dyn_dims on the op.
    migraphx::shape a_shape{migraphx::shape::float_type, {1, 6}};
    std::vector<migraphx::shape::dynamic_dimension> b{{8, 8, 0}, {6, 6, 0}};
    migraphx::shape b_shape{migraphx::shape::float_type, b};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{8, 8, 0}, {6, 6, 0}}},
                 migraphx::make_op("multibroadcast", {{"out_dyn_dims", migraphx::to_value(b)}}),
                 a_shape,
                 b_shape);
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{8, 8, 0}, {6, 6, 0}}},
                 migraphx::make_op("multibroadcast", {{"out_dyn_dims", migraphx::to_value(b)}}),
                 b_shape,
                 a_shape);
}
TEST_CASE(multibroadcast_2in_static_dyn_error0)
{
    // doesn't match on first dimension
    migraphx::shape a_shape{migraphx::shape::float_type, {3, 6}};
    std::vector<migraphx::shape::dynamic_dimension> b{{1, 3, 0}, {6, 6, 0}};
    migraphx::shape b_shape{migraphx::shape::float_type, b};
    throws_shape(migraphx::make_op("multibroadcast"), a_shape, b_shape);
    throws_shape(migraphx::make_op("multibroadcast"), b_shape, a_shape);
}
TEST_CASE(multibroadcast_2in_static_dyn_error1)
{
    // doesn't match on first dimension
    migraphx::shape a_shape{migraphx::shape::float_type, {3, 6}};
    std::vector<migraphx::shape::dynamic_dimension> b{{1, 4, 0}, {6, 6, 0}};
    migraphx::shape b_shape{migraphx::shape::float_type, b};
    throws_shape(migraphx::make_op("multibroadcast"), a_shape, b_shape);
    throws_shape(migraphx::make_op("multibroadcast"), b_shape, a_shape);
}
TEST_CASE(multibroadcast_2in_static_dyn_error2)
{
    // doesn't match on first dimension
    migraphx::shape a_shape{migraphx::shape::float_type, {3, 6}};
    std::vector<migraphx::shape::dynamic_dimension> b{{1, 2, 0}, {6, 6, 0}};
    migraphx::shape b_shape{migraphx::shape::float_type, b};
    throws_shape(migraphx::make_op("multibroadcast"), a_shape, b_shape);
    throws_shape(migraphx::make_op("multibroadcast"), b_shape, a_shape);
}
// Two dynamic inputs of different rank: a 3-D dynamic shape broadcast with a
// 2-D dynamic shape whose dims match a's trailing dims. The output keeps a's
// full 3-D dynamic dims, regardless of argument order.
TEST_CASE(multibroadcast_2in_dyn_dyn0)
{
    std::vector<migraphx::shape::dynamic_dimension> a{{1, 4, 0}, {2, 4, 2}, {2, 4, 0}};
    migraphx::shape a_shape{migraphx::shape::float_type, a};
    std::vector<migraphx::shape::dynamic_dimension> b{{2, 4, 2}, {2, 4, 0}};
    migraphx::shape b_shape{migraphx::shape::float_type, b};
    expect_shape(migraphx::shape{migraphx::shape::float_type,
                                 {{1, 4, 0}, {2, 4, 2}, {2, 4, 0}}},
                 migraphx::make_op("multibroadcast"),
                 a_shape,
                 b_shape);
    expect_shape(migraphx::shape{migraphx::shape::float_type,
                                 {{1, 4, 0}, {2, 4, 2}, {2, 4, 0}}},
                 migraphx::make_op("multibroadcast"),
                 b_shape,
                 a_shape);
}

// Same shapes as dyn_dyn0 but with the output dims pinned explicitly via the
// "out_dyn_dims" attribute set to a's dims.
TEST_CASE(multibroadcast_2in_dyn_dyn1)
{
    std::vector<migraphx::shape::dynamic_dimension> a{{1, 4, 0}, {2, 4, 2}, {2, 4, 0}};
    migraphx::shape a_shape{migraphx::shape::float_type, a};
    std::vector<migraphx::shape::dynamic_dimension> b{{2, 4, 2}, {2, 4, 0}};
    migraphx::shape b_shape{migraphx::shape::float_type, b};
    expect_shape(migraphx::shape{migraphx::shape::float_type,
                                 {{1, 4, 0}, {2, 4, 2}, {2, 4, 0}}},
                 migraphx::make_op("multibroadcast", {{"out_dyn_dims", migraphx::to_value(a)}}),
                 a_shape,
                 b_shape);
    expect_shape(migraphx::shape{migraphx::shape::float_type,
                                 {{1, 4, 0}, {2, 4, 2}, {2, 4, 0}}},
                 migraphx::make_op("multibroadcast", {{"out_dyn_dims", migraphx::to_value(a)}}),
                 b_shape,
                 a_shape);
}
// Error cases for two dynamic inputs: a mismatch in a single
// dynamic_dimension field (max or opt) must make the broadcast throw in both
// argument orders.
TEST_CASE(multibroadcast_2in_dyn_dyn_error0)
{
    // max doesn't match on second dimension of a
    std::vector<migraphx::shape::dynamic_dimension> a{{1, 4, 0}, {2, 4, 2}, {2, 4, 0}};
    migraphx::shape a_shape{migraphx::shape::float_type, a};
    // b's first dim {2, 5, 2} has max 5 vs a's corresponding max 4
    std::vector<migraphx::shape::dynamic_dimension> b{{2, 5, 2}, {2, 4, 0}};
    migraphx::shape b_shape{migraphx::shape::float_type, b};
    throws_shape(migraphx::make_op("multibroadcast"), a_shape, b_shape);
    throws_shape(migraphx::make_op("multibroadcast"), b_shape, a_shape);
}

TEST_CASE(multibroadcast_2in_dyn_dyn_error1)
{
    // opt doesn't match on second dimension of a
    std::vector<migraphx::shape::dynamic_dimension> a{{1, 4, 0}, {2, 4, 2}, {2, 4, 0}};
    migraphx::shape a_shape{migraphx::shape::float_type, a};
    // b's first dim {2, 4, 3} has opt 3 vs a's corresponding opt 2
    std::vector<migraphx::shape::dynamic_dimension> b{{2, 4, 3}, {2, 4, 0}};
    migraphx::shape b_shape{migraphx::shape::float_type, b};
    throws_shape(migraphx::make_op("multibroadcast"), a_shape, b_shape);
    throws_shape(migraphx::make_op("multibroadcast"), b_shape, a_shape);
}
// Broadcasting two identical static {3, 6} shapes is an identity in both
// argument orders.
TEST_CASE(multibroadcast_2in_static_static0)
{
    migraphx::shape lhs{migraphx::shape::float_type, {3, 6}};
    migraphx::shape rhs{migraphx::shape::float_type, {3, 6}};
    // expected output is the same standard {3, 6} shape either way
    migraphx::shape expected{migraphx::shape::float_type, {3, 6}};
    expect_shape(expected, migraphx::make_op("multibroadcast"), lhs, rhs);
    expect_shape(expected, migraphx::make_op("multibroadcast"), rhs, lhs);
}
// Static/static broadcasts where the output strides encode which input is
// being broadcast: a broadcast (size-1 or missing) dimension gets stride 0.
// The first argument's strides are what the expected shape checks.
TEST_CASE(multibroadcast_2in_static_static1)
{
    migraphx::shape a_shape{migraphx::shape::float_type, {1, 8}};
    migraphx::shape b_shape{migraphx::shape::float_type, {4, 8}};
    // a's leading 1 broadcasts to 4 -> stride 0 on that axis
    expect_shape(migraphx::shape{migraphx::shape::float_type, {4, 8}, {0, 1}},
                 migraphx::make_op("multibroadcast"),
                 a_shape,
                 b_shape);
    // b already matches the output -> normal row-major strides
    expect_shape(migraphx::shape{migraphx::shape::float_type, {4, 8}, {8, 1}},
                 migraphx::make_op("multibroadcast"),
                 b_shape,
                 a_shape);
}

TEST_CASE(multibroadcast_2in_static_static2)
{
    migraphx::shape a_shape{migraphx::shape::float_type, {8}};
    migraphx::shape b_shape{migraphx::shape::float_type, {4, 4, 1}};
    // 1-D a is right-aligned against 3-D b -> {4, 4, 8} output
    expect_shape(migraphx::shape{migraphx::shape::float_type, {4, 4, 8}, {0, 0, 1}},
                 migraphx::make_op("multibroadcast"),
                 a_shape,
                 b_shape);
    // b's trailing 1 broadcasts to 8 -> stride 0 on the last axis
    expect_shape(migraphx::shape{migraphx::shape::float_type, {4, 4, 8}, {4, 1, 0}},
                 migraphx::make_op("multibroadcast"),
                 b_shape,
                 a_shape);
}

TEST_CASE(multibroadcast_2in_static_static3)
{
    migraphx::shape a_shape{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape b_shape{migraphx::shape::float_type, {4, 1}};
    // a already matches the output -> normal row-major strides
    expect_shape(migraphx::shape{migraphx::shape::float_type, {3, 4, 4}, {16, 4, 1}},
                 migraphx::make_op("multibroadcast"),
                 a_shape,
                 b_shape);
    // b's {4, 1} aligns to the middle axis; broadcast axes get stride 0
    expect_shape(migraphx::shape{migraphx::shape::float_type, {3, 4, 4}, {0, 1, 0}},
                 migraphx::make_op("multibroadcast"),
                 b_shape,
                 a_shape);
}

TEST_CASE(multibroadcast_2in_static_static4)
{
    migraphx::shape a_shape{migraphx::shape::float_type, {3, 1, 4}};
    migraphx::shape b_shape{migraphx::shape::float_type, {4, 1}};
    // both inputs contribute broadcast axes; a's middle 1 -> stride 0
    expect_shape(migraphx::shape{migraphx::shape::float_type, {3, 4, 4}, {4, 0, 1}},
                 migraphx::make_op("multibroadcast"),
                 a_shape,
                 b_shape);
    expect_shape(migraphx::shape{migraphx::shape::float_type, {3, 4, 4}, {0, 1, 0}},
                 migraphx::make_op("multibroadcast"),
                 b_shape,
                 a_shape);
}
// Incompatible static shapes: the right-aligned dimensions (4 vs 3) cannot
// be broadcast together, so the op must throw for either argument order.
TEST_CASE(multibroadcast_2in_static_static_error0)
{
    migraphx::shape big{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape small{migraphx::shape::float_type, {4, 3}};
    throws_shape(migraphx::make_op("multibroadcast"), big, small);
    throws_shape(migraphx::make_op("multibroadcast"), small, big);
}
TEST_CASE
(
multinomial
)
{
migraphx
::
shape
s
{
migraphx
::
shape
::
float_type
,
{
2
,
5
}};
...
...
@@ -1278,16 +1716,108 @@ TEST_CASE(nms_shape)
score_thres_s
);
}
TEST_CASE
(
pooling_shape
)
// pad with static input: the "pads" attribute lists the leading pads for all
// axes followed by the trailing pads for all axes; each padded axis grows by
// (lead + trail).
TEST_CASE(pad_shape0)
{
    migraphx::shape input{migraphx::shape::float_type, {2, 3, 3, 3}};
    // 1 before + 1 after on the last two axes: 3 -> 5
    migraphx::shape output{migraphx::shape::float_type, {2, 3, 5, 5}};
    expect_shape(output,
                 migraphx::make_op("pad", {{"pads", {0, 0, 1, 1, 0, 0, 1, 1}}}),
                 input);
}

TEST_CASE(pad_shape1)
{
    migraphx::shape input{migraphx::shape::float_type, {2, 3, 3, 3}};
    // asymmetric pads: axis 2 gets 2 + 1, axis 3 gets 2 + 1 -> 3 -> 6
    migraphx::shape output{migraphx::shape::float_type, {2, 3, 6, 6}};
    expect_shape(output,
                 migraphx::make_op("pad", {{"pads", {0, 0, 2, 2, 0, 0, 1, 1}}}),
                 input);
}
// pad with dynamic input: the pad amounts shift each dynamic dimension's
// min and max by (lead + trail).
TEST_CASE(pad_dyn_shape0)
{
    migraphx::shape input{migraphx::shape::float_type,
                          {{1, 4, 2}, {3, 3, 0}, {3, 5, 0}, {3, 5, 0}}};
    // last two axes padded by 1 + 1: {3, 5} -> {5, 7}
    migraphx::shape output{migraphx::shape::float_type,
                           {{1, 4, 2}, {3, 3, 0}, {5, 7, 0}, {5, 7, 0}}};
    expect_shape(output,
                 migraphx::make_op("pad", {{"pads", {0, 0, 1, 1, 0, 0, 1, 1}}}),
                 input);
}

TEST_CASE(pad_dyn_shape1)
{
    // same as pad_dyn_shape0 but with nonzero optimal values on the padded
    // axes; the opt value is shifted by the pad as well (5 -> 7)
    migraphx::shape input{migraphx::shape::float_type,
                          {{1, 4, 2}, {3, 3, 0}, {3, 5, 5}, {3, 5, 5}}};
    migraphx::shape output{migraphx::shape::float_type,
                           {{1, 4, 2}, {3, 3, 0}, {5, 7, 7}, {5, 7, 7}}};
    expect_shape(output,
                 migraphx::make_op("pad", {{"pads", {0, 0, 1, 1, 0, 0, 1, 1}}}),
                 input);
}
// pooling shape computation with static 4-D inputs.
TEST_CASE(pooling_shape0)
{
    migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
    // attribute vectors of length 1 for a 2-D spatial input (and stride 0)
    // are invalid -> must throw
    throws_shape(migraphx::make_op("pooling",
                                   {{"mode", migraphx::op::pooling_mode::max},
                                    {"padding", {1}},
                                    {"stride", {0}},
                                    {"lengths", {1}}}),
                 input);
}

TEST_CASE(pooling_shape1)
{
    migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
    // 1x1 window, stride 3, no padding: floor mode -> 1x1 spatial output
    migraphx::shape output{migraphx::shape::float_type, {4, 3, 1, 1}};
    expect_shape(output,
                 migraphx::make_op("pooling",
                                   {{"mode", migraphx::op::pooling_mode::max},
                                    {"padding", {0, 0}},
                                    {"stride", {3, 3}},
                                    {"lengths", {1, 1}}}),
                 input);
}

TEST_CASE(pooling_shape2)
{
    migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
    // same attributes as pooling_shape1 but ceil_mode rounds up -> 2x2
    migraphx::shape output{migraphx::shape::float_type, {4, 3, 2, 2}};
    expect_shape(output,
                 migraphx::make_op("pooling",
                                   {{"mode", migraphx::op::pooling_mode::max},
                                    {"padding", {0, 0}},
                                    {"stride", {3, 3}},
                                    {"lengths", {1, 1}},
                                    {"ceil_mode", true}}),
                 input);
}

TEST_CASE(pooling_shape3)
{
    migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
    // 3x3 window with padding 2 and stride 3, ceil_mode -> 3x3 output
    migraphx::shape output{migraphx::shape::float_type, {4, 3, 3, 3}};
    expect_shape(output,
                 migraphx::make_op("pooling",
                                   {{"mode", migraphx::op::pooling_mode::max},
                                    {"padding", {2, 2}},
                                    {"stride", {3, 3}},
                                    {"lengths", {3, 3}},
                                    {"ceil_mode", true}}),
                 input);
}
// pooling with a dynamic input: the same malformed attributes as
// pooling_shape0 (length-1 vectors, zero stride) must also throw for
// dynamic shapes.
TEST_CASE(pooling_dyn_shape0)
{
    migraphx::shape input{migraphx::shape::float_type,
                          {{1, 4, 0}, {3, 3, 3}, {3, 3, 3}, {3, 3, 0}}};
    throws_shape(migraphx::make_op("pooling",
                                   {{"mode", migraphx::op::pooling_mode::max},
                                    {"padding", {1}},
                                    {"stride", {0}},
                                    {"lengths", {1}}}),
                 input);
}
TEST_CASE
(
pooling_dyn_shape1
)
{
migraphx
::
shape
input
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
,
0
},
{
3
,
3
,
3
},
{
3
,
3
,
3
},
{
3
,
3
,
0
}}};
migraphx
::
shape
output
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
,
0
},
{
3
,
3
,
3
},
{
1
,
1
,
1
},
{
1
,
1
,
0
}}};
expect_shape
(
output
,
migraphx
::
make_op
(
"pooling"
,
{{
"mode"
,
migraphx
::
op
::
pooling_mode
::
max
},
...
...
@@ -1295,9 +1825,15 @@ TEST_CASE(pooling_shape)
{
"stride"
,
{
3
,
3
}},
{
"lengths"
,
{
1
,
1
}}}),
input
);
}
migraphx
::
shape
output1
{
migraphx
::
shape
::
float_type
,
{
4
,
3
,
2
,
2
}};
expect_shape
(
output1
,
TEST_CASE
(
pooling_dyn_shape2
)
{
migraphx
::
shape
input
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
,
0
},
{
5
,
5
,
0
},
{
3
,
3
,
3
},
{
3
,
3
,
0
}}};
migraphx
::
shape
output
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
,
0
},
{
5
,
5
,
0
},
{
2
,
2
,
2
},
{
2
,
2
,
0
}}};
expect_shape
(
output
,
migraphx
::
make_op
(
"pooling"
,
{{
"mode"
,
migraphx
::
op
::
pooling_mode
::
max
},
{
"padding"
,
{
0
,
0
}},
...
...
@@ -1307,6 +1843,37 @@ TEST_CASE(pooling_shape)
input
);
}
// pooling over dynamic spatial dims: the output min/max/opt are each
// computed from the corresponding input value.
TEST_CASE(pooling_dyn_shape3)
{
    migraphx::shape input{migraphx::shape::float_type,
                          {{4, 4, 0}, {3, 3, 0}, {4, 12, 8}, {4, 12, 8}}};
    // 1x1 window, stride 3: spatial {4, 12, 8} -> {2, 4, 3}
    migraphx::shape output{migraphx::shape::float_type,
                           {{4, 4, 0}, {3, 3, 0}, {2, 4, 3}, {2, 4, 3}}};
    expect_shape(output,
                 migraphx::make_op("pooling",
                                   {{"mode", migraphx::op::pooling_mode::max},
                                    {"padding", {0, 0}},
                                    {"stride", {3, 3}},
                                    {"lengths", {1, 1}}}),
                 input);
}

TEST_CASE(pooling_dyn_shape4)
{
    migraphx::shape input{migraphx::shape::float_type,
                          {{4, 4, 0}, {3, 3, 0}, {4, 12, 8}, {4, 12, 8}}};
    // 3x3 window, padding 2, stride 3, ceil_mode: spatial {4, 12, 8} -> {3, 6, 4}
    migraphx::shape output{migraphx::shape::float_type,
                           {{4, 4, 0}, {3, 3, 0}, {3, 6, 4}, {3, 6, 4}}};
    expect_shape(output,
                 migraphx::make_op("pooling",
                                   {{"mode", migraphx::op::pooling_mode::max},
                                    {"padding", {2, 2}},
                                    {"stride", {3, 3}},
                                    {"lengths", {3, 3}},
                                    {"ceil_mode", true}}),
                 input);
}
TEST_CASE
(
prefix_scan_sum
)
{
{
...
...
@@ -1411,9 +1978,51 @@ void test_reduce_ops()
}
}
// dynamic shape
// dynamic shape
// Shared driver for reduce_* ops over a dynamic input: a reduced axis
// collapses to dynamic_dimension {1, 1, 0}; unreduced axes pass through.
// T is the reduce op type (e.g. migraphx::op::reduce_max).
template <class T>
void test_dyn_reduce_ops()
{
    {
        // negative axis (-1) reduces the last dimension
        migraphx::shape input{migraphx::shape::float_type, {{2, 3, 3}, {2, 4, 4}}};
        expect_shape(migraphx::shape{migraphx::shape::float_type,
                                     std::vector<migraphx::shape::dynamic_dimension>(
                                         {{2, 3, 3}, {1, 1, 0}})},
                     T{{-1}},
                     input);
    }
    {
        // axis 0 reduces the first dimension
        migraphx::shape input{migraphx::shape::float_type, {{2, 3, 3}, {2, 4, 4}}};
        expect_shape(migraphx::shape{migraphx::shape::float_type,
                                     std::vector<migraphx::shape::dynamic_dimension>(
                                         {{1, 1, 0}, {2, 4, 4}})},
                     T{{0}},
                     input);
    }
    {
        // Empty axis argument reduces all axes
        migraphx::shape input{migraphx::shape::float_type, {{2, 3, 3}, {2, 4, 4}}};
        expect_shape(migraphx::shape{migraphx::shape::float_type,
                                     std::vector<migraphx::shape::dynamic_dimension>(
                                         {{1, 1, 0}, {1, 1, 0}})},
                     T{{}},
                     input);
    }
    {
        // axis 4 is out of range for a rank-2 input -> must throw
        migraphx::shape input{migraphx::shape::float_type, {{2, 3, 3}, {2, 4, 4}}};
        throws_shape(T{{4}}, input);
    }
}
// Instantiate the shared static-shape driver for each reduce op.
TEST_CASE(reduce_max) { test_reduce_ops<migraphx::op::reduce_max>(); }
TEST_CASE(reduce_mean) { test_reduce_ops<migraphx::op::reduce_mean>(); }
TEST_CASE(reduce_prod) { test_reduce_ops<migraphx::op::reduce_prod>(); }
TEST_CASE(reduce_sum) { test_reduce_ops<migraphx::op::reduce_sum>(); }
// Instantiate the dynamic-shape driver for each reduce op.
TEST_CASE(reduce_max_dyn) { test_dyn_reduce_ops<migraphx::op::reduce_max>(); }
TEST_CASE(reduce_mean_dyn) { test_dyn_reduce_ops<migraphx::op::reduce_mean>(); }
TEST_CASE(reduce_prod_dyn) { test_dyn_reduce_ops<migraphx::op::reduce_prod>(); }
TEST_CASE(reduce_sum_dyn) { test_dyn_reduce_ops<migraphx::op::reduce_sum>(); }
TEST_CASE
(
reshape_shape
)
{
migraphx
::
shape
input
{
migraphx
::
shape
::
float_type
,
{
24
,
1
,
1
,
1
}};
...
...
@@ -1449,6 +2058,55 @@ TEST_CASE(reshape_shape)
}
}
// reshape with a dynamic input: for each candidate "dims" vector, build the
// expected output dynamic dims — a 0 or -1 entry carries the input's dynamic
// dimension through unchanged, any other entry becomes a fixed {d, d, 0}.
TEST_CASE(reshape_dyn_shape)
{
    migraphx::shape input{migraphx::shape::float_type,
                          {{1, 4, 0}, {24, 24, 0}, {1, 1, 0}, {1, 1, 0}}};
    for(auto&& new_shape : std::vector<std::vector<int64_t>>{
            {-1, 1, 1, 24}, {0, 8, 3, 1}, {-1, 3, 4, 2}, {0, 2, 4, 3}})
    {
        std::vector<migraphx::shape::dynamic_dimension> out_dyn_dims{};
        for(std::size_t i = 0; i < new_shape.size(); ++i)
        {
            if(new_shape[i] == 0 or new_shape[i] == -1)
            {
                // placeholder: keep the input's dynamic dimension at i
                out_dyn_dims.push_back(input.dyn_dims().at(i));
            }
            else
            {
                // explicit size: output dimension is fixed
                std::size_t d = new_shape[i];
                out_dyn_dims.push_back({d, d, 0});
            }
        }
        migraphx::shape output{migraphx::shape::float_type, out_dyn_dims};
        expect_shape(output, migraphx::make_op("reshape", {{"dims", new_shape}}), input);
    }
}
// reshape error cases on dynamic inputs.
TEST_CASE(reshape_multiple_non_fixed_error)
{
    // two non-fixed input dims ({1,4} and {10,20}) but the dims attribute
    // can only carry one of them through -> must throw
    migraphx::shape input{migraphx::shape::float_type,
                          {{1, 4, 0}, {24, 24, 0}, {10, 20, 0}, {1, 1, 0}}};
    std::vector<int64_t> new_shape = {0, 1, 0, 24};
    throws_shape(migraphx::make_op("reshape", {{"dims", new_shape}}), input);
}

TEST_CASE(reshape_fixed_ele_not_matching_error)
{
    // fixed element counts disagree: requested 5 on an axis of fixed size 10
    migraphx::shape input{migraphx::shape::float_type,
                          {{1, 4, 0}, {24, 24, 0}, {10, 10, 0}, {1, 1, 0}}};
    std::vector<int64_t> new_shape = {0, 1, 5, 24};
    throws_shape(migraphx::make_op("reshape", {{"dims", new_shape}}), input);
}

TEST_CASE(reshape_non_fixed_not_matching_error)
{
    // the non-fixed input dim {1, 4} is given an explicit size 2 that does
    // not carry it through -> must throw
    migraphx::shape input{migraphx::shape::float_type,
                          {{1, 4, 0}, {24, 24, 0}, {1, 1, 0}, {1, 1, 0}}};
    std::vector<int64_t> new_shape = {2, 1, 1, 24};
    throws_shape(migraphx::make_op("reshape", {{"dims", new_shape}}), input);
}
TEST_CASE
(
rnn
)
{
{
...
...
@@ -1649,6 +2307,20 @@ TEST_CASE(slice_shape)
// softmax shape tests: output shape always equals input shape.
TEST_CASE(softmax) { test_softmax_variations<migraphx::op::softmax>(); }

TEST_CASE(softmax_dyn0)
{
    migraphx::shape input{migraphx::shape::float_type,
                          {{1, 4, 0}, {3, 3, 0}, {4, 4, 0}, {5, 5, 0}}};
    expect_shape(input, migraphx::make_op("softmax", {{"axis", 0}}), input);
}

TEST_CASE(softmax_dyn1)
{
    // dynamic dims with a nonzero opt value also pass straight through
    migraphx::shape input{migraphx::shape::float_type,
                          {{1, 1, 0}, {3, 3, 0}, {4, 6, 0}, {5, 8, 6}}};
    expect_shape(input, migraphx::make_op("softmax", {{"axis", 0}}), input);
}
TEST_CASE
(
test_argmax
)
{
{
...
...
@@ -1771,6 +2443,30 @@ TEST_CASE(test_squeeze_all)
expect_shape
(
s2
,
migraphx
::
make_op
(
"squeeze"
,
{{
"axes"
,
{
0
}}}),
s1
);
}
// squeeze on dynamic shapes: only {1, 1} dynamic dimensions can be removed.
TEST_CASE(test_squeeze_dyn)
{
    migraphx::shape s1{migraphx::shape::float_type,
                       {{1, 4, 0}, {1, 1, 0}, {3, 3, 0}, {1, 1, 0}, {3, 3, 0}}};
    // squeezing axis 3 removes the {1, 1, 0} dimension there
    migraphx::shape s2{migraphx::shape::float_type,
                       {{1, 4, 0}, {1, 1, 0}, {3, 3, 0}, {3, 3, 0}}};
    expect_shape(s2, migraphx::make_op("squeeze", {{"axes", {3}}}), s1);
    // no axes: all fixed {1, 1} dims are removed; {1, 4} is kept since it
    // is not fixed at 1
    migraphx::shape s3{migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {3, 3, 0}}};
    expect_shape(s3, migraphx::make_op("squeeze"), s1);
    // axis 0 is {1, 4, 0} -- not guaranteed to be 1 -> must throw
    throws_shape(migraphx::make_op("squeeze", {{"axes", {0}}}), s1);
}

// Same cases with negative axis indices.
TEST_CASE(test_squeeze_dyn_neg_axes)
{
    migraphx::shape s1{migraphx::shape::float_type,
                       {{1, 4, 0}, {1, 1, 0}, {3, 3, 0}, {1, 1, 0}, {3, 3, 0}}};
    // -2 resolves to axis 3
    migraphx::shape s2{migraphx::shape::float_type,
                       {{1, 4, 0}, {1, 1, 0}, {3, 3, 0}, {3, 3, 0}}};
    expect_shape(s2, migraphx::make_op("squeeze", {{"axes", {-2}}}), s1);
    // {-2, -4} resolves to axes 3 and 1
    migraphx::shape s3{migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {3, 3, 0}}};
    expect_shape(s3, migraphx::make_op("squeeze", {{"axes", {-2, -4}}}), s1);
}
TEST_CASE
(
test_squeeze_transpose
)
{
migraphx
::
shape
s1
{
migraphx
::
shape
::
float_type
,
{
4
,
4
,
1
},
{
4
,
1
,
4
}};
...
...
@@ -1812,6 +2508,30 @@ TEST_CASE(test_unsqueeze)
expect_shape
(
s2
,
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
2
}}}),
s1
);
}
// unsqueeze on dynamic shapes: inserts {1, 1, 0} dimensions at the given axes.
TEST_CASE(test_unsqueeze_dyn)
{
    migraphx::shape s1{migraphx::shape::float_type, {{1, 4, 3}, {2, 5, 0}, {3, 3, 0}}};
    // single axis insertion at position 2
    migraphx::shape s2{migraphx::shape::float_type,
                       {{1, 4, 3}, {2, 5, 0}, {1, 1, 0}, {3, 3, 0}}};
    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}}), s1);
    // multiple axes inserted at positions 2 and 4
    migraphx::shape s3{migraphx::shape::float_type,
                       {{1, 4, 3}, {2, 5, 0}, {1, 1, 0}, {3, 3, 0}, {1, 1, 0}}};
    expect_shape(s3, migraphx::make_op("unsqueeze", {{"axes", {2, 4}}}), s1);
    // the "steps" attribute is rejected for dynamic inputs -> must throw
    throws_shape(migraphx::make_op("unsqueeze", {{"axes", {2, 4}}, {"steps", {2}}}), s1);
}

// Same insertions expressed with negative axis indices.
TEST_CASE(test_unsqueeze_dyn_neg_axes)
{
    migraphx::shape s1{migraphx::shape::float_type, {{1, 4, 3}, {2, 5, 0}, {3, 3, 0}}};
    // -2 inserts before the last output axis (position 2 of the result)
    migraphx::shape s2{migraphx::shape::float_type,
                       {{1, 4, 3}, {2, 5, 0}, {1, 1, 0}, {3, 3, 0}}};
    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {-2}}}), s1);
    // {-1, -3} resolve to output positions 4 and 2
    migraphx::shape s3{migraphx::shape::float_type,
                       {{1, 4, 3}, {2, 5, 0}, {1, 1, 0}, {3, 3, 0}, {1, 1, 0}}};
    expect_shape(s3, migraphx::make_op("unsqueeze", {{"axes", {-1, -3}}}), s1);
}
TEST_CASE
(
test_unsqueeze_step
)
{
migraphx
::
shape
s1
{
migraphx
::
shape
::
float_type
,
{
4
,
5
,
12
}};
...
...
@@ -1843,13 +2563,27 @@ TEST_CASE(test_unsqueeze_mismatch_step_axis)
throws_shape
(
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
2
}},
{
"steps"
,
{
2
,
3
}}}),
s1
);
}
TEST_CASE
(
test_unsqueeze_negative_axis
)
// unsqueeze with negative axes on static shapes: the axis is counted from
// the end of the output rank.
TEST_CASE(test_unsqueeze_negative_axis1)
{
    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 3}};
    // -2 inserts the new size-1 axis at output position 2
    migraphx::shape s2{migraphx::shape::float_type, {4, 5, 1, 3}};
    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {-2}}}), s1);
}

TEST_CASE(test_unsqueeze_negative_axis2)
{
    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 3}};
    // -1 appends the new size-1 axis at the end
    migraphx::shape s2{migraphx::shape::float_type, {4, 5, 3, 1}};
    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {-1}}}), s1);
}

TEST_CASE(test_unsqueeze_negative_axis3)
{
    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 3}};
    // -3 inserts the new size-1 axis at output position 1
    migraphx::shape s2{migraphx::shape::float_type, {4, 1, 5, 3}};
    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {-3}}}), s1);
}
TEST_CASE
(
test_unsqueeze_scalar
)
{
migraphx
::
shape
s1
{
migraphx
::
shape
::
float_type
,
{
1
},
{
0
}};
...
...
@@ -1955,6 +2689,28 @@ TEST_CASE(transpose_shape)
throws_shape
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
{
1
,
2
}}}),
input
);
}
// transpose on dynamic shapes: the permutation reorders the dynamic
// dimensions; the identity permutation returns the input shape unchanged.
TEST_CASE(transpose_dyn_shape0)
{
    migraphx::shape input{migraphx::shape::float_type, {{1, 4, 0}, {2, 2, 0}}};
    migraphx::shape output{migraphx::shape::float_type, {{2, 2, 0}, {1, 4, 0}}};
    expect_shape(input, migraphx::make_op("transpose", {{"permutation", {0, 1}}}), input);
    expect_shape(output, migraphx::make_op("transpose", {{"permutation", {1, 0}}}), input);
}

TEST_CASE(transpose_dyn_shape1)
{
    migraphx::shape input{migraphx::shape::float_type,
                          {{1, 4, 0}, {4, 4, 0}, {4, 4, 0}}};
    // reversing the axes moves the {1, 4, 0} dimension to the end
    migraphx::shape output{migraphx::shape::float_type,
                           {{4, 4, 0}, {4, 4, 0}, {1, 4, 0}}};
    expect_shape(input, migraphx::make_op("transpose", {{"permutation", {0, 1, 2}}}), input);
    expect_shape(output, migraphx::make_op("transpose", {{"permutation", {2, 1, 0}}}), input);
}
// A permutation whose length differs from the input rank is invalid: a 2-D
// input with a single-element permutation must throw.
TEST_CASE(transpose_axes_error)
{
    migraphx::shape square{migraphx::shape::float_type, {2, 2}};
    throws_shape(migraphx::make_op("transpose", {{"permutation", {1}}}), square);
}
TEST_CASE
(
step_test
)
{
migraphx
::
shape
s1
{
migraphx
::
shape
::
float_type
,
{
1
,
2
,
4
}};
...
...
test/operators.cpp
View file @
870a396b
...
...
@@ -29,7 +29,6 @@
#include <migraphx/module.hpp>
#include <sstream>
#include <string>
#include <migraphx/make_op.hpp>
#include <migraphx/serialize.hpp>
...
...
test/py/CMakeLists.txt
View file @
870a396b
...
...
@@ -56,4 +56,5 @@ add_py_test(gpu_offload test_gpu_offload.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test
(
gpu test_gpu.py WORKING_DIRECTORY
${
TEST_ONNX_DIR
}
)
add_py_test
(
array test_array.py WORKING_DIRECTORY
${
TEST_ONNX_DIR
}
)
add_py_test
(
backend onnx_backend_test.py WORKING_DIRECTORY
${
TEST_ONNX_DIR
}
)
add_py_test
(
gpu_async test_gpu_async.py WORKING_DIRECTORY
${
TEST_ONNX_DIR
}
)
endif
()
test/py/onnx_backend_test.py
View file @
870a396b
...
...
@@ -94,6 +94,16 @@ def disabled_tests_onnx_1_8_1(backend_test):
backend_test
.
exclude
(
r
'test_unsqueeze_unsorted_axes_cpu'
)
def disabled_tests_onnx_1_10_0(backend_test):
    """Exclude ONNX 1.10.0 backend tests that exercise unsupported features.

    The Shape operator's ``start``/``end`` attribute tests are excluded
    because those attributes are not supported.
    """
    # unsupported shape attributes
    unsupported_shape_attr_tests = (
        r'test_shape_end_1_cpu',
        r'test_shape_end_negative_1_cpu',
        r'test_shape_start_1_cpu',
        r'test_shape_start_1_end_2_cpu',
        r'test_shape_start_1_end_negative_1_cpu',
        r'test_shape_start_negative_1_cpu',
    )
    for pattern in unsupported_shape_attr_tests:
        backend_test.exclude(pattern)
def
create_backend_test
(
testname
=
None
,
target_device
=
None
):
if
target_device
is
not
None
:
c2
.
set_device
(
target_device
)
...
...
@@ -138,6 +148,7 @@ def create_backend_test(testname=None, target_device=None):
backend_test
.
include
(
r
'.*test_eyelike.*'
)
backend_test
.
include
(
r
'.*test_flatten.*'
)
backend_test
.
include
(
r
'.*test_floor.*'
)
backend_test
.
include
(
r
'.*test_fmod.*'
)
backend_test
.
include
(
r
'.*test_gather.*'
)
backend_test
.
include
(
r
'.*test_gemm.*'
)
backend_test
.
include
(
r
'.*test_globalaveragepool.*'
)
...
...
@@ -162,6 +173,7 @@ def create_backend_test(testname=None, target_device=None):
backend_test
.
include
(
r
'.*test_MaxPool[1-9]d.*'
)
backend_test
.
include
(
r
'.*test_mean.*'
)
backend_test
.
include
(
r
'.*test_min.*'
)
backend_test
.
include
(
r
' .*test_mod.*'
)
backend_test
.
include
(
r
'.*test_mul.*'
)
backend_test
.
include
(
r
'.*test_multinomial.*'
)
backend_test
.
include
(
r
'.*test_Multinomial.*'
)
...
...
@@ -179,6 +191,7 @@ def create_backend_test(testname=None, target_device=None):
backend_test
.
include
(
r
'.*test_operator_max_.*'
)
backend_test
.
include
(
r
'.*test_operator_maxpool.*'
)
backend_test
.
include
(
r
'.*test_operator_min.*'
)
backend_test
.
include
(
r
'.*test_operator_mod.*'
)
backend_test
.
include
(
r
'.*test_operator_mm.*'
)
backend_test
.
include
(
r
'.*test_operator_non_float_params.*'
)
backend_test
.
include
(
r
'.*test_operator_params.*'
)
...
...
@@ -311,6 +324,9 @@ def create_backend_test(testname=None, target_device=None):
if
version
.
parse
(
onnx
.
__version__
)
>=
version
.
parse
(
"1.8.0"
):
disabled_tests_onnx_1_8_1
(
backend_test
)
if
version
.
parse
(
onnx
.
__version__
)
>=
version
.
parse
(
"1.10.0"
):
disabled_tests_onnx_1_10_0
(
backend_test
)
# import all test cases at global scope to make
# them visible to python.unittest.
...
...
Prev
1
…
17
18
19
20
21
22
23
24
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment