Commit ffcb68b4 authored by Manupa Karunaratne's avatar Manupa Karunaratne

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into mlir-attention

parents ee88607c 7604ecf5
[Binary ONNX test files added in this merge; protobuf content not shown]
layer_norm_4d_half_test.onnx
layer_norm_4d_test.onnx
layer_norm_invalid_input_count_error_test.onnx
layer_norm_invalid_minus_axis_error_test.onnx
layer_norm_small_eps_half_test.onnx
layer_norm_without_bias_test.onnx
mvn_default_axes_fp16_test.onnx
mvn_default_axes_rank_too_small_test.onnx
mvn_default_axes_test.onnx
mvn_rank_2_fp16_test.onnx
mvn_rank_2_test.onnx
@@ -42,11 +42,14 @@
#include <migraphx/op/lrn.hpp>
#include <migraphx/op/reshape.hpp>
#include <migraphx/op/unknown.hpp>
#include <migraphx/env.hpp>
#include <migraphx/serialize.hpp>
#include "test.hpp"
MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_ENABLE_CK_WORKAROUNDS);
migraphx::program optimize_onnx(const std::string& name, bool run_passes = false)
{
migraphx::onnx_options options;
@@ -181,6 +184,19 @@ TEST_CASE(argmax_test)
EXPECT(p == prog);
}
TEST_CASE(argmax_select_last_index_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
auto ins = mm->add_instruction(
migraphx::make_op("argmax", {{"axis", 2}, {"select_last_index", true}}), l0);
mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {2}}}), ins);
auto prog = optimize_onnx("argmax_select_last_index_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(argmax_dyn_test)
{
migraphx::program p;
@@ -210,6 +226,19 @@ TEST_CASE(argmin_test)
EXPECT(p == prog);
}
TEST_CASE(argmin_select_last_index_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
auto ins = mm->add_instruction(
migraphx::make_op("argmin", {{"axis", 3}, {"select_last_index", true}}), l0);
mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {3}}}), ins);
auto prog = optimize_onnx("argmin_select_last_index_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(asin_test)
{
migraphx::program p;
@@ -362,10 +391,10 @@ TEST_CASE(averagepool_notset_test)
auto* mm = p.get_main_module();
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
auto ins = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {2, 2, 2, 2}},
{"stride", {2, 2}},
{"lengths", {6, 6}}}),
input);
auto ret = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {1, 1}}, {"ends", {2, 2}}}), ins);
@@ -382,11 +411,11 @@ TEST_CASE(averagepool_nt_cip_test)
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
std::vector<int64_t> pads = {0, 0, 0, 0, 0, 0, 1, 1};
auto ins_pad = mm->add_instruction(migraphx::make_op("pad", {{"pads", pads}}), input);
auto ret = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0}},
{"stride", {2, 2}},
{"lengths", {6, 6}}}),
ins_pad);
mm->add_return({ret});
@@ -426,11 +455,11 @@ TEST_CASE(averagepool_sl_cip_test)
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
std::vector<int64_t> pads = {0, 0, 1, 1, 0, 0, 0, 0};
auto ins_pad = mm->add_instruction(migraphx::make_op("pad", {{"pads", pads}}), input);
auto ret = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0}},
{"stride", {1, 1}},
{"lengths", {2, 2}}}),
ins_pad);
mm->add_return({ret});
auto prog = migraphx::parse_onnx("averagepool_sl_cip_test.onnx");
@@ -444,10 +473,10 @@ TEST_CASE(averagepool_same_upper_test)
auto* mm = p.get_main_module();
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
auto ins = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {1, 1, 1, 1}},
{"stride", {1, 1}},
{"lengths", {2, 2}}}),
input);
auto ret = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {1, 1}}, {"ends", {6, 6}}}), ins);
@@ -1634,7 +1663,7 @@ TEST_CASE(conv_transpose_input_pads_asymm_1d_test)
auto l1 = mm->add_parameter("w", {migraphx::shape::float_type, {1, 2, 3}});
auto l2 = mm->add_instruction(
migraphx::make_op("convolution_backwards",
{{"padding", {0}}, {"stride", {2}}, {"dilation", {1}}}),
l0,
l1);
mm->add_instruction(migraphx::make_op("slice", {{"axes", {2}}, {"starts", {0}}, {"ends", {6}}}),
@@ -1668,7 +1697,7 @@ TEST_CASE(conv_transpose_output_padding_3d_test)
auto l1 = mm->add_parameter("w", {migraphx::shape::float_type, {1, 2, 3, 3, 3}});
auto l2 = mm->add_instruction(
migraphx::make_op("convolution_backwards",
{{"padding", {0, 0, 0}}, {"stride", {3, 2, 2}}, {"dilation", {1, 1, 1}}}),
l0,
l1);
mm->add_instruction(migraphx::make_op("pad", {{"pads", {0, 0, 0, 0, 0, 0, 0, 1, 1, 1}}}), l2);
@@ -1701,7 +1730,7 @@ TEST_CASE(conv_transpose_output_shape_3d_test)
auto l1 = mm->add_parameter("w", {migraphx::shape::float_type, {1, 2, 3, 3, 3}});
auto l2 = mm->add_instruction(
migraphx::make_op("convolution_backwards",
{{"padding", {0, 0, 0}}, {"stride", {3, 2, 2}}, {"dilation", {1, 1, 1}}}),
l0,
l1);
mm->add_instruction(migraphx::make_op("pad", {{"pads", {0, 0, 0, 0, 0, 0, 0, 1, 1, 1}}}), l2);
@@ -1996,7 +2025,7 @@ TEST_CASE(equal_test)
auto eq = mm->add_instruction(migraphx::make_op("equal"), input1, input2);
auto ret = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::bool_type)}}),
eq);
mm->add_return({ret});
@@ -2016,7 +2045,7 @@ TEST_CASE(equal_bool_test)
auto input2 = mm->add_parameter("x2", sb);
auto cin1 = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::bool_type)}}),
input1);
auto ret = mm->add_instruction(migraphx::make_op("equal"), cin1, input2);
mm->add_return({ret});
@@ -2726,7 +2755,7 @@ TEST_CASE(greater_test)
auto gr = mm->add_instruction(migraphx::make_op("greater"), input1, input2);
auto ret = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::bool_type)}}),
gr);
mm->add_return({ret});
@@ -2745,7 +2774,7 @@ TEST_CASE(greater_bool_test)
auto input2 = mm->add_parameter("x2", sb);
auto cin1 = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::bool_type)}}),
input1);
auto ret = mm->add_instruction(migraphx::make_op("greater"), cin1, input2);
mm->add_return({ret});
@@ -2786,6 +2815,145 @@ TEST_CASE(group_conv_test)
EXPECT(p == prog);
}
migraphx::program make_group_norm(const std::vector<int64_t>& input_dims,
const std::vector<int64_t>& scale_dims,
const std::vector<int64_t>& bias_dims,
const std::vector<int64_t>& reshape_dims,
const std::vector<int64_t>& reduce_axes,
const float eps_value = 1e-5f,
const migraphx::shape::type_t dtype = migraphx::shape::float_type)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", {dtype, input_dims});
auto scale = mm->add_parameter("scale", {dtype, scale_dims});
auto bias = mm->add_parameter("bias", {dtype, bias_dims});
auto eps = mm->add_literal(migraphx::literal{dtype, {eps_value}});
auto x_reshaped =
mm->add_instruction(migraphx::make_op("reshape", {{"dims", reshape_dims}}), x);
auto mean =
mm->add_instruction(migraphx::make_op("reduce_mean", {{"axes", reduce_axes}}), x_reshaped);
auto x_sub_mean = add_common_op(*mm, migraphx::make_op("sub"), {x_reshaped, mean});
auto x_sqdiff_mean = add_common_op(*mm, migraphx::make_op("sqdiff"), {x_reshaped, mean});
auto var = mm->add_instruction(migraphx::make_op("reduce_mean", {{"axes", reduce_axes}}),
x_sqdiff_mean);
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {var, eps});
auto rsqrt = mm->add_instruction(migraphx::make_op("rsqrt"), {var_eps});
auto result = add_common_op(*mm, migraphx::make_op("mul"), {x_sub_mean, rsqrt});
auto scale_bcast = mm->add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", reshape_dims}}), scale);
auto bias_bcast = mm->add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", reshape_dims}}), bias);
auto scaled = mm->add_instruction(migraphx::make_op("mul"), {result, scale_bcast});
auto y = mm->add_instruction(migraphx::make_op("add"), {scaled, bias_bcast});
mm->add_instruction(migraphx::make_op("reshape", {{"dims", input_dims}}), y);
return p;
}
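// In equation form, the expected GroupNormalization decomposition built by make_group_norm is
// roughly y = (x_g - mean(x_g)) * rsqrt(var(x_g) + eps) * scale + bias, where x_g is the input
// reshaped so that each channel group can be reduced over reduce_axes, and scale/bias are
// broadcast along axis 1 of the reshaped tensor before the result is reshaped back to input_dims.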
TEST_CASE(group_norm_3d_test)
{
migraphx::program p = make_group_norm(
{1, 4, 2}, {2}, {2}, {1, 2, 2, 2}, {2, 3}, 1e-5f, migraphx::shape::float_type);
auto prog = optimize_onnx("group_norm_3d_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(group_norm_3d_half_test)
{
migraphx::program p = make_group_norm(
{1, 4, 2}, {2}, {2}, {1, 2, 2, 2}, {2, 3}, 1e-5f, migraphx::shape::half_type);
auto prog = optimize_onnx("group_norm_3d_half_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(group_norm_4d_test)
{
migraphx::program p = make_group_norm(
{1, 4, 3, 3}, {2}, {2}, {1, 2, 2, 3, 3}, {2, 3, 4}, 1e-5f, migraphx::shape::float_type);
auto prog = optimize_onnx("group_norm_4d_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(group_norm_4d_half_test)
{
migraphx::program p = make_group_norm(
{1, 4, 3, 3}, {2}, {2}, {1, 2, 2, 3, 3}, {2, 3, 4}, 1e-5f, migraphx::shape::half_type);
auto prog = optimize_onnx("group_norm_4d_half_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(group_norm_5d_test)
{
migraphx::program p = make_group_norm({3, 3, 3, 3, 3},
{1},
{1},
{3, 1, 3, 3, 3, 3},
{2, 3, 4, 5},
1e-5f,
migraphx::shape::float_type);
auto prog = optimize_onnx("group_norm_5d_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(group_norm_5d_half_test)
{
migraphx::program p = make_group_norm({3, 3, 3, 3, 3},
{1},
{1},
{3, 1, 3, 3, 3, 3},
{2, 3, 4, 5},
1e-5f,
migraphx::shape::half_type);
auto prog = optimize_onnx("group_norm_5d_half_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(group_norm_small_eps_half_test)
{
migraphx::program p = make_group_norm(
{1, 4, 2}, {2}, {2}, {1, 2, 2, 2}, {2, 3}, 1e-7f, migraphx::shape::half_type);
auto prog = optimize_onnx("group_norm_small_eps_half_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(group_norm_invalid_num_groups_error_test)
{
EXPECT(test::throws(
[&] { migraphx::parse_onnx("group_norm_invalid_num_groups_error_test.onnx"); }));
}
TEST_CASE(group_norm_missing_attribute_error_test)
{
EXPECT(test::throws(
[&] { migraphx::parse_onnx("group_norm_missing_attribute_error_test.onnx"); }));
}
TEST_CASE(group_norm_invalid_input_count_error_test)
{
EXPECT(test::throws(
[&] { migraphx::parse_onnx("group_norm_invalid_input_count_error_test.onnx"); }));
}
TEST_CASE(group_norm_invalid_input_shape_error_test)
{
EXPECT(test::throws(
[&] { migraphx::parse_onnx("group_norm_invalid_input_shape_error_test.onnx"); }));
}
TEST_CASE(group_norm_invalid_scale_shape_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("group_norm_invalid_scale_shape_test.onnx"); }));
}
TEST_CASE(group_norm_invalid_bias_shape_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("group_norm_invalid_bias_shape_test.onnx"); }));
}
TEST_CASE(hardsigmoid_default_test)
{
migraphx::program p;
@@ -3602,7 +3770,7 @@ TEST_CASE(less_test)
auto le = mm->add_instruction(migraphx::make_op("less"), input1, input2);
auto ret = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::bool_type)}}),
le);
mm->add_return({ret});
@@ -3621,7 +3789,7 @@ TEST_CASE(less_bool_test)
auto input2 = mm->add_parameter("x2", sb);
auto cin1 = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::bool_type)}}),
input1);
auto ret = mm->add_instruction(migraphx::make_op("less"), cin1, input2);
mm->add_return({ret});
@@ -3648,6 +3816,149 @@ TEST_CASE(lessorequal_test)
EXPECT(p == prog);
}
migraphx::program make_layer_norm(const std::vector<int64_t>& input_shape,
const std::vector<int64_t>& scale_bias_shape,
const std::vector<int64_t>& reduce_axes,
size_t skipped_axis,
bool skip_bias = false,
const float eps_value = 1e-5f,
const migraphx::shape::type_t dtype = migraphx::shape::float_type)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", {dtype, input_shape});
auto scale = mm->add_parameter("scale", {dtype, scale_bias_shape});
migraphx::instruction_ref bias;
if(not skip_bias)
{
bias = mm->add_parameter("bias", {dtype, scale_bias_shape});
}
auto eps = mm->add_literal(migraphx::literal{dtype, {eps_value}});
auto mean = mm->add_instruction(migraphx::make_op("reduce_mean", {{"axes", reduce_axes}}), x);
auto x_sub_mean = add_common_op(*mm, migraphx::make_op("sub"), {x, mean});
auto x_sqdiff_mean = add_common_op(*mm, migraphx::make_op("sqdiff"), {x, mean});
auto var = mm->add_instruction(migraphx::make_op("reduce_mean", {{"axes", reduce_axes}}),
x_sqdiff_mean);
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {var, eps});
auto rsqrt = mm->add_instruction(migraphx::make_op("rsqrt"), {var_eps});
auto result = add_common_op(*mm, migraphx::make_op("mul"), {x_sub_mean, rsqrt});
migraphx::instruction_ref scale_bcast = scale;
migraphx::instruction_ref bias_bcast = bias;
if(skipped_axis > 0)
{
scale_bcast = mm->add_instruction(
migraphx::make_op("broadcast", {{"axis", skipped_axis}, {"out_lens", input_shape}}),
scale);
if(not skip_bias)
{
bias_bcast = mm->add_instruction(
migraphx::make_op("broadcast", {{"axis", skipped_axis}, {"out_lens", input_shape}}),
bias);
}
}
auto scaled = mm->add_instruction(migraphx::make_op("mul"), {result, scale_bcast});
if(not skip_bias)
{
mm->add_instruction(migraphx::make_op("add"), {scaled, bias_bcast});
}
return p;
}
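// make_layer_norm builds the expected LayerNormalization decomposition: mean and variance are
// reduced over reduce_axes, the result is (x - mean) * rsqrt(var + eps) * scale (+ bias when
// skip_bias is false), and skipped_axis is the first non-normalized axis used to broadcast
// scale/bias back to input_shape. For example, axis=-1 on a {3, 4} input maps to
// reduce_axes = {1} and skipped_axis = 1, as in layer_norm_2d_axis_minus_one_test below.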
TEST_CASE(layer_norm_2d_axis_zero_test)
{
migraphx::program p = make_layer_norm({3, 4}, {3, 4}, {0, 1}, 0);
auto prog = optimize_onnx("layer_norm_2d_axis_zero_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(layer_norm_2d_axis_one_test)
{
migraphx::program p = make_layer_norm({3, 4}, {4}, {1}, 1);
auto prog = optimize_onnx("layer_norm_2d_axis_one_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(layer_norm_2d_axis_minus_one_test)
{
migraphx::program p = make_layer_norm({3, 4}, {4}, {1}, 1);
auto prog = optimize_onnx("layer_norm_2d_axis_one_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(layer_norm_3d_test)
{
migraphx::program p = make_layer_norm({1, 4, 2}, {2}, {2}, 2);
auto prog = optimize_onnx("layer_norm_3d_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(layer_norm_3d_half_test)
{
migraphx::program p =
make_layer_norm({1, 4, 2}, {2}, {2}, 2, false, 1e-5f, migraphx::shape::half_type);
auto prog = optimize_onnx("layer_norm_3d_half_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(layer_norm_4d_test)
{
migraphx::program p = make_layer_norm({3, 3, 3, 3}, {3}, {3}, 3);
auto prog = optimize_onnx("layer_norm_4d_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(layer_norm_4d_half_test)
{
migraphx::program p =
make_layer_norm({3, 3, 3, 3}, {3}, {3}, 3, false, 1e-5f, migraphx::shape::half_type);
auto prog = optimize_onnx("layer_norm_4d_half_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(layer_norm_invalid_axis_error_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("layer_norm_invalid_axis_error_test.onnx"); }));
}
TEST_CASE(layer_norm_invalid_minus_axis_error_test)
{
EXPECT(test::throws(
[&] { migraphx::parse_onnx("layer_norm_invalid_minus_axis_error_test.onnx"); }));
}
TEST_CASE(layer_norm_invalid_input_count_error_test)
{
EXPECT(test::throws(
[&] { migraphx::parse_onnx("layer_norm_invalid_input_count_error_test.onnx"); }));
}
TEST_CASE(layer_norm_without_bias_test)
{
migraphx::program p = make_layer_norm({1, 2}, {2}, {1}, 1, true);
auto prog = optimize_onnx("layer_norm_without_bias_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(layer_norm_small_eps_half_test)
{
migraphx::program p =
make_layer_norm({1, 2}, {2}, {1}, 1, true, 1e-7, migraphx::shape::half_type);
auto prog = optimize_onnx("layer_norm_small_eps_half_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(log_test)
{
migraphx::program p;
@@ -4219,6 +4530,66 @@ TEST_CASE(mean_integral_test)
EXPECT(p == prog);
}
void mvn_n_rank_test(std::vector<int64_t> axes,
std::vector<size_t> input_shape,
const std::string& test_file)
{
using migraphx::make_op;
migraphx::program p;
auto* mm = p.get_main_module();
auto data = mm->add_parameter("data", {migraphx::shape::float_type, std::move(input_shape)});
auto data_mean = mm->add_instruction(make_op("reduce_mean", {{"axes", axes}}), data);
auto data_mean_squared = add_common_op(*mm, make_op("mul"), {data_mean, data_mean});
auto data_squared = add_common_op(*mm, make_op("mul"), {data, data});
auto data_squared_mean =
mm->add_instruction(make_op("reduce_mean", {{"axes", axes}}), data_squared);
auto mean_sub = add_common_op(*mm, make_op("sub"), {data_squared_mean, data_mean_squared});
auto std = add_common_op(*mm, make_op("sqrt"), {mean_sub});
auto dividend = add_common_op(*mm, make_op("sub"), {data, data_mean});
auto epsilon = mm->add_literal({migraphx::shape::float_type, {1e-9}});
auto divisor = add_common_op(*mm, make_op("add"), {std, epsilon});
add_common_op(*mm, make_op("div"), {dividend, divisor});
auto prog = optimize_onnx(test_file);
EXPECT(p == prog);
}
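// mvn_n_rank_test encodes MeanVarianceNormalization through the variance identity:
// out = (data - E[data]) / (sqrt(E[data^2] - E[data]^2) + 1e-9), with both means reduced over
// the given axes and the epsilon added to the standard deviation outside the square root,
// matching the instruction sequence above.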
TEST_CASE(mvn_default_axes_test)
{
mvn_n_rank_test({0, 2, 3}, {2, 2, 2, 2}, "mvn_default_axes_test.onnx");
}
TEST_CASE(mvn_default_axes_rank_too_small_test)
{
EXPECT(
test::throws([&] { migraphx::parse_onnx("mvn_default_axes_rank_too_small_test.onnx"); }));
}
TEST_CASE(mvn_default_axes_rank_too_big_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("mvn_default_axes_rank_too_big_test.onnx"); }));
}
TEST_CASE(mvn_rank_2_test) { mvn_n_rank_test({1}, {2, 2}, "mvn_rank_2_test.onnx"); }
TEST_CASE(mvn_rank_3_test) { mvn_n_rank_test({0, 1}, {2, 2, 2}, "mvn_rank_3_test.onnx"); }
TEST_CASE(mvn_axes_rank_too_small_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("mvn_axes_rank_too_small_test.onnx"); }));
}
TEST_CASE(mvn_axes_rank_too_big_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("mvn_axes_rank_too_big_test.onnx"); }));
}
TEST_CASE(min_test)
{
migraphx::program p;
@@ -4676,6 +5047,22 @@ TEST_CASE(pad_test)
EXPECT(p == prog);
}
TEST_CASE(pad_asym_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 4, 5}});
mm->add_instruction(migraphx::make_op("pad", {{"pads", {0, 1, 0, 3, 0, 2, 0, 4}}}), l0);
auto prog = optimize_onnx("pad_asym_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(pad_asym_invalid_pads_error_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("pad_asym_invalid_pads_error_test.onnx"); }));
}
TEST_CASE(pad_3arg_test)
{
migraphx::program p;
@@ -4692,6 +5079,51 @@ TEST_CASE(pad_3arg_test)
EXPECT(p == prog);
}
TEST_CASE(pad_4arg_axes_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 4, 5}});
// axes=[1,3]
mm->add_literal({migraphx::shape{migraphx::shape::int32_type, {2}}, {1, 3}});
// constant_value=1
mm->add_literal({migraphx::shape{migraphx::shape::float_type}, {1.0f}});
// pads=[1,3,2,4]
mm->add_literal({migraphx::shape{migraphx::shape::int32_type, {4}}, {1, 3, 2, 4}});
auto r = mm->add_instruction(
migraphx::make_op("pad", {{"pads", {0, 1, 0, 3, 0, 2, 0, 4}}, {"value", 1.0f}}), l0);
mm->add_return({r});
auto prog = migraphx::parse_onnx("pad_4arg_axes_test.onnx");
EXPECT(p == prog);
}
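// Worked mapping for the literals above: the input has rank 4 and axes = {1, 3}, so the ONNX
// pads input {1, 3, 2, 4} is read as {begin(axis 1), begin(axis 3), end(axis 1), end(axis 3)}.
// Expanded over all four dimensions this gives begins {0, 1, 0, 3} followed by ends
// {0, 2, 0, 4}, i.e. the {0, 1, 0, 3, 0, 2, 0, 4} pads attribute on the pad instruction.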
TEST_CASE(pad_4arg_invalid_axes_error_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("pad_4arg_invalid_axes_error_test.onnx"); }));
}
TEST_CASE(pad_4arg_neg_axes_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 4, 5}});
// axes=[-3,-1]
mm->add_literal({migraphx::shape{migraphx::shape::int32_type, {2}}, {-3, -1}});
// constant_value=1
mm->add_literal({migraphx::shape{migraphx::shape::float_type}, {1.0f}});
// pads=[1,3,2,4]
mm->add_literal({migraphx::shape{migraphx::shape::int32_type, {4}}, {1, 3, 2, 4}});
auto r = mm->add_instruction(
migraphx::make_op("pad", {{"pads", {0, 1, 0, 3, 0, 2, 0, 4}}, {"value", 1.0f}}), l0);
mm->add_return({r});
auto prog = migraphx::parse_onnx("pad_4arg_neg_axes_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(pad_attr_dyn_test)
{
migraphx::program p;
@@ -4750,6 +5182,27 @@ TEST_CASE(pad_reflect_test)
EXPECT(p == prog);
}
TEST_CASE(pad_reflect_with_axes_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 2}});
mm->add_literal({migraphx::shape{migraphx::shape::int32_type, {1}}, {1}});
mm->add_literal({migraphx::shape{migraphx::shape::int32_type, {2}}, {2, 1}});
auto l1 = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0, 1}}, {"starts", {0, 1}}, {"ends", {2, 2}}}), l0);
auto l2 = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0, 1}}, {"starts", {0, 0}}, {"ends", {2, 1}}}), l0);
auto l3 = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0, 1}}, {"starts", {0, 0}}, {"ends", {2, 1}}}), l0);
auto r = mm->add_instruction(migraphx::make_op("concat", {{"axis", 1}}), l2, l1, l0, l3);
mm->add_return({r});
auto prog = migraphx::parse_onnx("pad_reflect_with_axes_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(pad_reflect_multiaxis_test)
{
migraphx::program p;
@@ -5116,6 +5569,31 @@ TEST_CASE(qlinearmatmul_2D_test)
EXPECT(p.sort() == prog.sort());
}
migraphx::instruction_ref insert_quantizelinear_clip(migraphx::module& m,
const migraphx::instruction_ref ins,
const migraphx::instruction_ref round,
const migraphx::shape s,
const int64_t min_quant,
const int64_t max_quant)
{
migraphx::instruction_ref min_arg;
migraphx::instruction_ref max_arg;
if(migraphx::enabled(MIGRAPHX_ENABLE_CK_WORKAROUNDS{}))
{
std::vector<int> min_data(s.elements(), min_quant);
std::vector<int> max_data(s.elements(), max_quant);
min_arg = m.add_literal(migraphx::literal(s, min_data));
max_arg = m.add_literal(migraphx::literal(s, max_data));
}
else
{
min_arg = m.add_literal(migraphx::literal{migraphx::shape{s.type()}, {min_quant}});
max_arg = m.add_literal(migraphx::literal{migraphx::shape{s.type()}, {max_quant}});
}
return migraphx::insert_common_op(m, ins, migraphx::make_op("clip"), {round, min_arg, max_arg});
}
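// insert_quantizelinear_clip keeps both expected clip forms in one helper: when
// MIGRAPHX_ENABLE_CK_WORKAROUNDS is enabled, the min/max quantization bounds are materialized
// as full-size literals of shape s; otherwise they stay scalar literals that insert_common_op
// broadcasts before the clip. The quantizelinear tests below pass {0, 255} for uint8 outputs
// and {-128, 127} for int8 outputs.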
TEST_CASE(quantizelinear_test)
{
migraphx::program p;
@@ -5124,16 +5602,10 @@ TEST_CASE(quantizelinear_test)
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1}});
auto l1_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {5}}}), l1);
auto div = mm->add_instruction(migraphx::make_op("div"), l0, l1_mbcast);
auto round = mm->add_instruction(migraphx::make_op("round"), div);
auto s = round->get_shape();
auto min_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {0}}); auto clip = insert_quantizelinear_clip(*mm, div, round, s, 0, 255);
auto max_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {255}});
auto min_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), min_arg);
auto max_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), max_arg);
auto clip = mm->add_instruction(migraphx::make_op("clip"), round, min_mbcast, max_mbcast);
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::uint8_type)}}),
@@ -5155,16 +5627,10 @@ TEST_CASE(quantizelinear_int32_test)
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
l0);
auto div = mm->add_instruction(migraphx::make_op("div"), l0, l1_mbcast);
auto round = mm->add_instruction(migraphx::make_op("round"), div);
auto s = round->get_shape();
auto min_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {0}}); auto clip = insert_quantizelinear_clip(*mm, div, round, s, 0, 255);
auto max_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {255}});
auto min_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), min_arg);
auto max_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), max_arg);
auto clip = mm->add_instruction(migraphx::make_op("clip"), round, min_mbcast, max_mbcast);
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::uint8_type)}}),
@@ -5191,15 +5657,9 @@ TEST_CASE(quantizelinear_zero_point_test)
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
l2_mbcast);
auto add = mm->add_instruction(migraphx::make_op("add"), round, l2_mbcast);
auto s = round->get_shape();
auto min_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {-128}}); auto clip = insert_quantizelinear_clip(*mm, div, add, s, -128, 127);
auto max_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {127}});
auto min_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), min_arg);
auto max_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), max_arg);
auto clip = mm->add_instruction(migraphx::make_op("clip"), add, min_mbcast, max_mbcast);
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::int8_type)}}),
@@ -5230,15 +5690,9 @@ migraphx::program make_quantizelinear_axis_prog()
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
l2_bcast);
auto add = mm->add_instruction(migraphx::make_op("add"), round, l2_bcast);
auto s = round->get_shape();
auto min_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {-128}}); auto clip = insert_quantizelinear_clip(*mm, div, add, s, -128, 127);
auto max_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {127}});
auto min_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), min_arg);
auto max_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), max_arg);
auto clip = mm->add_instruction(migraphx::make_op("clip"), add, min_mbcast, max_mbcast);
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::int8_type)}}),
@@ -5463,7 +5917,7 @@ TEST_CASE(reducel1_dyn_test)
// a shape with 4 dynamic dimensions
auto l0 = mm->add_parameter("x",
migraphx::shape{migraphx::shape::float_type,
{{3, 3}, {3, 5}, {4, 6, {5}}, {5, 7, {6}}}});
auto abs_ins = mm->add_instruction(migraphx::make_op("abs"), l0);
auto sum_ins =
mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {-2}}}), abs_ins);
@@ -5483,7 +5937,7 @@ TEST_CASE(reducel1_dyn_test)
// No axes given in the onnx file. Parser should default to all axes.
auto l0 = mm->add_parameter("x",
migraphx::shape{migraphx::shape::float_type,
{{3, 3}, {3, 5}, {4, 6, {5}}, {5, 7, {6}}}});
auto abs_ins = mm->add_instruction(migraphx::make_op("abs"), l0);
auto sum_ins =
mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {0, 1, 2, 3}}}), abs_ins);
@@ -5719,6 +6173,38 @@ TEST_CASE(reshape_non_standard_test)
EXPECT(p == prog);
}
TEST_CASE(reshape_variable_input_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto p0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {4, 2, 3}});
auto p1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::int64_type, {2}});
auto alloc = mm->add_instruction(
migraphx::make_op("allocate", {{"buf_type", migraphx::shape::float_type}}), p1);
mm->add_instruction(migraphx::make_op("reshape"), p0, alloc);
auto prog = optimize_onnx("reshape_variable_input_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(reshape_variable_input_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto p0 = mm->add_parameter(
"0", migraphx::shape{migraphx::shape::float_type, {{1, 4}, {2, 2}, {3, 3}}});
auto p1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::int64_type, {2}});
auto alloc = mm->add_instruction(
migraphx::make_op("allocate", {{"buf_type", migraphx::shape::float_type}}), p1);
auto reshape = mm->add_instruction(migraphx::make_op("reshape"), p0, alloc);
mm->add_return({reshape});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4};
auto prog = parse_onnx("reshape_variable_input_dyn_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(resize_downsample_c_test)
{
migraphx::program p;
@@ -6610,6 +7096,73 @@ TEST_CASE(shape_gather_test)
EXPECT(p == prog);
}
TEST_CASE(shrink_hard_test)
{
migraphx::program p;
float bias = 0.0;
float lambd = 1.5;
std::vector<size_t> lens{5};
auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, lens});
auto lit_bias = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {bias}});
auto lit_neg_lambd = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {-lambd}});
auto lit_lambd = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {lambd}});
auto x_plus_bias = add_common_op(*mm, migraphx::make_op("add"), {x, lit_bias});
auto x_min_bias = add_common_op(*mm, migraphx::make_op("sub"), {x, lit_bias});
auto cond1 = add_common_op(*mm, migraphx::make_op("less"), {x, lit_neg_lambd});
auto cond2_a = add_common_op(*mm, migraphx::make_op("not"), {cond1});
auto cond2_b = add_common_op(*mm, migraphx::make_op("greater"), {x, lit_lambd});
auto cond2 = add_common_op(*mm, migraphx::make_op("logical_and"), {cond2_a, cond2_b});
auto mul1 = mm->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::float_type}}), cond1);
auto mul2 = mm->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::float_type}}), cond2);
auto first = add_common_op(*mm, migraphx::make_op("mul"), {mul1, x_plus_bias});
auto second = add_common_op(*mm, migraphx::make_op("mul"), {mul2, x_min_bias});
add_common_op(*mm, migraphx::make_op("add"), {first, second});
auto prog = optimize_onnx("shrink_hard_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(shrink_int8_test)
{
migraphx::program p;
float bias = 1.5;
float lambd = 1.5;
std::vector<size_t> lens{3, 3};
auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::int8_type, lens});
auto lit_bias = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {bias}});
auto lit_neg_lambd = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {-lambd}});
auto lit_lambd = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {lambd}});
auto x_plus_bias = add_common_op(*mm, migraphx::make_op("add"), {x, lit_bias});
auto x_min_bias = add_common_op(*mm, migraphx::make_op("sub"), {x, lit_bias});
auto cond1 = add_common_op(*mm, migraphx::make_op("less"), {x, lit_neg_lambd});
auto cond2_a = add_common_op(*mm, migraphx::make_op("not"), {cond1});
auto cond2_b = add_common_op(*mm, migraphx::make_op("greater"), {x, lit_lambd});
auto cond2 = add_common_op(*mm, migraphx::make_op("logical_and"), {cond2_a, cond2_b});
auto mul1 = mm->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::int8_type}}), cond1);
auto mul2 = mm->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::int8_type}}), cond2);
auto first = add_common_op(*mm, migraphx::make_op("mul"), {mul1, x_plus_bias});
auto second = add_common_op(*mm, migraphx::make_op("mul"), {mul2, x_min_bias});
auto ret = add_common_op(*mm, migraphx::make_op("add"), {first, second});
mm->add_instruction(migraphx::make_op("convert", {{"target_type", migraphx::shape::int8_type}}),
ret);
auto prog = optimize_onnx("shrink_int8_test.onnx");
EXPECT(p == prog);
}
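// Both shrink tests build the expected expansion of the ONNX Shrink function elementwise:
//   y = x + bias if x < -lambd, y = x - bias if x > lambd, and y = 0 otherwise,
// written above as two boolean conditions converted to numeric masks and combined as
// mask1 * (x + bias) + mask2 * (x - bias); the int8 variant adds a final convert back to int8.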
TEST_CASE(sign_test)
{
migraphx::program p;
@@ -7169,7 +7722,7 @@ TEST_CASE(squeeze_unsqueeze_dyn_test)
std::vector<int64_t> unsqueeze_axes{0, 1, 3, 5};
auto l0 = mm->add_parameter("0",
migraphx::shape{migraphx::shape::float_type,
{{1, 1}, {1, 4}, {1, 1}, {1, 1}, {1, 4}, {1, 1}}});
auto c0 = mm->add_instruction(migraphx::make_op("contiguous"), l0);
auto l1 = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", squeeze_axes}}), c0);
auto c1 = mm->add_instruction(migraphx::make_op("contiguous"), l1);
@@ -7249,7 +7802,7 @@ TEST_CASE(sum_int_test)
auto input2 = mm->add_parameter("2", migraphx::shape{migraphx::shape::uint32_type, {3}});
auto cin0 = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::uint32_type)}}),
input0);
auto cin1 = mm->add_instruction(
migraphx::make_op("convert",
@@ -7568,11 +8121,6 @@ TEST_CASE(transpose_gather_test)
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(trilu_neg_k_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("trilu_neg_k_test.onnx"); }));
}
TEST_CASE(undefined_test)
{
migraphx::program p;
...