Unverified commit a24ed87e authored by Chris Austen, committed by GitHub

Merge branch 'develop' into optimize_jenkinsfile

parents 6481cd69 a09dc502
[Binary ONNX file: loop_test_implicit_tripcnt — Loop graph with max_trip_count and keep_going_cond inputs and an Add/Sub/Greater loop body; protobuf contents not human-readable in this view.]

[Binary ONNX file: maxpool_dilate_test — MaxPool graph with dilations, kernel_shape, pads and strides attributes; protobuf contents not human-readable in this view.]
@@ -1092,6 +1092,115 @@ TEST_CASE(lstm_forward)
}
}
TEST_CASE(lstm_forward_layout)
{
std::size_t sl = 5; // sequence len
std::size_t bs = 3; // batch size
std::size_t hs = 20; // hidden size
std::size_t is = 10; // input size
std::size_t nd = 1; // num directions
float clip = 0.0f;
int input_forget = 1;
migraphx::shape seq_shape{migraphx::shape::float_type, {bs, sl, is}};
migraphx::shape w_shape{migraphx::shape::float_type, {nd, 4 * hs, is}};
migraphx::shape r_shape{migraphx::shape::float_type, {nd, 4 * hs, hs}};
migraphx::shape bias_shape{migraphx::shape::float_type, {nd, 8 * hs}};
migraphx::shape sl_shape{migraphx::shape::int32_type, {bs}};
migraphx::shape ih_shape{migraphx::shape::float_type, {bs, nd, hs}};
migraphx::shape pph_shape{migraphx::shape::float_type, {nd, 3 * hs}};
// 8 args, hs and last output
{
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_parameter("seq", seq_shape);
auto w = mm->add_parameter("w", w_shape);
auto r = mm->add_parameter("r", r_shape);
auto bias = mm->add_parameter("bias", bias_shape);
auto seq_len = mm->add_parameter("seq_len", sl_shape);
auto ih = mm->add_parameter("h0", ih_shape);
auto ic = mm->add_parameter("c0", ih_shape);
auto pph = mm->add_parameter("pph", pph_shape);
std::vector<int64_t> perm{1, 0, 2};
seq = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), seq);
ih = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ih);
ic = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ic);
auto out_hs = mm->add_instruction(
migraphx::make_op(
"lstm",
{{"hidden_size", hs},
{"actv_func",
migraphx::to_value(std::vector<migraphx::operation>{migraphx::make_op("sigmoid"),
migraphx::make_op("tanh"),
migraphx::make_op("tanh")})},
{"direction", migraphx::to_value(migraphx::op::rnn_direction::forward)},
{"clip", clip},
{"input_forget", input_forget}}),
seq,
w,
r,
bias,
seq_len,
ih,
ic,
pph);
auto last_output = mm->add_instruction(migraphx::make_op("rnn_last_hs_output"), out_hs);
std::vector<int64_t> perm_hid{2, 0, 1, 3};
out_hs = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm_hid}}),
out_hs);
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), last_output);
auto prog = optimize_onnx("lstm_f_layout_hs_test.onnx");
EXPECT(p == prog);
}
// 8 args, cell output
{
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_parameter("seq", seq_shape);
auto w = mm->add_parameter("w", w_shape);
auto r = mm->add_parameter("r", r_shape);
auto bias = mm->add_parameter("bias", bias_shape);
auto seq_len = mm->add_parameter("seq_len", sl_shape);
auto ih = mm->add_parameter("h0", ih_shape);
auto ic = mm->add_parameter("c0", ih_shape);
auto pph = mm->add_parameter("pph", pph_shape);
std::vector<int64_t> perm{1, 0, 2};
seq = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), seq);
ih = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ih);
ic = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ic);
auto out_hs = mm->add_instruction(
migraphx::make_op(
"lstm",
{{"hidden_size", hs},
{"actv_func",
migraphx::to_value(std::vector<migraphx::operation>{migraphx::make_op("sigmoid"),
migraphx::make_op("tanh"),
migraphx::make_op("tanh")})},
{"direction", migraphx::to_value(migraphx::op::rnn_direction::forward)},
{"clip", clip},
{"input_forget", input_forget}}),
seq,
w,
r,
bias,
seq_len,
ih,
ic,
pph);
auto last_cell = mm->add_instruction(migraphx::make_op("rnn_last_cell_output"), out_hs);
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), last_cell);
auto prog = optimize_onnx("lstm_f_layout_cell_test.onnx");
EXPECT(p == prog);
}
}
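// Note on the layout tests in this section (reading inferred from the expected programs above
// and below, not an authoritative statement of the parser): with the ONNX layout=1 convention
// the tensors arrive batch-first, so seq/h0/c0 are transposed with permutation {1, 0, 2} into
// the sequence-first form the lstm op consumes, and the outputs are transposed back afterwards
// ({2, 0, 1, 3} for the full hidden-state output, {1, 0, 2} for the last hidden/cell state).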
// activation functions
TEST_CASE(lstm_forward_actv_func)
{
@@ -1342,6 +1451,117 @@ TEST_CASE(lstm_reverse)
}
}
TEST_CASE(lstm_reverse_layout)
{
std::size_t sl = 5; // sequence len
std::size_t bs = 3; // batch size
std::size_t hs = 20; // hidden size
std::size_t is = 10; // input size
std::size_t nd = 1; // num directions
float clip = 0.0f;
int input_forget = 1;
migraphx::shape seq_shape{migraphx::shape::float_type, {bs, sl, is}};
migraphx::shape w_shape{migraphx::shape::float_type, {nd, 4 * hs, is}};
migraphx::shape r_shape{migraphx::shape::float_type, {nd, 4 * hs, hs}};
migraphx::shape bias_shape{migraphx::shape::float_type, {nd, 8 * hs}};
migraphx::shape sl_shape{migraphx::shape::int32_type, {bs}};
migraphx::shape ih_shape{migraphx::shape::float_type, {bs, nd, hs}};
migraphx::shape pph_shape{migraphx::shape::float_type, {nd, 3 * hs}};
// 8 args, hs output
{
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_parameter("seq", seq_shape);
auto w = mm->add_parameter("w", w_shape);
auto r = mm->add_parameter("r", r_shape);
auto bias = mm->add_parameter("bias", bias_shape);
auto seq_len = mm->add_parameter("seq_len", sl_shape);
auto ih = mm->add_parameter("h0", ih_shape);
auto ic = mm->add_parameter("c0", ih_shape);
auto pph = mm->add_parameter("pph", pph_shape);
std::vector<int64_t> perm{1, 0, 2};
seq = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), seq);
ih = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ih);
ic = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ic);
auto out_hs = mm->add_instruction(
migraphx::make_op(
"lstm",
{{"hidden_size", hs},
{"actv_func",
migraphx::to_value(std::vector<migraphx::operation>{migraphx::make_op("sigmoid"),
migraphx::make_op("tanh"),
migraphx::make_op("tanh")})},
{"direction", migraphx::to_value(migraphx::op::rnn_direction::reverse)},
{"clip", clip},
{"input_forget", input_forget}}),
seq,
w,
r,
bias,
seq_len,
ih,
ic,
pph);
std::vector<int64_t> perm_hid{2, 0, 1, 3};
out_hs = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm_hid}}),
out_hs);
auto prog = optimize_onnx("lstm_r_layout_test.onnx");
EXPECT(p == prog);
}
// 8 args, last and cell output
{
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_parameter("seq", seq_shape);
auto w = mm->add_parameter("w", w_shape);
auto r = mm->add_parameter("r", r_shape);
auto bias = mm->add_parameter("bias", bias_shape);
auto seq_len = mm->add_parameter("seq_len", sl_shape);
auto ih = mm->add_parameter("h0", ih_shape);
auto ic = mm->add_parameter("c0", ih_shape);
auto pph = mm->add_parameter("pph", pph_shape);
std::vector<int64_t> perm{1, 0, 2};
seq = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), seq);
ih = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ih);
ic = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ic);
auto out_hs = mm->add_instruction(
migraphx::make_op(
"lstm",
{{"hidden_size", hs},
{"actv_func",
migraphx::to_value(std::vector<migraphx::operation>{migraphx::make_op("sigmoid"),
migraphx::make_op("tanh"),
migraphx::make_op("tanh")})},
{"direction", migraphx::to_value(migraphx::op::rnn_direction::reverse)},
{"clip", clip},
{"input_forget", input_forget}}),
seq,
w,
r,
bias,
seq_len,
ih,
ic,
pph);
auto last_output = mm->add_instruction(migraphx::make_op("rnn_last_hs_output"), out_hs);
auto last_cell = mm->add_instruction(migraphx::make_op("rnn_last_cell_output"), out_hs);
last_output = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}),
last_output);
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), last_cell);
auto prog = optimize_onnx("lstm_r_layout_hs_cell_test.onnx");
EXPECT(p == prog);
}
}
TEST_CASE(lstm_bidirectional)
{
std::size_t sl = 5; // sequence len
@@ -1594,6 +1814,118 @@ TEST_CASE(lstm_bidirectional)
}
}
TEST_CASE(lstm_bidirectional_layout)
{
std::size_t sl = 5; // sequence len
std::size_t bs = 3; // batch size
std::size_t hs = 20; // hidden size
std::size_t is = 10; // input size
std::size_t nd = 2; // num directions
float clip = 0.0f;
int input_forget = 1;
migraphx::shape seq_shape{migraphx::shape::float_type, {bs, sl, is}};
migraphx::shape w_shape{migraphx::shape::float_type, {nd, 4 * hs, is}};
migraphx::shape r_shape{migraphx::shape::float_type, {nd, 4 * hs, hs}};
migraphx::shape bias_shape{migraphx::shape::float_type, {nd, 8 * hs}};
migraphx::shape sl_shape{migraphx::shape::int32_type, {bs}};
migraphx::shape ih_shape{migraphx::shape::float_type, {bs, nd, hs}};
migraphx::shape pph_shape{migraphx::shape::float_type, {nd, 3 * hs}};
// 0 activation function
{
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_parameter("seq", seq_shape);
auto w = mm->add_parameter("w", w_shape);
auto r = mm->add_parameter("r", r_shape);
auto bias = mm->add_parameter("bias", bias_shape);
auto seq_len = mm->add_parameter("seq_len", sl_shape);
auto ih = mm->add_parameter("h0", ih_shape);
auto ic = mm->add_parameter("c0", ih_shape);
auto pph = mm->add_parameter("pph", pph_shape);
std::vector<int64_t> perm{1, 0, 2};
seq = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), seq);
ih = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ih);
ic = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ic);
auto out_hs = mm->add_instruction(
migraphx::make_op(
"lstm",
{{"hidden_size", hs},
{"actv_func",
migraphx::to_value(std::vector<migraphx::operation>{migraphx::make_op("sigmoid"),
migraphx::make_op("tanh"),
migraphx::make_op("tanh"),
migraphx::make_op("sigmoid"),
migraphx::make_op("tanh"),
migraphx::make_op("tanh")})},
{"direction", migraphx::to_value(migraphx::op::rnn_direction::bidirectional)},
{"clip", clip},
{"input_forget", input_forget}}),
seq,
w,
r,
bias,
seq_len,
ih,
ic,
pph);
auto last_output = mm->add_instruction(migraphx::make_op("rnn_last_hs_output"), out_hs);
std::vector<int64_t> perm_hid{2, 0, 1, 3};
out_hs = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm_hid}}),
out_hs);
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), last_output);
auto prog = optimize_onnx("lstm_bi_layout_last_test.onnx");
EXPECT(p == prog);
}
{
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_parameter("seq", seq_shape);
auto w = mm->add_parameter("w", w_shape);
auto r = mm->add_parameter("r", r_shape);
auto bias = mm->add_parameter("bias", bias_shape);
auto seq_len = mm->add_parameter("seq_len", sl_shape);
auto ih = mm->add_parameter("h0", ih_shape);
auto ic = mm->add_parameter("c0", ih_shape);
auto pph = mm->add_parameter("pph", pph_shape);
std::vector<int64_t> perm{1, 0, 2};
seq = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), seq);
ih = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ih);
ic = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ic);
auto out_hs = mm->add_instruction(
migraphx::make_op(
"lstm",
{{"hidden_size", hs},
{"actv_func",
migraphx::to_value(std::vector<migraphx::operation>{migraphx::make_op("sigmoid"),
migraphx::make_op("tanh"),
migraphx::make_op("tanh"),
migraphx::make_op("sigmoid"),
migraphx::make_op("tanh"),
migraphx::make_op("tanh")})},
{"direction", migraphx::to_value(migraphx::op::rnn_direction::bidirectional)},
{"clip", clip},
{"input_forget", input_forget}}),
seq,
w,
r,
bias,
seq_len,
ih,
ic,
pph);
auto last_cell = mm->add_instruction(migraphx::make_op("rnn_last_cell_output"), out_hs);
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), last_cell);
auto prog = optimize_onnx("lstm_bi_layout_cell_test.onnx");
EXPECT(p == prog);
}
}
TEST_CASE(lstm_bi_actv_funcs)
{
std::size_t sl = 5; // sequence len
...
@@ -296,13 +296,32 @@ TEST_CASE(averagepool_1d_test)
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0}},
{"stride", {1}},
{"lengths", {3}},
{"dilations", {1}}}),
l0);
auto prog = optimize_onnx("averagepool_1d_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(averagepool_dilate_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 4, 3}});
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {1, 1}},
{"stride", {1}},
{"lengths", {2}},
{"dilations", {3}}}),
input);
auto prog = optimize_onnx("averagepool_dilate_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(averagepool_3d_test)
{
migraphx::program p;
@@ -312,7 +331,8 @@ TEST_CASE(averagepool_3d_test)
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0, 0, 0}},
{"stride", {1, 1, 1}},
{"lengths", {3, 3, 3}},
{"dilations", {1, 1, 1}}}),
l0);
auto prog = optimize_onnx("averagepool_3d_test.onnx");
@@ -332,6 +352,7 @@ TEST_CASE(averagepool_dyn_test)
{"mode", migraphx::op::pooling_mode::average},
{"stride", {2, 2, 2}},
{"lengths", {3, 3, 3}},
{"dilations", {1, 1, 1}},
{"padding", {1, 1, 1, 1, 1, 1}},
{"padding_mode", 0},
}),
@@ -357,6 +378,7 @@ TEST_CASE(averagepool_dyn_autopad_test)
{"mode", migraphx::op::pooling_mode::average},
{"stride", {2, 2, 2}},
{"lengths", {3, 3, 3}},
{"dilations", {1, 1, 1}},
{"padding", {0, 0, 0, 0, 0, 0}},
{"padding_mode", migraphx::op::padding_mode_t::same_upper},
}),
@@ -394,7 +416,8 @@ TEST_CASE(averagepool_notset_test)
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {2, 2, 2, 2}},
{"stride", {2, 2}},
{"lengths", {6, 6}},
{"dilations", {1, 1}}}),
input);
auto ret = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {1, 1}}, {"ends", {2, 2}}}), ins);
@@ -415,7 +438,8 @@ TEST_CASE(averagepool_nt_cip_test)
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0}},
{"stride", {2, 2}},
{"lengths", {6, 6}},
{"dilations", {1, 1}}}),
ins_pad);
mm->add_return({ret});
@@ -437,6 +461,7 @@ TEST_CASE(averagepool_same_lower_test)
{"padding", {1, 1, 1, 1}},
{"stride", {1, 1}},
{"lengths", {2, 2}},
{"dilations", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::default_},
}),
input);
@@ -459,7 +484,8 @@ TEST_CASE(averagepool_sl_cip_test)
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0}},
{"stride", {1, 1}},
{"lengths", {2, 2}},
{"dilations", {1, 1}}}),
ins_pad);
mm->add_return({ret});
auto prog = migraphx::parse_onnx("averagepool_sl_cip_test.onnx");
@@ -476,7 +502,8 @@ TEST_CASE(averagepool_same_upper_test)
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {1, 1, 1, 1}},
{"stride", {1, 1}},
{"lengths", {2, 2}},
{"dilations", {1, 1}}}),
input);
auto ret = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {1, 1}}, {"ends", {6, 6}}}), ins);
@@ -1307,7 +1334,8 @@ TEST_CASE(conv_bn_relu_maxpool_test)
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0, 0, 0}},
{"stride", {2, 2}},
{"lengths", {2, 2}},
{"dilations", {1, 1}}}),
l7);
auto prog = optimize_onnx("conv_bn_relu_maxpool_test.onnx");
@@ -1505,7 +1533,8 @@ TEST_CASE(conv_relu_maxpool_test)
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0, 0, 0}},
{"stride", {2, 2}},
{"lengths", {2, 2}},
{"dilations", {1, 1}}}),
l6);
auto prog = optimize_onnx("conv_relu_maxpool_test.onnx");
@@ -1530,7 +1559,8 @@ TEST_CASE(conv_relu_maxpool_x2_test)
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0, 0, 0}},
{"stride", {2, 2}},
{"lengths", {2, 2}},
{"dilations", {1, 1}}}),
l6);
auto l8 = mm->add_parameter("3", {migraphx::shape::float_type, {1, 5, 5, 5}});
@@ -1546,7 +1576,8 @@ TEST_CASE(conv_relu_maxpool_x2_test)
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0, 0, 0}},
{"stride", {2, 2}},
{"lengths", {2, 2}},
{"dilations", {1, 1}}}),
l13);
auto prog = optimize_onnx("conv_relu_maxpool_x2_test.onnx");
@@ -3413,6 +3444,82 @@ TEST_CASE(if_tuple_test)
EXPECT(p == prog);
}
TEST_CASE(isinf_half_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::half_type, {2, 3}};
auto t1 = mm->add_parameter("t1", s);
auto ret = mm->add_instruction(migraphx::make_op("isinf"), t1);
mm->add_return({ret});
auto prog = migraphx::parse_onnx("isinf_half_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(isinf_neg_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
auto t1 = mm->add_parameter("t1", s);
auto is_inf = mm->add_instruction(migraphx::make_op("isinf"), t1);
auto zero_l = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0}});
auto mb_zero =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), zero_l);
auto is_neg = mm->add_instruction(migraphx::make_op("less"), t1, mb_zero);
if(is_neg->get_shape().type() != migraphx::shape::bool_type)
{
is_neg = mm->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::bool_type}}), is_neg);
}
auto ret = mm->add_instruction(migraphx::make_op("logical_and"), is_inf, is_neg);
mm->add_return({ret});
auto prog = migraphx::parse_onnx("isinf_neg_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(isinf_double_pos_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::double_type, {2, 3}};
auto t1 = mm->add_parameter("t1", s);
auto is_inf = mm->add_instruction(migraphx::make_op("isinf"), t1);
auto zero_l = mm->add_literal(migraphx::literal{migraphx::shape::double_type, {0}});
auto mb_zero =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), zero_l);
auto is_neg = mm->add_instruction(migraphx::make_op("greater"), t1, mb_zero);
if(is_neg->get_shape().type() != migraphx::shape::bool_type)
{
is_neg = mm->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::bool_type}}), is_neg);
}
auto ret = mm->add_instruction(migraphx::make_op("logical_and"), is_inf, is_neg);
mm->add_return({ret});
auto prog = migraphx::parse_onnx("isinf_double_pos_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(isinf_no_detect_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
mm->add_parameter("t1", s);
auto ret = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}),
mm->add_literal(migraphx::literal{migraphx::shape{migraphx::shape::bool_type}, {false}}));
mm->add_return({ret});
auto prog = migraphx::parse_onnx("isinf_no_detect_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(isnan_float_test)
{
migraphx::program p;
@@ -4169,6 +4276,7 @@ TEST_CASE(lppool_l1_test)
{"padding", {0, 0}},
{"stride", {1}},
{"lengths", {3}},
{"dilations", {1}},
{"lp_order", 1}}),
l0);
auto prog = optimize_onnx("lppool_l1_test.onnx");
@@ -4185,6 +4293,7 @@ TEST_CASE(lppool_l2_test)
{"padding", {0, 0}},
{"stride", {1}},
{"lengths", {3}},
{"dilations", {1}},
{"lp_order", 2}}),
l0);
auto prog = optimize_onnx("lppool_l2_test.onnx");
@@ -4437,7 +4546,8 @@ TEST_CASE(maxpool_notset_test)
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0, 1, 1}},
{"stride", {2, 2}},
{"lengths", {6, 6}},
{"dilations", {1, 1}}}),
input);
auto prog = optimize_onnx("maxpool_notset_test.onnx");
@@ -4445,6 +4555,24 @@ TEST_CASE(maxpool_notset_test)
EXPECT(p == prog);
}
TEST_CASE(maxpool_dilate_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 4, 3}});
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {1, 1}},
{"stride", {1}},
{"lengths", {2}},
{"dilations", {3}}}),
input);
auto prog = optimize_onnx("maxpool_dilate_test.onnx");
EXPECT(p == prog);
}
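// Worked size check for the dilated pooling tests (added for illustration, using the standard
// pooling output formula): with kernel k = 2 and dilation d = 3 the effective window is
// d * (k - 1) + 1 = 4, so the length-3 input padded by 1 on each side gives an output length of
// (3 + 2 * 1 - 4) / 1 + 1 = 2.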
TEST_CASE(maxpool_same_upper_test)
{
migraphx::program p;
@@ -4454,7 +4582,8 @@ TEST_CASE(maxpool_same_upper_test)
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0, 1, 1}},
{"stride", {1, 1}},
{"lengths", {2, 2}},
{"dilations", {1, 1}}}),
input);
auto prog = optimize_onnx("maxpool_same_upper_test.onnx");
@@ -4679,32 +4808,141 @@ TEST_CASE(multinomial_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
size_t sample_size = 13;
size_t batch_size = 3;
size_t categories = 10;
float seed = 0;
auto input = mm->add_parameter(
"input", migraphx::shape{migraphx::shape::float_type, {batch_size, categories}});
auto maxes = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {1}}}), input);
auto mb_maxes = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {batch_size, 10}}}), maxes);
auto cdf = mm->add_instruction(migraphx::make_op("sub"), input, mb_maxes);
cdf = mm->add_instruction(migraphx::make_op("exp"), cdf);
cdf = mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", false}}), cdf);
migraphx::shape s{migraphx::shape::float_type, {1}};
std::vector<float> seed_data = {seed};
auto seed_input = mm->add_literal(migraphx::literal(s, seed_data));
auto rand_dummy = mm->add_literal(
migraphx::literal{migraphx::shape{migraphx::shape::float_type, {batch_size, sample_size}},
std::vector<float>(batch_size * sample_size)});
auto randoms = mm->add_instruction(migraphx::make_op("random_uniform"), seed_input, rand_dummy);
mm->add_instruction(migraphx::make_op("multinomial"), cdf, randoms);
auto prog = optimize_onnx("multinomial_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(multinomial_dyn_test)
{
// compile-time random seed
migraphx::program p;
auto* mm = p.get_main_module();
size_t sample_size = 100000;
size_t categories = 5;
float seed = 1.3f;
auto input = mm->add_parameter(
"input",
migraphx::shape{migraphx::shape::float_type, {{1, categories}, {categories, categories}}});
auto maxes = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {1}}}), input);
auto cdf = add_common_op(*mm, migraphx::make_op("sub"), {input, maxes});
cdf = mm->add_instruction(migraphx::make_op("exp"), cdf);
cdf = mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", false}}), cdf);
migraphx::shape s{migraphx::shape::float_type, {1}};
std::vector<float> seed_data = {seed};
auto seed_input = mm->add_literal(migraphx::literal(s, seed_data));
// dynamic input only: must calculate alloc_shape as (batch_size, sample_size)
// read the runtime input dimensions
auto dim_of = mm->add_instruction(migraphx::make_op("dimensions_of", {{"end", 2}}), input);
// make an argument of (1, 0)
migraphx::shape lit_shape(migraphx::shape::int64_type, {2});
std::vector<int64_t> data1{1, 0};
auto l1 = mm->add_literal(lit_shape, data1);
auto batch_arg = mm->add_instruction(migraphx::make_op("mul"), dim_of, l1);
std::vector<int64_t> data2(2, 0);
// make an argument of (0, sample_size)
data2[1] = sample_size;
auto l2 = mm->add_literal(lit_shape, data2);
auto alloc_shape = mm->add_instruction(migraphx::make_op("add"), batch_arg, l2);
migraphx::shape compile_shape =
migraphx::shape(migraphx::shape::float_type,
{input->get_shape().dyn_dims().front(), {sample_size, sample_size}});
auto alloc = mm->add_instruction(
migraphx::make_op("allocate", {{"shape", to_value(compile_shape)}}), alloc_shape);
auto randoms = mm->add_instruction(migraphx::make_op("random_uniform"), seed_input, alloc);
auto ret = mm->add_instruction(
migraphx::make_op("multinomial", {{"dtype", migraphx::shape::float_type}}), cdf, randoms);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, categories};
options.print_program_on_error = true;
auto prog = migraphx::parse_onnx("multinomial_dyn_test.onnx", options);
EXPECT(p == prog);
}
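// Worked example of the alloc_shape arithmetic built above (batch value chosen only for
// illustration): dimensions_of(end = 2) returns {batch, categories}, e.g. {4, 5}; multiplying
// elementwise by the literal {1, 0} keeps the batch and zeroes the rest, giving {4, 0}; adding
// {0, sample_size} then yields {4, 100000}, the shape allocated for the random_uniform buffer.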
TEST_CASE(multinomial_autoseed_dyn_test)
{
// runtime random seed
migraphx::program p;
auto* mm = p.get_main_module();
size_t sample_size = 12;
size_t categories = 10;
auto input = mm->add_parameter(
"input", migraphx::shape{migraphx::shape::float_type, {{1, 10}, {10, 10}}});
auto maxes = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {1}}}), input);
auto cdf = add_common_op(*mm, migraphx::make_op("sub"), {input, maxes});
cdf = mm->add_instruction(migraphx::make_op("exp"), cdf);
cdf = mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", false}}), cdf);
auto seed_input = mm->add_instruction(migraphx::make_op("random_seed"));
// dynamic input only: must calculate alloc_shape as (batch_size, sample_size)
// read the runtime input dimensions
auto dim_of = mm->add_instruction(migraphx::make_op("dimensions_of", {{"end", 2}}), input);
// make an argument of (1, 0)
migraphx::shape lit_shape(migraphx::shape::int64_type, {2});
std::vector<int64_t> data1{1, 0};
auto l1 = mm->add_literal(lit_shape, data1);
auto batch_arg = mm->add_instruction(migraphx::make_op("mul"), dim_of, l1);
std::vector<int64_t> data2(2, 0);
// make an argument of (0, sample_size)
data2[1] = sample_size;
auto l2 = mm->add_literal(lit_shape, data2);
auto alloc_shape = mm->add_instruction(migraphx::make_op("add"), batch_arg, l2);
migraphx::shape compile_shape =
migraphx::shape(migraphx::shape::float_type,
{input->get_shape().dyn_dims().front(), {sample_size, sample_size}});
auto alloc = mm->add_instruction(
migraphx::make_op("allocate", {{"shape", to_value(compile_shape)}}), alloc_shape);
auto randoms = mm->add_instruction(migraphx::make_op("random_uniform"), seed_input, alloc);
auto ret = mm->add_instruction(migraphx::make_op("multinomial"), cdf, randoms);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, categories};
options.print_program_on_error = true;
auto prog = migraphx::parse_onnx("multinomial_autoseed_dyn_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(multinomial_dtype_error_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("multinomial_dtype_error_test.onnx"); }));
@@ -4712,10 +4950,11 @@ TEST_CASE(multinomial_dtype_error_test)
TEST_CASE(multinomial_generated_seed_test)
{
// the multinomial op no longer generates its own randoms
auto p1 = optimize_onnx("multinomial_generated_seed_test.onnx");
auto p2 = optimize_onnx("multinomial_generated_seed_test.onnx");
EXPECT(p1 == p2);
}
TEST_CASE(multinomial_int64_test)
@@ -4723,27 +4962,28 @@ TEST_CASE(multinomial_int64_test)
migraphx::program p;
auto* mm = p.get_main_module();
size_t sample_size = 10;
float seed = 1.0;
uint32_t batch_size = 1;
migraphx::shape::type_t dtype = migraphx::shape::type_t::int64_type;
auto input = mm->add_parameter("input", migraphx::shape{migraphx::shape::float_type, {1, 10}});
auto maxes = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {1}}}), input);
auto cdf = add_common_op(*mm, migraphx::make_op("sub"), {input, maxes});
cdf = mm->add_instruction(migraphx::make_op("exp"), cdf);
cdf = mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", false}}), cdf);
migraphx::shape s{migraphx::shape::float_type, {1}};
std::vector<float> data = {seed};
auto seed_input = mm->add_literal(migraphx::literal(s, data));
// static size
auto rand_dummy = mm->add_literal(
migraphx::literal{migraphx::shape{migraphx::shape::float_type, {batch_size, sample_size}},
std::vector<float>(batch_size * sample_size)});
auto randoms = mm->add_instruction(migraphx::make_op("random_uniform"), seed_input, rand_dummy);
mm->add_instruction(migraphx::make_op("multinomial", {{"dtype", dtype}}), cdf, randoms);
auto prog = optimize_onnx("multinomial_int64_test.onnx");
EXPECT(p == prog);
@@ -5357,6 +5597,54 @@ TEST_CASE(qlinearadd_test)
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(qlinearaveragepool_notset_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto sc_x = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto z_pt_x = mm->add_literal(migraphx::literal{migraphx::shape::int8_type, {0}});
auto sc_y = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto z_pt_y = mm->add_literal(migraphx::literal{migraphx::shape::int8_type, {10}});
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::int8_type, {1, 1, 5, 5}});
auto scale_x_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 5, 5}}}), sc_x);
auto z_pt_x_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 5, 5}}}), z_pt_x);
auto fp_x =
mm->add_instruction(migraphx::make_op("dequantizelinear"), x, scale_x_bcast, z_pt_x_bcast);
auto fp_y =
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {2, 2, 2, 2}},
{"stride", {2, 2}},
{"lengths", {6, 6}}}),
fp_x);
fp_y = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {1, 1}}, {"ends", {2, 2}}}), fp_y);
auto scale_y_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1, 1}}}), sc_y);
auto z_pt_y_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1, 1}}}), z_pt_y);
auto y =
mm->add_instruction(migraphx::make_op("quantizelinear"), fp_y, scale_y_bcast, z_pt_y_bcast);
mm->add_return({y});
auto prog = migraphx::parse_onnx("qlinearaveragepool_notset_test.onnx");
EXPECT(p == prog);
}
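// The QLinear* expectations in this file follow the usual ONNX quantization definitions
// (stated here for reference): inputs are dequantized as (q - zero_point) * scale, the
// floating-point op runs on the result, and the output is requantized as
// saturate(round(x / scale) + zero_point).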
TEST_CASE(qlinearconv_test)
{
migraphx::program p;
@@ -5457,6 +5745,46 @@ TEST_CASE(qlinearglobalavgpool_test)
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(qlinearleakyrelu_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("X", {migraphx::shape::int8_type, {64}});
auto sc_x = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_x = mm->add_literal(migraphx::literal{migraphx::shape::int8_type, {0}});
auto sc_y = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_y = mm->add_literal(migraphx::literal{migraphx::shape::int8_type, {10}});
auto scale_x_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), sc_x);
auto z_pt_x_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), z_pt_x);
auto fp_x =
mm->add_instruction(migraphx::make_op("dequantizelinear"), x, scale_x_bcast, z_pt_x_bcast);
auto fp_y = mm->add_instruction(migraphx::make_op("leaky_relu", {{"alpha", 1.1}}), fp_x);
auto scale_y_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), sc_y);
auto z_pt_y_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), z_pt_y);
auto y =
mm->add_instruction(migraphx::make_op("quantizelinear"), fp_y, scale_y_bcast, z_pt_y_bcast);
mm->add_return({y});
auto prog = migraphx::parse_onnx("qlinearleakyrelu_test.onnx");
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(qlinearmatmul_1D_test)
{
migraphx::program p;
@@ -5569,6 +5897,99 @@ TEST_CASE(qlinearmatmul_2D_test)
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(qlinearmul_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto a = mm->add_parameter("A", {migraphx::shape::uint8_type, {64}});
auto b = mm->add_parameter("B", {migraphx::shape::uint8_type, {64}});
auto sc_a = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_a = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {0}});
auto sc_b = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_b = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {16}});
auto sc_c = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_c = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {100}});
auto scale_a_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), sc_a);
auto z_pt_a_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), z_pt_a);
auto fp_a =
mm->add_instruction(migraphx::make_op("dequantizelinear"), a, scale_a_bcast, z_pt_a_bcast);
auto scale_b_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), sc_b);
auto z_pt_b_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), z_pt_b);
auto fp_b =
mm->add_instruction(migraphx::make_op("dequantizelinear"), b, scale_b_bcast, z_pt_b_bcast);
auto fp_c = mm->add_instruction(migraphx::make_op("mul"), fp_a, fp_b);
auto scale_c_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), sc_c);
auto z_pt_c_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), z_pt_c);
auto c =
mm->add_instruction(migraphx::make_op("quantizelinear"), fp_c, scale_c_bcast, z_pt_c_bcast);
mm->add_return({c});
auto prog = migraphx::parse_onnx("qlinearmul_test.onnx");
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(qlinearsigmoid_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("X", {migraphx::shape::int8_type, {64}});
auto sc_x = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_x = mm->add_literal(migraphx::literal{migraphx::shape::int8_type, {0}});
auto sc_y = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.0035}});
auto z_pt_y = mm->add_literal(migraphx::literal{migraphx::shape::int8_type, {-128}});
auto scale_x_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), sc_x);
auto z_pt_x_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), z_pt_x);
auto fp_x =
mm->add_instruction(migraphx::make_op("dequantizelinear"), x, scale_x_bcast, z_pt_x_bcast);
auto fp_y = mm->add_instruction(migraphx::make_op("sigmoid"), fp_x);
auto scale_y_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), sc_y);
auto z_pt_y_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), z_pt_y);
auto y =
mm->add_instruction(migraphx::make_op("quantizelinear"), fp_y, scale_y_bcast, z_pt_y_bcast);
mm->add_return({y});
auto prog = migraphx::parse_onnx("qlinearsigmoid_test.onnx");
EXPECT(p.sort() == prog.sort());
}
migraphx::instruction_ref insert_quantizelinear_clip(migraphx::module& m,
const migraphx::instruction_ref ins,
const migraphx::instruction_ref round,
@@ -5603,9 +6024,9 @@ TEST_CASE(quantizelinear_test)
auto l1_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {5}}}), l1);
auto div = mm->add_instruction(migraphx::make_op("div"), l0, l1_mbcast);
auto nearbyint = mm->add_instruction(migraphx::make_op("nearbyint"), div);
auto s = nearbyint->get_shape();
auto clip = insert_quantizelinear_clip(*mm, div, nearbyint, s, 0, 255);
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::uint8_type)}}),
@@ -5628,9 +6049,9 @@ TEST_CASE(quantizelinear_int32_test)
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
l0);
auto div = mm->add_instruction(migraphx::make_op("div"), l0, l1_mbcast);
auto nearbyint = mm->add_instruction(migraphx::make_op("nearbyint"), div);
auto s = nearbyint->get_shape();
auto clip = insert_quantizelinear_clip(*mm, div, nearbyint, s, 0, 255);
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::uint8_type)}}),
@@ -5650,7 +6071,7 @@ TEST_CASE(quantizelinear_zero_point_test)
auto l1_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {5}}}), l1);
auto div = mm->add_instruction(migraphx::make_op("div"), l0, l1_mbcast);
auto round = mm->add_instruction(migraphx::make_op("nearbyint"), div);
auto l2_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {5}}}), l2);
l2_mbcast = mm->add_instruction(
@@ -5683,7 +6104,7 @@ migraphx::program make_quantizelinear_axis_prog()
migraphx::make_op("broadcast", {{"axis", axis}, {"out_lens", input_lens}}), l1);
auto div = mm->add_instruction(migraphx::make_op("div"), l0, l1_bcast);
auto round = mm->add_instruction(migraphx::make_op("nearbyint"), div);
auto l2_bcast = mm->add_instruction(
migraphx::make_op("broadcast", {{"axis", axis}, {"out_lens", input_lens}}), l2);
l2_bcast = mm->add_instruction(
@@ -6372,9 +6793,8 @@ TEST_CASE(resize_nonstd_input_test)
auto tx =
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 1, 3, 2}}}), inx);
mm->add_instruction(migraphx::make_op("undefined"));
auto lrsp = mm->add_instruction(migraphx::make_op("reshape", {{"dims", {8}}}), tx);
auto r = mm->add_instruction(migraphx::make_op("gather", {{"axis", 0}}), lrsp, li);
mm->add_return({r});
@@ -6813,7 +7233,7 @@ TEST_CASE(round_test)
migraphx::program p;
auto* mm = p.get_main_module();
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::double_type, {10, 5}});
mm->add_instruction(migraphx::make_op("nearbyint"), input);
auto prog = optimize_onnx("round_test.onnx");
EXPECT(p == prog);
@@ -6857,20 +7277,35 @@ TEST_CASE(scatter_none_test)
EXPECT(p == prog);
}
void scatternd_test_base(const std::string& reduction, const std::string& onnx_file)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {2, 2, 2}});
auto l1 = mm->add_parameter("indices", migraphx::shape{migraphx::shape::int64_type, {2, 1, 2}});
auto l2 = mm->add_parameter("updates", migraphx::shape{migraphx::shape::float_type, {2, 1, 2}});
auto r = mm->add_instruction(migraphx::make_op("scatternd_" + reduction), l0, l1, l2);
mm->add_return({r});
auto prog = migraphx::parse_onnx(onnx_file);
EXPECT(p == prog);
}
TEST_CASE(scatternd_test) { scatternd_test_base("none", "scatternd_test.onnx"); }
TEST_CASE(scatternd_add_test) { scatternd_test_base("add", "scatternd_add_test.onnx"); }
TEST_CASE(scatternd_mul_test) { scatternd_test_base("mul", "scatternd_mul_test.onnx"); }
TEST_CASE(scatternd_max_test) { scatternd_test_base("max", "scatternd_max_test.onnx"); }
TEST_CASE(scatternd_min_test) { scatternd_test_base("min", "scatternd_min_test.onnx"); }
TEST_CASE(scatternd_invalid_reduction_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("scatternd_invalid_reduction_test.onnx"); }));
}
TEST_CASE(scatternd_dyn_test)
{
// dynamic input.
@@ -6894,34 +7329,6 @@ TEST_CASE(scatternd_dyn_test)
EXPECT(p == prog);
}
TEST_CASE(selu_test)
{
migraphx::program p;
@@ -7469,6 +7876,25 @@ TEST_CASE(slice_var_input_dyn1)
EXPECT(p == prog);
}
TEST_CASE(slice_var_input_default_steps)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto data =
mm->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {{3, 8}, {2, 2}}});
auto starts = mm->add_parameter("starts", migraphx::shape{migraphx::shape::int64_type, {2}});
auto ends = mm->add_parameter("ends", migraphx::shape{migraphx::shape::int64_type, {2}});
auto axes = mm->add_parameter("axes", migraphx::shape{migraphx::shape::int64_type, {2}});
mm->add_literal({{migraphx::shape::int64_type, {2}}, {1, 1}});
auto ret = mm->add_instruction(migraphx::make_op("slice"), data, starts, ends, axes);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {3, 8};
auto prog = parse_onnx("slice_var_input_default_steps.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(slice_var_input_steps_error)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("slice_var_input_steps_error.onnx"); }));
@@ -7671,6 +8097,46 @@ TEST_CASE(split_test_default)
EXPECT(p == prog);
}
TEST_CASE(split_test_uneven)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {12, 15}});
auto r1 = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {0}}, {"ends", {3}}}), input);
auto r2 = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {3}}, {"ends", {6}}}), input);
auto r3 = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {6}}, {"ends", {9}}}), input);
auto r4 = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {9}}, {"ends", {12}}}), input);
auto r5 = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {12}}, {"ends", {12}}}), input);
mm->add_return({r1, r2, r3, r4, r5});
auto prog = migraphx::parse_onnx("split_test_uneven.onnx");
EXPECT(p == prog);
}
TEST_CASE(split_test_uneven_num_outputs)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {11, 15}});
auto r1 = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {0}}, {"ends", {3}}}), input);
auto r2 = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {3}}, {"ends", {6}}}), input);
auto r3 = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {6}}, {"ends", {9}}}), input);
auto r4 = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {9}}, {"ends", {11}}}), input);
mm->add_return({r1, r2, r3, r4});
auto prog = migraphx::parse_onnx("split_test_uneven_num_outputs.onnx");
EXPECT(p == prog);
}
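// The uneven-split expectations above appear to follow chunk = ceil(dim / num_outputs) with the
// last output taking whatever remains (possibly nothing): 12 rows over 5 outputs -> 3, 3, 3, 3, 0
// and 11 rows over 4 outputs -> 3, 3, 3, 2, matching the slice ranges used.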
TEST_CASE(split_test_no_attribute_invalid_split)
{
EXPECT(
@@ -7688,6 +8154,11 @@ TEST_CASE(split_test_no_attribute_invalid_input_split)
[&] { migraphx::parse_onnx("split_test_no_attribute_invalid_input_split.onnx"); }));
}
TEST_CASE(split_test_invalid_num_outputs)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("split_test_invalid_num_outputs.onnx"); }));
}
TEST_CASE(sqrt_test)
{
migraphx::program p;
@@ -8135,6 +8606,86 @@ TEST_CASE(undefined_test)
EXPECT(p == prog);
}
TEST_CASE(unique_dynamic_sorted_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {6}};
auto x = mm->add_parameter("X", s);
auto out = mm->add_instruction(migraphx::make_op("unique", {{"sorted", 1}, {"axis", 0}}), x);
auto y = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), out);
auto y_ind = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 1}}), out);
auto x_ind = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 2}}), out);
auto count = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 3}}), out);
mm->add_return({y, y_ind, x_ind, count});
auto prog = migraphx::parse_onnx("unique_dynamic_sorted_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(unique_dynamic_sorted_3D_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::int64_type, {4, 4, 4}};
auto x = mm->add_parameter("X", s);
auto out = mm->add_instruction(migraphx::make_op("unique", {{"sorted", 1}}), x);
auto y = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), out);
auto y_ind = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 1}}), out);
auto x_ind = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 2}}), out);
auto count = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 3}}), out);
mm->add_return({y, y_ind, x_ind, count});
auto prog = migraphx::parse_onnx("unique_dynamic_sorted_3D_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(unique_sorted_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s_x{migraphx::shape::float_type, {6}};
std::vector<float> x_data = {2, 1, 1, 3, 4, 3};
auto x = mm->add_literal(migraphx::literal(s_x, x_data));
auto out = mm->add_instruction(migraphx::make_op("unique", {{"sorted", 1}, {"axis", 0}}), x);
auto y = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), out);
auto y_idx = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 1}}), out);
auto x_idx = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 2}}), out);
auto count = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 3}}), out);
mm->add_return({y, y_idx, x_idx, count});
auto prog = migraphx::parse_onnx("unique_sorted_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(unique_unsorted_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s_x{migraphx::shape::float_type, {6}};
std::vector<float> x_data = {2, 1, 1, 3, 4, 3};
auto x = mm->add_literal(migraphx::literal(s_x, x_data));
auto out = mm->add_instruction(migraphx::make_op("unique", {{"sorted", 0}, {"axis", 0}}), x);
auto y = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), out);
auto y_idx = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 1}}), out);
auto x_idx = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 2}}), out);
auto count = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 3}}), out);
mm->add_return({y, y_idx, x_idx, count});
auto prog = migraphx::parse_onnx("unique_unsorted_test.onnx");
EXPECT(p == prog);
}
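// For reference, the Unique outputs expected for the literal input {2, 1, 1, 3, 4, 3} used above
// (per the ONNX Unique definition): sorted = 1 gives y = {1, 2, 3, 4}, y_idx = {1, 0, 3, 4},
// x_idx = {1, 0, 0, 2, 3, 2}, count = {2, 1, 2, 1}; sorted = 0 keeps first-occurrence order,
// y = {2, 1, 3, 4}, y_idx = {0, 1, 3, 4}, x_idx = {0, 1, 1, 2, 3, 2}, count = {1, 2, 2, 1}.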
TEST_CASE(unknown_test)
{
migraphx::program p;
@@ -8188,6 +8739,27 @@ TEST_CASE(upsample_test)
EXPECT(p == prog);
}
TEST_CASE(upsample_ver7_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sx{migraphx::shape::float_type, {1, 1, 2, 2}};
auto ix = mm->add_parameter("X", sx);
migraphx::shape si{migraphx::shape::int32_type, {1, 1, 4, 6}};
std::vector<int> ind = {0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 2, 2, 2, 3, 3, 3};
auto li = mm->add_literal(migraphx::literal(si, ind));
auto rsp = mm->add_instruction(migraphx::make_op("reshape", {{"dims", {4}}}), ix);
auto r = mm->add_instruction(migraphx::make_op("gather", {{"axis", 0}}), rsp, li);
mm->add_return({r});
auto prog = migraphx::parse_onnx("upsample_ver7_test.onnx");
EXPECT(p == prog);
}
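// The gather indices above encode nearest-neighbour upsampling of the 2x2 input by scales
// (2, 3): output element (i, j) reads flattened input index floor(i / 2) * 2 + floor(j / 3),
// which enumerates exactly the literal {0, 0, 0, 1, 1, 1, ...} used for the 4x6 result.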
TEST_CASE(unknown_test_throw_print_error)
{
migraphx::onnx_options options;
...