Commit 5d057776 authored by Shucai Xiao's avatar Shucai Xiao
Browse files

merge changes from develop branch

parents d6b4ae77 9b19b73f
eyelike_default_test:U

T1T2"EyeLikeeyelike_default_testZ
T1


b
T2


B
\ No newline at end of file
eyelike_double_test:T

T1T2"EyeLikeeyelike_double_testZ
T1
 

b
T2
 

B
\ No newline at end of file
eyelike_half_test:R

T1T2"EyeLikeeyelike_half_testZ
T1



b
T2



B
\ No newline at end of file
eyelike_k_outofbounds_neg_test:r
$
T1T2"EyeLike*
keyelike_k_outofbounds_neg_testZ
T1


b
T2


B
\ No newline at end of file
eyelike_k_outofbounds_pos_test:i

T1T2"EyeLike*
keyelike_k_outofbounds_pos_testZ
T1


b
T2


B
\ No newline at end of file
eyelike_k_test:Y

T1T2"EyeLike*
keyelike_k_testZ
T1


b
T2


B
\ No newline at end of file
eyelike_not_rank2_test:[

T1T2"EyeLikeeyelike_not_rank2_testZ
T1



b
T2


B
\ No newline at end of file
eyelike_set_dtype_test:e

T1T2"EyeLike*
dtype eyelike_set_dtype_testZ
T1


b
T2
 

B
\ No newline at end of file
eyelike_verify_negk_test:l
$
T1T2"EyeLike*
keyelike_verify_negk_testZ
T1


b
T2


B
\ No newline at end of file
eyelike_verify_test:^

T1T2"EyeLike*
keyelike_verify_testZ
T1


b
T2


B
\ No newline at end of file
......@@ -351,6 +351,65 @@ def ceil_test():
return ([node], [x], [y])
@onnx_test
def celu_alpha_test():
    # Celu on a 1-D float tensor with an explicit alpha attribute.
    t_in = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3])
    t_out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3])
    node = onnx.helper.make_node('Celu', inputs=['x'], outputs=['y'], alpha=0.8)
    return ([node], [t_in], [t_out])
@onnx_test
def celu_default_test():
    # Celu with no attributes: alpha takes the operator default.
    t_in = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3])
    t_out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
    node = onnx.helper.make_node('Celu', inputs=['x'], outputs=['y'])
    return ([node], [t_in], [t_out])
@onnx_test
def celu_verify_test():
    # Celu numeric-verification model with alpha=0.5.
    t_in = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3])
    t_out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
    node = onnx.helper.make_node('Celu', inputs=['x'], outputs=['y'], alpha=0.5)
    return ([node], [t_in], [t_out])
@onnx_test
def celu_wrong_type_test():
    # Celu with fp16 input/output; the parser is expected to reject this type.
    t_in = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [2, 3])
    t_out = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [2, 3])
    node = onnx.helper.make_node('Celu', inputs=['x'], outputs=['y'])
    return ([node], [t_in], [t_out])
@onnx_test
def celu_zero_alpha_test():
    # Celu with alpha=0, which divides by alpha and must be rejected.
    t_in = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3])
    t_out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
    node = onnx.helper.make_node('Celu', inputs=['x'], outputs=['y'], alpha=0.0)
    return ([node], [t_in], [t_out])
@onnx_test
def clip_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
......@@ -426,6 +485,22 @@ def clip_test_op11_no_args1():
return ([node], [x], [y])
@onnx_test
def clip_test_args_type_mismatch():
    # Clip whose min initializer is float but max is int64, exercising the
    # parser's implicit type conversion of clip bounds.
    data = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3, 3])
    out = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 3])
    min_val = helper.make_tensor('min', TensorProto.FLOAT, [1, 3], [1.5, 2.5, 3.5])
    max_val = helper.make_tensor('max', TensorProto.INT64, [3, 1], [2, 3, 4])
    node = onnx.helper.make_node('Clip', inputs=['0', 'min', 'max'], outputs=['1'])
    return ([node], [data], [out], [min_val, max_val])
@onnx_test
def concat_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 4, 3])
......@@ -1381,6 +1456,114 @@ def expand_test():
return ([shape_const, node], [x], [y])
@onnx_test
def eyelike_default_test():
    # EyeLike with no attributes: same dtype as input, main diagonal (k=0).
    in_t = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [3, 4])
    out_t = helper.make_tensor_value_info('T2', TensorProto.FLOAT, [3, 4])
    node = onnx.helper.make_node('EyeLike', inputs=['T1'], outputs=['T2'])
    return ([node], [in_t], [out_t])
@onnx_test
def eyelike_double_test():
    # EyeLike on a double tensor with more columns than rows.
    in_t = helper.make_tensor_value_info('T1', TensorProto.DOUBLE, [6, 15])
    out_t = helper.make_tensor_value_info('T2', TensorProto.DOUBLE, [6, 15])
    node = onnx.helper.make_node('EyeLike', inputs=['T1'], outputs=['T2'])
    return ([node], [in_t], [out_t])
@onnx_test
def eyelike_half_test():
    # EyeLike on a square fp16 tensor.
    in_t = helper.make_tensor_value_info('T1', TensorProto.FLOAT16, [8, 8])
    out_t = helper.make_tensor_value_info('T2', TensorProto.FLOAT16, [8, 8])
    node = onnx.helper.make_node('EyeLike', inputs=['T1'], outputs=['T2'])
    return ([node], [in_t], [out_t])
@onnx_test
def eyelike_k_test():
    # EyeLike with the diagonal shifted one column to the right (k=1).
    in_t = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [3, 4])
    out_t = helper.make_tensor_value_info('T2', TensorProto.FLOAT, [3, 4])
    node = onnx.helper.make_node('EyeLike', inputs=['T1'], outputs=['T2'], k=1)
    return ([node], [in_t], [out_t])
@onnx_test
def eyelike_k_outofbounds_neg_test():
    # k=-2 is below the valid diagonal range of a 2x4 input; must be rejected.
    in_t = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [2, 4])
    out_t = helper.make_tensor_value_info('T2', TensorProto.FLOAT, [2, 4])
    node = onnx.helper.make_node('EyeLike', inputs=['T1'], outputs=['T2'], k=-2)
    return ([node], [in_t], [out_t])
@onnx_test
def eyelike_k_outofbounds_pos_test():
    # k=4 is beyond the last column of a 3x4 input; must be rejected.
    in_t = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [3, 4])
    out_t = helper.make_tensor_value_info('T2', TensorProto.FLOAT, [3, 4])
    node = onnx.helper.make_node('EyeLike', inputs=['T1'], outputs=['T2'], k=4)
    return ([node], [in_t], [out_t])
@onnx_test
def eyelike_not_rank2_test():
    # EyeLike on a rank-3 input; the op only accepts rank-2 tensors.
    in_t = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [3, 4, 2])
    out_t = helper.make_tensor_value_info('T2', TensorProto.FLOAT, [3, 4])
    node = onnx.helper.make_node('EyeLike', inputs=['T1'], outputs=['T2'])
    return ([node], [in_t], [out_t])
@onnx_test
def eyelike_verify_test():
    # Numeric-verification model for EyeLike with k=1.
    in_t = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [3, 4])
    out_t = helper.make_tensor_value_info('T2', TensorProto.FLOAT, [3, 4])
    node = onnx.helper.make_node('EyeLike', inputs=['T1'], outputs=['T2'], k=1)
    return ([node], [in_t], [out_t])
@onnx_test
def eyelike_verify_negk_test():
    # Numeric-verification model for EyeLike with an in-range negative k.
    in_t = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [3, 4])
    out_t = helper.make_tensor_value_info('T2', TensorProto.FLOAT, [3, 4])
    node = onnx.helper.make_node('EyeLike', inputs=['T1'], outputs=['T2'], k=-2)
    return ([node], [in_t], [out_t])
@onnx_test
def eyelike_set_dtype_test():
    # EyeLike whose dtype attribute requests a double output from float input.
    in_t = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [3, 4])
    out_t = helper.make_tensor_value_info('T2', TensorProto.DOUBLE, [3, 4])
    node = onnx.helper.make_node('EyeLike',
                                 inputs=['T1'],
                                 outputs=['T2'],
                                 dtype=TensorProto.DOUBLE)
    return ([node], [in_t], [out_t])
@onnx_test
def flatten_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
......@@ -2621,6 +2804,70 @@ def loop_test():
return ([node], [iter, cond, a, b], [b_loop, uout])
@onnx_test
def lpnormalization_axis_error_test():
    # axis=2 is out of range for a rank-2 input; must be rejected.
    t_in = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3])
    t_out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
    node = onnx.helper.make_node('LpNormalization',
                                 inputs=['x'],
                                 outputs=['y'],
                                 axis=2)
    return ([node], [t_in], [t_out])
@onnx_test
def lpnormalization_default_test():
    # LpNormalization with default p, normalizing along axis 0.
    t_in = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
    t_out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
    node = onnx.helper.make_node('LpNormalization',
                                 inputs=['x'],
                                 outputs=['y'],
                                 axis=0)
    return ([node], [t_in], [t_out])
@onnx_test
def lpnormalization_l1_test():
    # LpNormalization with the L1 norm (p=1).
    t_in = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
    t_out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
    node = onnx.helper.make_node('LpNormalization',
                                 inputs=['x'],
                                 outputs=['y'],
                                 p=1)
    return ([node], [t_in], [t_out])
@onnx_test
def lpnormalization_l2_test():
    # LpNormalization with the L2 norm requested explicitly (p=2).
    t_in = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
    t_out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
    node = onnx.helper.make_node('LpNormalization',
                                 inputs=['x'],
                                 outputs=['y'],
                                 p=2)
    return ([node], [t_in], [t_out])
@onnx_test
def lpnormalization_p_error_test():
    # p=3 is not a supported norm (only 1 and 2); must be rejected.
    t_in = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3])
    t_out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
    node = onnx.helper.make_node('LpNormalization',
                                 inputs=['x'],
                                 outputs=['y'],
                                 p=3)
    return ([node], [t_in], [t_out])
@onnx_test
def lrn_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 28, 24, 24])
......@@ -4310,6 +4557,54 @@ def sinh_test():
return ([node], [x], [y])
@onnx_test
def size_float_test():
    # Size of a 3-D float tensor; the output is a scalar-like int64.
    t_in = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3, 4])
    t_out = helper.make_tensor_value_info('y', TensorProto.INT64, [1])
    node = onnx.helper.make_node('Size', inputs=['x'], outputs=['y'])
    return ([node], [t_in], [t_out])
@onnx_test
def size_half_test():
    # Size of a 2-D fp16 tensor.
    t_in = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [3, 1])
    t_out = helper.make_tensor_value_info('y', TensorProto.INT64, [1])
    node = onnx.helper.make_node('Size', inputs=['x'], outputs=['y'])
    return ([node], [t_in], [t_out])
@onnx_test
def size_int_test():
    # Size of a 3-D int32 tensor.
    t_in = helper.make_tensor_value_info('x', TensorProto.INT32, [8, 2, 3])
    t_out = helper.make_tensor_value_info('y', TensorProto.INT64, [1])
    node = onnx.helper.make_node('Size', inputs=['x'], outputs=['y'])
    return ([node], [t_in], [t_out])
@onnx_test
def size_verify_test():
    # Numeric-verification model for Size.
    t_in = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 5, 3])
    t_out = helper.make_tensor_value_info('y', TensorProto.INT64, [1])
    node = onnx.helper.make_node('Size', inputs=['x'], outputs=['y'])
    return ([node], [t_in], [t_out])
@onnx_test
def slice_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3, 2])
......
lpnormalization_axis_error_test:q
$
xy"LpNormalization*
axis lpnormalization_axis_error_testZ
x


b
y


B
\ No newline at end of file
lpnormalization_l1_test:f
!
xy"LpNormalization*
plpnormalization_l1_testZ
x


b
y


B
\ No newline at end of file
lpnormalization_l2_test:f
!
xy"LpNormalization*
plpnormalization_l2_testZ
x


b
y


B
\ No newline at end of file
lpnormalization_p_error_test:k
!
xy"LpNormalization*
plpnormalization_p_error_testZ
x


b
y


B
\ No newline at end of file
......@@ -46,6 +46,40 @@ migraphx::program optimize_onnx(const std::string& name, bool run_passes = false
return prog;
}
// Builds the expected MIGraphX decomposition of ONNX Celu into the module:
//   max(0, x) + min(0, alpha * (exp(x / alpha) - 1))
// Instruction insertion order matches the parser's output so that program
// equality comparisons succeed.
void add_celu_instruction(migraphx::module* mm, const migraphx::shape& s, float alpha)
{
    auto x           = mm->add_parameter("x", s);
    const auto& lens = s.lens();
    const auto& type = s.type();
    // Broadcast a scalar literal to the input shape.
    auto bcast_scalar = [&](float v) {
        return mm->add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", lens}}),
            mm->add_literal(migraphx::literal{migraphx::shape{type}, {v}}));
    };
    auto zeros  = bcast_scalar(0.);
    auto ones   = bcast_scalar(1.);
    auto alphas = bcast_scalar(alpha);

    auto linear_part = mm->add_instruction(migraphx::make_op("max"), zeros, x);
    auto scaled      = mm->add_instruction(migraphx::make_op("div"), x, alphas);
    auto expd        = mm->add_instruction(migraphx::make_op("exp"), scaled);
    auto shifted     = mm->add_instruction(migraphx::make_op("sub"), expd, ones);
    auto weighted    = mm->add_instruction(migraphx::make_op("mul"), alphas, shifted);
    auto exp_part    = mm->add_instruction(migraphx::make_op("min"), zeros, weighted);
    mm->add_instruction(migraphx::make_op("add"), linear_part, exp_part);
}
// Builds a dense row-major num_rows x num_cols matrix with ones on the k-th
// diagonal (k > 0 shifts right of the main diagonal, k < 0 shifts below it)
// and zeros elsewhere, mirroring ONNX EyeLike semantics.
//
// k is signed (generalized from the previous size_t parameter, which could
// not represent negative diagonals and had no lower-bound check); existing
// callers passing unsigned 0/1 convert implicitly and behave identically.
static std::vector<double> make_r_eyelike(size_t num_rows, size_t num_cols, std::ptrdiff_t k)
{
    std::vector<double> eyelike_mat(num_rows * num_cols, 0.);
    for(std::ptrdiff_t i = 0; i < static_cast<std::ptrdiff_t>(num_rows); ++i)
    {
        // Column index of the diagonal entry in row i.
        const std::ptrdiff_t j = i + k;
        if(j >= 0 and j < static_cast<std::ptrdiff_t>(num_cols))
            eyelike_mat[i * static_cast<std::ptrdiff_t>(num_cols) + j] = 1.;
    }
    return eyelike_mat;
}
TEST_CASE(acos_test)
{
migraphx::program p;
......@@ -191,11 +225,12 @@ TEST_CASE(averagepool_1d_test)
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {1, 3, 5}});
mm->add_instruction(
migraphx::make_op(
"pooling",
{{"mode", "average"}, {"padding", {0, 0}}, {"stride", {1}}, {"lengths", {3}}}),
l0);
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0}},
{"stride", {1}},
{"lengths", {3}}}),
l0);
auto prog = optimize_onnx("averagepool_1d_test.onnx");
EXPECT(p == prog);
......@@ -207,7 +242,7 @@ TEST_CASE(averagepool_3d_test)
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {1, 3, 5, 5, 5}});
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", "average"},
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0, 0, 0}},
{"stride", {1, 1, 1}},
{"lengths", {3, 3, 3}}}),
......@@ -223,7 +258,7 @@ TEST_CASE(averagepool_notset_test)
auto* mm = p.get_main_module();
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
auto ins = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", "average"},
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {2, 2, 2, 2}},
{"stride", {2, 2}},
{"lengths", {6, 6}}}),
......@@ -244,7 +279,7 @@ TEST_CASE(averagepool_nt_cip_test)
std::vector<int64_t> pads = {0, 0, 0, 0, 0, 0, 1, 1};
auto ins_pad = mm->add_instruction(migraphx::make_op("pad", {{"pads", pads}}), input);
auto ret = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", "average"},
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0}},
{"stride", {2, 2}},
{"lengths", {6, 6}}}),
......@@ -261,7 +296,7 @@ TEST_CASE(averagepool_same_lower_test)
auto* mm = p.get_main_module();
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
auto ins = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", "average"},
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {1, 1, 1, 1}},
{"stride", {1, 1}},
{"lengths", {2, 2}}}),
......@@ -282,7 +317,7 @@ TEST_CASE(averagepool_sl_cip_test)
std::vector<int64_t> pads = {0, 0, 1, 1, 0, 0, 0, 0};
auto ins_pad = mm->add_instruction(migraphx::make_op("pad", {{"pads", pads}}), input);
auto ret = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", "average"},
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0}},
{"stride", {1, 1}},
{"lengths", {2, 2}}}),
......@@ -299,7 +334,7 @@ TEST_CASE(averagepool_same_upper_test)
auto* mm = p.get_main_module();
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
auto ins = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", "average"},
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {1, 1, 1, 1}},
{"stride", {1, 1}},
{"lengths", {2, 2}}}),
......@@ -368,6 +403,42 @@ TEST_CASE(ceil_test)
EXPECT(p == prog);
}
TEST_CASE(celu_alpha_test)
{
    // Celu with alpha=0.8 on a 1-D tensor should parse into the expanded form.
    migraphx::program p;
    add_celu_instruction(p.get_main_module(),
                         migraphx::shape{migraphx::shape::float_type, {3}},
                         0.8f);
    EXPECT(p == optimize_onnx("celu_alpha_test.onnx"));
}
TEST_CASE(celu_default_test)
{
    // Celu with no alpha attribute defaults to alpha=1.
    migraphx::program p;
    add_celu_instruction(p.get_main_module(),
                         migraphx::shape{migraphx::shape::float_type, {2, 3}},
                         1.0f);
    EXPECT(p == optimize_onnx("celu_default_test.onnx"));
}
TEST_CASE(celu_wrong_type_test)
{
    // Celu on an fp16 tensor is unsupported and must fail at parse time.
    EXPECT(test::throws([] { migraphx::parse_onnx("celu_wrong_type_test.onnx"); }));
}
TEST_CASE(celu_zero_alpha_test)
{
    // alpha=0 would divide by zero in the Celu expansion; parsing must throw.
    EXPECT(test::throws([] { migraphx::parse_onnx("celu_zero_alpha_test.onnx"); }));
}
TEST_CASE(clip_test)
{
migraphx::program p;
......@@ -458,6 +529,28 @@ TEST_CASE(clip_test_op11_no_args1)
EXPECT(p == prog);
}
TEST_CASE(clip_test_args_type_mismatch)
{
    // Clip with a float min and an int64 max: max is broadcast and then
    // converted to the data type before the clip instruction.
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto lo  = mm->add_literal(
        migraphx::literal{migraphx::shape{migraphx::shape::float_type, {1, 3}}, {1.5, 2.5, 3.5}});
    auto hi = mm->add_literal(
        migraphx::literal{migraphx::shape{migraphx::shape::int64_type, {3, 1}}, {2, 3, 4}});
    auto data = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {3, 3}});
    lo = mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {3, 3}}}), lo);
    hi = mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {3, 3}}}), hi);
    hi = mm->add_instruction(
        migraphx::make_op("convert", {{"target_type", migraphx::shape::float_type}}), hi);
    mm->add_return({mm->add_instruction(migraphx::make_op("clip"), data, lo, hi)});
    EXPECT(p == migraphx::parse_onnx("clip_test_args_type_mismatch.onnx"));
}
TEST_CASE(concat_test)
{
migraphx::program p;
......@@ -669,11 +762,12 @@ TEST_CASE(conv_bn_relu_maxpool_test)
auto l6 = mm->add_instruction(
migraphx::make_op("batch_norm_inference", {{"epsilon", 1.0e-5f}}), l5, p3, p4, p5, p6);
auto l7 = mm->add_instruction(migraphx::make_op("relu"), l6);
mm->add_instruction(
migraphx::make_op(
"pooling",
{{"mode", "max"}, {"padding", {0, 0, 0, 0}}, {"stride", {2, 2}}, {"lengths", {2, 2}}}),
l7);
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0, 0, 0}},
{"stride", {2, 2}},
{"lengths", {2, 2}}}),
l7);
auto prog = optimize_onnx("conv_bn_relu_maxpool_test.onnx");
EXPECT(p == prog);
......@@ -693,11 +787,12 @@ TEST_CASE(conv_relu_maxpool_test)
migraphx::make_op("broadcast", {{"axis", axis}, {"out_lens", l3->get_shape().lens()}}), l2);
auto l5 = mm->add_instruction(migraphx::make_op("add"), l3, l4);
auto l6 = mm->add_instruction(migraphx::make_op("relu"), l5);
mm->add_instruction(
migraphx::make_op(
"pooling",
{{"mode", "max"}, {"padding", {0, 0, 0, 0}}, {"stride", {2, 2}}, {"lengths", {2, 2}}}),
l6);
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0, 0, 0}},
{"stride", {2, 2}},
{"lengths", {2, 2}}}),
l6);
auto prog = optimize_onnx("conv_relu_maxpool_test.onnx");
EXPECT(p == prog);
......@@ -717,11 +812,12 @@ TEST_CASE(conv_relu_maxpool_x2_test)
migraphx::make_op("broadcast", {{"axis", axis}, {"out_lens", l3->get_shape().lens()}}), l2);
auto l5 = mm->add_instruction(migraphx::make_op("add"), l3, l4);
auto l6 = mm->add_instruction(migraphx::make_op("relu"), l5);
auto l7 = mm->add_instruction(
migraphx::make_op(
"pooling",
{{"mode", "max"}, {"padding", {0, 0, 0, 0}}, {"stride", {2, 2}}, {"lengths", {2, 2}}}),
l6);
auto l7 = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0, 0, 0}},
{"stride", {2, 2}},
{"lengths", {2, 2}}}),
l6);
auto l8 = mm->add_parameter("3", {migraphx::shape::float_type, {1, 5, 5, 5}});
auto l9 = mm->add_parameter("4", {migraphx::shape::float_type, {1}});
......@@ -732,11 +828,12 @@ TEST_CASE(conv_relu_maxpool_x2_test)
l9);
auto l12 = mm->add_instruction(migraphx::make_op("add"), l10, l11);
auto l13 = mm->add_instruction(migraphx::make_op("relu"), l12);
mm->add_instruction(
migraphx::make_op(
"pooling",
{{"mode", "max"}, {"padding", {0, 0, 0, 0}}, {"stride", {2, 2}}, {"lengths", {2, 2}}}),
l13);
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0, 0, 0}},
{"stride", {2, 2}},
{"lengths", {2, 2}}}),
l13);
auto prog = optimize_onnx("conv_relu_maxpool_x2_test.onnx");
......@@ -1259,6 +1356,121 @@ TEST_CASE(external_data_diff_path_test)
EXPECT(p == prog);
}
TEST_CASE(eyelike_default_test)
{
    // Default EyeLike is folded into a literal with ones on the main diagonal.
    migraphx::program p;
    auto* mm = p.get_main_module();
    const std::vector<std::size_t> dims{3, 4};
    mm->add_parameter("T1", migraphx::shape{migraphx::shape::float_type, dims});
    mm->add_literal(migraphx::literal{migraphx::shape{migraphx::shape::float_type, dims},
                                      make_r_eyelike(dims.front(), dims.back(), 0)});
    EXPECT(p == optimize_onnx("eyelike_default_test.onnx"));
}
TEST_CASE(eyelike_double_test)
{
    // EyeLike preserves the double type of a wide (6x15) input.
    migraphx::program p;
    auto* mm = p.get_main_module();
    const std::vector<std::size_t> dims{6, 15};
    mm->add_parameter("T1", migraphx::shape{migraphx::shape::double_type, dims});
    mm->add_literal(migraphx::literal{migraphx::shape{migraphx::shape::double_type, dims},
                                      make_r_eyelike(dims.front(), dims.back(), 0)});
    EXPECT(p == optimize_onnx("eyelike_double_test.onnx"));
}
TEST_CASE(eyelike_half_test)
{
    // EyeLike preserves the fp16 type of a square input.
    migraphx::program p;
    auto* mm = p.get_main_module();
    const std::vector<std::size_t> dims{8, 8};
    mm->add_parameter("T1", migraphx::shape{migraphx::shape::half_type, dims});
    mm->add_literal(migraphx::literal{migraphx::shape{migraphx::shape::half_type, dims},
                                      make_r_eyelike(dims.front(), dims.back(), 0)});
    EXPECT(p == optimize_onnx("eyelike_half_test.onnx"));
}
TEST_CASE(eyelike_k_test)
{
    // EyeLike with k=1 shifts the ones one column right of the main diagonal.
    migraphx::program p;
    auto* mm = p.get_main_module();
    const std::vector<std::size_t> dims{3, 4};
    mm->add_parameter("T1", migraphx::shape{migraphx::shape::float_type, dims});
    mm->add_literal(migraphx::literal{migraphx::shape{migraphx::shape::float_type, dims},
                                      make_r_eyelike(dims.front(), dims.back(), 1)});
    EXPECT(p == optimize_onnx("eyelike_k_test.onnx"));
}
TEST_CASE(eyelike_k_outofbounds_neg_test)
{
    // k=-2 is below the valid diagonal range of the 2x4 input; parse must throw.
    EXPECT(test::throws([] { migraphx::parse_onnx("eyelike_k_outofbounds_neg_test.onnx"); }));
}
TEST_CASE(eyelike_k_outofbounds_pos_test)
{
    // k=4 is past the last column of the 3x4 input; parse must throw.
    EXPECT(test::throws([] { migraphx::parse_onnx("eyelike_k_outofbounds_pos_test.onnx"); }));
}
TEST_CASE(eyelike_not_rank2_test)
{
    // EyeLike only accepts rank-2 inputs; a rank-3 model must fail to parse.
    EXPECT(test::throws([] { migraphx::parse_onnx("eyelike_not_rank2_test.onnx"); }));
}
TEST_CASE(eyelike_set_dtype_test)
{
    // The dtype attribute makes the EyeLike literal double while the
    // parameter stays float.
    migraphx::program p;
    auto* mm = p.get_main_module();
    const std::vector<std::size_t> dims{3, 4};
    mm->add_parameter("T1", migraphx::shape{migraphx::shape::float_type, dims});
    mm->add_literal(migraphx::literal{migraphx::shape{migraphx::shape::double_type, dims},
                                      make_r_eyelike(dims.front(), dims.back(), 0)});
    EXPECT(p == optimize_onnx("eyelike_set_dtype_test.onnx"));
}
TEST_CASE(flatten_test)
{
migraphx::program p;
......@@ -1481,7 +1693,7 @@ TEST_CASE(globalavgpool_test)
auto* mm = p.get_main_module();
auto input =
mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
auto op = migraphx::op::pooling{"average"};
auto op = migraphx::op::pooling{migraphx::op::pooling_mode::average};
auto lens = input->get_shape().lens();
op.lengths = {lens[2], lens[3]};
op.padding = {0, 0, 0, 0};
......@@ -1498,7 +1710,7 @@ TEST_CASE(globalmaxpool_test)
auto* mm = p.get_main_module();
auto input =
mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
auto op = migraphx::op::pooling{"max"};
auto op = migraphx::op::pooling{migraphx::op::pooling_mode::max};
auto lens = input->get_shape().lens();
op.lengths = {lens[2], lens[3]};
op.padding = {0, 0, 0, 0};
......@@ -2344,6 +2556,46 @@ TEST_CASE(loop_test)
EXPECT(p == prog);
}
TEST_CASE(lpnormalization_default_test)
{
    // Default LpNormalization (L2) along axis 0, with zero norms replaced by
    // one so the division is well-defined. Instruction order mirrors the
    // parser's expansion exactly.
    migraphx::program p;
    auto* mm = p.get_main_module();
    const std::vector<std::size_t> dims{3, 4};
    const auto dtype = migraphx::shape::float_type;
    auto x           = mm->add_parameter("x", migraphx::shape{dtype, dims});
    auto squared     = mm->add_instruction(migraphx::make_op("mul"), x, x);
    auto norms = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {0}}}), squared);
    norms      = mm->add_instruction(migraphx::make_op("sqrt"), norms);
    norms = mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", dims}}), norms);
    auto zeros =
        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", dims}}),
                            mm->add_literal(migraphx::literal{migraphx::shape{dtype}, {0.}}));
    auto ones =
        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", dims}}),
                            mm->add_literal(migraphx::literal{migraphx::shape{dtype}, {1.}}));
    auto is_zero    = mm->add_instruction(migraphx::make_op("equal"), norms, zeros);
    auto safe_norms = mm->add_instruction(migraphx::make_op("where"), is_zero, ones, norms);
    mm->add_instruction(migraphx::make_op("div"), x, safe_norms);
    EXPECT(p == optimize_onnx("lpnormalization_default_test.onnx"));
}
TEST_CASE(lpnormalization_axis_error_test)
{
    // axis=2 is out of range for the rank-2 input; parse must throw.
    EXPECT(test::throws([] { migraphx::parse_onnx("lpnormalization_axis_error_test.onnx"); }));
}
TEST_CASE(lpnormalization_p_error_test)
{
    // p=3 is not a supported norm order; parse must throw.
    EXPECT(test::throws([] { migraphx::parse_onnx("lpnormalization_p_error_test.onnx"); }));
}
TEST_CASE(lrn_test)
{
migraphx::program p;
......@@ -2491,11 +2743,12 @@ TEST_CASE(maxpool_notset_test)
migraphx::program p;
auto* mm = p.get_main_module();
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
mm->add_instruction(
migraphx::make_op(
"pooling",
{{"mode", "max"}, {"padding", {0, 0, 1, 1}}, {"stride", {2, 2}}, {"lengths", {6, 6}}}),
input);
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0, 1, 1}},
{"stride", {2, 2}},
{"lengths", {6, 6}}}),
input);
auto prog = optimize_onnx("maxpool_notset_test.onnx");
......@@ -2507,11 +2760,12 @@ TEST_CASE(maxpool_same_upper_test)
migraphx::program p;
auto* mm = p.get_main_module();
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
mm->add_instruction(
migraphx::make_op(
"pooling",
{{"mode", "max"}, {"padding", {0, 0, 1, 1}}, {"stride", {1, 1}}, {"lengths", {2, 2}}}),
input);
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0, 1, 1}},
{"stride", {1, 1}},
{"lengths", {2, 2}}}),
input);
auto prog = optimize_onnx("maxpool_same_upper_test.onnx");
......@@ -4143,6 +4397,40 @@ TEST_CASE(sinh_test)
EXPECT(p == prog);
}
TEST_CASE(size_float_test)
{
    // Size is folded at parse time into an int64 literal of the element count.
    migraphx::program p;
    auto* mm = p.get_main_module();
    const migraphx::shape in_shape{migraphx::shape::float_type, {2, 3, 4}};
    mm->add_parameter("x", in_shape);
    mm->add_literal(migraphx::literal{migraphx::shape::int64_type, {in_shape.elements()}});
    EXPECT(p == optimize_onnx("size_float_test.onnx"));
}
TEST_CASE(size_half_test)
{
    // Size of an fp16 tensor still produces an int64 element-count literal.
    migraphx::program p;
    auto* mm = p.get_main_module();
    const migraphx::shape in_shape{migraphx::shape::half_type, {3, 1}};
    mm->add_parameter("x", in_shape);
    mm->add_literal(migraphx::literal{migraphx::shape::int64_type, {in_shape.elements()}});
    EXPECT(p == optimize_onnx("size_half_test.onnx"));
}
TEST_CASE(size_int_test)
{
    // Size of an int32 tensor also folds to an int64 element-count literal.
    migraphx::program p;
    auto* mm = p.get_main_module();
    const migraphx::shape in_shape{migraphx::shape::int32_type, {8, 2, 3}};
    mm->add_parameter("x", in_shape);
    mm->add_literal(migraphx::literal{migraphx::shape::int64_type, {in_shape.elements()}});
    EXPECT(p == optimize_onnx("size_int_test.onnx"));
}
TEST_CASE(slice_test)
{
migraphx::program p;
......
size_float_test:I
xy"Sizesize_float_testZ
x



b
y

B
\ No newline at end of file
size_half_test:D
xy"Sizesize_half_testZ
x



b
y

B
\ No newline at end of file
 size_int_test:G
xy"Size size_int_testZ
x



b
y

B
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment