Commit a27808b3 authored by Ted Themistokleous

Add tests for valid multi-output shapes going into the If operator

Adding tests to validate what "valid" multi-output shapes should look like and
that we correctly handle trailing 1s and produce correctly sized outputs; a
rough illustration of this rule is sketched below.

Generated and added the two tests from gen_onnx.py, with matching tests in
onnx_test.cpp:

-if_then_else_multi_output_shapes_test.onnx
-if_then_else_multi_output_shapes_test2.onnx
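For context, the rule these tests exercise can be sketched roughly as follows: two branch output shapes are considered to match when they differ only by trailing 1s, and the lower-rank branch output is then unsqueezed up to the common rank. This is only an illustrative sketch; the compatible_if_output_shapes helper below is hypothetical and is not the MIGraphX implementation.

# Illustrative only: hypothetical helper, not MIGraphX code.
# Branch outputs such as [2, 3] and [2, 3, 1] are treated as compatible
# because the extra trailing dimensions are all 1; the lower-rank output
# is unsqueezed (trailing axes of size 1 appended) to the common rank.
def compatible_if_output_shapes(then_shape, else_shape):
    longer, shorter = sorted((then_shape, else_shape), key=len, reverse=True)
    # Any dimensions beyond the shorter rank must all be 1
    if any(dim != 1 for dim in longer[len(shorter):]):
        return False
    return longer[:len(shorter)] == shorter


assert compatible_if_output_shapes([2, 3, 1], [2, 3])      # handled via unsqueeze
assert not compatible_if_output_shapes([2, 3, 1], [2, 4])  # genuinely incompatible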
parent 80a23cfb
@@ -2541,6 +2541,147 @@ def if_then_else_incompatible_output_shapes_test():
    return ([node], [x, y], [res], [cond_tensor, xt_tensor, yt_tensor])


@onnx_test
def if_then_else_multi_output_shapes_test():
    x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT,
                                           [2, 3, 1])
    y = onnx.helper.make_tensor_value_info('y', onnx.TensorProto.FLOAT, [2, 3])

    then_out = onnx.helper.make_tensor_value_info('then_out',
                                                  onnx.TensorProto.FLOAT,
                                                  [2, 3, 1])
    then_out2 = onnx.helper.make_tensor_value_info('then_out2',
                                                   onnx.TensorProto.FLOAT,
                                                   [2, 3, 1])
    else_out = onnx.helper.make_tensor_value_info('else_out',
                                                  onnx.TensorProto.FLOAT,
                                                  [2, 3])
    else_out2 = onnx.helper.make_tensor_value_info('else_out2',
                                                   onnx.TensorProto.FLOAT,
                                                   [2, 3])

    xt = np.ones((2, 3, 1)).astype(np.float32)
    xt_tensor = helper.make_tensor(name='xt',
                                   data_type=TensorProto.FLOAT,
                                   dims=xt.shape,
                                   vals=xt.flatten().astype(np.float32))

    yt = np.random.randn(2, 3).astype(np.float32)
    yt_tensor = helper.make_tensor(name='yt',
                                   data_type=TensorProto.FLOAT,
                                   dims=yt.shape,
                                   vals=yt.flatten().astype(np.float32))

    then_add_node = onnx.helper.make_node('Add',
                                          inputs=['x', 'xt'],
                                          outputs=['then_out'])
    then_add_node2 = onnx.helper.make_node('Add',
                                           inputs=['x', 'x'],
                                           outputs=['then_out2'])

    else_mul_node = onnx.helper.make_node('Mul',
                                          inputs=['y', 'yt'],
                                          outputs=['else_out'])
    else_sub_node = onnx.helper.make_node('Sub',
                                          inputs=['y', 'yt'],
                                          outputs=['else_out2'])

    then_body = onnx.helper.make_graph([then_add_node, then_add_node2],
                                       'then_body', [], [then_out, then_out2])
    else_body = onnx.helper.make_graph([else_mul_node, else_sub_node],
                                       'else_body', [], [else_out, else_out2])

    cond = np.array([1]).astype(bool)
    cond_tensor = helper.make_tensor(name="cond",
                                     data_type=TensorProto.BOOL,
                                     dims=cond.shape,
                                     vals=cond.astype(bool))

    res = onnx.helper.make_tensor_value_info('res', TensorProto.FLOAT, [])

    node = onnx.helper.make_node('If',
                                 inputs=['cond'],
                                 outputs=['res'],
                                 then_branch=then_body,
                                 else_branch=else_body)

    return ([node], [x, y], [res], [cond_tensor, xt_tensor, yt_tensor])

@onnx_test
def if_then_else_multi_output_shapes_test2():
    x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT,
                                           [2, 3, 1])
    y = onnx.helper.make_tensor_value_info('y', onnx.TensorProto.FLOAT,
                                           [2, 3, 1])

    then_out = onnx.helper.make_tensor_value_info('then_out',
                                                  onnx.TensorProto.FLOAT,
                                                  [2, 3, 1])
    then_out2 = onnx.helper.make_tensor_value_info('then_out2',
                                                   onnx.TensorProto.FLOAT,
                                                   [2, 3, 1])
    else_out = onnx.helper.make_tensor_value_info('else_out',
                                                  onnx.TensorProto.FLOAT,
                                                  [2, 3, 1])
    else_out2 = onnx.helper.make_tensor_value_info('else_out2',
                                                   onnx.TensorProto.FLOAT,
                                                   [2, 3, 1])

    xt = np.ones((2, 3, 1)).astype(np.float32)
    xt_tensor = helper.make_tensor(name='xt',
                                   data_type=TensorProto.FLOAT,
                                   dims=xt.shape,
                                   vals=xt.flatten().astype(np.float32))

    yt = np.random.randn(2, 3, 1).astype(np.float32)
    yt_tensor = helper.make_tensor(name='yt',
                                   data_type=TensorProto.FLOAT,
                                   dims=yt.shape,
                                   vals=yt.flatten().astype(np.float32))

    then_add_node = onnx.helper.make_node('Add',
                                          inputs=['x', 'xt'],
                                          outputs=['then_out'])
    then_add_node2 = onnx.helper.make_node('Add',
                                           inputs=['x', 'x'],
                                           outputs=['then_out2'])

    else_mul_node = onnx.helper.make_node('Mul',
                                          inputs=['y', 'yt'],
                                          outputs=['else_out'])
    else_sub_node = onnx.helper.make_node('Sub',
                                          inputs=['y', 'yt'],
                                          outputs=['else_out2'])

    then_body = onnx.helper.make_graph([then_add_node, then_add_node2],
                                       'then_body', [], [then_out, then_out2])
    else_body = onnx.helper.make_graph([else_mul_node, else_sub_node],
                                       'else_body', [], [else_out, else_out2])

    cond = np.array([1]).astype(bool)
    cond_tensor = helper.make_tensor(name="cond",
                                     data_type=TensorProto.BOOL,
                                     dims=cond.shape,
                                     vals=cond.astype(bool))

    res = onnx.helper.make_tensor_value_info('res', TensorProto.FLOAT, [])

    node = onnx.helper.make_node('If',
                                 inputs=['cond'],
                                 outputs=['res'],
                                 then_branch=then_body,
                                 else_branch=else_body)

    return ([node], [x, y], [res], [cond_tensor, xt_tensor, yt_tensor])
@onnx_test
def if_then_else_diff_type_test():
    x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.INT64, [2])
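The two .onnx files named in the commit message are generated from the functions above. As a rough sketch of what an @onnx_test-style generator presumably does with the returned (nodes, inputs, outputs, initializers) tuple; the save_onnx_case helper below is hypothetical, not the actual gen_onnx.py machinery:

import onnx
from onnx import helper


# Hypothetical helper for illustration: build and serialize a model from the
# (nodes, inputs, outputs, initializers) tuple that each test case returns.
def save_onnx_case(case_fn):
    nodes, inputs, outputs, initializers = case_fn()
    graph = helper.make_graph(nodes,
                              case_fn.__name__,
                              inputs,
                              outputs,
                              initializer=initializers)
    model = helper.make_model(graph)
    onnx.save(model, case_fn.__name__ + '.onnx')

# e.g. save_onnx_case(if_then_else_multi_output_shapes_test) would write
# if_then_else_multi_output_shapes_test.onnx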
@@ -2646,6 +2646,77 @@ TEST_CASE(if_else_empty_constant_test)
    EXPECT(p == prog);
}

TEST_CASE(if_then_else_multi_output_shapes_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();

    migraphx::shape sc{migraphx::shape::bool_type, {1}};
    auto cond = mm->add_literal(migraphx::literal(sc, {1}));

    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    migraphx::shape s_trail{migraphx::shape::float_type, {2, 3, 1}};

    std::vector<float> ones(s.elements(), 1.0f);
    auto l1 = mm->add_literal(s_trail, ones);

    std::vector<float> rand = {-0.598881, 0.495922, 0.145435, -0.483517, -0.613278, -0.953624};
    auto l2 = mm->add_literal(s, rand);

    auto x = mm->add_parameter("x", s_trail);
    auto y = mm->add_parameter("y", s);

    auto* then_mod = p.create_module("If_5_if");
    auto rt  = then_mod->add_instruction(migraphx::make_op("add"), x, l1);
    auto rt2 = then_mod->add_instruction(migraphx::make_op("add"), x, x);
    then_mod->add_return({rt, rt2});

    auto* else_mod = p.create_module("If_5_else");
    auto re  = else_mod->add_instruction(migraphx::make_op("mul"), y, l2);
    auto re2 = else_mod->add_instruction(migraphx::make_op("sub"), y, l2);
    auto unsqueeze =
        else_mod->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {2}}}), re);
    auto unsqueeze2 =
        else_mod->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {2}}}), re2);
    else_mod->add_return({unsqueeze, unsqueeze2});

    auto ret = mm->add_instruction(migraphx::make_op("if"), {cond}, {then_mod, else_mod});
    auto r   = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), ret);
    mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 1}}), ret);
    mm->add_return({r});

    auto prog = migraphx::parse_onnx("if_then_else_multi_output_shapes_test.onnx");

    EXPECT(p == prog);
}

TEST_CASE(if_then_else_multi_output_shapes_test2)
{
    migraphx::program p;
    auto* mm = p.get_main_module();

    migraphx::shape sc{migraphx::shape::bool_type, {1}};
    auto cond = mm->add_literal(migraphx::literal(sc, {1}));

    migraphx::shape s{migraphx::shape::float_type, {2, 3, 1}};
    migraphx::shape s_trail{migraphx::shape::float_type, {2, 3, 1}};

    std::vector<float> ones(s.elements(), 1.0f);
    auto l1 = mm->add_literal(s_trail, ones);

    std::vector<float> rand = {-0.245949, -0.258438, -0.435196, -0.975557, -1.07789, 1.90269};
    auto l2 = mm->add_literal(s, rand);

    auto x = mm->add_parameter("x", s_trail);
    auto y = mm->add_parameter("y", s);

    auto* then_mod = p.create_module("If_5_if");
    auto rt  = then_mod->add_instruction(migraphx::make_op("add"), x, l1);
    auto rt2 = then_mod->add_instruction(migraphx::make_op("add"), x, x);
    then_mod->add_return({rt, rt2});

    auto* else_mod = p.create_module("If_5_else");
    auto re  = else_mod->add_instruction(migraphx::make_op("mul"), y, l2);
    auto re2 = else_mod->add_instruction(migraphx::make_op("sub"), y, l2);
    else_mod->add_return({re, re2});

    auto ret = mm->add_instruction(migraphx::make_op("if"), {cond}, {then_mod, else_mod});
    auto r   = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), ret);
    mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 1}}), ret);
    mm->add_return({r});

    auto prog = migraphx::parse_onnx("if_then_else_multi_output_shapes_test2.onnx");

    EXPECT(p == prog);
}

TEST_CASE(if_then_else_diff_shape)
{
    EXPECT(test::throws([&] { migraphx::parse_onnx("if_then_else_diff_shape_test.onnx"); }));