Commit 8a5bc2fb authored by Paul

Merge

parents 868230f5 bb0e04ce
@@ -626,6 +626,46 @@ def constant_scalar_test():
return ([node], [], [y])
@onnx_test
def constant_empty_scalar_int64_test():
x = np.array([]).astype(np.int64)
y = helper.make_tensor_value_info('0', TensorProto.INT64, [0])
node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['0'],
value=onnx.helper.make_tensor(
name='empty_tensor',
data_type=TensorProto.INT64,
dims=x.shape,
vals=x.flatten().astype(np.int64),
),
)
return ([node], [], [y])
@onnx_test
def constant_one_val_int64_test():
x = np.array([1]).astype(np.int64)
y = helper.make_tensor_value_info('0', TensorProto.INT64, [1])
node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['0'],
value=onnx.helper.make_tensor(
name='one_element_tensor',
data_type=TensorProto.INT64,
dims=x.shape,
vals=x.flatten().astype(np.int64),
),
)
return ([node], [], [y])
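# Offline sanity check for the two constant tests above (illustrative only,
# not wired into the test suite): a zero-element tensor has dims=[0], a
# one-element tensor dims=[1].
def _constant_int64_tensor_sketch():
    from onnx import numpy_helper
    empty = helper.make_tensor('empty_tensor', TensorProto.INT64, dims=[0], vals=[])
    one = helper.make_tensor('one_element_tensor', TensorProto.INT64, dims=[1], vals=[1])
    assert numpy_helper.to_array(empty).shape == (0, )  # no elements
    assert numpy_helper.to_array(one).tolist() == [1]  # single value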
@onnx_test
def const_of_shape_empty_input_test():
tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.INT64, [1],
@@ -851,6 +891,96 @@ def conv_bn_relu_maxpool_test():
return ([node0, node1, node2, node3], [x, y, z, m, n, k, l], [out])
@onnx_test
def conv_dynamic_batch_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 3, 5, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
[None, 1, 3, 3])
node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])
return ([node], [x, y], [out])
@onnx_test
def conv_dynamic_img_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[1, 3, None, None])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
[1, 1, None, None])
node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])
return ([node], [x, y], [out])
@onnx_test
def conv_dynamic_weights_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT,
[1, 3, None, None])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
[1, 1, None, None])
node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])
return ([node], [x, y], [out])
@onnx_test
def conv_dynamic_img_and_weights_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[1, 3, None, None])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT,
[1, 3, None, None])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
[1, 1, None, None])
node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])
return ([node], [x, y], [out])
@onnx_test
def conv_dynamic_batch_same_upper_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 3, 5, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 5, 5])
node = onnx.helper.make_node('Conv',
inputs=['0', '1'],
outputs=['2'],
auto_pad='SAME_UPPER')
return ([node], [x, y], [out])
@onnx_test
def conv_dynamic_img_same_upper_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[1, 3, None, None])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
[1, 1, None, None])
node = onnx.helper.make_node('Conv',
inputs=['0', '1'],
outputs=['2'],
auto_pad='SAME_UPPER')
return ([node], [x, y], [out])
@onnx_test
def conv_dynamic_kernel_same_lower_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT,
[1, 3, None, None])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 5, 5])
node = onnx.helper.make_node('Conv',
inputs=['0', '1'],
outputs=['2'],
auto_pad='SAME_LOWER')
return ([node], [x, y], [out])
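# Context for the dynamic-shape tests above (a sketch of standard onnx helper
# behavior): passing None for a dimension leaves that dim unset in the
# ValueInfoProto, which the MIGraphX parser can then replace, e.g. with
# default_dyn_dim_value (see the C++ parse tests).
def _dynamic_dim_sketch():
    vi = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 3, 5, 5])
    dims = vi.type.tensor_type.shape.dim
    assert not dims[0].HasField('dim_value')  # unset -> dynamic dimension
    assert dims[1].dim_value == 3  # fixed dimension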
@onnx_test
def conv_relu_maxpool_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
@@ -2496,6 +2626,62 @@ def instance_norm_test():
return ([node], [x, scale, bias], [y])
@onnx_test
def instance_norm_half_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [1, 2, 3, 3])
scale = helper.make_tensor_value_info('1', TensorProto.FLOAT16, [2])
bias = helper.make_tensor_value_info('2', TensorProto.FLOAT16, [2])
y = helper.make_tensor_value_info('3', TensorProto.FLOAT16, [1, 2, 3, 3])
node = onnx.helper.make_node('InstanceNormalization',
inputs=['0', '1', '2'],
outputs=['3'])
return ([node], [x, scale, bias], [y])
@onnx_test
def instance_norm_type_mismatch_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 2, 3, 3])
scale = helper.make_tensor_value_info('1', TensorProto.FLOAT16, [2])
bias = helper.make_tensor_value_info('2', TensorProto.FLOAT16, [2])
y = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 2, 3, 3])
node = onnx.helper.make_node('InstanceNormalization',
inputs=['0', '1', '2'],
outputs=['3'])
return ([node], [x, scale, bias], [y])
@onnx_test
def instance_norm_invalid_type_test():
x = helper.make_tensor_value_info('0', TensorProto.INT32, [1, 2, 3, 3])
scale = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2])
bias = helper.make_tensor_value_info('2', TensorProto.FLOAT, [2])
y = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 2, 3, 3])
node = onnx.helper.make_node('InstanceNormalization',
inputs=['0', '1', '2'],
outputs=['3'])
return ([node], [x, scale, bias], [y])
@onnx_test
def instance_norm_nonbroadcastable_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 2, 3, 3])
scale = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4])
bias = helper.make_tensor_value_info('2', TensorProto.FLOAT, [4])
y = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 2, 3, 3])
node = onnx.helper.make_node('InstanceNormalization',
inputs=['0', '1', '2'],
outputs=['3'])
return ([node], [x, scale, bias], [y])
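# Reference semantics targeted by the InstanceNormalization tests above
# (an illustrative numpy sketch, not used by the generated models):
# y = scale * (x - mean) / sqrt(var + eps) + bias, with mean and variance
# taken per sample and channel over the spatial axes.
def _instance_norm_sketch(x, scale, bias, eps=1e-5):
    mean = x.mean(axis=(2, 3), keepdims=True)
    var = x.var(axis=(2, 3), keepdims=True)
    y = (x - mean) / np.sqrt(var + eps)
    return scale[None, :, None, None] * y + bias[None, :, None, None]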
@onnx_test
def instance_norm_val_test():
x = np.array([[[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
@@ -3086,7 +3272,7 @@ def max_test():
a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
c = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3])
y = helper.make_tensor_value_info('3', TensorProto.FLOAT, [3])
node = onnx.helper.make_node(
'Max',
@@ -3220,7 +3406,7 @@ def min_test():
a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
c = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3])
y = helper.make_tensor_value_info('3', TensorProto.FLOAT, [3])
node = onnx.helper.make_node(
'Min',
@@ -3231,6 +3417,89 @@ def min_test():
return ([node], [a, b, c], [y])
@onnx_test
def mod_test():
a = helper.make_tensor_value_info('0', TensorProto.INT32, [3, 3, 3])
b = helper.make_tensor_value_info('1', TensorProto.INT32, [3, 3, 3])
y = helper.make_tensor_value_info('2', TensorProto.INT32, [3, 3, 3])
node = onnx.helper.make_node('Mod', inputs=['0', '1'], outputs=['2'])
return ([node], [a, b], [y])
@onnx_test
def mod_test_half():
a = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [3, 3, 3])
b = helper.make_tensor_value_info('1', TensorProto.FLOAT16, [3, 3, 3])
y = helper.make_tensor_value_info('2', TensorProto.FLOAT16, [3, 3, 3])
node = onnx.helper.make_node('Mod', inputs=['0', '1'], outputs=['2'])
return ([node], [a, b], [y])
@onnx_test
def mod_test_different_dtypes():
a = helper.make_tensor_value_info('0', TensorProto.INT16, [3, 3, 3])
b = helper.make_tensor_value_info('1', TensorProto.INT32, [3, 3, 3])
y = helper.make_tensor_value_info('2', TensorProto.INT32, [3, 3, 3])
node = onnx.helper.make_node(
'Mod',
inputs=['0', '1'],
outputs=['2'],
)
return ([node], [a, b], [y])
@onnx_test
def mod_test_fmod():
a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3, 3, 3])
b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 3, 3])
y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 3, 3])
node = onnx.helper.make_node(
'Mod',
inputs=['0', '1'],
outputs=['2'],
fmod=1  # fmod flag = 1
)
return ([node], [a, b], [y])
@onnx_test
def mod_test_fmod_half():
a = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [3, 3, 3])
b = helper.make_tensor_value_info('1', TensorProto.FLOAT16, [3, 3, 3])
y = helper.make_tensor_value_info('2', TensorProto.FLOAT16, [3, 3, 3])
node = onnx.helper.make_node('Mod',
inputs=['0', '1'],
outputs=['2'],
fmod=1)
return ([node], [a, b], [y])
@onnx_test
def mod_test_fmod_different_dtypes():
a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3, 3, 3])
b = helper.make_tensor_value_info('1', TensorProto.INT32, [3, 3, 3])
y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 3, 3])
node = onnx.helper.make_node(
'Mod',
inputs=['0', '1'],
outputs=['2'],
fmod=1  # fmod flag = 1
)
return ([node], [a, b], [y])
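# The fmod attribute selects between two remainder conventions, which is the
# difference the plain and fmod variants above exercise (numpy analogy,
# illustrative only):
def _mod_semantics_sketch():
    assert np.mod(-4, 3) == 2  # fmod=0: sign follows the divisor (Python %)
    assert np.fmod(-4, 3) == -1  # fmod=1: sign follows the dividend (C fmod)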
@onnx_test
def multinomial_test():
sample_size = 10
......
[Binary ONNX protobuf test files; contents are not human-readable. Added:
instance_norm_half_test.onnx, instance_norm_invalid_type_test.onnx,
instance_norm_nonbroadcastable_test.onnx, instance_norm_type_mismatch_test.onnx,
mod_test.onnx, mod_test_half.onnx, mod_test_different_dtypes.onnx,
mod_test_fmod.onnx, mod_test_fmod_half.onnx, mod_test_fmod_different_dtypes.onnx.
Updated: max_test.onnx and min_test.onnx (graph name changed from test-dropout
to max_test/min_test; output tensor renamed from '2' to '3').]
@@ -636,11 +636,31 @@ TEST_CASE(constant_scalar_test)
EXPECT(p == prog);
}
TEST_CASE(constant_empty_scalar_int64_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
mm->add_literal(migraphx::literal{migraphx::shape::int64_type});
auto prog = optimize_onnx("constant_empty_scalar_int64_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(constant_one_val_int64_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
mm->add_literal(migraphx::literal{migraphx::shape{migraphx::shape::int64_type, {1}}, {1}});
auto prog = optimize_onnx("constant_one_val_int64_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(const_of_shape_empty_input_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
mm->add_literal(migraphx::literal(migraphx::shape::int32_type));
migraphx::shape s(migraphx::shape::int64_type, {1}, {0});
std::vector<int64_t> vec(s.elements(), 10);
mm->add_literal(migraphx::literal(s, vec));
@@ -796,6 +816,170 @@ TEST_CASE(conv_bn_relu_maxpool_test)
EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_batch_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 6, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
l0,
l1);
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 6, 0};
auto prog = migraphx::parse_onnx("conv_dynamic_batch_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_img_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 10, 0}, {5, 10, 0}}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
l0,
l1);
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {5, 10, 0};
auto prog = migraphx::parse_onnx("conv_dynamic_img_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_weights_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {1, 3, 5, 5}});
auto l1 = mm->add_parameter(
"1", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
l0,
l1);
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {2, 4, 0};
auto prog = migraphx::parse_onnx("conv_dynamic_weights_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_img_and_weights_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 10, 0}, {5, 10, 0}}});
auto l1 = mm->add_parameter(
"1", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
l0,
l1);
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {5, 10, 0};
options.map_dyn_input_dims["1"] = {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}};
auto prog = migraphx::parse_onnx("conv_dynamic_img_and_weights_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_batch_same_upper)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 10, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
auto c0 =
mm->add_instruction(migraphx::make_op("convolution",
{{"padding", {1, 1, 1, 1}},
{"stride", {1, 1}},
{"dilation", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::same},
{"use_dynamic_same_auto_pad", false}}),
l0,
l1);
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 10, 0};
auto prog = migraphx::parse_onnx("conv_dynamic_batch_same_upper_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_img_same_upper)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 10, 0}, {5, 10, 0}}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}},
{"stride", {1, 1}},
{"dilation", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::same_upper},
{"use_dynamic_same_auto_pad", true}}),
l0,
l1);
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {5, 10, 0};
auto prog = migraphx::parse_onnx("conv_dynamic_img_same_upper_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_kernel_same_lower)
{
std::cout << "here1\n";
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {1, 3, 5, 5}});
auto l1 = mm->add_parameter(
"1", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}});
std::cout << "here2\n";
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}},
{"stride", {1, 1}},
{"dilation", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::same_lower},
{"use_dynamic_same_auto_pad", true}}),
l0,
l1);
std::cout << "here3\n";
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {2, 4, 0};
std::cout << "here\n";
auto prog = migraphx::parse_onnx("conv_dynamic_kernel_same_lower_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_relu_maxpool_test)
{
migraphx::program p;
@@ -2370,8 +2554,9 @@ TEST_CASE(instance_norm_test)
auto l0 = mm->add_instruction(migraphx::make_op("sqdiff"), x, mean_bcast);
auto variance = mm->add_instruction(migraphx::make_op("reduce_mean", {{"axes", {2, 3}}}), l0);
auto l1 = mm->add_instruction(migraphx::make_op("sub"), x, mean_bcast);
auto epsilon_literal =
mm->add_literal(migraphx::literal{migraphx::shape{migraphx::shape::float_type}, {1e-5}});
auto epsilon_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", dims}}), epsilon_literal);
auto variance_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", dims}}), variance);
@@ -2390,6 +2575,60 @@ TEST_CASE(instance_norm_test)
EXPECT(p == prog);
}
TEST_CASE(instance_norm_half_test)
{
std::vector<size_t> dims{1, 2, 3, 3};
migraphx::shape s1{migraphx::shape::half_type, dims};
migraphx::shape s2{migraphx::shape::half_type, {2}};
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("0", s1);
auto scale = mm->add_parameter("1", s2);
auto bias = mm->add_parameter("2", s2);
auto mean = mm->add_instruction(migraphx::make_op("reduce_mean", {{"axes", {2, 3}}}), x);
auto mean_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", dims}}), mean);
auto l0 = mm->add_instruction(migraphx::make_op("sqdiff"), x, mean_bcast);
auto variance = mm->add_instruction(migraphx::make_op("reduce_mean", {{"axes", {2, 3}}}), l0);
auto l1 = mm->add_instruction(migraphx::make_op("sub"), x, mean_bcast);
auto epsilon_literal =
mm->add_literal(migraphx::literal{migraphx::shape{migraphx::shape::half_type}, {1e-5}});
auto epsilon_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", dims}}), epsilon_literal);
auto variance_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", dims}}), variance);
auto l2 = mm->add_instruction(migraphx::make_op("add"), variance_bcast, epsilon_bcast);
auto l3 = mm->add_instruction(migraphx::make_op("rsqrt"), l2);
auto l4 = mm->add_instruction(migraphx::make_op("mul"), l1, l3);
auto scale_bcast = mm->add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", dims}}), scale);
auto bias_bcast = mm->add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", dims}}), bias);
auto l5 = mm->add_instruction(migraphx::make_op("mul"), l4, scale_bcast);
mm->add_instruction(migraphx::make_op("add"), l5, bias_bcast);
auto prog = optimize_onnx("instance_norm_half_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(instance_norm_type_mismatch_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("instance_norm_type_mismatch_test.onnx"); }));
}
TEST_CASE(instance_norm_invalid_type_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("instance_norm_invalid_type_test.onnx"); }));
}
TEST_CASE(instance_norm_nonbroadcastable_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("instance_norm_nonbroadcastable_test.onnx"); }));
}
TEST_CASE(leaky_relu_test)
{
migraphx::program p;
@@ -2832,7 +3071,9 @@ TEST_CASE(max_test)
auto l0 = mm->add_instruction(migraphx::make_op("max"), input0, input1);
mm->add_instruction(migraphx::make_op("max"), l0, input2);
optimize_onnx("max_test.onnx");
auto prog = optimize_onnx("max_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(maxpool_notset_test)
@@ -2947,7 +3188,79 @@ TEST_CASE(min_test)
auto l0 = mm->add_instruction(migraphx::make_op("min"), input0, input1);
mm->add_instruction(migraphx::make_op("min"), l0, input2);
optimize_onnx("min_test.onnx");
auto prog = optimize_onnx("min_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(mod_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::int32_type, {3, 3, 3}});
auto input1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::int32_type, {3, 3, 3}});
mm->add_instruction(migraphx::make_op("mod"), input0, input1);
auto prog = optimize_onnx("mod_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(mod_test_half)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("mod_test_half.onnx"); }));
}
TEST_CASE(mod_test_different_dtypes)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::int16_type, {3, 3, 3}});
auto input1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::int32_type, {3, 3, 3}});
add_common_op(*mm, migraphx::make_op("mod"), {input0, input1});
auto prog = optimize_onnx("mod_test_different_dtypes.onnx");
EXPECT(p == prog);
}
TEST_CASE(mod_test_fmod)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {3, 3, 3}});
auto input1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {3, 3, 3}});
mm->add_instruction(migraphx::make_op("fmod"), input0, input1);
auto prog = optimize_onnx("mod_test_fmod.onnx");
EXPECT(p == prog);
}
TEST_CASE(mod_test_fmod_half)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::half_type, {3, 3, 3}});
auto input1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::half_type, {3, 3, 3}});
mm->add_instruction(migraphx::make_op("fmod"), input0, input1);
auto prog = optimize_onnx("mod_test_fmod_half.onnx");
EXPECT(p == prog);
}
TEST_CASE(mod_test_fmod_different_dtypes)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {3, 3, 3}});
auto input1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::int32_type, {3, 3, 3}});
add_common_op(*mm, migraphx::make_op("fmod"), {input0, input1});
auto prog = optimize_onnx("mod_test_fmod_different_dtypes.onnx");
EXPECT(p == prog);
}
TEST_CASE(multinomial_test)
@@ -3773,7 +4086,7 @@ TEST_CASE(reducesum_empty_axes_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
mm->add_literal(migraphx::literal{migraphx::shape::int64_type});
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
auto l1 = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {0, 1, 2, 3}}}), x);
auto r = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {0, 1, 2, 3}}}), l1);
@@ -3788,7 +4101,7 @@ TEST_CASE(reducesum_noop_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
mm->add_literal(migraphx::literal{migraphx::shape::int64_type});
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
mm->add_return({x});
auto prog = migraphx::parse_onnx("reducesum_noop_test.onnx");
@@ -4998,7 +5311,7 @@ TEST_CASE(squeeze_empty_axes_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
mm->add_literal(migraphx::literal{migraphx::shape::int64_type});
auto l0 = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 1, 5, 1}});
auto l1 = mm->add_instruction(migraphx::make_op("squeeze"), l0);
mm->add_return({l1});
@@ -5433,7 +5746,59 @@ TEST_CASE(variable_batch_test)
EXPECT(p == prog);
}
TEST_CASE(variable_batch_user_input_test1)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 16, 16}});
auto r = mm->add_instruction(migraphx::make_op("identity"), l0);
mm->add_return({r});
migraphx::onnx_options options;
options.default_dyn_dim_value = {2, 2, 0};
auto prog = migraphx::parse_onnx("variable_batch_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(variable_batch_user_input_test2)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0",
migraphx::shape{migraphx::shape::float_type,
{{2, 5, 0}, {3, 3, 0}, {16, 16, 0}, {16, 16, 0}}});
auto r = mm->add_instruction(migraphx::make_op("identity"), l0);
mm->add_return({r});
migraphx::onnx_options options;
options.default_dyn_dim_value = {2, 5, 0};
auto prog = migraphx::parse_onnx("variable_batch_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(variable_batch_user_input_test3)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0",
migraphx::shape{migraphx::shape::float_type,
{{2, 5, 0}, {3, 3, 0}, {16, 16, 0}, {16, 16, 0}}});
auto r = mm->add_instruction(migraphx::make_op("identity"), l0);
mm->add_return({r});
migraphx::onnx_options options;
options.map_dyn_input_dims["0"] = {{2, 5, 0}, {3, 3, 0}, {16, 16, 0}, {16, 16, 0}};
auto prog = migraphx::parse_onnx("variable_batch_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(variable_batch_user_input_test4)
{
migraphx::program p;
auto* mm = p.get_main_module();
@@ -5449,6 +5814,26 @@ TEST_CASE(variable_batch_user_input_test)
EXPECT(p == prog);
}
TEST_CASE(variable_batch_user_input_test5)
{
// Error using both default_dim_value and default_dyn_dim_value
migraphx::onnx_options options;
options.default_dim_value = 2;
options.default_dyn_dim_value = {1, 2, 0};
EXPECT(test::throws([&] { migraphx::parse_onnx("variable_batch_test.onnx", options); }));
}
TEST_CASE(variable_batch_user_input_test6)
{
// Error using both map_dyn_input_dims and map_input_dims
migraphx::onnx_options options;
options.map_dyn_input_dims["0"] = {{2, 5, 0}, {3, 3, 0}, {16, 16, 0}, {16, 16, 0}};
options.map_input_dims["0"] = {2, 3, 16, 16};
EXPECT(test::throws([&] { migraphx::parse_onnx("variable_batch_test.onnx", options); }));
}
TEST_CASE(variable_batch_leq_zero_test)
{
migraphx::program p;
......
@@ -631,6 +631,120 @@ TEST_CASE(mean_integral_test)
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(mod_test)
{
migraphx::program p = migraphx::parse_onnx("mod_test.onnx");
p.compile(migraphx::ref::target{});
migraphx::shape s{migraphx::shape::int32_type, {3, 3, 3}};
std::vector<int32_t> a = {-4, 7, 5, 4, -7, 8, -4, 7, 5, 4, -7, 8, -4, 7,
5, 4, -7, 8, -4, 7, 5, 4, -7, 8, -4, 7, 5};
std::vector<int32_t> b = {2, -3, 8, -2, 3, 5, 2, -3, 8, -2, 3, 5, 2, -3,
8, -2, 3, 5, 2, -3, 8, -2, 3, 5, 2, -3, 8};
migraphx::parameter_map p_map;
p_map["0"] = migraphx::argument(s, a.data());
p_map["1"] = migraphx::argument(s, b.data());
auto result = p.eval(p_map).back();
std::vector<int32_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<int32_t> gold = {0, -2, 5, 0, 2, 3, 0, -2, 5, 0, 2, 3, 0, -2,
5, 0, 2, 3, 0, -2, 5, 0, 2, 3, 0, -2, 5};
EXPECT(migraphx::verify_range(result_vector, gold));
}
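// The gold vector above follows the divisor-sign remainder convention; it can
// be reproduced offline (an illustrative numpy sketch, not part of this test):
//   a = np.array([-4, 7, 5, 4, -7, 8] * 4 + [-4, 7, 5], dtype=np.int32)
//   b = np.array([2, -3, 8, -2, 3, 5] * 4 + [2, -3, 8], dtype=np.int32)
//   np.mod(a, b)[:6] -> [0, -2, 5, 0, 2, 3]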
TEST_CASE(mod_test_different_dtypes)
{
migraphx::program p = migraphx::parse_onnx("mod_test_different_dtypes.onnx");
p.compile(migraphx::ref::target{});
migraphx::shape s_int16{migraphx::shape::int16_type, {3, 3, 3}};
migraphx::shape s_int32{migraphx::shape::int32_type, {3, 3, 3}};
std::vector<int16_t> a = {-4, 7, 5, 4, -7, 8, -4, 7, 5, 4, -7, 8, -4, 7,
5, 4, -7, 8, -4, 7, 5, 4, -7, 8, -4, 7, 5};
std::vector<int32_t> b = {2, -3, 8, -2, 3, 5, 2, -3, 8, -2, 3, 5, 2, -3,
8, -2, 3, 5, 2, -3, 8, -2, 3, 5, 2, -3, 8};
migraphx::parameter_map p_map;
p_map["0"] = migraphx::argument(s_int16, a.data());
p_map["1"] = migraphx::argument(s_int32, b.data());
auto result = p.eval(p_map).back();
std::vector<int32_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<int32_t> gold = {0, -2, 5, 0, 2, 3, 0, -2, 5, 0, 2, 3, 0, -2,
5, 0, 2, 3, 0, -2, 5, 0, 2, 3, 0, -2, 5};
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(mod_test_fmod)
{
migraphx::program p = migraphx::parse_onnx("mod_test_fmod.onnx");
p.compile(migraphx::ref::target{});
migraphx::shape s{migraphx::shape::float_type, {3, 3, 3}};
std::vector<float> a = {1.2, -2.2, 3.3, 4.1, -5.4, 6.7, 7.8, -8.4, 9.9,
10.7, 11.2, 12.3, 13.9, -14.2, 15.8, 16.6, 17.9, 18.2,
19.0, 20.0, 21.0, -22.0, 23.0, -24.0, 25.2, 26.3, 27.1};
std::vector<float> b = {30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17,
16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4};
migraphx::parameter_map p_map;
p_map["0"] = migraphx::argument(s, a.data());
p_map["1"] = migraphx::argument(s, b.data());
auto result = p.eval(p_map).back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1.2, -2.2, 3.3, 4.1, -5.4, 6.7, 7.8, -8.4, 9.9,
10.7, 11.2, 12.3, 13.9, -14.2, 15.8, 1.6, 3.9, 5.2,
7.0, 9.0, 1.0, -4.0, 7.0, -3.0, 1.2, 1.3, 3.1};
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(mod_test_fmod_different_dtypes)
{
migraphx::program p = migraphx::parse_onnx("mod_test_fmod_different_dtypes.onnx");
p.compile(migraphx::ref::target{});
migraphx::shape s_float{migraphx::shape::float_type, {3, 3, 3}};
migraphx::shape s_int{migraphx::shape::int32_type, {3, 3, 3}};
std::vector<float> a = {1.2, -2.2, 3.3, 4.1, -5.4, 6.7, 7.8, -8.4, 9.9,
10.7, 11.2, 12.3, 13.9, -14.2, 15.8, 16.6, 17.9, 18.2,
19.0, 20.0, 21.0, -22.0, 23.0, -24.0, 25.2, 26.3, 27.1};
std::vector<int32_t> b = {30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17,
16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4};
migraphx::parameter_map p_map;
p_map["0"] = migraphx::argument(s_float, a.data());
p_map["1"] = migraphx::argument(s_int, b.data());
auto result = p.eval(p_map).back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1.2, -2.2, 3.3, 4.1, -5.4, 6.7, 7.8, -8.4, 9.9,
10.7, 11.2, 12.3, 13.9, -14.2, 15.8, 1.6, 3.9, 5.2,
7.0, 9.0, 1.0, -4.0, 7.0, -3.0, 1.2, 1.3, 3.1};
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(nonzero_test)
{
migraphx::program p = migraphx::parse_onnx("nonzero_dynamic_test.onnx");
......