Commit f303624d authored by charlie

Merge branch 'develop' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into refactor_dyn_fixed_compare
parents f4a3a182 48cc33e4
@@ -237,6 +237,64 @@ def averagepool_3d_test():
    return ([node], [x], [out])


@onnx_test
def averagepool_dyn_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
                                      [None, 3, 5, 5, 5])
    out = helper.make_tensor_value_info('1', TensorProto.FLOAT,
                                        [None, 3, 3, 3, 3])
    node = onnx.helper.make_node('AveragePool',
                                 inputs=['0'],
                                 outputs=['1'],
                                 kernel_shape=[3, 3, 3])

    return ([node], [x], [out])
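A None entry in make_tensor_value_info leaves that dimension symbolic in the ONNX graph; the C++ half of this change (below) concretizes each such dimension to a {min, max, optimal} range through options.default_dyn_dim_value when the model is parsed.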
@onnx_test
def averagepool_dyn_autopad_error_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 1, 5, 5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, 1, 5, 5])
    node = onnx.helper.make_node('AveragePool',
                                 inputs=['x'],
                                 outputs=['y'],
                                 kernel_shape=[2, 2],
                                 auto_pad='SAME_LOWER')

    return ([node], [x], [y])


@onnx_test
def averagepool_dyn_asym_padding_error_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 1, 5, 5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, 1, 3, 3])
    node = onnx.helper.make_node('AveragePool',
                                 inputs=['x'],
                                 outputs=['y'],
                                 kernel_shape=[2, 2],
                                 strides=[2, 2],
                                 pads=[0, 0, 1, 1])

    return ([node], [x], [y])


@onnx_test
def averagepool_dyn_cip_error_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 1, 5, 5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, 1, 1, 1])
    node = onnx.helper.make_node('AveragePool',
                                 inputs=['x'],
                                 outputs=['y'],
                                 kernel_shape=[2, 2],
                                 count_include_pad=1)

    return ([node], [x], [y])
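These three models pair with the error TEST_CASEs further down: with a dynamic input shape the parser rejects auto_pad, asymmetric pads, and count_include_pad (the 'cip' in the test name), so the C++ side only asserts that parse_onnx throws.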
@onnx_test
def averagepool_notset_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
@@ -2069,6 +2127,21 @@ def globalavgpool_test():
    return ([node], [x], [y])


@onnx_test
def globalavgpool_dyn_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
                                      [None, 3, 16, 16])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 3, 1, 1])
    node = onnx.helper.make_node(
        'GlobalAveragePool',
        inputs=['0'],
        outputs=['1'],
    )

    return ([node], [x], [y])


@onnx_test
def globallppool_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 16, 16])
@@ -2083,6 +2156,21 @@ def globallppool_test():
    return ([node], [x], [y])


@onnx_test
def globallppool_dyn_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
                                      [1, 3, None, None])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 1, 1])
    node = onnx.helper.make_node(
        'GlobalLpPool',
        inputs=['0'],
        outputs=['1'],
    )

    return ([node], [x], [y])


@onnx_test
def globalmaxpool_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 16, 16])
@@ -2097,6 +2185,21 @@ def globalmaxpool_test():
    return ([node], [x], [y])


@onnx_test
def globalmaxpool_dyn_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
                                      [None, 3, 32, 32])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 3, 1, 1])
    node = onnx.helper.make_node(
        'GlobalMaxPool',
        inputs=['0'],
        outputs=['1'],
    )

    return ([node], [x], [y])


@onnx_test
def greater_test():
    ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
@@ -6297,6 +6400,21 @@ def transpose_test():
    return ([node], [x], [y])


@onnx_test
def transpose_dyn_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 2, 2, 3])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 3, 2, 2])
    node = onnx.helper.make_node(
        'Transpose',
        perm=[0, 3, 1, 2],
        inputs=['0'],
        outputs=['1'],
    )

    return ([node], [x], [y])


@onnx_test
def transpose_gather_test():
    x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 5, 4, 6])
@@ -273,6 +273,51 @@ TEST_CASE(averagepool_3d_test)
    EXPECT(p == prog);
}
TEST_CASE(averagepool_dyn_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0  = mm->add_parameter(
        "0",
        {migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}, {5, 5, 0}}});
    auto ret = mm->add_instruction(migraphx::make_op("pooling",
                                                     {{"mode", migraphx::op::pooling_mode::average},
                                                      {"padding", {0, 0, 0, 0, 0, 0}},
                                                      {"stride", {1, 1, 1}},
                                                      {"lengths", {3, 3, 3}}}),
                                   l0);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 4, 0};
    auto prog = migraphx::parse_onnx("averagepool_dyn_test.onnx", options);
    EXPECT(p == prog);
}
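Each dynamic dimension in these shapes is a {min, max, optimal} triple: {1, 4, 0} allows any batch from 1 to 4 with no preferred size recorded (0), and a fixed extent is written with min == max, e.g. {3, 3, 0}. options.default_dyn_dim_value supplies this triple for every dimension the ONNX file left as None. A minimal standalone sketch of the same shape construction, assuming the dynamic_dimension aggregate and the shape::dynamic() accessor from the MIGraphX headers:

#include <migraphx/shape.hpp>

int main()
{
    // {min, max, optimal}; optimal == 0 means no preferred size is recorded.
    migraphx::shape::dynamic_dimension batch{1, 4, 0};
    // Mirrors the parameter shape of averagepool_dyn_test above.
    migraphx::shape s{migraphx::shape::float_type,
                      {batch, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}, {5, 5, 0}}};
    return s.dynamic() ? 0 : 1; // dynamic() is true when any dim is a real range
}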
TEST_CASE(averagepool_dyn_autopad_error_test)
{
    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 4, 0};
    EXPECT(test::throws(
        [&] { migraphx::parse_onnx("averagepool_dyn_autopad_error_test.onnx", options); }));
}

TEST_CASE(averagepool_dyn_asym_padding_error_test)
{
    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 4, 0};
    EXPECT(test::throws(
        [&] { migraphx::parse_onnx("averagepool_dyn_asym_padding_error_test.onnx", options); }));
}

TEST_CASE(averagepool_dyn_cip_error_test)
{
    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 4, 0};
    EXPECT(test::throws(
        [&] { migraphx::parse_onnx("averagepool_dyn_cip_error_test.onnx", options); }));
}
TEST_CASE(averagepool_notset_test)
{
    migraphx::program p;
@@ -2144,6 +2189,28 @@ TEST_CASE(globalavgpool_test)
    EXPECT(p == prog);
}
TEST_CASE(globalavgpool_dyn_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto input =
        mm->add_parameter("0",
                          migraphx::shape{migraphx::shape::float_type,
                                          {{1, 4, 0}, {3, 3, 0}, {16, 16, 0}, {16, 16, 0}}});
    auto ret = mm->add_instruction(migraphx::make_op("pooling",
                                                     {{"mode", migraphx::op::pooling_mode::average},
                                                      {"lengths", {16, 16}},
                                                      {"padding", {0, 0, 0, 0}}}),
                                   input);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 4, 0};
    auto prog = parse_onnx("globalavgpool_dyn_test.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE(globallppool_test)
{
    migraphx::program p;
@@ -2161,6 +2228,29 @@ TEST_CASE(globallppool_test)
    EXPECT(p == prog);
}
TEST_CASE(globallppool_dyn_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto input =
        mm->add_parameter("0",
                          migraphx::shape{migraphx::shape::float_type,
                                          {{1, 1, 0}, {3, 3, 0}, {16, 32, 0}, {16, 32, 0}}});
    auto ret = mm->add_instruction(migraphx::make_op("pooling",
                                                     {{"mode", migraphx::op::pooling_mode::lpnorm},
                                                      {"dyn_global", true},
                                                      {"padding", {0, 0, 0, 0}},
                                                      {"lengths", {}}}),
                                   input);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {16, 32, 0};
    auto prog = migraphx::parse_onnx("globallppool_dyn_test.onnx", options);
    EXPECT(p == prog);
}
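Note the contrast with globalavgpool_dyn_test above: there only the batch is dynamic and the 16x16 spatial extents are known, so the parser can bake lengths {16, 16}; here the spatial dims are ranges ({16, 32, 0}), so the op instead sets dyn_global and leaves lengths empty, deferring the window size until the actual input shape is known.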
TEST_CASE(globalmaxpool_test)
{
    migraphx::program p;
@@ -2178,6 +2268,28 @@ TEST_CASE(globalmaxpool_test)
    EXPECT(p == prog);
}
TEST_CASE(globalmaxpool_dyn_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto input =
        mm->add_parameter("0",
                          migraphx::shape{migraphx::shape::float_type,
                                          {{1, 4, 0}, {3, 3, 0}, {32, 32, 0}, {32, 32, 0}}});
    auto ret = mm->add_instruction(migraphx::make_op("pooling",
                                                     {{"mode", migraphx::op::pooling_mode::max},
                                                      {"lengths", {32, 32}},
                                                      {"padding", {0, 0, 0, 0}}}),
                                   input);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 4, 0};
    auto prog = parse_onnx("globalmaxpool_dyn_test.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE(greater_test)
{
    migraphx::program p;
@@ -5996,6 +6108,24 @@ TEST_CASE(transpose_test)
    EXPECT(p == prog);
}
TEST_CASE(transpose_dyn_test)
{
    migraphx::program p;
    auto* mm   = p.get_main_module();
    auto input = mm->add_parameter(
        "0",
        migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {2, 2, 0}, {2, 2, 0}, {3, 3, 0}}});
    std::vector<int64_t> perm{0, 3, 1, 2};
    auto t0 = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), input);
    mm->add_return({t0});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 4, 0};
    auto prog = migraphx::parse_onnx("transpose_dyn_test.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE(topk_attrk_test)
{
    migraphx::program p;
@@ -1549,16 +1549,76 @@ TEST_CASE(nms_shape)
                 score_thres_s);
}
TEST_CASE(pooling_shape0)
{
    migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
    throws_shape(migraphx::make_op("pooling",
                                   {{"mode", migraphx::op::pooling_mode::max},
                                    {"padding", {1}},
                                    {"stride", {0}},
                                    {"lengths", {1}}}),
                 input);
}
TEST_CASE(pooling_shape1)
{
    migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
    migraphx::shape output{migraphx::shape::float_type, {4, 3, 1, 1}};
    expect_shape(output,
                 migraphx::make_op("pooling",
                                   {{"mode", migraphx::op::pooling_mode::max},
                                    {"padding", {0, 0}},
                                    {"stride", {3, 3}},
                                    {"lengths", {1, 1}}}),
                 input);
}
TEST_CASE(pooling_shape2)
{
    migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
    migraphx::shape output{migraphx::shape::float_type, {4, 3, 2, 2}};
    expect_shape(output,
                 migraphx::make_op("pooling",
                                   {{"mode", migraphx::op::pooling_mode::max},
                                    {"padding", {0, 0}},
                                    {"stride", {3, 3}},
                                    {"lengths", {1, 1}},
                                    {"ceil_mode", true}}),
                 input);
}
TEST_CASE(pooling_shape3)
{
    migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
    migraphx::shape output{migraphx::shape::float_type, {4, 3, 3, 3}};
    expect_shape(output,
                 migraphx::make_op("pooling",
                                   {{"mode", migraphx::op::pooling_mode::max},
                                    {"padding", {2, 2}},
                                    {"stride", {3, 3}},
                                    {"lengths", {3, 3}},
                                    {"ceil_mode", true}}),
                 input);
}
TEST_CASE(pooling_dyn_shape0)
{
    migraphx::shape input{migraphx::shape::float_type,
                          {{1, 4, 0}, {3, 3, 3}, {3, 3, 3}, {3, 3, 0}}};
    throws_shape(migraphx::make_op("pooling",
                                   {{"mode", migraphx::op::pooling_mode::max},
                                    {"padding", {1}},
                                    {"stride", {0}},
                                    {"lengths", {1}}}),
                 input);
}
TEST_CASE(pooling_dyn_shape1)
{
    migraphx::shape input{migraphx::shape::float_type,
                          {{1, 4, 0}, {3, 3, 3}, {3, 3, 3}, {3, 3, 0}}};
    migraphx::shape output{migraphx::shape::float_type,
                           {{1, 4, 0}, {3, 3, 3}, {1, 1, 1}, {1, 1, 0}}};
    expect_shape(output,
                 migraphx::make_op("pooling",
                                   {{"mode", migraphx::op::pooling_mode::max},
                                    {"padding", {0, 0}},
                                    {"stride", {3, 3}},
                                    {"lengths", {1, 1}}}),
                 input);
}
TEST_CASE(pooling_dyn_shape2)
{
    migraphx::shape input{migraphx::shape::float_type,
                          {{1, 4, 0}, {5, 5, 0}, {3, 3, 3}, {3, 3, 0}}};
    migraphx::shape output{migraphx::shape::float_type,
                           {{1, 4, 0}, {5, 5, 0}, {2, 2, 2}, {2, 2, 0}}};
    expect_shape(output,
                 migraphx::make_op("pooling",
                                   {{"mode", migraphx::op::pooling_mode::max},
                                    {"padding", {0, 0}},
                                    {"stride", {3, 3}},
                                    {"lengths", {1, 1}},
                                    {"ceil_mode", true}}),
                 input);
}
TEST_CASE(pooling_dyn_shape3)
{
    migraphx::shape input{migraphx::shape::float_type,
                          {{4, 4, 0}, {3, 3, 0}, {4, 12, 8}, {4, 12, 8}}};
    migraphx::shape output{migraphx::shape::float_type,
                           {{4, 4, 0}, {3, 3, 0}, {2, 4, 3}, {2, 4, 3}}};
    expect_shape(output,
                 migraphx::make_op("pooling",
                                   {{"mode", migraphx::op::pooling_mode::max},
                                    {"padding", {0, 0}},
                                    {"stride", {3, 3}},
                                    {"lengths", {1, 1}}}),
                 input);
}
TEST_CASE(pooling_dyn_shape4)
{
    migraphx::shape input{migraphx::shape::float_type,
                          {{4, 4, 0}, {3, 3, 0}, {4, 12, 8}, {4, 12, 8}}};
    migraphx::shape output{migraphx::shape::float_type,
                           {{4, 4, 0}, {3, 3, 0}, {3, 6, 4}, {3, 6, 4}}};
    expect_shape(output,
                 migraphx::make_op("pooling",
                                   {{"mode", migraphx::op::pooling_mode::max},
                                    {"padding", {2, 2}},
                                    {"stride", {3, 3}},
                                    {"lengths", {3, 3}},
                                    {"ceil_mode", true}}),
                 input);
}
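The expected dyn ranges above follow the usual pooling arithmetic applied independently to each {min, max, optimal} value: out = floor((D + 2*pad - kernel) / stride) + 1, with ceil replacing floor when ceil_mode is set. A standalone sketch (not MIGraphX API) that reproduces the {3, 6, 4} spatial range of pooling_dyn_shape4:

#include <cmath>
#include <cstdio>

// Pooled output extent for one dimension using the standard formula.
int pooled(int dim, int kernel, int pad, int stride, bool ceil_mode)
{
    double w = static_cast<double>(dim + 2 * pad - kernel) / stride;
    return static_cast<int>(ceil_mode ? std::ceil(w) : std::floor(w)) + 1;
}

int main()
{
    // pooling_dyn_shape4: spatial dim {4, 12, 8}, kernel 3, pad 2, stride 3, ceil_mode
    for(int d : {4, 12, 8})
        std::printf("%d -> %d\n", d, pooled(d, 3, 2, 3, true)); // 4->3, 12->6, 8->4
    return 0;
}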
TEST_CASE(prefix_scan_sum)
{
    {
@@ -2288,6 +2385,28 @@ TEST_CASE(transpose_shape)
    throws_shape(migraphx::make_op("transpose", {{"permutation", {1, 2}}}), input);
}
TEST_CASE(transpose_dyn_shape0)
{
    migraphx::shape input{migraphx::shape::float_type, {{1, 4, 0}, {2, 2, 0}}};
    migraphx::shape output{migraphx::shape::float_type, {{2, 2, 0}, {1, 4, 0}}};
    expect_shape(input, migraphx::make_op("transpose", {{"permutation", {0, 1}}}), input);
    expect_shape(output, migraphx::make_op("transpose", {{"permutation", {1, 0}}}), input);
}
TEST_CASE(transpose_dyn_shape1)
{
    migraphx::shape input{migraphx::shape::float_type, {{1, 4, 0}, {4, 4, 0}, {4, 4, 0}}};
    migraphx::shape output{migraphx::shape::float_type, {{4, 4, 0}, {4, 4, 0}, {1, 4, 0}}};
    expect_shape(input, migraphx::make_op("transpose", {{"permutation", {0, 1, 2}}}), input);
    expect_shape(output, migraphx::make_op("transpose", {{"permutation", {2, 1, 0}}}), input);
}
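As in the static case, transpose only permutes the per-dimension metadata, so the {min, max, optimal} triples travel with their axes; no range arithmetic is involved.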
TEST_CASE(transpose_axes_error)
{
    migraphx::shape input{migraphx::shape::float_type, {2, 2}};
    throws_shape(migraphx::make_op("transpose", {{"permutation", {1}}}), input);
}
TEST_CASE(step_test)
{
    migraphx::shape s1{migraphx::shape::float_type, {1, 2, 4}};
This diff is collapsed.