Unverified commit 67f77ac1 authored by Charlie Lin, committed by GitHub

Dynamic ref convolution op (#1224)



* Dynamic shape handling in shape object

* rewrite empty lens multibroadcast test

* Shape class changes to handle dynamic
* Throw errors from more functions that don't make sense for a dynamic shape
* Print output changes
* Serialization changes

* Fixing serialization errors

* Remove const on dyn_dim copy getters

* Dynamic shape tests

* Fix serialize errors

* Add dyn_data struct to avoid ambiguous constructor

* Tidy fix: emplace_back() over for loop

* Tidy fix: use move

* Use std::initializer_list in constructor
Reverts the dyn_data struct change
Should get around the ambiguous braced initialization list error

* avoid typedef

* element_space, min_lens/max_lens/opt_lens changes

* formatting

* Comments fix

* dynamic bytes() test

* Serialize and reflect changes

* formatting

* Test the dynamic lens functions

* progress

* Formatting

* Dynamic conv draft progress

* Add operator<< tests for coverage

* Coverage update

* Add to conv dynamic batch test

* Dynamic image size test

* Dynamic weight handling

* Dyn image shape test change, fix dyn weight cond

* Comment update

* Dynamic weights shape test and fix

* Use ternary operator

* Tidy fixes

* Handle dynamic graph input shapes in ONNX parser

* Formatting

* Handle dynamic shape for convolution

* formatting

* cppcheck fixes

* Add onnx test files

* Fix typo

* Disable auto_pad for dynamic input shape

* check_shapes object checks for allowing dynamic shapes

* Fix any_of

* Change to maintain const objectness

* Formatting

* Check shapes allow dynamic

* Refactor compute_shape() call into op.compute()
Allows per-operator differences in handling dynamic shapes
Fix operation.hpp change to use the generator

* Comment fix

* Refactor normalize_attributes() calls to use max_lens()

* Comment addition

* Update other normalize_attributes() calls

* Change to using constructor and add tests

* Use const member function

* Add more dynamic shape support

* Add tests for error code coverage

* Fix opt shape bug and add shape tests

* capture all by ref

* Fix typo with img shape calculation

* Add more tests

* dynamic auto pad attempt
Linker error with pad_calc.cpp

* Fix parse dyn auto_pad
Dynamic auto padding is only needed when the image shape or kernel
shape is dynamic; for a dynamic batch size, the auto pad calculation is
the same as the static one (see the sketch after the commit log below).

* Fix linking error

* Fix auto_pad bug
Fixed input tensor with auto_pad setting on

* auto_pad onnx tests

* Fix auto_pad calculation, evaluate in ref_conv
add ref_ops tests

* Add shape tests, fix bugs

* Refactor first two output dynamic len calculation

* Conv MLIR test update

* i64 MLIR test fix

* Fix MLIR test typo
Co-authored-by: Chris Austen <causten@users.noreply.github.com>
parent 7dcae037
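
As context for the "Fix parse dyn auto_pad" note above, here is a minimal sketch of the ONNX SAME rule it refers to (the names are illustrative, not the actual pad_calc.cpp interface). The pad total depends only on the spatial extent, kernel, stride, and dilation, never on the batch, which is why a dynamic batch can reuse the static calculation.

#include <algorithm>
#include <cstdint>
#include <utility>

// Returns {pad_before, pad_after} for one spatial axis under the ONNX SAME rule.
inline std::pair<std::int64_t, std::int64_t> same_auto_pad(
    std::int64_t in, std::int64_t kernel, std::int64_t stride, std::int64_t dilation, bool same_upper)
{
    const std::int64_t effective_kernel = (kernel - 1) * dilation + 1;
    const std::int64_t out   = (in + stride - 1) / stride; // ceil(in / stride)
    const std::int64_t total = std::max<std::int64_t>((out - 1) * stride + effective_kernel - in, 0);
    // SAME_UPPER places the odd padding element at the end, SAME_LOWER at the front.
    const std::int64_t small = total / 2;
    return same_upper ? std::make_pair(small, total - small)
                      : std::make_pair(total - small, small);
}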
@@ -851,6 +851,96 @@ def conv_bn_relu_maxpool_test():
return ([node0, node1, node2, node3], [x, y, z, m, n, k, l], [out])
@onnx_test
def conv_dynamic_batch_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 3, 5, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
[None, 1, 3, 3])
node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])
return ([node], [x, y], [out])
@onnx_test
def conv_dynamic_img_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[1, 3, None, None])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
[1, 1, None, None])
node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])
return ([node], [x, y], [out])
@onnx_test
def conv_dynamic_weights_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT,
[1, 3, None, None])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
[1, 1, None, None])
node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])
return ([node], [x, y], [out])
@onnx_test
def conv_dynamic_img_and_weights_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[1, 3, None, None])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT,
[1, 3, None, None])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
[1, 1, None, None])
node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])
return ([node], [x, y], [out])
@onnx_test
def conv_dynamic_batch_same_upper_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 3, 5, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 5, 5])
node = onnx.helper.make_node('Conv',
inputs=['0', '1'],
outputs=['2'],
auto_pad='SAME_UPPER')
return ([node], [x, y], [out])
@onnx_test
def conv_dynamic_img_same_upper_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[1, 3, None, None])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
[1, 1, None, None])
node = onnx.helper.make_node('Conv',
inputs=['0', '1'],
outputs=['2'],
auto_pad='SAME_UPPER')
return ([node], [x, y], [out])
@onnx_test
def conv_dynamic_kernel_same_lower_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT,
[1, 3, None, None])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 5, 5])
node = onnx.helper.make_node('Conv',
inputs=['0', '1'],
outputs=['2'],
auto_pad='SAME_LOWER')
return ([node], [x, y], [out])
@onnx_test
def conv_relu_maxpool_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
......
@@ -796,6 +796,170 @@ TEST_CASE(conv_bn_relu_maxpool_test)
EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_batch_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 6, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
l0,
l1);
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 6, 0};
auto prog = migraphx::parse_onnx("conv_dynamic_batch_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_img_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 10, 0}, {5, 10, 0}}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
l0,
l1);
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {5, 10, 0};
auto prog = migraphx::parse_onnx("conv_dynamic_img_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_weights_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {1, 3, 5, 5}});
auto l1 = mm->add_parameter(
"1", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
l0,
l1);
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {2, 4, 0};
auto prog = migraphx::parse_onnx("conv_dynamic_weights_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_img_and_weights_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 10, 0}, {5, 10, 0}}});
auto l1 = mm->add_parameter(
"1", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
l0,
l1);
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {5, 10, 0};
options.map_dyn_input_dims["1"] = {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}};
auto prog = migraphx::parse_onnx("conv_dynamic_img_and_weights_test.onnx", options);
EXPECT(p == prog);
}
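// A note on the parser options used in these tests (read off the tests
// themselves, not separate documentation): every ONNX dimension given as None
// takes options.default_dyn_dim_value, while an entry in
// options.map_dyn_input_dims pins all dimensions of that named input and
// overrides the default, as with input "1" in the test above.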
TEST_CASE(conv_dynamic_batch_same_upper)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 10, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
auto c0 =
mm->add_instruction(migraphx::make_op("convolution",
{{"padding", {1, 1, 1, 1}},
{"stride", {1, 1}},
{"dilation", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::same},
{"use_dynamic_same_auto_pad", false}}),
l0,
l1);
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 10, 0};
auto prog = migraphx::parse_onnx("conv_dynamic_batch_same_upper_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_img_same_upper)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 10, 0}, {5, 10, 0}}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}},
{"stride", {1, 1}},
{"dilation", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::same_upper},
{"use_dynamic_same_auto_pad", true}}),
l0,
l1);
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {5, 10, 0};
auto prog = migraphx::parse_onnx("conv_dynamic_img_same_upper_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_kernel_same_lower)
{
std::cout << "here1\n";
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {1, 3, 5, 5}});
auto l1 = mm->add_parameter(
"1", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}});
std::cout << "here2\n";
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}},
{"stride", {1, 1}},
{"dilation", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::same_lower},
{"use_dynamic_same_auto_pad", true}}),
l0,
l1);
std::cout << "here3\n";
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {2, 4, 0};
std::cout << "here\n";
auto prog = migraphx::parse_onnx("conv_dynamic_kernel_same_lower_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_relu_maxpool_test)
{
migraphx::program p;
......
@@ -144,6 +144,7 @@ TEST_CASE(convolution_shape)
throws_shape(migraphx::make_op("convolution"), input2, weights2);
throws_shape(migraphx::make_op("convolution"), input2, weights);
// 1D convolution
migraphx::shape output_1d{migraphx::shape::float_type, {4, 4, 1}};
migraphx::shape input_1d{migraphx::shape::float_type, {4, 3, 3}};
migraphx::shape weights_1d{migraphx::shape::float_type, {4, 3, 3}};
@@ -153,6 +154,11 @@ TEST_CASE(convolution_shape)
input_1d,
weights_1d);
// channel numbers mismatch
weights_1d = {migraphx::shape::float_type, {4, 8, 3}};
throws_shape(migraphx::make_op("convolution"), input_1d, weights_1d);
// 3D convolution
migraphx::shape output_3d{migraphx::shape::float_type, {4, 4, 1, 1, 1}};
migraphx::shape input_3d{migraphx::shape::float_type, {4, 3, 3, 3, 3}};
migraphx::shape weights_3d{migraphx::shape::float_type, {4, 3, 3, 3, 3}};
@@ -164,6 +170,130 @@ TEST_CASE(convolution_shape)
weights_3d);
throws_shape(migraphx::make_op("convolution"), input_3d, weights_3d);
// dynamic batch
migraphx::shape input_dyn_shape{migraphx::shape::float_type,
{{1, 100, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}};
migraphx::shape weights_shape{migraphx::shape::float_type, {1, 3, 3, 3}};
migraphx::shape output_dyn_shape{migraphx::shape::float_type,
{{1, 100, 0}, {1, 1, 0}, {3, 3, 0}, {3, 3, 0}}};
expect_shape(output_dyn_shape,
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
input_dyn_shape,
weights_shape);
// dynamic image
input_dyn_shape = {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 20, 0}, {5, 20, 0}}};
weights_shape = {migraphx::shape::float_type, {1, 3, 3, 3}};
output_dyn_shape = {migraphx::shape::float_type,
{{1, 1, 0}, {1, 1, 0}, {3, 18, 0}, {3, 18, 0}}};
expect_shape(output_dyn_shape,
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
input_dyn_shape,
weights_shape);
// dynamic weights
input_dyn_shape = {migraphx::shape::float_type, {1, 3, 10, 10}};
weights_shape = {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}};
output_dyn_shape = {migraphx::shape::float_type,
{{1, 1, 0}, {1, 1, 0}, {7, 9, 0}, {7, 9, 0}}};
expect_shape(output_dyn_shape,
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
input_dyn_shape,
weights_shape);
// dynamic img and weights
input_dyn_shape = {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 20, 0}, {5, 20, 0}}};
weights_shape = {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}};
output_dyn_shape = {migraphx::shape::float_type,
{{1, 1, 0}, {1, 1, 0}, {2, 19, 0}, {2, 19, 0}}};
expect_shape(output_dyn_shape,
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
input_dyn_shape,
weights_shape);
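// The expected ranges above come from applying the fixed-shape formula
//   out = (in + 2 * pad - dilation * (kernel - 1) - 1) / stride + 1
// to the bounds independently: the smallest output pairs the minimum image
// with the maximum kernel, and the largest output pairs the maximum image
// with the minimum kernel. For example, the {5, 20, 0} image range with the
// {2, 4, 0} kernel range gives min = (5 - 4) + 1 = 2 and
// max = (20 - 2) + 1 = 19, hence the {2, 19, 0} output dyn_dims.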
// input attr shape mismatch
input_dyn_shape = {migraphx::shape::float_type,
{{1, 100, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}, {5, 5, 0}}};
weights_shape = {migraphx::shape::float_type, {1, 3, 3, 3, 3}};
throws_shape(migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
input_dyn_shape,
weights_shape);
// auto_pad dynamic batch
input_dyn_shape = {migraphx::shape::float_type, {{1, 10, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}};
weights_shape = {migraphx::shape::float_type, {1, 3, 3, 3}};
output_dyn_shape = {migraphx::shape::float_type, {{1, 10, 0}, {1, 1, 0}, {5, 5, 0}, {5, 5, 0}}};
expect_shape(output_dyn_shape,
migraphx::make_op("convolution",
{{"stride", {1, 1}},
{"dilation", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::same_upper},
{"use_dynamic_same_auto_pad", true}}),
input_dyn_shape,
weights_shape);
// auto_pad dynamic img
input_dyn_shape = {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 10, 0}, {5, 10, 0}}};
weights_shape = {migraphx::shape::float_type, {1, 3, 3, 3}};
output_dyn_shape = {migraphx::shape::float_type,
{{1, 1, 0}, {1, 1, 0}, {5, 10, 0}, {5, 10, 0}}};
expect_shape(output_dyn_shape,
migraphx::make_op("convolution",
{{"stride", {1, 1}},
{"dilation", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::same_upper},
{"use_dynamic_same_auto_pad", true}}),
input_dyn_shape,
weights_shape);
// auto_pad dynamic kernel
input_dyn_shape = {migraphx::shape::float_type,
{{1, 1, 0}, {3, 3, 0}, {10, 10, 0}, {10, 10, 0}}};
weights_shape = {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}};
output_dyn_shape = {migraphx::shape::float_type,
{{1, 1, 0}, {1, 1, 0}, {10, 10, 0}, {10, 10, 0}}};
expect_shape(output_dyn_shape,
migraphx::make_op("convolution",
{{"stride", {1, 1}},
{"dilation", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::same_lower},
{"use_dynamic_same_auto_pad", true}}),
input_dyn_shape,
weights_shape);
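// In the three auto_pad cases above, SAME padding with stride 1 preserves the
// spatial extent, so the output spatial dyn_dims simply mirror the input
// ones, whatever the batch, image, or kernel bounds are.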
}
TEST_CASE(contiguous_shape)
......
@@ -873,6 +873,436 @@ TEST_CASE(contiguous_test)
EXPECT(migraphx::verify_range(results_vector, data));
}
TEST_CASE(conv_dynamic_batch_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape input_dyn_shape{migraphx::shape::float_type,
{{1, 100, 0}, {3, 3, 0}, {4, 4, 0}, {4, 4, 0}}};
migraphx::shape weights_shape{migraphx::shape::float_type, {2, 3, 3, 3}};
auto input = mm->add_parameter("X", input_dyn_shape);
auto weights = mm->add_parameter("W", weights_shape);
mm->add_instruction(migraphx::make_op("convolution", {{"padding", {1, 1}}, {"stride", {2, 2}}}),
input,
weights);
p.compile(migraphx::ref::target{});
std::vector<float> a = {
2.71567607, -0.9960829, 0.91671127, 0.28140706, 0.63235772, 0.08077253, 0.80927712,
-0.59108931, -1.05421555, -2.76622486, -0.85044265, -0.52049929, 0.67726439, -0.65290606,
0.02345525, -0.33579525, 0.38901961, 1.05473483, -1.31188095, 1.8963089, -0.07265259,
0.947339, 0.41949373, -0.70814759, 0.25892952, 1.07311416, 1.2571274, -0.62318051,
-0.19951548, -0.94232577, -0.29393643, 0.42292568, -0.80230367, 1.40909171, 0.63617158,
0.13900366, 1.09253144, -0.15265895, 1.54781747, 0.72780299, 1.09189606, -0.38068101,
0.97057933, -0.58958799, 1.56188643, 0.21474874, 0.58725154, -1.27097559, -0.03024297,
1.09437096, -0.4897908, 0.34838957, -1.31042492, -1.69069934, 0.86956722, -0.40457946,
0.46691212, 1.29273605, 0.26464137, 0.22073045, -1.02178168, 0.22163901, -1.84387338,
0.75522131, -0.45775682, -0.42241111, -1.50944722, 1.07256448, -1.95876884, -0.28106022,
0.3341668, 2.13129425, -1.14728117, -1.06555498, -0.298444, -0.88322699, -0.65866792,
-2.06007552, 0.01374334, 0.45612028, 0.52715492, 1.01914406, -1.72659791, 0.80650896,
0.16860051, 2.24112225, -0.78620857, 0.36566174, -0.07020134, -0.47976932, -0.68230027,
-0.94711417, -0.54506505, 1.66504931, -0.71860826, 0.61132306};
std::vector<float> c = {
-0.14601797, -0.13000923, 0.06521662, 0.06178288, -0.11083675, 0.10154136, 0.09990512,
0.06030385, -0.11374587, -0.17523311, -0.14344215, 0.17802463, 0.06300922, -0.15325832,
0.07066704, 0.05166031, 0.00615084, -0.02606523, 0.08083995, -0.17913306, 0.0624622,
0.0735731, -0.04198661, -0.0164391, -0.06374192, 0.16569914, 0.10681538, 0.07370754,
0.02802075, 0.00282027, 0.15104802, -0.11084409, -0.00197773, 0.07924436, 0.03528272,
0.04765259, -0.15896152, 0.07917164, 0.12125669, -0.1154705, -0.11999125, 0.12749968,
-0.06269585, 0.18658121, -0.03944227, 0.0111798, -0.17731084, 0.11789055, -0.09982193,
0.08142821, 0.0729029, 0.11303909, 0.12735154, 0.03885292};
std::vector<float> sol = {-0.20817225,
0.87965256,
0.14958936,
-1.24887264,
-0.06540672,
0.20778663,
0.40456355,
-0.99900877,
0.4917807,
0.1994698,
0.64205718,
0.37798831,
-0.25315839,
0.44276932,
-0.16138598,
0.79344082};
migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {2, 3, 4, 4}};
migraphx::parameter_map params0;
params0["X"] = migraphx::argument(input_fixed_shape0, a.data());
params0["W"] = migraphx::argument(weights_shape, c.data());
auto result = p.eval(params0).back();
std::vector<float> results_vector(64);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, sol));
a = {2.71567607, -0.9960829, 0.91671127, 0.28140706, 0.63235772, 0.08077253, 0.80927712,
-0.59108931, -1.05421555, -2.76622486, -0.85044265, -0.52049929, 0.67726439, -0.65290606,
0.02345525, -0.33579525, 0.38901961, 1.05473483, -1.31188095, 1.8963089, -0.07265259,
0.947339, 0.41949373, -0.70814759, 0.25892952, 1.07311416, 1.2571274, -0.62318051,
-0.19951548, -0.94232577, -0.29393643, 0.42292568, -0.80230367, 1.40909171, 0.63617158,
0.13900366, 1.09253144, -0.15265895, 1.54781747, 0.72780299, 1.09189606, -0.38068101,
0.97057933, -0.58958799, 1.56188643, 0.21474874, 0.58725154, -1.27097559, -0.03024297,
1.09437096, -0.4897908, 0.34838957, -1.31042492, -1.69069934, 0.86956722, -0.40457946,
0.46691212, 1.29273605, 0.26464137, 0.22073045, -1.02178168, 0.22163901, -1.84387338,
0.75522131, -0.45775682, -0.42241111, -1.50944722, 1.07256448, -1.95876884, -0.28106022,
0.3341668, 2.13129425, -1.14728117, -1.06555498, -0.298444, -0.88322699, -0.65866792,
-2.06007552, 0.01374334, 0.45612028, 0.52715492, 1.01914406, -1.72659791, 0.80650896,
0.16860051, 2.24112225, -0.78620857, 0.36566174, -0.07020134, -0.47976932, -0.68230027,
-0.94711417, -0.54506505, 1.66504931, -0.71860826, 0.61132306};
c = {-0.14601797, -0.13000923, 0.06521662, 0.06178288, -0.11083675, 0.10154136, 0.09990512,
0.06030385, -0.11374587, -0.17523311, -0.14344215, 0.17802463, 0.06300922, -0.15325832,
0.07066704, 0.05166031, 0.00615084, -0.02606523, 0.08083995, -0.17913306, 0.0624622,
0.0735731, -0.04198661, -0.0164391, -0.06374192, 0.16569914, 0.10681538, 0.07370754,
0.02802075, 0.00282027, 0.15104802, -0.11084409, -0.00197773, 0.07924436, 0.03528272,
0.04765259, -0.15896152, 0.07917164, 0.12125669, -0.1154705, -0.11999125, 0.12749968,
-0.06269585, 0.18658121, -0.03944227, 0.0111798, -0.17731084, 0.11789055, -0.09982193,
0.08142821, 0.0729029, 0.11303909, 0.12735154, 0.03885292};
sol = {-0.20817225,
0.87965256,
0.14958936,
-1.24887264,
-0.06540672,
0.20778663,
0.40456355,
-0.99900877};
migraphx::shape input_fixed_shape1{migraphx::shape::float_type, {1, 3, 4, 4}};
migraphx::parameter_map params1;
params1["X"] = migraphx::argument(input_fixed_shape1, a.data());
params1["W"] = migraphx::argument(weights_shape, c.data());
result = p.eval(params1).back();
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, sol));
}
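// Note the pattern here: the program is compiled once against the dynamic
// input shape, then evaluated with two different fixed batch sizes (2, then
// 1). The dynamic image and weights tests below follow the same pattern.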
TEST_CASE(conv_dynamic_img_shape_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape input_dyn_shape{migraphx::shape::float_type,
{{1, 1, 0}, {3, 3, 0}, {4, 6, 0}, {4, 6, 0}}};
migraphx::shape weights_shape{migraphx::shape::float_type, {1, 3, 3, 3}};
auto input = mm->add_parameter("X", input_dyn_shape);
auto weights = mm->add_parameter("W", weights_shape);
mm->add_instruction(migraphx::make_op("convolution", {{"padding", {0, 0}}, {"stride", {1, 1}}}),
input,
weights);
p.compile(migraphx::ref::target{});
std::vector<float> a = {0.28007596, 0.46114671, 0.12171969, 0.52260835, 0.40916841, 0.07163955,
0.09896668, 0.98628836, 0.69406788, 0.44868846, 0.64017681, 0.27048886,
0.30187397, 0.07334207, 0.05258557, 0.80747513, 0.81330534, 0.00497161,
0.33005534, 0.08908686, 0.46794691, 0.61768946, 0.55104806, 0.13406187,
0.70244284, 0.61296941, 0.46742536, 0.29712714, 0.91839388, 0.0834397,
0.14476327, 0.37857075, 0.25922384, 0.61620963, 0.69455439, 0.70389431,
0.77388606, 0.1752363, 0.74631394, 0.24604889, 0.53600244, 0.22116457,
0.81217463, 0.10789447, 0.43083784, 0.63371852, 0.69742316, 0.09536905};
std::vector<float> c = {0.98411968, 0.2899219, 0.44638833, 0.30390816, 0.03989896, 0.2445332,
0.32700131, 0.57517075, 0.06956476, 0.93079306, 0.19882314, 0.52940601,
0.35624753, 0.35938406, 0.9111428, 0.88923574, 0.61040283, 0.2797513,
0.15479768, 0.46534674, 0.16970931, 0.49704618, 0.07062198, 0.01678321,
0.53150934, 0.39244495, 0.9963813};
std::vector<float> sol = {6.1329393, 4.3199925, 5.448438, 3.8497565};
migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {1, 3, 4, 4}};
migraphx::parameter_map params0;
params0["X"] = migraphx::argument(input_fixed_shape0, a.data());
params0["W"] = migraphx::argument(weights_shape, c.data());
auto result = p.eval(params0).back();
std::vector<float> results_vector(72);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, sol));
a = {0.95600171, 0.20768181, 0.82844489, 0.14928212, 0.51280462, 0.1359196, 0.68903648,
0.84174772, 0.425509, 0.956926, 0.82533291, 0.33821531, 0.57576055, 0.75330186,
0.82710394, 0.93343847, 0.14499469, 0.74558021, 0.13935139, 0.90652876, 0.22611443,
0.85323975, 0.30631787, 0.96983037, 0.51783421, 0.32247456, 0.28243352, 0.605865,
0.33376446, 0.67864877, 0.15442507, 0.24977552, 0.86989425, 0.60036782, 0.26198306,
0.1494149, 0.13678915, 0.24892094, 0.38282467, 0.64907906, 0.83756376, 0.77603195,
0.33951558, 0.14856874, 0.45701939, 0.43786436, 0.57421759, 0.37326922, 0.63382506,
0.11464436, 0.23309047, 0.76724102, 0.98712427, 0.80800108, 0.84296564, 0.79568268,
0.45684131, 0.73867068, 0.57845499, 0.45073557, 0.27102442, 0.86460315, 0.06865567,
0.81673446, 0.881835, 0.42351639, 0.83322931, 0.34101671, 0.51979151, 0.54920645,
0.19287718, 0.33321689, 0.27752456, 0.45755893, 0.67484562, 0.68383122, 0.52361312,
0.46437257, 0.50862936, 0.32460429, 0.1726007, 0.29933345, 0.64856728, 0.06471591,
0.63370843, 0.27900152, 0.18595992, 0.48904812, 0.35368508, 0.09620202};
c = {0.709561, 0.7916206, 0.0443115, 0.62592275, 0.2498623, 0.42725624, 0.7905135,
0.53160169, 0.01303743, 0.01987505, 0.39041803, 0.89530203, 0.23155373, 0.44435213,
0.14407301, 0.80968594, 0.38216188, 0.35692557, 0.2568538, 0.83587388, 0.43654904,
0.04974508, 0.80375029, 0.25350374, 0.1820275, 0.23369029, 0.54358755};
sol = {6.305986,
5.564665,
6.122996,
5.7262855,
5.5546584,
5.779489,
5.798161,
5.160476,
6.702436,
5.4851074,
6.227567,
5.2016754};
migraphx::shape input_fixed_shape1{migraphx::shape::float_type, {1, 3, 6, 5}};
migraphx::parameter_map params1;
params1["X"] = migraphx::argument(input_fixed_shape1, a.data());
params1["W"] = migraphx::argument(weights_shape, c.data());
result = p.eval(params1).back();
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, sol));
}
TEST_CASE(conv_dynamic_weights_shape_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape input_shape{migraphx::shape::float_type, {1, 3, 4, 4}};
migraphx::shape weights_shape{migraphx::shape::float_type,
{{1, 1, 0}, {3, 3, 0}, {2, 3, 0}, {2, 3, 0}}};
auto input = mm->add_parameter("X", input_shape);
auto weights = mm->add_parameter("W", weights_shape);
mm->add_instruction(migraphx::make_op("convolution", {{"padding", {0, 0}}, {"stride", {1, 1}}}),
input,
weights);
p.compile(migraphx::ref::target{});
std::vector<float> a = {0.28007596, 0.46114671, 0.12171969, 0.52260835, 0.40916841, 0.07163955,
0.09896668, 0.98628836, 0.69406788, 0.44868846, 0.64017681, 0.27048886,
0.30187397, 0.07334207, 0.05258557, 0.80747513, 0.81330534, 0.00497161,
0.33005534, 0.08908686, 0.46794691, 0.61768946, 0.55104806, 0.13406187,
0.70244284, 0.61296941, 0.46742536, 0.29712714, 0.91839388, 0.0834397,
0.14476327, 0.37857075, 0.25922384, 0.61620963, 0.69455439, 0.70389431,
0.77388606, 0.1752363, 0.74631394, 0.24604889, 0.53600244, 0.22116457,
0.81217463, 0.10789447, 0.43083784, 0.63371852, 0.69742316, 0.09536905};
std::vector<float> c = {0.98411968,
0.2899219,
0.44638833,
0.30390816,
0.03989896,
0.2445332,
0.32700131,
0.57517075,
0.06956476,
0.93079306,
0.19882314,
0.52940601};
std::vector<float> sol = {1.9939406,
2.2703054,
1.8896171,
2.062202,
2.3035214,
1.629366,
2.1606991,
2.1917608,
1.6797699};
migraphx::shape weight_fixed_shape0{migraphx::shape::float_type, {1, 3, 2, 2}};
migraphx::parameter_map params0;
params0["X"] = migraphx::argument(input_shape, a.data());
params0["W"] = migraphx::argument(weight_fixed_shape0, c.data());
auto result = p.eval(params0).back();
std::vector<float> results_vector(72);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, sol));
c = {0.98411968, 0.2899219, 0.44638833, 0.30390816, 0.03989896, 0.2445332, 0.32700131,
0.57517075, 0.06956476, 0.93079306, 0.19882314, 0.52940601, 0.35624753, 0.35938406,
0.9111428, 0.88923574, 0.61040283, 0.2797513, 0.15479768, 0.46534674, 0.16970931,
0.49704618, 0.07062198, 0.01678321, 0.53150934, 0.39244495, 0.9963813};
sol = {6.1329393, 4.3199925, 5.448438, 3.8497565};
migraphx::shape weights_fixed_shape1{migraphx::shape::float_type, {1, 3, 3, 3}};
migraphx::parameter_map params1;
params1["X"] = migraphx::argument(input_shape, a.data());
params1["W"] = migraphx::argument(weights_fixed_shape1, c.data());
result = p.eval(params1).back();
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, sol));
}
TEST_CASE(conv_dynamic_img_same_upper_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape input_dyn_shape{migraphx::shape::float_type,
{{1, 1, 0}, {3, 3, 0}, {4, 6, 0}, {4, 6, 0}}};
migraphx::shape weights_shape{migraphx::shape::float_type, {1, 3, 3, 3}};
auto input = mm->add_parameter("X", input_dyn_shape);
auto weights = mm->add_parameter("W", weights_shape);
mm->add_instruction(
migraphx::make_op("convolution",
{{"stride", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::same_upper},
{"use_dynamic_same_auto_pad", true}}),
input,
weights);
p.compile(migraphx::ref::target{});
std::vector<float> a = {0.63321185, 0.6466339, 0.8515352, 0.44240063, 0.5018913, 0.5068494,
0.75330657, 0.7383877, 0.15870683, 0.8171611, 0.56118083, 0.87004256,
0.24401724, 0.8815178, 0.4222333, 0.27191755,
0.41633207, 0.2460619, 0.32004243, 0.6962248, 0.12284133, 0.2620491,
0.96931046, 0.6030955, 0.7623861, 0.2395751, 0.61440414, 0.577285,
0.80087787, 0.12776066, 0.26566318, 0.46569306,
0.96701574, 0.3850145, 0.14165345, 0.5887347, 0.7152134, 0.5295342,
0.6303507, 0.4037548, 0.18556239, 0.79416305, 0.29107493, 0.18770285,
0.6870904, 0.30701008, 0.314684, 0.91075855};
std::vector<float> c = {
2.8150102e-01, 3.3198616e-01, 9.5149356e-01, 7.4039467e-02, 9.6555042e-01,
2.8815505e-01, 2.5100240e-01, 5.2186239e-01, 2.3850012e-01,
8.2963020e-01, 3.0763101e-04, 6.7026985e-01, 1.4260857e-01, 9.7517288e-01,
3.6847427e-02, 8.5804445e-01, 7.3440993e-01, 6.7948365e-01,
7.9253986e-02, 7.3943835e-01, 1.7813577e-01, 1.0780835e-01, 4.2304707e-01,
4.0084350e-01, 1.1114500e-01, 4.4846520e-01, 5.0109702e-01};
std::vector<float> sol = {3.013387,
3.7111127,
4.2946506,
3.579301,
4.5306826,
6.1262493,
6.332169,
4.495293,
4.46013,
6.0938954,
5.848162,
4.514299,
2.9587686,
4.117671,
3.5187216,
2.3236327};
migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {1, 3, 4, 4}};
migraphx::parameter_map params0;
params0["X"] = migraphx::argument(input_fixed_shape0, a.data());
params0["W"] = migraphx::argument(weights_shape, c.data());
auto result = p.eval(params0).back();
std::vector<float> results_vector(16);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, sol));
}
TEST_CASE(conv_dynamic_kernel_same_lower_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape input_shape{migraphx::shape::float_type, {1, 3, 4, 4}};
migraphx::shape weights_shape{migraphx::shape::float_type,
{{1, 1, 0}, {3, 3, 0}, {2, 3, 0}, {2, 3, 0}}};
auto input = mm->add_parameter("X", input_shape);
auto weights = mm->add_parameter("W", weights_shape);
mm->add_instruction(
migraphx::make_op("convolution",
{{"stride", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::same_lower},
{"use_dynamic_same_auto_pad", true}}),
input,
weights);
p.compile(migraphx::ref::target{});
std::vector<float> a = {0.63321185, 0.6466339, 0.8515352, 0.44240063, 0.5018913, 0.5068494,
0.75330657, 0.7383877, 0.15870683, 0.8171611, 0.56118083, 0.87004256,
0.24401724, 0.8815178, 0.4222333, 0.27191755,
0.41633207, 0.2460619, 0.32004243, 0.6962248, 0.12284133, 0.2620491,
0.96931046, 0.6030955, 0.7623861, 0.2395751, 0.61440414, 0.577285,
0.80087787, 0.12776066, 0.26566318, 0.46569306,
0.96701574, 0.3850145, 0.14165345, 0.5887347, 0.7152134, 0.5295342,
0.6303507, 0.4037548, 0.18556239, 0.79416305, 0.29107493, 0.18770285,
0.6870904, 0.30701008, 0.314684, 0.91075855};
std::vector<float> c = {2.8150102e-01,
3.3198616e-01,
9.5149356e-01,
7.4039467e-02,
9.6555042e-01,
2.8815505e-01,
2.5100240e-01,
5.2186239e-01,
2.3850012e-01,
8.2963020e-01,
3.0763101e-04,
6.7026985e-01};
std::vector<float> sol = {2.453681,
2.536207,
3.0187201,
1.7912633,
2.1738236,
2.9695358,
3.2319589,
1.859269,
2.5953722,
2.50734,
2.7736917,
1.2229807,
1.5900216,
0.9225286,
1.43048,
0.74341124};
migraphx::shape weight_fixed_shape0{migraphx::shape::float_type, {1, 3, 2, 2}};
migraphx::parameter_map params0;
params0["X"] = migraphx::argument(input_shape, a.data());
params0["W"] = migraphx::argument(weight_fixed_shape0, c.data());
auto result = p.eval(params0).back();
std::vector<float> results_vector(16);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, sol));
}
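// Unlike the fixed-shape case, the SAME padding in the two tests above cannot
// be folded in at parse time; with use_dynamic_same_auto_pad set, it is
// computed during evaluation from the actual image and kernel shapes (the
// "evaluate in ref_conv" change in the commit log).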
TEST_CASE(conv2d_padding_stride_test)
{
migraphx::program p;
......
@@ -68,8 +68,10 @@ struct operation
*
* @param ctx This is the context created by the `target` during compilation. Implementations
* can use the target's `context` class rather than the `context` interface class.
* @param output Equivalent to running `compute_shape` with each `shape` of the `argument`.
* For a fixed shape, the returned argument will have the same shape as `output`.
* For a dynamic shape, the returned `argument` will be a fixed shape within the bounds
* set in the dynamic shape `output`.
* @param input This is the `argument` result from the previous instruction's computation.
* @return Return an `argument` of the result computation. The `shape` of the `argument`
* should be the same as the `output` shape.
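
To illustrate the contract described above, a rough sketch of a compute() for a hypothetical elementwise operator (not MIGraphX source; the real ref convolution is more involved): the compile-time output shape may be dynamic, but the returned argument always carries a fixed shape taken from the actual inputs.

argument compute(context&, const shape& output, const std::vector<argument>& args) const
{
    // At evaluation time args[0] always has a fixed shape; it must lie within
    // the bounds declared by `output` when `output` is dynamic.
    shape fixed_out{output.type(), args[0].get_shape().lens()};
    argument result{fixed_out};
    // ... fill `result` elementwise from args[0] ...
    return result;
}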
@@ -137,7 +139,7 @@ auto compute_shape_op(rank<2>, const T& x, const std::vector<shape>& inputs)
-> decltype(x.normalize_compute_shape(inputs))
{
dependent_type<operation, T> y = x;
normalize_attributes(y, inputs[0].max_lens());
return any_cast<T>(y).normalize_compute_shape(inputs);
}
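
The switch from lens() to max_lens() here matters because a dynamic shape has no single set of lengths; normalizing attributes against the upper bound of each dimension remains valid for every concrete shape within the bounds. A rough picture of the accessor (assumed member layout, not the actual shape implementation):

// Sketch: take the max of each dynamic dimension; for a fixed shape this
// would simply be lens().
std::vector<std::size_t> max_lens() const
{
    std::vector<std::size_t> ret;
    ret.reserve(dyn_dims.size());
    for(const auto& dd : dyn_dims)
        ret.push_back(dd.max);
    return ret;
}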
......