"src/include/vscode:/vscode.git/clone" did not exist on "f33c7a48e138cb8e596ee6bc8cf23916bf486e8e"
Unverified Commit 67f77ac1 authored by Charlie Lin, committed by GitHub

Dynamic ref convolution op (#1224)



* Dynamic shape handling in shape object

* rewrite empty lens multibroadcast test

* Shape class changes to handle dynamic
* Throw more errors for functions that don't make sense for dynamic shapes
* Print output changes
* Serialization changes

* Fixing serialization errors

* Remove const on dyn_dim copy getters

* Dynamic shape tests

* Fix serialize errors

* Add dyn_data struct to avoid ambiguous constructor

* Tidy fix: emplace_back() over for loop

* Tidy fix: use move

* Use std::initializer_list in constructor
Reverts the dyn_data struct change
Should get around the ambiguous braced initialization list error

* avoid typedef

* element_space and min/max/opt _lens changes

* formatting

* Comments fix

* dynamic bytes() test

* Serialize and reflect changes

* formatting

* Test the dynamic lens functions

* progress

* Formatting

* Dynamic conv draft progress

* Add operator<< tests for coverage

* Coverage update

* Add to conv dynamic batch test

* Dynamic image size test

* Dynamic weight handling

* Dyn image shape test change, fix dyn weight cond

* Comment update

* Dynamic weights shape test and fix

* Use ternary operator

* Tidy fixes

* Handle dynamic graph input shapes in ONNX parser

* Formatting

* Handle dynamic shape for convolution

* formatting

* cppcheck fixes

* Add onnx test files

* Fix typo

* Disable auto_pad for dynamic input shape

* check_shapes object checks for allowing dynamic shapes

* Fix any_of

* Change to maintain const objectness

* Formatting

* Check shapes allow dynamic

* Refactor compute_shape() call into op.compute()
Allows for per operator differences with handling dynamic shape
Fix operation.hpp change to use the generator

* Comment fix

* Refactor normalize_attributes() calls to use max_lens()

* Comment addition

* Update other normalize_attributes() calls

* Change to using constructor and add tests

* Use const member function

* Add more dynamic shape support

* Add tests for error code coverage

* Fix opt shape bug and add shape tests

* capture all by ref

* Fix typo with img shape calculation

* Add more tests

* dynamic auto pad attempt
Linker error with pad_calc.cpp

* Fix parse dyn auto_pad
Dynamic auto pad is only needed when the image shape or kernel
shape is dynamic. For a dynamic batch size, the auto pad calculation is
the same (see the padding sketch after the commit metadata below).

* Fix linking error

* Fix auto_pad bug
Fixed input tensor with auto_pad setting on

* auto_pad onnx tests

* Fix auto_pad calculation, evaluate in ref_conv
add ref_ops tests

* Add shape tests, fix bugs

* Refactor first two output dynamic len calculation

* Conv MLIR test update

* i64 MLIR test fix

* Fix MLIR test typo
Co-authored-by: Chris Austen <causten@users.noreply.github.com>
parent 7dcae037
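The parse dyn auto_pad fix above rests on the observation that SAME padding never involves the batch dimension. A minimal Python sketch of the standard ONNX SAME_UPPER arithmetic (the helper name is ours, for illustration only; this is not code from this patch) makes that concrete:

import math

def same_upper_pads(in_len, kernel, stride=1, dilation=1):
    # ONNX SAME_*: the output length is ceil(in_len / stride); derive the
    # total padding needed to achieve it. SAME_UPPER puts the odd pixel at
    # the end; SAME_LOWER would put it at the beginning.
    eff_kernel = dilation * (kernel - 1) + 1
    out_len = math.ceil(in_len / stride)
    total = max((out_len - 1) * stride + eff_kernel - in_len, 0)
    return total // 2, total - total // 2  # (pad_before, pad_after)

# Only spatial extents and kernel size enter the formula, so a dynamic
# batch can keep the padding computed at parse time, while a dynamic image
# or kernel defers the calculation to evaluation time:
print(same_upper_pads(5, 3, stride=2))  # (1, 1)
print(same_upper_pads(6, 3, stride=2))  # (0, 1) -- varies with image size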
@@ -851,6 +851,96 @@ def conv_bn_relu_maxpool_test():
    return ([node0, node1, node2, node3], [x, y, z, m, n, k, l], [out])
@onnx_test
def conv_dynamic_batch_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 3, 5, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
                                        [None, 1, 3, 3])
    node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])
    return ([node], [x, y], [out])

@onnx_test
def conv_dynamic_img_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
                                      [1, 3, None, None])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
                                        [1, 1, None, None])
    node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])
    return ([node], [x, y], [out])

@onnx_test
def conv_dynamic_weights_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT,
                                      [1, 3, None, None])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
                                        [1, 1, None, None])
    node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])
    return ([node], [x, y], [out])

@onnx_test
def conv_dynamic_img_and_weights_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
                                      [1, 3, None, None])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT,
                                      [1, 3, None, None])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
                                        [1, 1, None, None])
    node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])
    return ([node], [x, y], [out])

@onnx_test
def conv_dynamic_batch_same_upper_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 3, 5, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 5, 5])
    node = onnx.helper.make_node('Conv',
                                 inputs=['0', '1'],
                                 outputs=['2'],
                                 auto_pad='SAME_UPPER')
    return ([node], [x, y], [out])

@onnx_test
def conv_dynamic_img_same_upper_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
                                      [1, 3, None, None])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
                                        [1, 1, None, None])
    node = onnx.helper.make_node('Conv',
                                 inputs=['0', '1'],
                                 outputs=['2'],
                                 auto_pad='SAME_UPPER')
    return ([node], [x, y], [out])

@onnx_test
def conv_dynamic_kernel_same_lower_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT,
                                      [1, 3, None, None])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 5, 5])
    node = onnx.helper.make_node('Conv',
                                 inputs=['0', '1'],
                                 outputs=['2'],
                                 auto_pad='SAME_LOWER')
    return ([node], [x, y], [out])
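A None entry in make_tensor_value_info leaves that dimension unspecified in the ONNX graph input. Conceptually, the parser options exercised by the C++ tests below replace every unspecified dimension with a {min, max, opt} dynamic dimension; a hedged Python sketch of that substitution (our own illustration, not the MIGraphX implementation):

def resolve_dyn_dims(onnx_dims, default_dyn_dim, override=None):
    # onnx_dims: ints for fixed dims, None for unspecified dims.
    # Fixed dims become degenerate {d, d, 0} ranges; None dims take the
    # default {min, max, opt}, unless the whole input is overridden
    # (the role map_dyn_input_dims plays in the tests below).
    if override is not None:
        return override
    return [(d, d, 0) if d is not None else default_dyn_dim for d in onnx_dims]

# conv_dynamic_batch_test input '0' with default_dyn_dim_value {1, 6, 0}:
print(resolve_dyn_dims([None, 3, 5, 5], (1, 6, 0)))
# [(1, 6, 0), (3, 3, 0), (5, 5, 0), (5, 5, 0)] -- the parameter shape below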
@onnx_test
def conv_relu_maxpool_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
...
@@ -796,6 +796,170 @@ TEST_CASE(conv_bn_relu_maxpool_test)
    EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_batch_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0 = mm->add_parameter(
        "0", {migraphx::shape::float_type, {{1, 6, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}});
    auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
    auto c0 = mm->add_instruction(
        migraphx::make_op("convolution",
                          {{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
        l0,
        l1);
    mm->add_return({c0});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 6, 0};
    auto prog = migraphx::parse_onnx("conv_dynamic_batch_test.onnx", options);
    EXPECT(p == prog);
}

TEST_CASE(conv_dynamic_img_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0 = mm->add_parameter(
        "0", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 10, 0}, {5, 10, 0}}});
    auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
    auto c0 = mm->add_instruction(
        migraphx::make_op("convolution",
                          {{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
        l0,
        l1);
    mm->add_return({c0});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {5, 10, 0};
    auto prog = migraphx::parse_onnx("conv_dynamic_img_test.onnx", options);
    EXPECT(p == prog);
}

TEST_CASE(conv_dynamic_weights_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {1, 3, 5, 5}});
    auto l1 = mm->add_parameter(
        "1", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}});
    auto c0 = mm->add_instruction(
        migraphx::make_op("convolution",
                          {{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
        l0,
        l1);
    mm->add_return({c0});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {2, 4, 0};
    auto prog = migraphx::parse_onnx("conv_dynamic_weights_test.onnx", options);
    EXPECT(p == prog);
}

TEST_CASE(conv_dynamic_img_and_weights_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0 = mm->add_parameter(
        "0", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 10, 0}, {5, 10, 0}}});
    auto l1 = mm->add_parameter(
        "1", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}});
    auto c0 = mm->add_instruction(
        migraphx::make_op("convolution",
                          {{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
        l0,
        l1);
    mm->add_return({c0});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {5, 10, 0};
    options.map_dyn_input_dims["1"] = {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}};
    auto prog = migraphx::parse_onnx("conv_dynamic_img_and_weights_test.onnx", options);
    EXPECT(p == prog);
}

TEST_CASE(conv_dynamic_batch_same_upper)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0 = mm->add_parameter(
        "0", {migraphx::shape::float_type, {{1, 10, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}});
    auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
    auto c0 =
        mm->add_instruction(migraphx::make_op("convolution",
                                              {{"padding", {1, 1, 1, 1}},
                                               {"stride", {1, 1}},
                                               {"dilation", {1, 1}},
                                               {"padding_mode", migraphx::op::padding_mode_t::same},
                                               {"use_dynamic_same_auto_pad", false}}),
                            l0,
                            l1);
    mm->add_return({c0});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 10, 0};
    auto prog = migraphx::parse_onnx("conv_dynamic_batch_same_upper_test.onnx", options);
    EXPECT(p == prog);
}

TEST_CASE(conv_dynamic_img_same_upper)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0 = mm->add_parameter(
        "0", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 10, 0}, {5, 10, 0}}});
    auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
    auto c0 = mm->add_instruction(
        migraphx::make_op("convolution",
                          {{"padding", {0, 0}},
                           {"stride", {1, 1}},
                           {"dilation", {1, 1}},
                           {"padding_mode", migraphx::op::padding_mode_t::same_upper},
                           {"use_dynamic_same_auto_pad", true}}),
        l0,
        l1);
    mm->add_return({c0});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {5, 10, 0};
    auto prog = migraphx::parse_onnx("conv_dynamic_img_same_upper_test.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_kernel_same_lower)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {1, 3, 5, 5}});
    auto l1 = mm->add_parameter(
        "1", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}});
    auto c0 = mm->add_instruction(
        migraphx::make_op("convolution",
                          {{"padding", {0, 0}},
                           {"stride", {1, 1}},
                           {"dilation", {1, 1}},
                           {"padding_mode", migraphx::op::padding_mode_t::same_lower},
                           {"use_dynamic_same_auto_pad", true}}),
        l0,
        l1);
    mm->add_return({c0});

    migraphx::onnx_options options;
    options.default_dyn_dim_value = {2, 4, 0};
    auto prog = migraphx::parse_onnx("conv_dynamic_kernel_same_lower_test.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE(conv_relu_maxpool_test)
{
    migraphx::program p;
...
@@ -144,6 +144,7 @@ TEST_CASE(convolution_shape)
    throws_shape(migraphx::make_op("convolution"), input2, weights2);
    throws_shape(migraphx::make_op("convolution"), input2, weights);

    // 1D convolution
    migraphx::shape output_1d{migraphx::shape::float_type, {4, 4, 1}};
    migraphx::shape input_1d{migraphx::shape::float_type, {4, 3, 3}};
    migraphx::shape weights_1d{migraphx::shape::float_type, {4, 3, 3}};
@@ -153,6 +154,11 @@
                 input_1d,
                 weights_1d);

    // channel numbers mismatch
    weights_1d = {migraphx::shape::float_type, {4, 8, 3}};
    throws_shape(migraphx::make_op("convolution"), input_1d, weights_1d);

    // 3D convolution
    migraphx::shape output_3d{migraphx::shape::float_type, {4, 4, 1, 1, 1}};
    migraphx::shape input_3d{migraphx::shape::float_type, {4, 3, 3, 3, 3}};
    migraphx::shape weights_3d{migraphx::shape::float_type, {4, 3, 3, 3, 3}};
@@ -164,6 +170,130 @@
                 weights_3d);
    throws_shape(migraphx::make_op("convolution"), input_3d, weights_3d);
    // dynamic batch
    migraphx::shape input_dyn_shape{migraphx::shape::float_type,
                                    {{1, 100, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}};
    migraphx::shape weights_shape{migraphx::shape::float_type, {1, 3, 3, 3}};
    migraphx::shape output_dyn_shape{migraphx::shape::float_type,
                                     {{1, 100, 0}, {1, 1, 0}, {3, 3, 0}, {3, 3, 0}}};
    expect_shape(output_dyn_shape,
                 migraphx::make_op("convolution",
                                   {{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
                 input_dyn_shape,
                 weights_shape);

    // dynamic image
    input_dyn_shape = {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 20, 0}, {5, 20, 0}}};
    weights_shape = {migraphx::shape::float_type, {1, 3, 3, 3}};
    output_dyn_shape = {migraphx::shape::float_type,
                        {{1, 1, 0}, {1, 1, 0}, {3, 18, 0}, {3, 18, 0}}};
    expect_shape(output_dyn_shape,
                 migraphx::make_op("convolution",
                                   {{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
                 input_dyn_shape,
                 weights_shape);

    // dynamic weights
    input_dyn_shape = {migraphx::shape::float_type, {1, 3, 10, 10}};
    weights_shape = {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}};
    output_dyn_shape = {migraphx::shape::float_type,
                        {{1, 1, 0}, {1, 1, 0}, {7, 9, 0}, {7, 9, 0}}};
    expect_shape(output_dyn_shape,
                 migraphx::make_op("convolution",
                                   {{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
                 input_dyn_shape,
                 weights_shape);

    // dynamic img and weights
    input_dyn_shape = {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 20, 0}, {5, 20, 0}}};
    weights_shape = {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}};
    output_dyn_shape = {migraphx::shape::float_type,
                        {{1, 1, 0}, {1, 1, 0}, {2, 19, 0}, {2, 19, 0}}};
    expect_shape(output_dyn_shape,
                 migraphx::make_op("convolution",
                                   {{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
                 input_dyn_shape,
                 weights_shape);
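The expected ranges above follow from the usual convolution length formula evaluated at the interval endpoints: the smallest output pairs the smallest image with the largest kernel, and the largest output pairs the largest image with the smallest kernel. A quick arithmetic check (illustrative Python, not library code):

def conv_out_len(in_len, kernel, pad=0, stride=1, dilation=1):
    # Standard convolution output length for one spatial dimension.
    return (in_len + 2 * pad - dilation * (kernel - 1) - 1) // stride + 1

print(conv_out_len(5, 3), conv_out_len(20, 3))   # 3 18  (dynamic image {5, 20})
print(conv_out_len(10, 4), conv_out_len(10, 2))  # 7 9   (dynamic kernel {2, 4})
print(conv_out_len(5, 4), conv_out_len(20, 2))   # 2 19  (both dynamic)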
    // input attr shape mismatch
    input_dyn_shape = {migraphx::shape::float_type,
                       {{1, 100, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}, {5, 5, 0}}};
    weights_shape = {migraphx::shape::float_type, {1, 3, 3, 3, 3}};
    throws_shape(migraphx::make_op("convolution",
                                   {{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
                 input_dyn_shape,
                 weights_shape);

    // auto_pad dynamic batch
    input_dyn_shape = {migraphx::shape::float_type, {{1, 10, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}};
    weights_shape = {migraphx::shape::float_type, {1, 3, 3, 3}};
    output_dyn_shape = {migraphx::shape::float_type, {{1, 10, 0}, {1, 1, 0}, {5, 5, 0}, {5, 5, 0}}};
    expect_shape(output_dyn_shape,
                 migraphx::make_op("convolution",
                                   {{"stride", {1, 1}},
                                    {"dilation", {1, 1}},
                                    {"padding_mode", migraphx::op::padding_mode_t::same_upper},
                                    {"use_dynamic_same_auto_pad", true}}),
                 input_dyn_shape,
                 weights_shape);

    // auto_pad dynamic img
    input_dyn_shape = {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 10, 0}, {5, 10, 0}}};
    weights_shape = {migraphx::shape::float_type, {1, 3, 3, 3}};
    output_dyn_shape = {migraphx::shape::float_type,
                        {{1, 1, 0}, {1, 1, 0}, {5, 10, 0}, {5, 10, 0}}};
    expect_shape(output_dyn_shape,
                 migraphx::make_op("convolution",
                                   {{"stride", {1, 1}},
                                    {"dilation", {1, 1}},
                                    {"padding_mode", migraphx::op::padding_mode_t::same_upper},
                                    {"use_dynamic_same_auto_pad", true}}),
                 input_dyn_shape,
                 weights_shape);

    // auto_pad dynamic kernel
    input_dyn_shape = {migraphx::shape::float_type,
                       {{1, 1, 0}, {3, 3, 0}, {10, 10, 0}, {10, 10, 0}}};
    weights_shape = {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}};
    output_dyn_shape = {migraphx::shape::float_type,
                        {{1, 1, 0}, {1, 1, 0}, {10, 10, 0}, {10, 10, 0}}};
    expect_shape(output_dyn_shape,
                 migraphx::make_op("convolution",
                                   {{"stride", {1, 1}},
                                    {"dilation", {1, 1}},
                                    {"padding_mode", migraphx::op::padding_mode_t::same_lower},
                                    {"use_dynamic_same_auto_pad", true}}),
                 input_dyn_shape,
                 weights_shape);
}

TEST_CASE(contiguous_shape)
...
...
@@ -68,8 +68,10 @@ struct operation
 *
 * @param ctx This is the context created by the `target` during compilation. Implementations
 * can use the target's `context` class rather than the `context` interface class.
 * @param output Equivalent to running `compute_shape` with each `shape` of the `argument`.
 * For a fixed shape, the returned `argument` will have the same shape as `output`.
 * For a dynamic shape, the returned `argument` will be a fixed shape within the bounds
 * set in the dynamic shape `output`.
 * @param input This is the `argument` result from the previous instruction's computation.
 * @return Return an `argument` of the result computation. The `shape` of the `argument` should be
 * the same as the `output` shape.
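The revised contract is: with a fixed output shape the computed argument must match it exactly, while a dynamic output shape only bounds the fixed shape the computation actually returns. A small checker sketches the idea (illustrative Python, not the C++ interface):

def output_shape_ok(output_dims, result_lens):
    # output_dims: ints for fixed dims, (min, max, opt) tuples for dynamic
    # dims. The computed argument always has concrete lens; a dynamic dim
    # only requires the corresponding len to fall within [min, max].
    for dim, n in zip(output_dims, result_lens):
        if isinstance(dim, tuple):
            lo, hi, _opt = dim
            if not lo <= n <= hi:
                return False
        elif dim != n:
            return False
    return True

print(output_shape_ok([(1, 6, 0), 1, 3, 3], [4, 1, 3, 3]))  # True: batch 4 within [1, 6]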
@@ -137,7 +139,7 @@ auto compute_shape_op(rank<2>, const T& x, const std::vector<shape>& inputs)
    -> decltype(x.normalize_compute_shape(inputs))
{
    dependent_type<operation, T> y = x;
    normalize_attributes(y, inputs[0].max_lens());
    return any_cast<T>(y).normalize_compute_shape(inputs);
}
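Normalizing against inputs[0].max_lens() instead of inputs[0].lens() gives attribute normalization concrete integers even when the first input is dynamic; for a fixed shape the two are identical. Conceptually (a sketch of the idea, not the shape class itself):

def max_lens(dims):
    # Fixed dims (ints) are their own maximum; dynamic (min, max, opt)
    # dims contribute their upper bound.
    return [d[1] if isinstance(d, tuple) else d for d in dims]

print(max_lens([(1, 6, 0), 3, (5, 10, 0), (5, 10, 0)]))  # [6, 3, 10, 10]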
...