gaoqiong / MIGraphX · Commits

Commit 2dff4dd2, authored Jun 22, 2022 by charlie

Handle dynamic shape for convolution

parent 4b1b8032
Showing 3 changed files with 65 additions and 3 deletions:

    src/onnx/parse_convolution.cpp    +4   -3
    test/onnx/gen_onnx.py             +21  -0
    test/onnx/onnx_test.cpp           +40  -0
src/onnx/parse_convolution.cpp

@@ -28,11 +28,12 @@ struct parse_convolution : op_parser<parse_convolution>
         auto values  = op.to_value();
         auto l0      = args[0];
         auto weights = args[1];
-        auto in_lens = l0->get_shape().lens();
+        auto l0_shape = l0->get_shape();
+        auto in_lens  = l0_shape.max_lens();
         assert(in_lens.size() > 2);
         auto kdims = in_lens.size() - 2;

-        // ensure pads availabe only when auto_pad is "NOT_SET"
+        // ensure pads available only when auto_pad is "NOT_SET"
         check_padding_mode(info, "CONV");

         if(contains(info.attributes, "strides"))
@@ -59,7 +60,7 @@ struct parse_convolution : op_parser<parse_convolution>
         if(contains(info.attributes, "auto_pad"))
         {
-            auto weight_lens = weights->get_shape().lens();
+            auto weight_lens = weights->get_shape().max_lens();
             std::vector<std::size_t> k_lens(weight_lens.begin() + 2, weight_lens.end());
             cal_auto_padding_size(info,
                                   values,
...
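The point of the parser change above is that the input and weight shapes may now be dynamic, and `max_lens()` yields the upper bound of each dimension, so the kernel-dimension count and the auto-padding computation can still be derived from concrete lengths. A minimal sketch of that behaviour, assuming only the `migraphx::shape` dynamic-dimension constructor and the `max_lens()` accessor that appear elsewhere in this commit:

    // Sketch only: mirrors how the parser now derives in_lens/kdims from a dynamic shape.
    #include <migraphx/shape.hpp>
    #include <cassert>

    int main()
    {
        // NCHW input whose batch dimension may range from 1 to 6; other dims are fixed.
        migraphx::shape l0_shape{migraphx::shape::float_type,
                                 {{1, 6, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}};

        auto in_lens = l0_shape.max_lens(); // upper bounds per dimension: {6, 3, 5, 5}
        assert(in_lens.size() > 2);
        auto kdims = in_lens.size() - 2;    // 2 spatial (kernel) dimensions, as in parse_convolution
        return kdims == 2 ? 0 : 1;
    }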
test/onnx/gen_onnx.py

@@ -827,6 +827,27 @@ def conv_bn_relu_maxpool_test():
     return ([node0, node1, node2, node3], [x, y, z, m, n, k, l], [out])
 
 
+@onnx_test
+def conv_dynamic_batch_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 3, 5, 5])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
+    out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [None, 1, 3, 3])
+    node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])
+
+    return ([node], [x, y], [out])
+
+
+@onnx_test
+def conv_dynamic_img_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, None, None])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
+    out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, None, None])
+    node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])
+
+    return ([node], [x, y], [out])
+
+
 @onnx_test
 def conv_relu_maxpool_test():
...
test/onnx/onnx_test.cpp

@@ -773,6 +773,46 @@ TEST_CASE(conv_bn_relu_maxpool_test)
     EXPECT(p == prog);
 }
 
+TEST_CASE(conv_dynamic_batch_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto l0  = mm->add_parameter(
+        "0", {migraphx::shape::float_type, {{1, 6, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}});
+    auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
+    auto c0 = mm->add_instruction(
+        migraphx::make_op("convolution",
+                          {{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
+        l0,
+        l1);
+    mm->add_return({c0});
+
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 6, 0};
+    auto prog = migraphx::parse_onnx("conv_dynamic_batch_test.onnx", options);
+    EXPECT(p == prog);
+}
+
+TEST_CASE(conv_dynamic_img_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto l0  = mm->add_parameter(
+        "0", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 10, 0}, {5, 10, 0}}});
+    auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
+    auto c0 = mm->add_instruction(
+        migraphx::make_op("convolution",
+                          {{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
+        l0,
+        l1);
+    mm->add_return({c0});
+
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {5, 10, 0};
+    auto prog = migraphx::parse_onnx("conv_dynamic_img_test.onnx", options);
+    EXPECT(p == prog);
+}
+
 TEST_CASE(conv_relu_maxpool_test)
 {
     migraphx::program p;
...
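Both tests build the expected program with explicitly dynamic parameter shapes ({min, max, opt}-style triples) and then parse the generated ONNX files with `onnx_options::default_dyn_dim_value`, which supplies that same range for any dimension the model leaves unspecified. A minimal usage sketch along the same lines; it assumes the internal C++ API the tests use (`migraphx/onnx.hpp`, `parse_onnx`, `onnx_options`) and that `conv_dynamic_batch_test.onnx` has already been produced by gen_onnx.py:

    // Sketch only: parse an ONNX Conv model whose batch dimension is left dynamic.
    #include <migraphx/onnx.hpp>
    #include <iostream>

    int main()
    {
        migraphx::onnx_options options;
        // Any dimension the ONNX file leaves unspecified gets this {min, max, opt} range.
        options.default_dyn_dim_value = {1, 6, 0};

        auto prog = migraphx::parse_onnx("conv_dynamic_batch_test.onnx", options);
        std::cout << prog << std::endl; // inspect the parsed convolution and its input shapes
        return 0;
    }

Printing the parsed program should show parameter "0" carrying the {1, 6, 0} batch range rather than a fixed batch of 1, which is exactly what the `EXPECT(p == prog)` checks above compare against.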