gaoqiong / MIGraphX · Commits

Commit c5d866ed (unverified)
Authored Jan 13, 2023 by Chris Austen; committed by GitHub on Jan 13, 2023

    Merge branch 'develop' into dyn_pad

Parents: 883d4903, 1eb5a1d4

Showing 9 changed files with 241 additions and 34 deletions (+241 −34)
src/onnx/parse_matmul.cpp                  +58  −34
test/onnx/gen_onnx.py                      +90   −0
test/onnx/matmul_dyn_broadcast_error.onnx   +0   −0
test/onnx/matmul_dyn_mm_test.onnx           +0   −0
test/onnx/matmul_dyn_mv_test.onnx           +0   −0
test/onnx/matmul_dyn_vm_test.onnx           +0   −0
test/onnx/matmul_dyn_vv_test.onnx           +0   −0
test/onnx/matmulinteger_dyn_error.onnx      +0   −0
test/onnx/onnx_test.cpp                    +93   −0
src/onnx/parse_matmul.cpp
...
@@ -43,55 +43,79 @@ struct parse_matmul : op_parser<parse_matmul>
                           const onnx_parser::node_info& info,
                           std::vector<instruction_ref> args) const
     {
-        auto l0      = args[0];
-        auto l1      = args[1];
-        auto l0_lens = l0->get_shape().lens();
-        auto l1_lens = l1->get_shape().lens();
+        auto a0 = args[0];
+        auto a1 = args[1];
+        auto s0 = a0->get_shape();
+        auto s1 = a1->get_shape();

         // args[0] is a vector, prepend 1 to the shape
+        instruction_ref dot_res;
         bool is_a_prepended = false;
-        if(l0_lens.size() == 1)
+        bool is_b_appended  = false;
+        if(s0.ndim() == 1)
         {
             is_a_prepended = true;
-            l0_lens.insert(l0_lens.begin(), 1);
-            l0 = info.add_instruction(make_op("unsqueeze", {{"axes", {0}}}), args[0]);
+            a0 = info.add_instruction(make_op("unsqueeze", {{"axes", {0}}}), args[0]);
         }
-        bool is_b_appended = false;
-        if(l1_lens.size() == 1)
+        if(s1.ndim() == 1)
         {
             is_b_appended = true;
-            l1_lens.push_back(1);
-            l1 = info.add_instruction(make_op("unsqueeze", {{"axes", {1}}}), args[1]);
+            a1 = info.add_instruction(make_op("unsqueeze", {{"axes", {1}}}), args[1]);
         }

-        instruction_ref bl0 = l0;
-        instruction_ref bl1 = l1;
-        if(not std::equal(
-               l0_lens.rbegin() + 2, l0_lens.rend(), l1_lens.rbegin() + 2, l1_lens.rend()))
-        {
-            auto l0_it = l0_lens.begin() + l0_lens.size() - 2;
-            std::vector<std::size_t> l0_broadcasted_lens(l0_lens.begin(), l0_it);
-            auto l1_it = l1_lens.begin() + l1_lens.size() - 2;
-            std::vector<std::size_t> l1_broadcasted_lens(l1_lens.begin(), l1_it);
-            auto output_lens = compute_broadcasted_lens(l0_broadcasted_lens, l1_broadcasted_lens);
-            l0_broadcasted_lens = output_lens;
-            l0_broadcasted_lens.insert(l0_broadcasted_lens.end(), l0_it, l0_lens.end());
-            l1_broadcasted_lens = output_lens;
-            l1_broadcasted_lens.insert(l1_broadcasted_lens.end(), l1_it, l1_lens.end());
-            if(l0_lens != l0_broadcasted_lens)
-            {
-                bl0 = info.add_instruction(
-                    make_op("multibroadcast", {{"out_lens", l0_broadcasted_lens}}), l0);
-            }
-            if(l1_lens != l1_broadcasted_lens)
-            {
-                bl1 = info.add_instruction(
-                    make_op("multibroadcast", {{"out_lens", l1_broadcasted_lens}}), l1);
-            }
-        }
+        if(s0.dynamic() or s1.dynamic())
+        {
+            if(opd.op_name == "quant_dot")
+            {
+                MIGRAPHX_THROW("PARSE_MATMUL: dynamic MatMulInteger not supported");
+            }
+            auto s0_dds = a0->get_shape().to_dynamic().dyn_dims();
+            auto s1_dds = a1->get_shape().to_dynamic().dyn_dims();
+            // TODO: handling this case requires a new multibroadcast mode
+            if(not std::equal(
+                   s0_dds.rbegin() + 2, s0_dds.rend(), s1_dds.rbegin() + 2, s1_dds.rend()))
+            {
+                MIGRAPHX_THROW("PARSE_MATMUL: dynamic shape broadcasting not supported");
+            }
+            dot_res = info.add_instruction(make_op(opd.op_name), a0, a1);
+        }
+        else
+        {
+            auto s0_lens = a0->get_shape().lens();
+            auto s1_lens = a1->get_shape().lens();
+            instruction_ref ba0 = a0;
+            instruction_ref ba1 = a1;
+            // try broadcasting if dimensions other than last two do not match
+            if(not std::equal(
+                   s0_lens.rbegin() + 2, s0_lens.rend(), s1_lens.rbegin() + 2, s1_lens.rend()))
+            {
+                auto l0_it = s0_lens.begin() + s0_lens.size() - 2;
+                std::vector<std::size_t> l0_broadcasted_lens(s0_lens.begin(), l0_it);
+                auto l1_it = s1_lens.begin() + s1_lens.size() - 2;
+                std::vector<std::size_t> l1_broadcasted_lens(s1_lens.begin(), l1_it);
+                auto output_lens = compute_broadcasted_lens(l0_broadcasted_lens, l1_broadcasted_lens);
+                l0_broadcasted_lens = output_lens;
+                l0_broadcasted_lens.insert(l0_broadcasted_lens.end(), l0_it, s0_lens.end());
+                l1_broadcasted_lens = output_lens;
+                l1_broadcasted_lens.insert(l1_broadcasted_lens.end(), l1_it, s1_lens.end());
+                if(s0_lens != l0_broadcasted_lens)
+                {
+                    ba0 = info.add_instruction(
+                        make_op("multibroadcast", {{"out_lens", l0_broadcasted_lens}}), a0);
+                }
+                if(s1_lens != l1_broadcasted_lens)
+                {
+                    ba1 = info.add_instruction(
+                        make_op("multibroadcast", {{"out_lens", l1_broadcasted_lens}}), a1);
+                }
+            }
+            dot_res = info.add_instruction(make_op(opd.op_name), ba0, ba1);
+        }

-        instruction_ref dot_res = info.add_instruction(make_op(opd.op_name), bl0, bl1);
-        int64_t num_axis = static_cast<int64_t>(dot_res->get_shape().lens().size());
+        // squeeze the appended or prepended dimensions
+        int64_t num_axis = dot_res->get_shape().ndim();
         if(is_a_prepended)
         {
             dot_res = info.add_instruction(make_op("squeeze", {{"axes", {num_axis - 2}}}), dot_res);
...
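For context on the unsqueeze/squeeze bookkeeping above: ONNX MatMul follows numpy.matmul semantics, where a 1-D left operand has a 1 prepended to its shape, a 1-D right operand has a 1 appended, and the added dimension is removed from the result. A minimal numpy sketch, not part of the commit, shown only to illustrate the rule the parser mirrors:

import numpy as np

a = np.ones(7)        # 1-D left operand
b = np.ones((7, 3))   # 2-D right operand

# numpy treats `a` as shape (1, 7) for the product, then drops the prepended 1,
# which is what the unsqueeze(axes={0}) / squeeze(axes={num_axis - 2}) pair in
# parse_matmul implements.
print(np.matmul(a, b).shape)    # (3,)

# Similarly, a 1-D right operand is treated as (7, 1) and the appended 1 is dropped.
print(np.matmul(b.T, a).shape)  # (3,)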
test/onnx/gen_onnx.py
...
@@ -3563,6 +3563,81 @@ def matmul_vv_test():
     return ([node], [m1, m2], [y])


+@onnx_test()
+def matmul_dyn_mm_test():
+    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 7])
+    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7, None])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, None])
+
+    node = onnx.helper.make_node(
+        'MatMul',
+        inputs=['1', '2'],
+        outputs=['y'],
+    )
+
+    return ([node], [m1, m2], [y])
+
+
+@onnx_test()
+def matmul_dyn_mv_test():
+    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 7])
+    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, 1])
+
+    node = onnx.helper.make_node(
+        'MatMul',
+        inputs=['1', '2'],
+        outputs=['y'],
+    )
+
+    return ([node], [m1, m2], [y])
+
+
+@onnx_test()
+def matmul_dyn_vm_test():
+    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [7])
+    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7, None])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, None])
+
+    node = onnx.helper.make_node(
+        'MatMul',
+        inputs=['1', '2'],
+        outputs=['y'],
+    )
+
+    return ([node], [m1, m2], [y])
+
+
+@onnx_test()
+def matmul_dyn_vv_test():
+    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None])
+    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [None])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1])
+
+    node = onnx.helper.make_node(
+        'MatMul',
+        inputs=['1', '2'],
+        outputs=['y'],
+    )
+
+    return ([node], [m1, m2], [y])
+
+
+@onnx_test()
+def matmul_dyn_broadcast_error():
+    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [7])
+    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [5, 7, None])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [5, None])
+
+    node = onnx.helper.make_node(
+        'MatMul',
+        inputs=['1', '2'],
+        outputs=['y'],
+    )
+
+    return ([node], [m1, m2], [y])
+
+
 @onnx_test()
 def matmulinteger_test():
     m1 = helper.make_tensor_value_info('1', TensorProto.INT8, [3, 6, 16])
...
@@ -3578,6 +3653,21 @@ def matmulinteger_test():
     return ([node], [m1, m2], [y])


+@onnx_test()
+def matmulinteger_dyn_error():
+    m1 = helper.make_tensor_value_info('1', TensorProto.INT8, [None, 6, 16])
+    m2 = helper.make_tensor_value_info('2', TensorProto.INT8, [None, 16, 8])
+    y = helper.make_tensor_value_info('y', TensorProto.INT32, [None, 6, 8])
+
+    node = onnx.helper.make_node(
+        'MatMulInteger',
+        inputs=['1', '2'],
+        outputs=['y'],
+    )
+
+    return ([node], [m1, m2], [y])
+
+
 @onnx_test()
 def max_test():
     a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
...
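Each @onnx_test() function above only returns its (nodes, inputs, outputs) tuple; the gen_onnx.py driver, via the onnx_test() decorator, is what serializes that tuple into the .onnx files added below. As a rough, hypothetical sketch of that serialization step using the standard onnx.helper API (the decorator machinery itself is not shown in this diff):

import onnx
from onnx import helper, TensorProto

def matmul_dyn_mm():
    # Same structure as matmul_dyn_mm_test() above, minus the decorator.
    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 7])
    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7, None])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, None])
    node = helper.make_node('MatMul', inputs=['1', '2'], outputs=['y'])
    return ([node], [m1, m2], [y])

# Build a graph from the returned tuple and write it as a test model file.
nodes, inputs, outputs = matmul_dyn_mm()
graph = helper.make_graph(nodes, 'matmul_dyn_mm_test', inputs, outputs)
onnx.save(helper.make_model(graph), 'matmul_dyn_mm_test.onnx')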
test/onnx/matmul_dyn_broadcast_error.onnx   new file added (binary, mode 0 → 100644)
test/onnx/matmul_dyn_mm_test.onnx           new file added (binary, mode 0 → 100644)
test/onnx/matmul_dyn_mv_test.onnx           new file added (binary, mode 0 → 100644)
test/onnx/matmul_dyn_vm_test.onnx           new file added (binary, mode 0 → 100644)
test/onnx/matmul_dyn_vv_test.onnx           new file added (binary, mode 0 → 100644)
test/onnx/matmulinteger_dyn_error.onnx      new file added (binary, mode 0 → 100644)
test/onnx/onnx_test.cpp
...
@@ -3432,6 +3432,92 @@ TEST_CASE(matmul_vv_test)
     EXPECT(p == prog);
 }

+TEST_CASE(matmul_dyn_mm_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto l0  = mm->add_parameter(
+        "1", migraphx::shape{migraphx::shape::float_type, {{4, 8, 6}, {7, 7, 0}}});
+    auto l1 = mm->add_parameter(
+        "2", migraphx::shape{migraphx::shape::float_type, {{7, 7, 0}, {1, 5, 3}}});
+    auto ret =
+        migraphx::add_apply_alpha_beta(*mm, {l0, l1}, migraphx::make_op("dot"), 1.0f, 0.0f);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.map_dyn_input_dims["1"] = {{4, 8, 6}, {7, 7, 0}};
+    options.map_dyn_input_dims["2"] = {{7, 7, 0}, {1, 5, 3}};
+    auto prog = parse_onnx("matmul_dyn_mm_test.onnx", options);
+    EXPECT(p == prog);
+}
+
+TEST_CASE(matmul_dyn_mv_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto l0  = mm->add_parameter(
+        "1", migraphx::shape{migraphx::shape::float_type, {{4, 8, 6}, {7, 7, 0}}});
+    auto l1  = mm->add_parameter("2", migraphx::shape{migraphx::shape::float_type, {7}});
+    auto sl1 = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), l1);
+    auto res =
+        migraphx::add_apply_alpha_beta(*mm, {l0, sl1}, migraphx::make_op("dot"), 1.0f, 0.0f);
+    auto ret = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {1}}}), res);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.map_dyn_input_dims["1"] = {{4, 8, 6}, {7, 7, 0}};
+    auto prog = parse_onnx("matmul_dyn_mv_test.onnx", options);
+    EXPECT(p == prog);
+}
+
+TEST_CASE(matmul_dyn_vm_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto l0  = mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {7}});
+    auto l1  = mm->add_parameter(
+        "2", migraphx::shape{migraphx::shape::float_type, {{7, 7, 0}, {4, 10, 8}}});
+    auto sl0 = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {0}}}), l0);
+    auto res =
+        migraphx::add_apply_alpha_beta(*mm, {sl0, l1}, migraphx::make_op("dot"), 1.0f, 0.0f);
+    auto ret = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {0}}}), res);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.map_dyn_input_dims["2"] = {{7, 7, 0}, {4, 10, 8}};
+    auto prog = parse_onnx("matmul_dyn_vm_test.onnx", options);
+    EXPECT(p == prog);
+}
+
+TEST_CASE(matmul_dyn_vv_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    migraphx::shape::dynamic_dimension dd{5, 8, 7};
+    auto l0  = mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {dd}});
+    auto l1  = mm->add_parameter("2", migraphx::shape{migraphx::shape::float_type, {dd}});
+    auto sl0 = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {0}}}), l0);
+    auto sl1 = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), l1);
+    auto res =
+        migraphx::add_apply_alpha_beta(*mm, {sl0, sl1}, migraphx::make_op("dot"), 1.0f, 0.0f);
+    auto sr0 = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {0}}}), res);
+    auto ret = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {0}}}), sr0);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = dd;
+    auto prog = parse_onnx("matmul_dyn_vv_test.onnx", options);
+    EXPECT(p == prog);
+}
+
+TEST_CASE(matmul_dyn_broadcast_error)
+{
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    EXPECT(test::throws(
+        [&] { migraphx::parse_onnx("matmul_dyn_broadcast_error.onnx", options); }));
+}
+
 TEST_CASE(matmulinteger_test)
 {
     migraphx::program p;
...
@@ -3445,6 +3531,13 @@ TEST_CASE(matmulinteger_test)
     EXPECT(p == prog);
 }

+TEST_CASE(matmulinteger_dyn_error)
+{
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    EXPECT(test::throws(
+        [&] { migraphx::parse_onnx("matmulinteger_dyn_error.onnx", options); }));
+}
+
 TEST_CASE(max_test)
 {
     migraphx::program p;
...
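The two error tests above exercise the new MIGRAPHX_THROW paths in parse_matmul.cpp: MatMulInteger with any dynamic input, and a MatMul whose batch (non-matrix) dimensions would need broadcasting while a shape is dynamic. For the latter, a small numpy illustration of the broadcast the parser cannot yet express with dynamic shapes; this is not part of the commit, and the dynamic dimension from matmul_dyn_broadcast_error is fixed at 2 purely for the example:

import numpy as np

a = np.ones(7)           # ONNX input '1': shape [7]
b = np.ones((5, 7, 2))   # ONNX input '2': shape [5, 7, None], with None taken as 2 here

# With static shapes, numpy/ONNX MatMul broadcasts the vector across the batch
# dimension of 5, yielding shape (5, 2). When a dynamic dimension is involved,
# parse_matmul currently throws
# "PARSE_MATMUL: dynamic shape broadcasting not supported" instead.
print(np.matmul(a, b).shape)  # (5, 2)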