MIGraphX (gaoqiong)

Commit 4ea977e3
Authored Nov 21, 2022 by charlie
Parser and onnx_test updates
Parent: f6c31887

Showing 3 changed files with 271 additions and 49 deletions:

    src/onnx/parse_matmul.cpp   +110  -49
    test/onnx/gen_onnx.py        +75   -0
    test/onnx/onnx_test.cpp      +86   -0
src/onnx/parse_matmul.cpp  (+110, -49)

@@ -43,66 +43,127 @@ struct parse_matmul : op_parser<parse_matmul>
                           const onnx_parser::node_info& info,
                           std::vector<instruction_ref> args) const
     {
-        auto l0      = args[0];
-        auto l1      = args[1];
-        auto l0_lens = l0->get_shape().lens();
-        auto l1_lens = l1->get_shape().lens();
-
-        // args[0] is a vector, prepend 1 to the shape
-        bool is_a_prepended = false;
-        if(l0_lens.size() == 1)
-        {
-            is_a_prepended = true;
-            l0_lens.insert(l0_lens.begin(), 1);
-            l0 = info.add_instruction(make_op("unsqueeze", {{"axes", {0}}}), args[0]);
-        }
-
-        bool is_b_appended = false;
-        if(l1_lens.size() == 1)
-        {
-            is_b_appended = true;
-            l1_lens.push_back(1);
-            l1 = info.add_instruction(make_op("unsqueeze", {{"axes", {1}}}), args[1]);
-        }
-
-        instruction_ref bl0 = l0;
-        instruction_ref bl1 = l1;
-        if(not std::equal(
-               l0_lens.rbegin() + 2, l0_lens.rend(), l1_lens.rbegin() + 2, l1_lens.rend()))
-        {
-            auto l0_it = l0_lens.begin() + l0_lens.size() - 2;
-            std::vector<std::size_t> l0_broadcasted_lens(l0_lens.begin(), l0_it);
-            auto l1_it = l1_lens.begin() + l1_lens.size() - 2;
-            std::vector<std::size_t> l1_broadcasted_lens(l1_lens.begin(), l1_it);
-            auto output_lens = compute_broadcasted_lens(l0_broadcasted_lens, l1_broadcasted_lens);
-            l0_broadcasted_lens = output_lens;
-            l0_broadcasted_lens.insert(l0_broadcasted_lens.end(), l0_it, l0_lens.end());
-            l1_broadcasted_lens = output_lens;
-            l1_broadcasted_lens.insert(l1_broadcasted_lens.end(), l1_it, l1_lens.end());
-            if(l0_lens != l0_broadcasted_lens)
-            {
-                bl0 = info.add_instruction(
-                    make_op("multibroadcast", {{"out_lens", l0_broadcasted_lens}}), l0);
-            }
-            if(l1_lens != l1_broadcasted_lens)
-            {
-                bl1 = info.add_instruction(
-                    make_op("multibroadcast", {{"out_lens", l1_broadcasted_lens}}), l1);
-            }
-        }
-
-        instruction_ref dot_res = info.add_instruction(make_op(opd.op_name), bl0, bl1);
-        int64_t num_axis        = static_cast<int64_t>(dot_res->get_shape().lens().size());
-        if(is_a_prepended)
-        {
-            dot_res = info.add_instruction(make_op("squeeze", {{"axes", {num_axis - 2}}}), dot_res);
-            --num_axis;
-        }
-        if(is_b_appended)
-        {
-            dot_res = info.add_instruction(make_op("squeeze", {{"axes", {num_axis - 1}}}), dot_res);
-        }
-        return dot_res;
+        auto a0 = args[0];
+        auto a1 = args[1];
+        auto s0 = a0->get_shape();
+        auto s1 = a1->get_shape();
+
+        if(s0.dynamic() or s1.dynamic())
+        {
+            if(opd.op_name == "quant_dot")
+            {
+                MIGRAPHX_THROW("PARSE_MATMUL: dynamic MatMulInteger not supported");
+            }
+
+            auto s0_dds = s0.to_dynamic().dyn_dims();
+            auto s1_dds = s1.to_dynamic().dyn_dims();
+
+            // prepend or append 1 dynamic_dimension for 1D tensors
+            bool is_a_prepended = false;
+            if(s0.ndim() == 1)
+            {
+                is_a_prepended = true;
+                s0_dds.insert(s0_dds.begin(), {1, 1, 0});
+                a0 = info.add_instruction(make_op("unsqueeze", {{"axes", {0}}}), args[0]);
+            }
+
+            bool is_b_appended = false;
+            if(s1.ndim() == 1)
+            {
+                is_b_appended = true;
+                s1_dds.push_back({1, 1, 0});
+                a1 = info.add_instruction(make_op("unsqueeze", {{"axes", {1}}}), args[1]);
+            }
+
+            // TODO: handling this case requires a new multibroadcast mode
+            if(not std::equal(
+                   s0_dds.rbegin() + 2, s0_dds.rend(), s1_dds.rbegin() + 2, s1_dds.rend()))
+            {
+                MIGRAPHX_THROW("PARSE_MATMUL: dynamic shape broadcasting not supported");
+            }
+
+            instruction_ref dot_res = info.add_instruction(make_op(opd.op_name), a0, a1);
+
+            // squeeze the appended or prepended dimensions
+            int64_t num_axis = static_cast<int64_t>(dot_res->get_shape().lens().size());
+            if(is_a_prepended)
+            {
+                dot_res =
+                    info.add_instruction(make_op("squeeze", {{"axes", {num_axis - 2}}}), dot_res);
+                --num_axis;
+            }
+            if(is_b_appended)
+            {
+                dot_res =
+                    info.add_instruction(make_op("squeeze", {{"axes", {num_axis - 1}}}), dot_res);
+            }
+            return dot_res;
+        }
+        else
+        {
+            auto s0_lens = s0.lens();
+            auto s1_lens = s1.lens();
+
+            // args[0] is a vector, prepend 1 to the shape
+            bool is_a_prepended = false;
+            if(s0.ndim() == 1)
+            {
+                is_a_prepended = true;
+                s0_lens.insert(s0_lens.begin(), 1);
+                a0 = info.add_instruction(make_op("unsqueeze", {{"axes", {0}}}), args[0]);
+            }
+
+            bool is_b_appended = false;
+            if(s1.ndim() == 1)
+            {
+                is_b_appended = true;
+                s1_lens.push_back(1);
+                a1 = info.add_instruction(make_op("unsqueeze", {{"axes", {1}}}), args[1]);
+            }
+
+            instruction_ref ba0 = a0;
+            instruction_ref ba1 = a1;
+            // try broadcasting if dimensions other than last two do not match
+            if(not std::equal(
+                   s0_lens.rbegin() + 2, s0_lens.rend(), s1_lens.rbegin() + 2, s1_lens.rend()))
+            {
+                auto l0_it = s0_lens.begin() + s0_lens.size() - 2;
+                std::vector<std::size_t> l0_broadcasted_lens(s0_lens.begin(), l0_it);
+                auto l1_it = s1_lens.begin() + s1_lens.size() - 2;
+                std::vector<std::size_t> l1_broadcasted_lens(s1_lens.begin(), l1_it);
+                auto output_lens =
+                    compute_broadcasted_lens(l0_broadcasted_lens, l1_broadcasted_lens);
+                l0_broadcasted_lens = output_lens;
+                l0_broadcasted_lens.insert(l0_broadcasted_lens.end(), l0_it, s0_lens.end());
+                l1_broadcasted_lens = output_lens;
+                l1_broadcasted_lens.insert(l1_broadcasted_lens.end(), l1_it, s1_lens.end());
+                if(s0_lens != l0_broadcasted_lens)
+                {
+                    ba0 = info.add_instruction(
+                        make_op("multibroadcast", {{"out_lens", l0_broadcasted_lens}}), a0);
+                }
+                if(s1_lens != l1_broadcasted_lens)
+                {
+                    ba1 = info.add_instruction(
+                        make_op("multibroadcast", {{"out_lens", l1_broadcasted_lens}}), a1);
+                }
+            }
+
+            instruction_ref dot_res = info.add_instruction(make_op(opd.op_name), ba0, ba1);
+
+            // squeeze the appended or prepended dimensions
+            int64_t num_axis = static_cast<int64_t>(dot_res->get_shape().lens().size());
+            if(is_a_prepended)
+            {
+                dot_res =
+                    info.add_instruction(make_op("squeeze", {{"axes", {num_axis - 2}}}), dot_res);
+                --num_axis;
+            }
+            if(is_b_appended)
+            {
+                dot_res =
+                    info.add_instruction(make_op("squeeze", {{"axes", {num_axis - 1}}}), dot_res);
+            }
+            return dot_res;
+        }
     }
 };
...
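For reference, the unsqueeze/dot/squeeze sequence and the batch-dimension broadcasting emitted by the static-shape branch above follow ONNX/NumPy MatMul semantics. A minimal NumPy sketch of the same logic (the function name and test shapes are illustrative, not part of the commit):

import numpy as np

def matmul_like_parser(a, b):
    # Mirror the parser's static-shape path: a 1-D lhs gets a dimension
    # prepended, a 1-D rhs gets one appended, batch dims broadcast, and the
    # added dimensions are squeezed away after the dot.
    a_prepended = a.ndim == 1
    b_appended = b.ndim == 1
    if a_prepended:
        a = a[np.newaxis, :]   # unsqueeze axis 0
    if b_appended:
        b = b[:, np.newaxis]   # unsqueeze axis 1
    out = a @ b                # batch dims broadcast, like the multibroadcast step
    if a_prepended:
        out = np.squeeze(out, axis=-2)
    if b_appended:
        out = np.squeeze(out, axis=-1)
    return out

x = np.random.rand(4, 1, 6, 7)
w = np.random.rand(5, 7, 8)
v = np.random.rand(7)
assert matmul_like_parser(x, w).shape == (4, 5, 6, 8)   # batch dims broadcast
assert matmul_like_parser(x, v).shape == (4, 1, 6)      # 1-D rhs: append, dot, squeeze
assert np.allclose(matmul_like_parser(x, v), np.matmul(x, v))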
test/onnx/gen_onnx.py  (+75, -0)

@@ -3390,6 +3390,66 @@ def matmul_vv_test():
     return ([node], [m1, m2], [y])
 
 
+@onnx_test
+def matmul_dyn_mm_test():
+    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 7])
+    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7, None])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, None])
+
+    node = onnx.helper.make_node(
+        'MatMul',
+        inputs=['1', '2'],
+        outputs=['y'],
+    )
+
+    return ([node], [m1, m2], [y])
+
+
+@onnx_test
+def matmul_dyn_mv_test():
+    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 7])
+    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, 1])
+
+    node = onnx.helper.make_node(
+        'MatMul',
+        inputs=['1', '2'],
+        outputs=['y'],
+    )
+
+    return ([node], [m1, m2], [y])
+
+
+@onnx_test
+def matmul_dyn_vm_test():
+    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [7])
+    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7, None])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, None])
+
+    node = onnx.helper.make_node(
+        'MatMul',
+        inputs=['1', '2'],
+        outputs=['y'],
+    )
+
+    return ([node], [m1, m2], [y])
+
+
+@onnx_test
+def matmul_dyn_vv_test():
+    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None])
+    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [None])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1])
+
+    node = onnx.helper.make_node(
+        'MatMul',
+        inputs=['1', '2'],
+        outputs=['y'],
+    )
+
+    return ([node], [m1, m2], [y])
+
+
 @onnx_test
 def matmulinteger_test():
     m1 = helper.make_tensor_value_info('1', TensorProto.INT8, [3, 6, 16])
...

@@ -3405,6 +3465,21 @@ def matmulinteger_test():
     return ([node], [m1, m2], [y])
 
 
+@onnx_test
+def matmulinteger_dyn_error():
+    m1 = helper.make_tensor_value_info('1', TensorProto.INT8, [None, 6, 16])
+    m2 = helper.make_tensor_value_info('2', TensorProto.INT8, [None, 16, 8])
+    y = helper.make_tensor_value_info('y', TensorProto.INT32, [None, 6, 8])
+
+    node = onnx.helper.make_node(
+        'MatMulInteger',
+        inputs=['1', '2'],
+        outputs=['y'],
+    )
+
+    return ([node], [m1, m2], [y])
+
+
 @onnx_test
 def max_test():
     a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
...
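For context, each generator returns a ([nodes], [inputs], [outputs]) triple, and the @onnx_test decorator (defined elsewhere in gen_onnx.py) is assumed to turn that triple into a serialized model named after the function. A rough sketch of that assembly with the onnx helper API (save_test_model is a hypothetical stand-in for what the decorator does):

import onnx
from onnx import helper, TensorProto

def save_test_model(name, nodes, inputs, outputs):
    # Assumed behaviour of @onnx_test: wrap the node list in a graph, wrap the
    # graph in a model, and write <name>.onnx for the C++ parser tests to load.
    graph = helper.make_graph(nodes, name, inputs, outputs)
    model = helper.make_model(graph)
    onnx.save(model, name + '.onnx')

m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 7])
m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7, None])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, None])
node = helper.make_node('MatMul', inputs=['1', '2'], outputs=['y'])
save_test_model('matmul_dyn_mm_test', [node], [m1, m2], [y])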
test/onnx/onnx_test.cpp  (+86, -0)

@@ -3256,6 +3256,85 @@ TEST_CASE(matmul_vv_test)
     EXPECT(p == prog);
 }
 
+TEST_CASE(matmul_dyn_mm_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto l0  = mm->add_parameter(
+        "1", migraphx::shape{migraphx::shape::float_type, {{4, 8, 6}, {7, 7, 0}}});
+    auto l1 = mm->add_parameter(
+        "2", migraphx::shape{migraphx::shape::float_type, {{7, 7, 0}, {1, 5, 3}}});
+    auto ret =
+        migraphx::add_apply_alpha_beta(*mm, {l0, l1}, migraphx::make_op("dot"), 1.0f, 0.0f);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.map_dyn_input_dims["1"] = {{4, 8, 6}, {7, 7, 0}};
+    options.map_dyn_input_dims["2"] = {{7, 7, 0}, {1, 5, 3}};
+    auto prog = parse_onnx("matmul_dyn_mm_test.onnx", options);
+
+    EXPECT(p == prog);
+}
+
+TEST_CASE(matmul_dyn_mv_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto l0  = mm->add_parameter(
+        "1", migraphx::shape{migraphx::shape::float_type, {{4, 8, 6}, {7, 7, 0}}});
+    auto l1  = mm->add_parameter("2", migraphx::shape{migraphx::shape::float_type, {7}});
+    auto sl1 = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), l1);
+    auto res =
+        migraphx::add_apply_alpha_beta(*mm, {l0, sl1}, migraphx::make_op("dot"), 1.0f, 0.0f);
+    auto ret = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {1}}}), res);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.map_dyn_input_dims["1"] = {{4, 8, 6}, {7, 7, 0}};
+    auto prog = parse_onnx("matmul_dyn_mv_test.onnx", options);
+
+    EXPECT(p == prog);
+}
+
+TEST_CASE(matmul_dyn_vm_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto l0  = mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {7}});
+    auto l1  = mm->add_parameter(
+        "2", migraphx::shape{migraphx::shape::float_type, {{7, 7, 0}, {4, 10, 8}}});
+    auto sl0 = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {0}}}), l0);
+    auto res =
+        migraphx::add_apply_alpha_beta(*mm, {sl0, l1}, migraphx::make_op("dot"), 1.0f, 0.0f);
+    auto ret = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {0}}}), res);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.map_dyn_input_dims["2"] = {{7, 7, 0}, {4, 10, 8}};
+    auto prog = parse_onnx("matmul_dyn_vm_test.onnx", options);
+
+    EXPECT(p == prog);
+}
+
+TEST_CASE(matmul_dyn_vv_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    migraphx::shape::dynamic_dimension dd{5, 8, 7};
+    auto l0  = mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {dd}});
+    auto l1  = mm->add_parameter("2", migraphx::shape{migraphx::shape::float_type, {dd}});
+    auto sl0 = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {0}}}), l0);
+    auto sl1 = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), l1);
+    auto res =
+        migraphx::add_apply_alpha_beta(*mm, {sl0, sl1}, migraphx::make_op("dot"), 1.0f, 0.0f);
+    auto sr0 = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {0}}}), res);
+    auto ret = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {0}}}), sr0);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = dd;
+    auto prog = parse_onnx("matmul_dyn_vv_test.onnx", options);
+
+    EXPECT(p == prog);
+}
+
 TEST_CASE(matmulinteger_test)
 {
     migraphx::program p;
...

@@ -3269,6 +3348,13 @@ TEST_CASE(matmulinteger_test)
     EXPECT(p == prog);
 }
 
+TEST_CASE(matmulinteger_dyn_error)
+{
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    EXPECT(test::throws([&] { migraphx::parse_onnx("matmulinteger_dyn_error.onnx"); }));
+}
+
 TEST_CASE(max_test)
 {
     migraphx::program p;
...
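As these tests show, the generated models keep their MatMul dimensions symbolic, so the parser only gets concrete dynamic ranges through map_dyn_input_dims or default_dyn_dim_value. An illustrative Python check (not part of the commit) that the serialized model itself carries no fixed sizes:

import onnx
from onnx import shape_inference

# The inferred output dims of the dynamic model stay symbolic; resolving them
# is left to parse_onnx via the options used in the C++ tests above.
model = onnx.load('matmul_dyn_mm_test.onnx')
inferred = shape_inference.infer_shapes(model)
print(inferred.graph.output[0].type.tensor_type.shape)  # dims without a fixed dim_value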