gaoqiong / MIGraphX

Unverified commit 1587c533, authored Oct 13, 2023 by Lakhinder Walia, committed by GitHub on Oct 13, 2023

qlinearconv operator (#2225)

Parent: b66d58ac
Showing 9 changed files with 513 additions and 1 deletion.
src/onnx/padding.cpp                       +1    -1
src/onnx/parse_qlinearconv.cpp             +241  -0
test/onnx/gen_onnx.py                      +110  -0
test/onnx/onnx_test.cpp                    +55   -0
test/onnx/qlinearconv_pad_0_test.onnx      +0    -0
test/onnx/qlinearconv_pad_1_test.onnx      +0    -0
test/onnx/qlinearconv_scale_1D_test.onnx   +0    -0
test/onnx/qlinearconv_test.onnx            +0    -0
test/onnx/verify_onnx.cpp                  +106  -0
src/onnx/padding.cpp

@@ -47,7 +47,7 @@ void cal_auto_padding_size(onnx_parser::node_info info,
         return;
     }
 
-    auto auto_pad = info.attributes["auto_pad"].s();
+    auto auto_pad = to_upper(info.attributes["auto_pad"].s());
     if(auto_pad.find("SAME") != std::string::npos)
     {
         bool is_same_upper = (auto_pad.find("SAME_UPPER") != std::string::npos);
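The one-line change above normalizes auto_pad with to_upper before matching, so an ONNX model that serializes the attribute in lower or mixed case still selects SAME padding. A minimal Python sketch of the same normalization (illustrative only, not MIGraphX code):

auto_pad = "same_upper"              # as it may appear in an exported model
normalized = auto_pad.upper()        # mirrors the new to_upper(...) call
print("SAME" in normalized)          # True; the raw lower-case string would not match
print("SAME_UPPER" in normalized)    # True, so is_same_upper is set as intended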
src/onnx/parse_qlinearconv.cpp (new file, mode 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/onnx/padding.hpp>
#include <migraphx/onnx/conv.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/onnx/checks.hpp>
#include <migraphx/onnx/broadcast_qdq.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/stringutils.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {

/*
 *********************************************************************************
 * Reference: see QLinearConv in                                                 *
 * https://github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md   *
 *********************************************************************************

com.microsoft.QLinearConv

Version
This version of the operator has been available since version 1 of the 'com.microsoft' operator set.

ATTRIBUTES:
auto_pad : string
channels_last : int
dilations : list of ints
group : int
kernel_shape : list of ints
pads : list of ints
strides : list of ints

INPUTS (8 - 9):
x : T1
x_scale : tensor(float)
x_zero_point : T1
w : T2
w_scale : tensor(float)
w_zero_point : T2
y_scale : tensor(float)
y_zero_point : T3
B (optional) : T4

OUTPUTS:
y : T3

Type Constraints:
T1 : tensor(int8), tensor(uint8)
T2 : tensor(int8), tensor(uint8)
T3 : tensor(int8), tensor(uint8)
T4 : tensor(int32)

More details also at:
https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__QLinearConv.html
*/

struct parse_qlinearconv : op_parser<parse_qlinearconv>
{
    std::vector<op_desc> operators() const { return {{"QLinearConv"}}; }

    // basic type checking for QLinearConv Operator
    void check_inputs(const std::vector<instruction_ref>& inp_arg) const
    {
        if(inp_arg.size() < 8)
            MIGRAPHX_THROW("QLINEARCONV: missing inputs");

        const instruction_ref& in_x       = inp_arg[0];
        const instruction_ref& in_scale_x = inp_arg[1];
        const instruction_ref& in_w       = inp_arg[3];
        const instruction_ref& in_scale_w = inp_arg[4];
        const instruction_ref& in_scale_y = inp_arg[6];

        auto sh_x   = in_x->get_shape();
        auto sh_w   = in_w->get_shape();
        auto type_x = sh_x.type();
        auto type_w = sh_w.type();

        assert(in_x->get_shape().ndim() > 2);

        if(type_x != shape::int8_type and type_x != shape::uint8_type)
            MIGRAPHX_THROW("QLINEARCONV: unsupported input type");
        if(type_w != shape::int8_type and type_w != shape::uint8_type)
            MIGRAPHX_THROW("QLINEARCONV: unsupported weight type");

        if(in_scale_x->get_shape().type() != shape::float_type)
            MIGRAPHX_THROW("QLINEARCONV x scale type should be float");
        if(in_scale_w->get_shape().type() != shape::float_type)
            MIGRAPHX_THROW("QLINEARCONV: wt scale type should be float");
        if(in_scale_y->get_shape().type() != shape::float_type)
            MIGRAPHX_THROW("QLINEARCONV: y scale type should be float");
        if(inp_arg.size() > 8 and inp_arg[8]->get_shape().type() != shape::int32_type)
            MIGRAPHX_THROW("QLINEARCONV y bias should be int32");
    }

    // process all attributes of QLinearConv Operator..
    value process_attributes(const onnx_parser& parser,
                             const onnx_parser::node_info& info,
                             const std::vector<instruction_ref>& args) const
    {
        value values;
        const auto& in_x = args[0];
        const auto& wt   = args[3];
        size_t kdims     = in_x->get_shape().ndim() - 2;

        check_padding_mode(info, "QLINEARCONV");

        values["stride"]   = std::vector<int>(kdims, 1);
        values["dilation"] = std::vector<int>(kdims, 1);
        values["padding"]  = std::vector<int>(kdims, 0);
        values["group"]    = 1;

        if(contains(info.attributes, "group"))
            values["group"] = parser.parse_value(info.attributes.at("group")).template at<int>();

        if(contains(info.attributes, "strides"))
        {
            std::vector<int> st;
            copy(info.attributes.at("strides").ints(), std::back_inserter(st));
            check_attr_sizes(kdims, st.size(), "QLINEARCONV: inconsistent strides");
            values["stride"] = st;
        }

        if(contains(info.attributes, "dilations"))
        {
            std::vector<int> dil;
            copy(info.attributes.at("dilations").ints(), std::back_inserter(dil));
            check_attr_sizes(kdims, dil.size(), "QLINEARCONV: inconsistent dilations");
            values["dilation"] = dil;
        }

        if(contains(info.attributes, "pads"))
        {
            std::vector<int> pads;
            copy(info.attributes.at("pads").ints(), std::back_inserter(pads));
            check_attr_sizes(kdims, pads.size() / 2, "QLINEARCONV: inconsistent padding");
            values["padding"] = pads;
        }
        else if(contains(info.attributes, "auto_pad"))
        {
            auto in_lens = in_x->get_shape().lens();
            auto wt_lens = wt->get_shape().lens();
            std::vector<std::size_t> k_lens(wt_lens.begin() + 2, wt_lens.end());
            std::vector<int64_t> pads = values["padding"].to_vector<std::int64_t>();
            cal_auto_padding_size(info,
                                  values,
                                  k_lens,
                                  values["dilation"].to_vector<std::size_t>(),
                                  in_lens,
                                  pads);
            values["padding"] = pads;
        }

        recalc_conv_attributes(values, kdims);

        return values;
    }

    instruction_ref add_bias_to_conv(const instruction_ref bias_arg,
                                     const instruction_ref conv_instr,
                                     const onnx_parser::node_info& info) const
    {
        auto conv_sh   = conv_instr->get_shape();
        auto conv_lens = conv_sh.lens();
        auto conv_type = conv_sh.type();
        auto broadcast_bias = info.add_instruction(
            migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", conv_lens}}), bias_arg);
        auto f_bias =
            info.add_instruction(make_op("convert", {{"target_type", conv_type}}), broadcast_bias);
        return info.add_instruction(migraphx::make_op("add"), conv_instr, f_bias);
    };

    instruction_ref parse(const op_desc& /* opd */,
                          const onnx_parser& parser,
                          const onnx_parser::node_info& info,
                          const std::vector<instruction_ref>& args) const
    {
        check_inputs(args);

        auto values = process_attributes(parser, info, args);

        // input: quantized x, scale, zero_pt
        const instruction_ref& in_x         = args[0];
        const instruction_ref& in_scale_x   = args[1];
        const instruction_ref& in_zero_pt_x = args[2];

        // input: quantized weights, scale, zero_pt
        const instruction_ref& in_w         = args[3];
        const instruction_ref& in_scale_w   = args[4];
        const instruction_ref& in_zero_pt_w = args[5];

        // for the dequantized output y: scale & zero_pt
        const instruction_ref& in_scale_y   = args[6];
        const instruction_ref& in_zero_pt_y = args[7];

        auto dquant_x = bcast_qdq_instr("dequantizelinear", in_x, in_scale_x, in_zero_pt_x, info);
        auto dquant_w = bcast_qdq_instr("dequantizelinear", in_w, in_scale_w, in_zero_pt_w, info);

        auto conv_op  = migraphx::make_op("convolution", values);
        auto conv_x_w = info.add_instruction(conv_op, dquant_x, dquant_w);

        // Biases, if any.. : is an optional argument.
        if(args.size() > 8)
            conv_x_w = add_bias_to_conv(args[8], conv_x_w, info);

        auto quant_conv =
            bcast_qdq_instr("quantizelinear", conv_x_w, in_scale_y, in_zero_pt_y, info);

        return quant_conv;
    }
};

} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
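The parser above lowers QLinearConv onto MIGraphX's existing quantization primitives: dequantizelinear on x and on w, a float convolution, an optional bias add, then quantizelinear with the output scale and zero point. A minimal NumPy sketch of that decomposition for a single 2-D channel with stride 1 and no padding (illustrative only; the function name is hypothetical and the clamp range assumes a uint8 output):

import numpy as np

def qlinearconv_2d(x_q, x_scale, x_zp, w_q, w_scale, w_zp, y_scale, y_zp):
    # dequantizelinear: real = (q - zero_point) * scale
    x = (x_q.astype(np.float32) - x_zp) * x_scale
    w = (w_q.astype(np.float32) - w_zp) * w_scale
    # plain float convolution over valid positions (stride 1, no padding)
    kh, kw = w.shape
    oh, ow = x.shape[0] - kh + 1, x.shape[1] - kw + 1
    y = np.zeros((oh, ow), dtype=np.float32)
    for i in range(oh):
        for j in range(ow):
            y[i, j] = np.sum(x[i:i + kh, j:j + kw] * w)
    # quantizelinear: q = clamp(round(real / scale) + zero_point, 0, 255)
    return np.clip(np.round(y / y_scale) + y_zp, 0, 255).astype(np.uint8)

When a ninth input is present, add_bias_to_conv above broadcasts the int32 bias along axis 1, converts it to the convolution's float type, and adds it before the final quantize step.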
test/onnx/gen_onnx.py

@@ -5151,6 +5151,116 @@ def qlinearadd_bcast_test():
            [sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])


@onnx_test()
def qlinearconv_test():
    # https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__QLinearConv.html
    x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 7, 7])
    sc_x = helper.make_tensor('1', TensorProto.FLOAT, [], [0.00369204697])
    zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [132])

    wt = helper.make_tensor('3', TensorProto.UINT8, [1, 1, 1, 1], [0])
    sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [], [0.00172794575])
    zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [], [255])

    sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.00162681262])
    zero_pt_y = helper.make_tensor('7', TensorProto.UINT8, [], [123])

    out = helper.make_tensor_value_info('out', TensorProto.UINT8, [1, 1, 7, 7])

    node = onnx.helper.make_node(
        'QLinearConv',
        inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
        outputs=['out'],
    )

    return ([node], [x], [out],
            [sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])


@onnx_test()
def qlinearconv_pad_1_test():
    # https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
    x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 5, 5])
    sc_x = helper.make_tensor('1', TensorProto.FLOAT, [], [0.09411764705882353])
    zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [0])

    wt = helper.make_tensor('3', TensorProto.UINT8, [1, 1, 3, 3],
                            [1, 1, 1, 1, 1, 1, 1, 1, 1])
    sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [], [1.0])
    zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [], [0])

    sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.6352941176470588])
    zero_pt_y = helper.make_tensor('7', TensorProto.UINT8, [], [0])

    out = helper.make_tensor_value_info('out', TensorProto.UINT8, [1, 1, 5, 5])

    node = onnx.helper.make_node(
        'QLinearConv',
        inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
        outputs=['out'],
        pads=[1, 1, 1, 1],
    )

    return ([node], [x], [out],
            [sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])


@onnx_test()
def qlinearconv_pad_0_test():
    # https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
    x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 5, 5])
    sc_x = helper.make_tensor('1', TensorProto.FLOAT, [], [0.09411764705882353])
    zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [0])

    wt = helper.make_tensor('3', TensorProto.UINT8, [1, 1, 3, 3],
                            [1, 1, 1, 1, 1, 1, 1, 1, 1])
    sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [], [1.0])
    zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [], [0])

    sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.6352941176470588])
    zero_pt_y = helper.make_tensor('7', TensorProto.INT8, [], [-128])

    out = helper.make_tensor_value_info('out', TensorProto.INT8, [1, 1, 3, 3])

    node = onnx.helper.make_node(
        'QLinearConv',
        inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
        outputs=['out'],
        pads=[0, 0, 0, 0],
    )

    return ([node], [x], [out],
            [sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])


@onnx_test()
def qlinearconv_scale_1D_test():
    # https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
    x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 5, 5])
    sc_x = helper.make_tensor('1', TensorProto.FLOAT, [], [0.09411764705882353])
    zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [0])

    wt = helper.make_tensor('3', TensorProto.UINT8, [2, 1, 3, 3],
                            [1, 1, 1, 1, 1, 1, 1, 1, 1,
                             2, 2, 2, 2, 2, 2, 2, 2, 2])
    sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [2], [1.0, 0.5])
    zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [2], [0, 0])

    sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.6352941176470588])
    zero_pt_y = helper.make_tensor('7', TensorProto.INT8, [], [-128])

    out = helper.make_tensor_value_info('out', TensorProto.INT8, [1, 2, 3, 3])

    node = onnx.helper.make_node(
        'QLinearConv',
        inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
        outputs=['out'],
        pads=[0, 0, 0, 0],
    )

    return ([node], [x], [out],
            [sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])


@onnx_test()
def quantizelinear_test():
    arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5])
    ...
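Each generator above returns a (nodes, inputs, outputs, initializers) tuple; the @onnx_test decorator defined elsewhere in gen_onnx.py is what turns that tuple into the .onnx test files added in this commit. A rough sketch of that assembly step using the standard onnx.helper API (an assumption about what the decorator does, not its actual implementation):

import onnx
from onnx import helper

def save_test_model(name, nodes, inputs, outputs, initializers):
    # Build a GraphProto from the tuple an @onnx_test function returns,
    # wrap it in a ModelProto, and serialize it next to the test sources.
    graph = helper.make_graph(nodes, name, inputs, outputs, initializer=initializers)
    model = helper.make_model(graph)
    onnx.save(model, name + '.onnx')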
test/onnx/onnx_test.cpp

@@ -4904,6 +4904,61 @@ TEST_CASE(qlinearadd_test)
     EXPECT(p.sort() == prog.sort());
 }

TEST_CASE(qlinearconv_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();

    auto x = mm->add_parameter("X", {migraphx::shape::uint8_type, {1, 1, 7, 7}});
    auto sc_x = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.00369204697}});
    auto z_pt_x = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {132}});

    auto w = mm->add_literal(
        migraphx::literal{migraphx::shape{migraphx::shape::uint8_type, {1, 1, 1, 1}}, {0}});
    auto sc_w = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.00172794575}});
    auto z_pt_w = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {255}});

    auto sc_y = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.00162681262}});
    auto z_pt_y = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {123}});

    auto scale_x_bcast = mm->add_instruction(
        migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 7, 7}}}), sc_x);
    auto z_pt_x_bcast = mm->add_instruction(
        migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 7, 7}}}), z_pt_x);
    auto fp_x =
        mm->add_instruction(migraphx::make_op("dequantizelinear"), x, scale_x_bcast, z_pt_x_bcast);

    auto scale_w_bcast = mm->add_instruction(
        migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1, 1}}}), sc_w);
    auto z_pt_w_bcast = mm->add_instruction(
        migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1, 1}}}), z_pt_w);
    auto fp_w =
        mm->add_instruction(migraphx::make_op("dequantizelinear"), w, scale_w_bcast, z_pt_w_bcast);

    auto fp_y = mm->add_instruction(migraphx::make_op("convolution"), fp_x, fp_w);

    auto scale_y_bcast = mm->add_instruction(
        migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 7, 7}}}), sc_y);
    auto z_pt_y_bcast = mm->add_instruction(
        migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 7, 7}}}), z_pt_y);
    auto y =
        mm->add_instruction(migraphx::make_op("quantizelinear"), fp_y, scale_y_bcast, z_pt_y_bcast);

    mm->add_return({y});

    auto prog = migraphx::parse_onnx("qlinearconv_test.onnx");
    EXPECT(p.sort() == prog.sort());
}

TEST_CASE(quantizelinear_test)
{
    migraphx::program p;
    ...
test/onnx/qlinearconv_pad_0_test.onnx (new file, mode 100644): file added
test/onnx/qlinearconv_pad_1_test.onnx (new file, mode 100644): file added
test/onnx/qlinearconv_scale_1D_test.onnx (new file, mode 100644): file added
test/onnx/qlinearconv_test.onnx (new file, mode 100644): file added
test/onnx/verify_onnx.cpp

@@ -1318,6 +1318,112 @@ TEST_CASE(qlinearadd_bcast_test)
     EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
 }

TEST_CASE(qlinearconv_test)
{
    // https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__QLinearConv.html
    migraphx::program p = migraphx::parse_onnx("qlinearconv_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape sx{migraphx::shape::uint8_type, {1, 1, 7, 7}};
    std::vector<uint8_t> x_data = {255, 174, 162, 25,  203, 168, 58,  15,  59,  237, 95,  129, 0,
                                   64,  56,  242, 153, 221, 168, 12,  166, 232, 178, 186, 195, 237,
                                   162, 237, 188, 39,  124, 77,  80,  102, 43,  127, 230, 21,  83,
                                   41,  40,  134, 255, 154, 92,  141, 42,  148, 247};

    migraphx::parameter_map pp;
    pp["X"] = migraphx::argument(sx, x_data.data());
    auto result = p.eval(pp).back();

    std::vector<uint8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<uint8_t> gold = {0,   81,  93,  230, 52,  87,  197, 240, 196, 18,  160, 126, 255,
                                 191, 199, 13,  102, 34,  87,  243, 89,  23,  77,  69,  60,  18,
                                 93,  18,  67,  216, 131, 178, 175, 153, 212, 128, 25,  234, 172,
                                 214, 215, 121, 0,   101, 163, 114, 213, 107, 8};

    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}

TEST_CASE(qlinearconv_pad_0_test)
{
    // https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
    migraphx::program p = migraphx::parse_onnx("qlinearconv_pad_0_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape sx{migraphx::shape::uint8_type, {1, 1, 5, 5}};
    std::vector<uint8_t> x_data = {0,   11,  21,  32,  42,  53,  64,  74,  85,  96,  106, 117, 128,
                                   138, 149, 159, 170, 181, 191, 202, 212, 223, 234, 244, 255};

    migraphx::parameter_map pp;
    pp["X"] = migraphx::argument(sx, x_data.data());
    auto result = p.eval(pp).back();

    std::vector<int8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    // (1, 1, 3, 3) output tensor
    std::vector<int8_t> gold = {-43, -29, -15, 28, 42, 56, 99, 113, 127};

    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}

TEST_CASE(qlinearconv_pad_1_test)
{
    // https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
    migraphx::program p = migraphx::parse_onnx("qlinearconv_pad_1_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape sx{migraphx::shape::uint8_type, {1, 1, 5, 5}};
    std::vector<uint8_t> x_data = {0,   11,  21,  32,  42,  53,  64,  74,  85,  96,  106, 117, 128,
                                   138, 149, 159, 170, 181, 191, 202, 212, 223, 234, 244, 255};

    migraphx::parameter_map pp;
    pp["X"] = migraphx::argument(sx, x_data.data());
    auto result = p.eval(pp).back();

    std::vector<uint8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    // (1, 1, 5, 5) output tensor
    std::vector<uint8_t> gold = {19,  33,  43,  52,  38,  52,  85,  99,  113, 80,  99,  156, 170,
                                 184, 128, 146, 227, 241, 255, 175, 113, 175, 184, 194, 132};

    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}

TEST_CASE(qlinearconv_scale_1D_test)
{
    // https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
    migraphx::program p = migraphx::parse_onnx("qlinearconv_scale_1D_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape sx{migraphx::shape::uint8_type, {1, 1, 5, 5}};
    std::vector<uint8_t> x_data = {0,   11,  21,  32,  42,  53,  64,  74,  85,  96,  106, 117, 128,
                                   138, 149, 159, 170, 181, 191, 202, 212, 223, 234, 244, 255};

    migraphx::parameter_map pp;
    pp["X"] = migraphx::argument(sx, x_data.data());
    auto result = p.eval(pp).back();

    std::vector<int8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    // (1, 2, 3, 3) output tensor
    std::vector<int8_t> gold = {-43, -29, -15, 28, 42, 56, 99, 113, 127,
                                -43, -29, -15, 28, 42, 56, 99, 113, 127};

    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}

TEST_CASE(resize_downsample_f_test)
{
    migraphx::program p = migraphx::parse_onnx("resize_downsample_f_test.onnx");
    ...
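The gold outputs above follow directly from the quantization parameters baked into the test models. For qlinearconv_pad_0_test (x scale 0.09411764705882353 with zero point 0, an all-ones 3x3 kernel with scale 1.0, output scale 0.6352941176470588 with zero point -128), the first output element can be checked by hand; the NumPy snippet below is a standalone sanity check, not part of the test suite:

import numpy as np

x_q = np.array([0, 11, 21, 32, 42, 53, 64, 74, 85, 96, 106, 117, 128,
                138, 149, 159, 170, 181, 191, 202, 212, 223, 234, 244, 255],
               dtype=np.float32).reshape(5, 5)
x_scale, y_scale, y_zp = 0.09411764705882353, 0.6352941176470588, -128

# top-left window of the valid (pads = 0) 3x3 all-ones convolution
acc = (x_q[0:3, 0:3] * x_scale).sum()                  # dequantize, then accumulate
y0 = int(np.clip(np.round(acc / y_scale) + y_zp, -128, 127))
print(y0)                                              # -43, matching gold[0]

The repeated nine values in the qlinearconv_scale_1D_test gold are also expected: the second output channel's weights (all 2s with per-channel scale 0.5) dequantize to the same kernel as the first channel's (all 1s with scale 1.0).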