Unverified commit a41cd5c0 authored by Zakor Gyula, committed by GitHub

Add QLinearAveragePool op (#2458)

parent 57f734a5
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_AMDMIGRAPHX_ONNX_POOLING_HPP
#define MIGRAPHX_GUARD_AMDMIGRAPHX_ONNX_POOLING_HPP
#include <migraphx/config.hpp>
#include <migraphx/onnx/onnx_parser.hpp>
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/instruction.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
value handle_pooling_values(const op_desc& opd,
onnx_parser::node_info info,
const shape& in_shape,
value values);
instruction_ref add_pooling_op(const op_desc& opd, onnx_parser::node_info info, instruction_ref l0);
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
@@ -22,14 +22,8 @@
* THE SOFTWARE.
*/
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/onnx/checks.hpp>
#include <migraphx/onnx/padding.hpp>
#include <migraphx/op/pad.hpp>
#include <migraphx/op/pooling.hpp>
#include <migraphx/onnx/pooling.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/make_op.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
@@ -39,76 +33,14 @@ struct parse_pooling : op_parser<parse_pooling>
{
std::vector<op_desc> operators() const
{
return {{"AveragePool", "average"},
{"GlobalAveragePool", "average"},
{"GlobalMaxPool", "max"},
{"MaxPool", "max"},
{"LpPool", "lpnorm"},
{"GlobalLpPool", "lpnorm"}};
}
value handle_values(const op_desc& opd,
onnx_parser::node_info info,
const shape& in_shape,
value values) const
{
auto kdims = in_shape.ndim() - 2;
if(starts_with(opd.onnx_name, "Global"))
{
// if spatial dimensions are dynamic use dyn_global flag
if(in_shape.dynamic() and std::any_of(in_shape.dyn_dims().cbegin() + 2,
in_shape.dyn_dims().cend(),
[](auto dd) { return not dd.is_fixed(); }))
{
values["dyn_global"] = true;
values["lengths"] = std::vector<size_t>();
}
else
{
// works with static and fixed dynamic shape
auto m_lens = in_shape.max_lens();
values["lengths"] = std::vector<size_t>(m_lens.begin() + 2, m_lens.end());
}
}
if(contains(info.attributes, "ceil_mode"))
{
values["ceil_mode"] = static_cast<bool>(info.attributes.at("ceil_mode").i());
}
if(contains(info.attributes, "strides"))
{
values["stride"].clear();
copy(info.attributes["strides"].ints(), std::back_inserter(values["stride"]));
check_attr_sizes(kdims, values["stride"].size(), "PARSE_POOLING: inconsistent strides");
}
if(contains(info.attributes, "kernel_shape"))
{
values["lengths"].clear();
copy(info.attributes["kernel_shape"].ints(), std::back_inserter(values["lengths"]));
check_attr_sizes(
kdims, values["lengths"].size(), "PARSE_POOLING: inconsistent lengths");
}
if(contains(info.attributes, "dilations"))
{
values["dilations"].clear();
copy(info.attributes["dilations"].ints(), std::back_inserter(values["dilations"]));
check_attr_sizes(
kdims, values["dilations"].size(), "PARSE_POOLING: inconsistent dilations");
}
// lp_order attribute
if(contains(info.attributes, "p"))
{
values["lp_order"] = info.attributes.at("p").i();
}
// ensure pads are used only when auto_pad is "NOTSET"
check_padding_mode(info, "POOLING");
return values;
return {
{"AveragePool", "average"},
{"GlobalAveragePool", "average"},
{"GlobalMaxPool", "max"},
{"MaxPool", "max"},
{"LpPool", "lpnorm"},
{"GlobalLpPool", "lpnorm"},
};
}
instruction_ref parse(const op_desc& opd,
@@ -116,148 +48,8 @@ struct parse_pooling : op_parser<parse_pooling>
onnx_parser::node_info info,
std::vector<instruction_ref> args) const
{
std::string mode = opd.op_name;
const std::unordered_map<std::string, op::pooling_mode> mode_map = {
{"max", op::pooling_mode::max},
{"average", op::pooling_mode::average},
{"lpnorm", op::pooling_mode::lpnorm}};
if(not contains(mode_map, mode))
{
MIGRAPHX_THROW(
"PARSE_POOLING: onnx pooling mode must be [\"max\", \"average\", \"lpnorm\"]");
}
operation op = make_op("pooling", {{"mode", mode_map.at(mode)}});
value values = op.to_value();
auto l0 = args[0];
auto in_shape = l0->get_shape();
assert(in_shape.ndim() > 2);
auto kdims = in_shape.ndim() - 2;
values = handle_values(opd, info, in_shape, values);
// count_include_pad attribute: when it is 1, we always use
// explicit padding
int count_include_pad = 0;
if(contains(info.attributes, "count_include_pad"))
{
if(in_shape.dynamic())
{
MIGRAPHX_THROW("PARSE_POOLING: count_include_pad attribute is not supported for "
"dynamic input shape");
}
count_include_pad = info.attributes.at("count_include_pad").i();
}
std::vector<int64_t> paddings;
float pad_val = ((mode == "max") ? std::numeric_limits<float>::lowest() : 0.0f);
if(contains(info.attributes, "pads"))
{
values["padding"].clear();
copy(info.attributes["pads"].ints(), std::back_inserter(paddings));
check_attr_sizes(
kdims, paddings.size() / 2, "PARSE_POOLING: inconsistent explicit paddings");
}
if(paddings.size() != 2 * kdims)
{
paddings.resize(kdims * 2);
std::fill_n(paddings.begin(), 2 * kdims, 0);
}
if(values["padding"].size() != kdims)
{
values["padding"].resize(kdims);
std::fill_n(values["padding"].begin(), kdims, 0);
}
if(values["stride"].size() != kdims)
{
values["stride"].resize(kdims);
std::fill_n(values["stride"].begin(), kdims, 1);
}
if(values["dilations"].size() != kdims)
{
values["dilations"].resize(kdims);
std::fill_n(values["dilations"].begin(), kdims, 1);
}
// used to calculate the supposed output shape
std::vector<int64_t> orig_padding = paddings;
if(contains(info.attributes, "auto_pad") and
to_upper(info.attributes["auto_pad"].s()) != "NOTSET")
{
auto auto_pad = to_upper(info.attributes["auto_pad"].s());
// don't use the given padding sizes, if any
// values["padding"].clear();
if(in_shape.dynamic())
{
// set padding_mode to trigger auto padding at runtime
bool is_same_upper = (auto_pad.find("SAME_UPPER") != std::string::npos);
values["padding_mode"] = is_same_upper ? to_value(op::padding_mode_t::same_upper)
: to_value(op::padding_mode_t::same_lower);
}
else
{
// Calculate auto padding
cal_auto_padding_size(info,
values,
values["lengths"].to_vector<std::size_t>(),
values["dilations"].to_vector<std::size_t>(),
in_shape.lens(),
paddings);
values["padding"] = paddings;
// default padding_mode indicates that padding sizes are not calculated dynamically
values["padding_mode"] = migraphx::op::padding_mode_t::default_;
}
}
std::vector<int64_t> slice_start;
std::vector<int64_t> slice_end;
tune_padding_size(values, paddings, count_include_pad, slice_start);
if(not slice_start.empty())
{
if(in_shape.dynamic())
{
MIGRAPHX_THROW(
"PARSE_POOLING: asymmetric padding not supported for dynamic input shape");
}
// calculate expected output shape
orig_padding.insert(orig_padding.begin() + kdims, 2, 0);
orig_padding.insert(orig_padding.begin(), 2, 0);
op::pad pad{orig_padding, 0.0f};
shape padded_shape = pad.compute_shape({l0->get_shape()});
// make an op just to get its output shape
auto out_lens = make_op("pooling", values).compute_shape({padded_shape}).lens();
// compute slice_end information
slice_end.resize(slice_start.size());
std::transform(out_lens.begin() + 2,
out_lens.end(),
slice_start.begin(),
slice_end.begin(),
[](auto i, auto j) { return i + j; });
}
values["padding"] = std::vector<size_t>(paddings.begin(), paddings.end());
check_asym_padding(info, l0, paddings, values, count_include_pad, pad_val);
op.from_value(values);
auto l1 = info.add_instruction(op, l0);
if(not slice_start.empty())
{
std::vector<int64_t> axes(kdims);
std::iota(axes.begin(), axes.end(), 2);
l1 = info.add_instruction(
make_op("slice", {{"axes", axes}, {"starts", slice_start}, {"ends", slice_end}}),
l1);
}
return l1;
}
return add_pooling_op(opd, std::move(info), args[0]);
};
};
} // namespace onnx
......
@@ -23,6 +23,7 @@
*/
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/onnx/pooling.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/op/pooling.hpp>
#include <migraphx/make_op.hpp>
@@ -36,90 +37,56 @@ namespace onnx {
/*
*********************************************************************************
* Reference: see QLinearGlobalAveragePool in *
* Reference: see QLinearAveragePool and QLinearGlobalAveragePool in *
* github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md *
*********************************************************************************
*/
QLinearGlobalAveragePool consumes an input tensor X and applies
average pooling across the values in the same channel. This is
equivalent to AveragePool with kernel size equal to the spatial
dimension of input tensor. Input is of type uint8_t or int8_t.
Version
This version of the operator has been available since version 1 of the 'com.microsoft' operator set.
Attributes
channels_last : int
Inputs
X : T
Input data tensor from the previous operator; According to channels_last, dimensions for image case
are (N x C x H x W), or (N x H x W x C) where N is the batch size, C is the number of channels, and
H and W are the height and the width of the data. For non image case, the dimensions are in the form
of (N x C x D1 x D2 ... Dn), or (N x D1 x D2 ... Dn x C) where N is the batch size.
x_scale : tensor(float)
Scale of quantized input 'X'. It must be a scalar.
x_zero_point : T
Zero point tensor for input 'X'. It must be a scalar.
y_scale : tensor(float)
Scale of quantized output 'Y'. It must be a scalar.
y_zero_point : T
Zero point tensor for output 'Y'. It must be a scalar.
Outputs
Y : T
Output data tensor from pooling across the input tensor. The output tensor has the same rank as the
input; the N and C dimensions keep their values, while the other dimensions are all 1.
Type Constraints
T : tensor(uint8), tensor(int8)
Constrain input and output types to signed/unsigned int8 tensors.
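
A minimal numpy sketch of these semantics, assuming int8 tensors, scalar
scales/zero points, and a stride-1, unpadded 2D kernel (the parser itself
reuses add_pooling_op and the broadcast quantize/dequantize helpers instead):

    import numpy as np

    def qlinear_average_pool_2d(x, x_scale, x_zp, y_scale, y_zp, k):
        # dequantize: int8 -> float
        xf = (x.astype(np.float32) - x_zp) * x_scale
        n, c, h, w = xf.shape
        y = np.zeros((n, c, h - k + 1, w - k + 1), dtype=np.float32)
        for i in range(h - k + 1):
            for j in range(w - k + 1):
                # average each k x k window
                y[:, :, i, j] = xf[:, :, i:i + k, j:j + k].mean(axis=(2, 3))
        # quantize: rescale, shift by the output zero point, round, saturate
        q = np.rint(y / y_scale) + y_zp
        return np.clip(q, -128, 127).astype(np.int8)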
*/
struct parse_qlinearglobalaveragepool : op_parser<parse_qlinearglobalaveragepool>
struct parse_qlinearpooling : op_parser<parse_qlinearpooling>
{
std::vector<op_desc> operators() const { return {{"QLinearGlobalAveragePool"}}; }
// basic type checking for QLinearGlobalAveragePool Operator
void check_inputs(const std::vector<instruction_ref>& args) const
std::vector<op_desc> operators() const
{
if(args.size() < 5)
MIGRAPHX_THROW("QLINEARGLOBALAVERAGEPOOL: missing inputs");
return {{"QLinearGlobalAveragePool", "average"}, {"QLinearAveragePool", "average"}};
}
const auto& in_x = args[0];
const auto& zero_pt_x = args[2];
const auto& zero_pt_y = args[4];
void check_inputs(const op_desc& opd, const std::vector<instruction_ref>& args) const
{
const auto& in_x = args[0];
const auto onnx_name = opd.onnx_name;
if(in_x->get_shape().ndim() <= 2)
MIGRAPHX_THROW("QLINEARGLOBALAVERAGEPOOL: input dimensions too small");
MIGRAPHX_THROW(onnx_name + ": input dimensions too small");
auto type_x = in_x->get_shape().type();
if(type_x != migraphx::shape::int8_type and type_x != migraphx::shape::uint8_type)
MIGRAPHX_THROW("QLINEARGLOBALAVERAGEPOOL: unsupported input type");
MIGRAPHX_THROW(onnx_name + ": unsupported input type");
const auto& zero_pt_x = args[2];
if(type_x != zero_pt_x->get_shape().type())
MIGRAPHX_THROW("QLINEARGLOBALAVERAGEPOOL: mismatched type: input zero point");
if(type_x != zero_pt_y->get_shape().type())
MIGRAPHX_THROW("QLINEARGLOBALAVERAGEPOOL: mismatched type: output zero point");
MIGRAPHX_THROW(onnx_name + ": mismatched type: input zero point");
if(args.size() == 5)
{
const auto& zero_pt_y = args[4];
if(type_x != zero_pt_y->get_shape().type())
MIGRAPHX_THROW(onnx_name + ": mismatched type: output zero point");
}
}
instruction_ref parse(const op_desc& /* opd */,
instruction_ref parse(const op_desc& opd,
const onnx_parser& parser,
const onnx_parser::node_info& info,
const std::vector<instruction_ref>& args) const
{
int channels_last =
parser.parse_value(info.attributes.at("channels_last")).template at<int>();
if(channels_last != 0)
MIGRAPHX_THROW(
"QLINEARGLOBALAVERAGEPOOL: channels_last (N x D1..Dn x C) is not supported");
if(contains(info.attributes, "channel_last"))
{
int channels_last =
parser.parse_value(info.attributes.at("channels_last")).template at<int>();
if(channels_last != 0)
MIGRAPHX_THROW(opd.onnx_name + ": channels_last (N x D1..Dn x C) is not supported");
}
check_inputs(args);
check_inputs(opd, args);
// Input: X
@@ -128,21 +95,18 @@ struct parse_qlinearglobalaveragepool : op_parser<parse_qlinearglobalaveragepool
const auto& zero_pt_x = args[2];
auto dquant_x = bcast_qdq_instr("dequantizelinear", in_x, scale_x, zero_pt_x, info);
// Output Y = globalaveragepool(X)
auto op = migraphx::op::pooling{migraphx::op::pooling_mode::average};
auto lens = in_x->get_shape().lens();
std::vector<size_t> lengths(lens.begin() + 2, lens.end());
op.lengths = lengths;
op.padding = std::vector<size_t>(lens.size());
auto out_y = info.add_instruction(op, dquant_x);
// Output Y = pooling_op(X)
const auto& scale_y = args[3];
const auto& zero_pt_y = args[4];
auto out_y = add_pooling_op(opd, info, dquant_x);
auto out_quant_y = bcast_qdq_instr("quantizelinear", out_y, scale_y, zero_pt_y, info);
const auto& in_scale_y = args[3];
// the zero point for Y is supplied as the optional fifth argument
if(args.size() == 5)
return (bcast_qdq_instr("quantizelinear", out_y, in_scale_y, args[4], info));
return out_quant_y;
// if no zero point is given, just broadcast the scale
auto bcast_scale_y = bcast_scalar_instr(out_y->get_shape(), in_scale_y, info);
return (info.add_instruction(migraphx::make_op("quantizelinear"), out_y, bcast_scale_y));
}
};
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/onnx/pooling.hpp>
#include <migraphx/onnx/checks.hpp>
#include <migraphx/onnx/padding.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/pooling.hpp>
#include <migraphx/op/pad.hpp>
#include <migraphx/ranges.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
value handle_pooling_values(const op_desc& opd,
onnx_parser::node_info info,
const shape& in_shape,
value values)
{
auto kdims = in_shape.ndim() - 2;
if(starts_with(opd.onnx_name, "Global") or starts_with(opd.onnx_name, "QLinearGlobal"))
{
// if spatial dimensions are dynamic use dyn_global flag
if(in_shape.dynamic() and std::any_of(in_shape.dyn_dims().cbegin() + 2,
in_shape.dyn_dims().cend(),
[](auto dd) { return not dd.is_fixed(); }))
{
values["dyn_global"] = true;
values["lengths"] = std::vector<size_t>();
}
else
{
// works with static and fixed dynamic shape
auto m_lens = in_shape.max_lens();
values["lengths"] = std::vector<size_t>(m_lens.begin() + 2, m_lens.end());
}
}
if(contains(info.attributes, "ceil_mode"))
{
values["ceil_mode"] = static_cast<bool>(info.attributes.at("ceil_mode").i());
}
if(contains(info.attributes, "strides"))
{
values["stride"].clear();
copy(info.attributes["strides"].ints(), std::back_inserter(values["stride"]));
check_attr_sizes(kdims, values["stride"].size(), "PARSE_POOLING: inconsistent strides");
}
if(contains(info.attributes, "kernel_shape"))
{
values["lengths"].clear();
copy(info.attributes["kernel_shape"].ints(), std::back_inserter(values["lengths"]));
check_attr_sizes(kdims, values["lengths"].size(), "PARSE_POOLING: inconsistent lengths");
}
if(contains(info.attributes, "dilations"))
{
values["dilations"].clear();
copy(info.attributes["dilations"].ints(), std::back_inserter(values["dilations"]));
check_attr_sizes(
kdims, values["dilations"].size(), "PARSE_POOLING: inconsistent dilations");
}
// lp_order attribute
if(contains(info.attributes, "p"))
{
values["lp_order"] = info.attributes.at("p").i();
}
// ensure pads are used only when auto_pad is "NOTSET"
check_padding_mode(info, "POOLING");
return values;
}
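// For reference, the ONNX-attribute to pooling-operator-field mapping above:
//   kernel_shape -> lengths
//   strides      -> stride
//   dilations    -> dilations
//   ceil_mode    -> ceil_mode
//   p            -> lp_order (LpPool/GlobalLpPool only)
// The Global*/QLinearGlobal* variants ignore kernel_shape: lengths is set to
// the full spatial extent, or dyn_global is set when the spatial dimensions
// are dynamic.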
instruction_ref add_pooling_op(const op_desc& opd, onnx_parser::node_info info, instruction_ref l0)
{
std::string mode = opd.op_name;
const std::unordered_map<std::string, op::pooling_mode> mode_map = {
{"max", op::pooling_mode::max},
{"average", op::pooling_mode::average},
{"lpnorm", op::pooling_mode::lpnorm}};
if(not contains(mode_map, mode))
{
MIGRAPHX_THROW(
"PARSE_POOLING: onnx pooling mode must be [\"max\", \"average\", \"lpnorm\"]");
}
operation op = make_op("pooling", {{"mode", mode_map.at(mode)}});
value values = op.to_value();
auto in_shape = l0->get_shape();
assert(in_shape.ndim() > 2);
auto kdims = in_shape.ndim() - 2;
values = handle_pooling_values(opd, info, in_shape, values);
// count_include_pad attribute: when it is 1, we always use
// explicit padding
int count_include_pad = 0;
if(contains(info.attributes, "count_include_pad"))
{
if(in_shape.dynamic())
{
MIGRAPHX_THROW("PARSE_POOLING: count_include_pad attribute is not supported for "
"dynamic input shape");
}
count_include_pad = info.attributes.at("count_include_pad").i();
}
std::vector<int64_t> paddings;
float pad_val = ((mode == "max") ? std::numeric_limits<float>::lowest() : 0.0f);
if(contains(info.attributes, "pads"))
{
values["padding"].clear();
copy(info.attributes["pads"].ints(), std::back_inserter(paddings));
check_attr_sizes(
kdims, paddings.size() / 2, "PARSE_POOLING: inconsistent explicit paddings");
}
if(paddings.size() != 2 * kdims)
{
paddings.resize(kdims * 2);
std::fill_n(paddings.begin(), 2 * kdims, 0);
}
if(values["padding"].size() != kdims)
{
values["padding"].resize(kdims);
std::fill_n(values["padding"].begin(), kdims, 0);
}
if(values["stride"].size() != kdims)
{
values["stride"].resize(kdims);
std::fill_n(values["stride"].begin(), kdims, 1);
}
if(values["dilations"].size() != kdims)
{
values["dilations"].resize(kdims);
std::fill_n(values["dilations"].begin(), kdims, 1);
}
// used to calculate the supposed output shape
std::vector<int64_t> orig_padding = paddings;
if(contains(info.attributes, "auto_pad") and
to_upper(info.attributes["auto_pad"].s()) != "NOTSET")
{
auto auto_pad = to_upper(info.attributes["auto_pad"].s());
// don't use the given padding sizes, if any
// values["padding"].clear();
if(in_shape.dynamic())
{
// set padding_mode to trigger auto padding at runtime
bool is_same_upper = (auto_pad.find("SAME_UPPER") != std::string::npos);
values["padding_mode"] = is_same_upper ? to_value(op::padding_mode_t::same_upper)
: to_value(op::padding_mode_t::same_lower);
}
else
{
// Calculate auto padding using the parsed kernel lengths and dilations
cal_auto_padding_size(info,
values,
values["lengths"].to_vector<std::size_t>(),
values["dilations"].to_vector<std::size_t>(),
in_shape.lens(),
paddings);
values["padding"] = paddings;
// default padding_mode indicates that padding sizes are not calculated dynamically
values["padding_mode"] = migraphx::op::padding_mode_t::default_;
}
}
std::vector<int64_t> slice_start;
std::vector<int64_t> slice_end;
tune_padding_size(values, paddings, count_include_pad, slice_start);
if(not slice_start.empty())
{
if(in_shape.dynamic())
{
MIGRAPHX_THROW(
"PARSE_POOLING: asymmetric padding not supported for dynamic input shape");
}
// calculate expected output shape
orig_padding.insert(orig_padding.begin() + kdims, 2, 0);
orig_padding.insert(orig_padding.begin(), 2, 0);
op::pad pad{orig_padding, 0.0f};
shape padded_shape = pad.compute_shape({l0->get_shape()});
// make an op just to get its output shape
auto out_lens = make_op("pooling", values).compute_shape({padded_shape}).lens();
// compute slice_end information
slice_end.resize(slice_start.size());
std::transform(out_lens.begin() + 2,
out_lens.end(),
slice_start.begin(),
slice_end.begin(),
[](auto i, auto j) { return i + j; });
}
values["padding"] = std::vector<size_t>(paddings.begin(), paddings.end());
check_asym_padding(info, l0, paddings, values, count_include_pad, pad_val);
op.from_value(values);
auto l1 = info.add_instruction(op, l0);
if(not slice_start.empty())
{
std::vector<int64_t> axes(kdims);
std::iota(axes.begin(), axes.end(), 2);
l1 = info.add_instruction(
make_op("slice", {{"axes", axes}, {"starts", slice_start}, {"ends", slice_end}}), l1);
}
return l1;
}
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
@@ -5994,6 +5994,263 @@ def qlinearadd_bcast_test():
[sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
@onnx_test()
def qlinearaveragepool_1d_test():
x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 3, 32])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.05])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [],
[0])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 3, 31])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.05])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [],
[16])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[2],
)
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
@onnx_test()
def qlinearaveragepool_2d_test():
x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 3, 4, 4])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.05])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [],
[0])
y = helper.make_tensor_value_info('y', TensorProto.INT8, [1, 3, 3, 3])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.015])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [],
[16])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[2, 2],
)
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
@onnx_test()
def qlinearaveragepool_2d_ceil_test():
x = helper.make_tensor_value_info('x', TensorProto.UINT8, [1, 1, 4, 4])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.5])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.UINT8, [],
[0])
y = helper.make_tensor_value_info('y', TensorProto.UINT8, [1, 1, 2, 2])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.05])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.UINT8, [],
[0])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[3, 3],
strides=[2, 2],
ceil_mode=True,
)
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
@onnx_test()
def qlinearaveragepool_2d_dilations_test():
x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 1, 4, 4])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.5])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [],
[0])
y = helper.make_tensor_value_info('y', TensorProto.INT8, [1, 1, 2, 2])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.25])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [],
[84])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[2, 2],
strides=[1, 1],
dilations=[2, 2],
ceil_mode=True,
)
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
@onnx_test()
def qlinearaveragepool_2d_pads_count_include_pad_test():
x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 3, 4, 4])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.05])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [],
[0])
y = helper.make_tensor_value_info('y', TensorProto.INT8, [1, 3, 6, 6])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.01])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [],
[32])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[3, 3],
pads=[2, 2, 2, 2],
count_include_pad=1,
)
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
@onnx_test()
def qlinearaveragepool_2d_same_lower_test():
x = helper.make_tensor_value_info('x', TensorProto.UINT8, [1, 3, 4, 4])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.5])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.UINT8, [],
[0])
y = helper.make_tensor_value_info('y', TensorProto.UINT8, [1, 3, 4, 4])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.5])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.UINT8, [],
[0])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[2, 2],
auto_pad="SAME_LOWER",
)
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
@onnx_test()
def qlinearaveragepool_2d_same_upper_test():
x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 3, 4, 4])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.5])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [],
[32])
y = helper.make_tensor_value_info('y', TensorProto.INT8, [1, 3, 4, 4])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.25])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [],
[0])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[2, 2],
auto_pad="SAME_UPPER",
)
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
@onnx_test()
def qlinearaveragepool_2d_strides_test():
x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 3, 8, 8])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.05])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [],
[0])
y = helper.make_tensor_value_info('y', TensorProto.INT8, [1, 3, 2, 2])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.05])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [],
[8])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[5, 5],
strides=[2, 2],
)
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
@onnx_test()
def qlinearaveragepool_3d_test():
x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 3, 3, 3, 3])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.05])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [],
[0])
y = helper.make_tensor_value_info('y', TensorProto.INT8, [1, 3, 2, 2, 2])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.02])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [],
[0])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[2, 2, 2],
)
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
@onnx_test()
def qlinearaveragepool_notset_test():
x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 1, 5, 5])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.5])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [],
[0])
y = helper.make_tensor_value_info('y', TensorProto.INT8, [1, 1, 1, 1])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.5])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [],
[10])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[6, 6],
strides=[2, 2],
pads=[0, 0, 1, 1],
channels_last=0,
auto_pad='NOTSET')
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
@onnx_test()
def qlinearaveragepool_nt_cip_test():
x = helper.make_tensor_value_info('x', TensorProto.UINT8, [1, 1, 5, 5])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.5])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.UINT8, [],
[0])
y = helper.make_tensor_value_info('y', TensorProto.UINT8, [1, 1, 1, 1])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.5])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.UINT8, [],
[10])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[6, 6],
strides=[2, 2],
pads=[0, 0, 1, 1],
channels_last=0,
auto_pad='NOTSET',
count_include_pad=1)
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
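
# Hand check for the two NOTSET variants above (not part of the generator):
# x holds 0..24, and with x_scale=0.5, x_zero_point=0 the dequantized input
# is 0.0..12.0 (sum 150.0). pads=[0, 0, 1, 1] grow the 5x5 input to 6x6, so
# the 6x6 kernel with strides=[2, 2] produces a single output window:
#   count_include_pad=0: 150 / 25 = 6.0     -> 6.0 / 0.5 + 10 = 22
#   count_include_pad=1: 150 / 36 ~= 4.167  -> round(4.167 / 0.5) + 10 = 18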
@onnx_test()
def qlinearconv_test():
# https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__QLinearConv.html
......
@@ -5595,6 +5595,54 @@ TEST_CASE(qlinearadd_test)
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(qlinearaveragepool_notset_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto sc_x = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto z_pt_x = mm->add_literal(migraphx::literal{migraphx::shape::int8_type, {0}});
auto sc_y = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto z_pt_y = mm->add_literal(migraphx::literal{migraphx::shape::int8_type, {10}});
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::int8_type, {1, 1, 5, 5}});
auto scale_x_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 5, 5}}}), sc_x);
auto z_pt_x_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 5, 5}}}), z_pt_x);
auto fp_x =
mm->add_instruction(migraphx::make_op("dequantizelinear"), x, scale_x_bcast, z_pt_x_bcast);
auto fp_y =
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {2, 2, 2, 2}},
{"stride", {2, 2}},
{"lengths", {6, 6}}}),
fp_x);
fp_y = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {1, 1}}, {"ends", {2, 2}}}), fp_y);
auto scale_y_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1, 1}}}), sc_y);
auto z_pt_y_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1, 1}}}), z_pt_y);
auto y =
mm->add_instruction(migraphx::make_op("quantizelinear"), fp_y, scale_y_bcast, z_pt_y_bcast);
mm->add_return({y});
auto prog = migraphx::parse_onnx("qlinearaveragepool_notset_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(qlinearconv_test)
{
migraphx::program p;
......
@@ -1686,6 +1686,252 @@ TEST_CASE(qlinearadd_bcast_test)
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearaveragepool_1d_test)
{
auto p = migraphx::parse_onnx("qlinearaveragepool_1d_test.onnx");
p.compile(migraphx::make_target("ref"));
std::vector<int8_t> data_x = {
-31, 51, 125, 30, -17, -125, 121, -19, -13, 52, 18, -70, 97, 15, 56, 42,
-65, -26, 40, -109, -70, 83, 110, -94, 34, 70, 5, -23, -60, -68, 19, 48,
-113, 3, -44, 20, -99, -103, -49, -38, 122, 75, 38, -7, -65, -56, 96, 99,
50, -27, -114, 49, -65, 105, -3, 54, 8, 38, -81, -46, -86, -46, -104, 36,
22, -51, 48, 59, -116, 6, 93, 16, -111, 98, 51, -87, -111, -74, -39, 7,
107, 115, 59, 60, -66, -14, -106, -23, 119, -122, -51, -100, 26, 125, 45, 90};
migraphx::shape s_x{migraphx::shape::int8_type, {1, 3, 32}};
migraphx::parameter_map pp;
pp["x"] = migraphx::argument(s_x, data_x.data());
auto result = p.eval(pp).back();
std::vector<int8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<int8_t> gold = {
26, 104, 94, 22, -55, 14, 67, 0, 36, 51, -10, 29, 72, 52, 65, 5,
-30, 23, -19, -74, 23, 112, 24, -14, 68, 54, 7, -26, -48, -8, 50, -39,
-4, 4, -24, -85, -60, -28, 58, 114, 72, 31, -20, -44, 36, 114, 90, 28,
-54, -16, 8, 36, 67, 42, 47, 39, -6, -48, -50, -50, -59, -18, 2, 15,
70, -13, -39, 66, 71, -32, 9, 90, -2, -83, -76, -40, 0, 73, 127, 103,
75, 13, -24, -44, -48, 64, 15, -70, -60, -21, 92, 101, 84};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearaveragepool_2d_test)
{
auto p = migraphx::parse_onnx("qlinearaveragepool_2d_test.onnx");
p.compile(migraphx::make_target("ref"));
std::vector<int8_t> data_x = {84, -73, 117, -2, -97, 72, 67, 27, 1, -44, 110, 51,
9, 7, 58, 113, -34, 34, 124, -20, 6, 66, 68, 98,
31, -84, 25, 101, -69, -100, -68, 116, 33, -121, 78, 49,
102, -86, 65, 69, -87, -89, 16, -125, 51, -54, -86, 79};
migraphx::shape s_x{migraphx::shape::int8_type, {1, 3, 4, 4}};
migraphx::parameter_map pp;
pp["x"] = migraphx::argument(s_x, data_x.data());
auto result = p.eval(pp).back();
std::vector<int8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<int8_t> gold = {4, 127, 127, -41, 127, 127, -6, 125, 127,
76, 127, 127, 32, 78, 127, -128, -128, 127,
-44, -37, 127, -117, -62, 37, -128, -128, -81};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearaveragepool_2d_ceil_test)
{
auto p = migraphx::parse_onnx("qlinearaveragepool_2d_ceil_test.onnx");
p.compile(migraphx::make_target("ref"));
std::vector<uint8_t> data_x = {2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32};
migraphx::shape s_x{migraphx::shape::uint8_type, {1, 1, 4, 4}};
migraphx::parameter_map pp;
pp["x"] = migraphx::argument(s_x, data_x.data());
auto result = p.eval(pp).back();
std::vector<uint8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<uint8_t> gold = {120, 150, 240, 255};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearaveragepool_2d_dilations_test)
{
auto p = migraphx::parse_onnx("qlinearaveragepool_2d_dilations_test.onnx");
p.compile(migraphx::make_target("ref"));
std::vector<int8_t> data_x = {2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32};
migraphx::shape s_x{migraphx::shape::int8_type, {1, 1, 4, 4}};
migraphx::parameter_map pp;
pp["x"] = migraphx::argument(s_x, data_x.data());
auto result = p.eval(pp).back();
std::vector<int8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<int8_t> gold = {108, 112, 124, 127};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearaveragepool_2d_pads_count_include_pad_test)
{
auto p = migraphx::parse_onnx("qlinearaveragepool_2d_pads_count_include_pad_test.onnx");
p.compile(migraphx::make_target("ref"));
std::vector<int8_t> data_x = {-30, 50, 91, -87, -21, -113, -16, 6, -128, 104, 82, -126,
54, 41, -71, 62, -11, -111, 13, 104, -43, -48, 30, 85,
-62, -33, -27, -114, 32, -17, 30, -26, -18, 15, 17, 100,
-122, 115, 84, -34, -86, 82, 102, -117, -91, -105, 112, 91};
migraphx::shape s_x{migraphx::shape::int8_type, {1, 3, 4, 4}};
migraphx::parameter_map pp;
pp["x"] = migraphx::argument(s_x, data_x.data());
auto result = p.eval(pp).back();
std::vector<int8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<int8_t> gold = {
15, 43, 94, 62, 34, -16, 4, -31, 10, -6, 29, -13, -67, -45, 43, 27, 4, -83,
-21, -3, -6, 15, -3, 0, -9, 71, 78, 83, 3, -4, 62, 85, 45, 50, 27, 66,
26, -36, -29, 35, 97, 90, 2, -86, -62, 73, 127, 127, -32, -128, -128, -24, 83, 74,
-9, -63, -45, -35, 20, 1, 15, -12, -11, -72, -44, -46, 50, 40, 57, 25, 34, 18,
22, 30, 40, 105, 97, 88, -46, 26, 83, 127, 125, 69, -94, 24, 127, 127, 116, 4,
-128, -83, 83, 127, 127, -1, -66, -79, 40, 124, 127, 18, -19, -77, -15, 86, 127, 83};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearaveragepool_2d_same_lower_test)
{
auto p = migraphx::parse_onnx("qlinearaveragepool_2d_same_lower_test.onnx");
p.compile(migraphx::make_target("ref"));
std::vector<uint8_t> data_x = {195, 102, 250, 61, 222, 6, 243, 218, 230, 105, 36, 116,
194, 31, 113, 85, 126, 204, 80, 38, 115, 167, 221, 67,
69, 140, 11, 209, 136, 120, 39, 96, 29, 5, 167, 40,
58, 51, 157, 179, 244, 149, 76, 243, 126, 144, 192, 199};
migraphx::shape s_x{migraphx::shape::uint8_type, {1, 3, 4, 4}};
migraphx::parameter_map pp;
pp["x"] = migraphx::argument(s_x, data_x.data());
auto result = p.eval(pp).back();
std::vector<uint8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<uint8_t> gold = {195, 148, 176, 156, 208, 131, 150, 193, 226, 141, 98, 153,
212, 140, 71, 88, 126, 165, 142, 59, 120, 153, 168, 102,
92, 123, 135, 127, 102, 116, 78, 89, 29, 17, 86, 104,
44, 36, 95, 136, 151, 126, 108, 164, 185, 166, 140, 178};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearaveragepool_2d_same_upper_test)
{
auto p = migraphx::parse_onnx("qlinearaveragepool_2d_same_upper_test.onnx");
p.compile(migraphx::make_target("ref"));
std::vector<int8_t> data_x = {-61, 102, -6, 61, -34, 6, -13, -38, -26, 105, 36, 116,
-62, 31, 113, 85, 126, -52, 80, 38, 115, -89, -35, 67,
69, -116, 11, -47, -120, 120, 39, 96, 29, 5, -89, 40,
58, 51, -99, -77, -12, -107, 76, -13, 126, -112, -64, -57};
migraphx::shape s_x{migraphx::shape::int8_type, {1, 3, 4, 4}};
migraphx::parameter_map pp;
pp["x"] = migraphx::argument(s_x, data_x.data());
auto result = p.eval(pp).back();
std::vector<int8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<int8_t> gold = {
-58, -20, -62, -41, -38, 3, -14, 14, -40, 78, 111, 127, -95, 80, 127, 106,
-14, -112, 11, 41, -74, -128, -66, -44, -88, -37, -14, -15, -64, 95, 71, 127,
8, -128, -128, -101, -69, -104, -120, -128, -116, -128, -93, -128, -50, -128, -128, -128};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearaveragepool_2d_strides_test)
{
auto p = migraphx::parse_onnx("qlinearaveragepool_2d_strides_test.onnx");
p.compile(migraphx::make_target("ref"));
std::vector<int8_t> data_x = {
84, -73, 117, -2, -97, 72, 67, 27, 1, -44, 110, 51, 9, 7, 58, 113,
-34, 34, 124, -20, 6, 66, 68, 98, 31, -84, 25, 101, -69, -100, -68, 116,
33, -121, 78, 49, 102, -86, 65, 69, -87, -89, 16, -125, 51, -54, -86, 79,
-112, -37, -6, 74, 118, -75, -41, 52, 101, -22, -28, -92, -59, -128, 32, 78,
-20, 121, 11, -107, -92, -31, 81, 117, -55, -3, 80, 119, 126, -98, -11, 52,
-4, -66, 37, -57, -16, -33, -12, 100, 55, 2, 27, 62, -15, 64, -74, -21,
-123, 22, -45, 12, 30, 24, 20, 120, -36, -102, -75, -39, -76, 55, 74, -120,
103, 67, -80, -89, -112, 36, 69, 98, 110, -82, 60, 119, 98, 88, 5, 42,
-88, -86, -58, -33, 93, 80, -57, -56, 87, 7, -4, 114, -73, -91, -12, -123,
96, -99, -31, -99, 85, 34, -126, 106, 88, 126, -60, 14, 75, -117, -15, 6,
55, -14, 117, -87, -75, -50, -85, 54, 70, 125, 74, -100, 25, -112, 74, -66,
-116, -102, 1, -75, -107, 83, -120, -66, 57, 29, 62, -45, -103, -56, 90, -53};
migraphx::shape s_x{migraphx::shape::int8_type, {1, 3, 8, 8}};
migraphx::parameter_map pp;
pp["x"] = migraphx::argument(s_x, data_x.data());
auto result = p.eval(pp).back();
std::vector<int8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<int8_t> gold = {24, 37, 10, 17, 12, 12, -13, -1, 14, -10, 7, -19};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearaveragepool_3d_test)
{
auto p = migraphx::parse_onnx("qlinearaveragepool_3d_test.onnx");
p.compile(migraphx::make_target("ref"));
std::vector<int8_t> data_x = {
-61, 102, -6, 61, -34, 6, -13, -38, -26, 105, 36, 116, -62, 31, 113, 85, 126,
-52, 80, 38, 115, -89, -35, 67, 69, -116, 11, -47, -120, 120, 39, 96, 29, 5,
-89, 40, 58, 51, -99, -77, -12, -107, 76, -13, 126, -112, -64, -57, 99, -54, 27,
99, 126, -46, -7, 109, 17, 77, 94, -92, 84, -92, 48, 71, 45, -102, 95, 118,
24, 13, -70, 33, 35, -60, 102, 81, 34, 108, -79, 14, -42};
migraphx::shape s_x{migraphx::shape::int8_type, {1, 3, 3, 3, 3}};
migraphx::parameter_map pp;
pp["x"] = migraphx::argument(s_x, data_x.data());
auto result = p.eval(pp).back();
std::vector<int8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<int8_t> gold = {56, 114, 49, 39, 32, 127, 3, 45, -4, -13, 8, 22,
-35, -98, 76, 15, 127, 67, 100, 20, 127, 84, 64, 68};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearaveragepool_notset_test)
{
auto p = migraphx::parse_onnx("qlinearaveragepool_notset_test.onnx");
p.compile(migraphx::make_target("ref"));
std::vector<int8_t> data_x = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24};
migraphx::shape s_x{migraphx::shape::int8_type, {1, 1, 5, 5}};
migraphx::parameter_map pp;
pp["x"] = migraphx::argument(s_x, data_x.data());
auto result = p.eval(pp).back();
std::vector<int8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<int8_t> gold = {22};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearaveragepool_nt_cip_test)
{
// github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md#com.microsoft.QLinearAveragePool
auto p = migraphx::parse_onnx("qlinearaveragepool_nt_cip_test.onnx");
p.compile(migraphx::make_target("ref"));
std::vector<uint8_t> data_x = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24};
migraphx::shape s_x{migraphx::shape::uint8_type, {1, 1, 5, 5}};
migraphx::parameter_map pp;
pp["x"] = migraphx::argument(s_x, data_x.data());
auto result = p.eval(pp).back();
std::vector<uint8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<uint8_t> gold = {18};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearconv_test)
{
// https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__QLinearConv.html
......