"test/vscode:/vscode.git/clone" did not exist on "ec1ac8c0440202c501df405b1c8e4c5f16dfffbc"
Commit a22ec139 authored by Manupa Karunaratne's avatar Manupa Karunaratne
Browse files

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into mlir-attention

parents 9898823d 650ba45f
......@@ -77,11 +77,26 @@ struct random_uniform
using type = typename decltype(output)::value_type;
if constexpr(std::is_integral<type>{})
{
// default range for all integer types is
// (0, std::uniform_int_distribution<type>::max()).
// Todo: enable different ranges
std::uniform_int_distribution<type> dis;
std::generate(output.begin(), output.end(), [&] { return dis(gen); });
#ifdef _MSC_VER
// According to the C++ specification, the effect is undefined if the result type
// for the generator is not one of short, int, long, long long, unsigned short,
// unsigned int, unsigned long, or unsigned long long. See
// https://en.cppreference.com/w/cpp/numeric/random/uniform_int_distribution.
if constexpr(sizeof(type) == 1)
{
std::uniform_int_distribution<int> dis{std::numeric_limits<type>::min(),
std::numeric_limits<type>::max()};
std::generate(output.begin(), output.end(), [&] { return dis(gen); });
}
else
#endif
{
// default range for all integer types is
// (0, std::uniform_int_distribution<type>::max()).
// Todo: enable different ranges
std::uniform_int_distribution<type> dis;
std::generate(output.begin(), output.end(), [&] { return dis(gen); });
}
}
else
{
......
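The hunk above widens 1-byte integer types before instantiating std::uniform_int_distribution, since the standard leaves that distribution undefined for char-sized types. A minimal standalone sketch of the same idea, assuming C++17; it applies the widening unconditionally rather than only under _MSC_VER, and the helper name fill_uniform is illustrative only:

#include <algorithm>
#include <limits>
#include <random>
#include <vector>

// Widen 1-byte types to int for the distribution, then narrow on assignment;
// all other integer types use the distribution directly.
template <class T>
void fill_uniform(std::vector<T>& out, std::mt19937& gen)
{
    if constexpr(sizeof(T) == 1)
    {
        std::uniform_int_distribution<int> dis{std::numeric_limits<T>::min(),
                                               std::numeric_limits<T>::max()};
        std::generate(out.begin(), out.end(), [&] { return static_cast<T>(dis(gen)); });
    }
    else
    {
        std::uniform_int_distribution<T> dis;
        std::generate(out.begin(), out.end(), [&] { return dis(gen); });
    }
}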
......@@ -33,6 +33,7 @@
#include <migraphx/dfor.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/shape_for_each.hpp>
#include <array>
#include <cmath>
#include <numeric>
#include <utility>
......
......@@ -66,7 +66,7 @@ struct scatter : op_name<Derived>
shape normalize_compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs, *this}.has(3).standard();
check_shapes{inputs, *this}.has(3);
// If non-packed, this converts to a packed output while preserving permutation of tensor
return inputs.front().with_lens(inputs.front().lens());
}
......
......@@ -29,6 +29,17 @@
#if defined(CPPCHECK)
#define MIGRAPHX_HAS_OPTIONAL 1
#define MIGRAPHX_HAS_OPTIONAL_TS 1
#elif defined(_WIN32)
#if _MSC_VER >= 1920
#define MIGRAPHX_HAS_OPTIONAL 1
#define MIGRAPHX_HAS_OPTIONAL_TS 0
#elif _MSC_VER >= 1900
#define MIGRAPHX_HAS_OPTIONAL 0
#define MIGRAPHX_HAS_OPTIONAL_TS 1
#else
#define MIGRAPHX_HAS_OPTIONAL 0
#define MIGRAPHX_HAS_OPTIONAL_TS 0
#endif
#elif defined(__has_include)
#if __has_include(<optional>) && __cplusplus >= 201703L
#define MIGRAPHX_HAS_OPTIONAL 1
......
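Downstream code would typically consume these macros to choose between <optional> and <experimental/optional>. A hypothetical sketch of such a consumer; the alias name and namespace layout are assumptions, not taken from the actual header:

#if MIGRAPHX_HAS_OPTIONAL
#include <optional>
namespace migraphx {
template <class T>
using optional = std::optional<T>;
} // namespace migraphx
#elif MIGRAPHX_HAS_OPTIONAL_TS
#include <experimental/optional>
namespace migraphx {
template <class T>
using optional = std::experimental::optional<T>;
} // namespace migraphx
#endif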
......@@ -31,6 +31,7 @@
#include <migraphx/module.hpp>
#include <migraphx/config.hpp>
#include <migraphx/ranges.hpp>
#include <array>
#include <string>
namespace migraphx {
......
......@@ -24,6 +24,7 @@
#ifndef MIGRAPHX_GUARD_MIGRAPHX_SOURCE_LOCATION_HPP
#define MIGRAPHX_GUARD_MIGRAPHX_SOURCE_LOCATION_HPP
#include <cstdint>
#include <migraphx/config.hpp>
#if defined(CPPCHECK)
......
......@@ -34,6 +34,7 @@ struct MIGRAPHX_EXPORT tmp_dir
{
fs::path path;
tmp_dir(const std::string& prefix = "");
tmp_dir(tmp_dir&&) = default;
void execute(const std::string& exe, const std::string& args) const;
......
......@@ -34,7 +34,7 @@ template <class PrivateMigraphTypeNameProbe>
std::string compute_type_name()
{
std::string name;
#ifdef _MSC_VER
#if defined(_MSC_VER) && !defined(__clang__)
name = typeid(PrivateMigraphTypeNameProbe).name();
name = name.substr(7);
#else
......
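On MSVC, typeid(T).name() returns a readable name prefixed with "struct " (7 characters), which the substr(7) call strips; clang-cl also defines _MSC_VER, and the added !defined(__clang__) routes it to the generic branch instead. A small illustration of the stripping, outside the original file (names are illustrative):

#include <string>
#include <typeinfo>

struct my_probe {};

// Under MSVC, typeid(my_probe).name() yields "struct my_probe"; dropping the first
// 7 characters ("struct ") leaves the bare type name.
inline std::string msvc_style_name() { return std::string{typeid(my_probe).name()}.substr(7); }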
......@@ -244,7 +244,7 @@ void onnx_parser::parse_from(std::istream& is, std::string name)
this->filename = std::move(name);
auto parent_path = fs::path(this->filename).parent_path();
if(not parent_path.empty())
this->path = parent_path;
this->path = parent_path.string();
onnx::ModelProto model;
if(model.ParseFromIstream(&is))
......
......@@ -47,7 +47,7 @@ void cal_auto_padding_size(onnx_parser::node_info info,
return;
}
auto auto_pad = info.attributes["auto_pad"].s();
auto auto_pad = to_upper(info.attributes["auto_pad"].s());
if(auto_pad.find("SAME") != std::string::npos)
{
bool is_same_upper = (auto_pad.find("SAME_UPPER") != std::string::npos);
......
......@@ -87,8 +87,7 @@ struct parse_depthtospace : op_parser<parse_depthtospace>
auto temp1 = info.add_instruction(make_op("reshape", {{"dims", lens1}}), args[0]);
auto temp2 = info.add_instruction(make_op("transpose", {{"permutation", perm}}), temp1);
return info.add_instruction(make_op("reshape", {{"dims", lens2}}),
info.make_contiguous(temp2));
return info.add_instruction(make_op("reshape", {{"dims", lens2}}), temp2);
}
};
......
......@@ -97,7 +97,7 @@ struct parse_pooling : op_parser<parse_pooling>
values["lp_order"] = info.attributes.at("p").i();
}
// ensure pads availabe only when auto_pad is "NOT_SET"
// ensure pads available only when auto_pad is "NOT_SET"
check_padding_mode(info, "POOLING");
return values;
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/onnx/padding.hpp>
#include <migraphx/onnx/conv.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/onnx/checks.hpp>
#include <migraphx/onnx/broadcast_qdq.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/stringutils.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
/*
*********************************************************************************
* Reference: see QLinearConv in *
* https://github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md *
*********************************************************************************
com.microsoft.QLinearConv
Version
This version of the operator has been available since version 1 of the 'com.microsoft' operator set.
ATTRIBUTES:
auto_pad : string
channels_last : int
dilations : list of ints
group : int
kernel_shape : list of ints
pads : list of ints
strides : list of ints
INPUTS (8 - 9):
x : T1
x_scale : tensor(float)
x_zero_point : T1
w : T2
w_scale : tensor(float)
w_zero_point : T2
y_scale : tensor(float)
y_zero_point : T3
B (optional) : T4
OUTPUTS:
y : T3
Type Constraints:
T1 : tensor(int8), tensor(uint8)
T2 : tensor(int8), tensor(uint8)
T3 : tensor(int8), tensor(uint8)
T4 : tensor(int32)
More details also at:
https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__QLinearConv.html
*/
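// The parse() method below lowers QLinearConv into a dequantize -> float convolution ->
// quantize chain:
//   x_f = dequantizelinear(x, x_scale, x_zero_point)
//   w_f = dequantizelinear(w, w_scale, w_zero_point)
//   c   = convolution(x_f, w_f)                    with attributes from process_attributes()
//   c   = add(c, convert(broadcast(B)))            only when the optional int32 bias is given
//   y   = quantizelinear(c, y_scale, y_zero_point)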
struct parse_qlinearconv : op_parser<parse_qlinearconv>
{
std::vector<op_desc> operators() const { return {{"QLinearConv"}}; }
// basic type checking for QLinearConv Operator
void check_inputs(const std::vector<instruction_ref>& inp_arg) const
{
if(inp_arg.size() < 8)
MIGRAPHX_THROW("QLINEARCONV: missing inputs");
const instruction_ref& in_x = inp_arg[0];
const instruction_ref& in_scale_x = inp_arg[1];
const instruction_ref& in_w = inp_arg[3];
const instruction_ref& in_scale_w = inp_arg[4];
const instruction_ref& in_scale_y = inp_arg[6];
auto sh_x = in_x->get_shape();
auto sh_w = in_w->get_shape();
auto type_x = sh_x.type();
auto type_w = sh_w.type();
assert(in_x->get_shape().ndim() > 2);
if(type_x != shape::int8_type and type_x != shape::uint8_type)
MIGRAPHX_THROW("QLINEARCONV: unsupported input type");
if(type_w != shape::int8_type and type_w != shape::uint8_type)
MIGRAPHX_THROW("QLINEARCONV: unsupported weight type");
if(in_scale_x->get_shape().type() != shape::float_type)
MIGRAPHX_THROW("QLINEARCONV x scale type should be float");
if(in_scale_w->get_shape().type() != shape::float_type)
MIGRAPHX_THROW("QLINEARCONV: wt scale type should be float");
if(in_scale_y->get_shape().type() != shape::float_type)
MIGRAPHX_THROW("QLINEARCONV: y scale type should be float");
if(inp_arg.size() > 8 and inp_arg[8]->get_shape().type() != shape::int32_type)
MIGRAPHX_THROW("QLINEARCONV y bias should be int32");
}
// process all attributes of the QLinearConv operator
value process_attributes(const onnx_parser& parser,
const onnx_parser::node_info& info,
const std::vector<instruction_ref>& args) const
{
value values;
const auto& in_x = args[0];
const auto& wt = args[3];
size_t kdims = in_x->get_shape().ndim() - 2;
check_padding_mode(info, "QLINEARCONV");
values["stride"] = std::vector<int>(kdims, 1);
values["dilation"] = std::vector<int>(kdims, 1);
values["padding"] = std::vector<int>(kdims, 0);
values["group"] = 1;
if(contains(info.attributes, "group"))
values["group"] = parser.parse_value(info.attributes.at("group")).template at<int>();
if(contains(info.attributes, "strides"))
{
std::vector<int> st;
copy(info.attributes.at("strides").ints(), std::back_inserter(st));
check_attr_sizes(kdims, st.size(), "QLINEARCONV: inconsistent strides");
values["stride"] = st;
}
if(contains(info.attributes, "dilations"))
{
std::vector<int> dil;
copy(info.attributes.at("dilations").ints(), std::back_inserter(dil));
check_attr_sizes(kdims, dil.size(), "QLINEARCONV: inconsistent dilations");
values["dilation"] = dil;
}
if(contains(info.attributes, "pads"))
{
std::vector<int> pads;
copy(info.attributes.at("pads").ints(), std::back_inserter(pads));
check_attr_sizes(kdims, pads.size() / 2, "QLINEARCONV: inconsistent padding");
values["padding"] = pads;
}
else if(contains(info.attributes, "auto_pad"))
{
auto in_lens = in_x->get_shape().lens();
auto wt_lens = wt->get_shape().lens();
std::vector<std::size_t> k_lens(wt_lens.begin() + 2, wt_lens.end());
std::vector<int64_t> pads = values["padding"].to_vector<std::int64_t>();
cal_auto_padding_size(
info, values, k_lens, values["dilation"].to_vector<std::size_t>(), in_lens, pads);
values["padding"] = pads;
}
recalc_conv_attributes(values, kdims);
return values;
}
instruction_ref add_bias_to_conv(const instruction_ref bias_arg,
const instruction_ref conv_instr,
const onnx_parser::node_info& info) const
{
auto conv_sh = conv_instr->get_shape();
auto conv_lens = conv_sh.lens();
auto conv_type = conv_sh.type();
auto broadcast_bias = info.add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", conv_lens}}), bias_arg);
auto f_bias =
info.add_instruction(make_op("convert", {{"target_type", conv_type}}), broadcast_bias);
return info.add_instruction(migraphx::make_op("add"), conv_instr, f_bias);
};
instruction_ref parse(const op_desc& /* opd */,
const onnx_parser& parser,
const onnx_parser::node_info& info,
const std::vector<instruction_ref>& args) const
{
check_inputs(args);
auto values = process_attributes(parser, info, args);
// input: quantized x, scale, zero_pt
const instruction_ref& in_x = args[0];
const instruction_ref& in_scale_x = args[1];
const instruction_ref& in_zero_pt_x = args[2];
// input: quantized weights, scale, zero_pt
const instruction_ref& in_w = args[3];
const instruction_ref& in_scale_w = args[4];
const instruction_ref& in_zero_pt_w = args[5];
// for the dequantized output y: scale & zero_pt
const instruction_ref& in_scale_y = args[6];
const instruction_ref& in_zero_pt_y = args[7];
auto dquant_x = bcast_qdq_instr("dequantizelinear", in_x, in_scale_x, in_zero_pt_x, info);
auto dquant_w = bcast_qdq_instr("dequantizelinear", in_w, in_scale_w, in_zero_pt_w, info);
auto conv_op = migraphx::make_op("convolution", values);
auto conv_x_w = info.add_instruction(conv_op, dquant_x, dquant_w);
// Bias, if present, is the optional ninth argument.
if(args.size() > 8)
conv_x_w = add_bias_to_conv(args[8], conv_x_w, info);
auto quant_conv =
bcast_qdq_instr("quantizelinear", conv_x_w, in_scale_y, in_zero_pt_y, info);
return quant_conv;
}
};
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/op/pooling.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/onnx/checks.hpp>
#include <migraphx/onnx/broadcast_qdq.hpp>
#include <migraphx/instruction.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
/*
*********************************************************************************
* Reference: see QLinearGlobalAveragePool in *
* github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md *
*********************************************************************************
QLinearGlobalAveragePool consumes an input tensor X and applies
Average pooling across the values in the same channel. This is
equivalent to AveragePool with kernel size equal to the spatial
dimension of input tensor. Input is of type uint8_t or int8_t.
Version
This version of the operator has been available since version 1 of the 'com.microsoft' operator set.
Attributes
channels_last : int
Inputs
X : T
Input data tensor from the previous operator; According to channels_last, dimensions for image case
are (N x C x H x W), or (N x H x W x C) where N is the batch size, C is the number of channels, and
H and W are the height and the width of the data. For non image case, the dimensions are in the form
of (N x C x D1 x D2 ... Dn), or (N x D1 X D2 ... Dn x C) where N is the batch size.
x_scale : tensor(float)
Scale of quantized input 'X'. It must be a scalar.
x_zero_point : T
Zero point tensor for input 'X'. It must be a scalar.
y_scale : tensor(float)
Scale of quantized output 'Y'. It must be a scalar.
y_zero_point : T
Zero point tensor for output 'Y'. It must be a scalar.
Outputs
Y : T
Output data tensor from pooling across the input tensor. The output tensor has the same rank as the
input, with the N and C dimensions keeping their values, while the other dimensions are all 1.
Type Constraints
T : tensor(uint8), tensor(int8)
Constrain input and output types to signed/unsigned int8 tensors.
*/
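// The parse() method below implements this as dequantizelinear(X) -> average pooling whose
// kernel lengths are the trailing (spatial) dimensions of X -> quantizelinear with y_scale
// and y_zero_point. For example, an NCHW input of shape (N, C, H, W) pools with lengths
// (H, W), producing an output of shape (N, C, 1, 1).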
struct parse_qlinearglobalaveragepool : op_parser<parse_qlinearglobalaveragepool>
{
std::vector<op_desc> operators() const { return {{"QLinearGlobalAveragePool"}}; }
// basic type checking for QLinearGlobalAveragePool Operator
void check_inputs(const std::vector<instruction_ref>& args) const
{
if(args.size() < 5)
MIGRAPHX_THROW("QLINEARGLOBALAVERAGEPOOL: missing inputs");
const auto& in_x = args[0];
const auto& zero_pt_x = args[2];
const auto& zero_pt_y = args[4];
if(in_x->get_shape().ndim() <= 2)
MIGRAPHX_THROW("QLINEARGLOBALAVERAGEPOOL: input dimensions too small");
auto type_x = in_x->get_shape().type();
if(type_x != migraphx::shape::int8_type and type_x != migraphx::shape::uint8_type)
MIGRAPHX_THROW("QLINEARGLOBALAVERAGEPOOL: unsupported input type");
if(type_x != zero_pt_x->get_shape().type())
MIGRAPHX_THROW("QLINEARGLOBALAVERAGEPOOL: mismatched type: input zero point");
if(type_x != zero_pt_y->get_shape().type())
MIGRAPHX_THROW("QLINEARGLOBALAVERAGEPOOL: mismatched type: output zero point");
}
instruction_ref parse(const op_desc& /* opd */,
const onnx_parser& parser,
const onnx_parser::node_info& info,
const std::vector<instruction_ref>& args) const
{
int channels_last =
parser.parse_value(info.attributes.at("channels_last")).template at<int>();
if(channels_last != 0)
MIGRAPHX_THROW(
"QLINEARGLOBALAVERAGEPOOL: channels_last (N x D1..Dn x C) is not supported");
check_inputs(args);
// Input: X
const auto& in_x = args[0];
const auto& scale_x = args[1];
const auto& zero_pt_x = args[2];
auto dquant_x = bcast_qdq_instr("dequantizelinear", in_x, scale_x, zero_pt_x, info);
// Output Y = globalaveragepool(X)
auto op = migraphx::op::pooling{migraphx::op::pooling_mode::average};
auto lens = in_x->get_shape().lens();
std::vector<size_t> lengths(lens.begin() + 2, lens.end());
op.lengths = lengths;
op.padding = std::vector<size_t>(lens.size());
auto out_y = info.add_instruction(op, dquant_x);
const auto& scale_y = args[3];
const auto& zero_pt_y = args[4];
auto out_quant_y = bcast_qdq_instr("quantizelinear", out_y, scale_y, zero_pt_y, info);
return out_quant_y;
}
};
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/op/pooling.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/onnx/checks.hpp>
#include <migraphx/onnx/broadcast_qdq.hpp>
#include <migraphx/instruction.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
/*
*********************************************************************************
* Reference: see QLinearMatMul in *
* https://onnx.ai/onnx/operators/onnx__QLinearMatMul.html *
*********************************************************************************
Matrix product that behaves like numpy.matmul:
https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html. It consumes two
quantized input tensors, their scales and zero points, scale and zero point of output, and computes
the quantized output. The quantization formula is y = saturate((x / y_scale) + y_zero_point). For (x
/ y_scale), it is rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding
for details. Scale and zero point must have same shape. They must be either scalar (per tensor) or
N-D tensor (per row for ‘a’ and per column for ‘b’). Scalar refers to per tensor quantization
whereas N-D refers to per row or per column quantization. If the input is 2D of shape [M, K] then
zero point and scale tensor may be an M element vector [v_1, v_2, …, v_M] for per row quantization
and K element vector of shape [v_1, v_2, …, v_K] for per column quantization. If the input is N-D
tensor with shape [D1, D2, M, K] then zero point and scale tensor may have shape [D1, D2, M, 1] for
per row quantization and shape [D1, D2, 1, K] for per column quantization. Production must never
overflow, and accumulation may overflow if and only if in 32 bits.
Inputs
a (heterogeneous) - T1: N-dimensional quantized matrix a
a_scale (heterogeneous) - tensor(float): scale of quantized input a
a_zero_point (heterogeneous) - T1: zero point of quantized input a
b (heterogeneous) - T2: N-dimensional quantized matrix b
b_scale (heterogeneous) - tensor(float): scale of quantized input b
b_zero_point (heterogeneous) - T2: zero point of quantized input b
y_scale (heterogeneous) - tensor(float): scale of quantized output y
y_zero_point (heterogeneous) - T3: zero point of quantized output y
Outputs
y (heterogeneous) - T3: Quantized matrix multiply results from a * b
Type Constraints
T1 in ( tensor(int8), tensor(uint8) ): Constrain input a and its zero point data type to 8-bit
integer tensor.
T2 in ( tensor(int8), tensor(uint8) ): Constrain input b and its zero point data type to 8-bit
integer tensor.
T3 in ( tensor(int8), tensor(uint8) ): Constrain output y and its zero point data type to 8-bit
integer tensor.
*/
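// Worked example of the formula above: with y_scale = 0.05 and y_zero_point = 10 on int8,
// a real value 1.23 quantizes to saturate(round(1.23 / 0.05) + 10) = saturate(25 + 10) = 35.
// The parse() method below dequantizes a and b, performs a float "dot", and requantizes the
// result; non-scalar (per-row/per-column) scales and zero points are rejected in check_inputs.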
struct parse_qlinearmatmul : op_parser<parse_qlinearmatmul>
{
std::vector<op_desc> operators() const { return {{"QLinearMatMul"}}; }
// basic type checking for QLinearMatMul Operator
void check_inputs(const std::vector<instruction_ref>& args) const
{
if(args.size() < 8)
MIGRAPHX_THROW("QLINEARMATMUL: missing inputs");
const auto& in_a = args[0];
const auto& in_b = args[3];
auto sh_a = in_a->get_shape();
auto sh_b = in_b->get_shape();
auto type_a = sh_a.type();
auto type_b = sh_b.type();
if(type_a != migraphx::shape::int8_type and type_a != migraphx::shape::uint8_type)
MIGRAPHX_THROW("QLINEARMATMUL: unsupported input type");
if(type_b != migraphx::shape::int8_type and type_b != migraphx::shape::uint8_type)
MIGRAPHX_THROW("QLINEARMATMUL: unsupported input type");
auto lens_a = sh_a.lens();
auto lens_b = sh_b.lens();
size_t dim_a = lens_a.size();
size_t dim_b = lens_b.size();
if(dim_a == 0 or dim_b == 0)
MIGRAPHX_THROW("QLINEARMATMUL: empty input");
// broadcast is supported if either input is 1-D; the other can be a 2-D (or higher-rank) tensor.
// if an input is 1-D, prepend/append a 1 to its lens and check the remaining constraints.
if(dim_a == 1)
{
lens_a.insert(lens_a.begin(), 1);
dim_a++;
}
if(dim_b == 1)
{
lens_b.push_back(1);
dim_b++;
}
// 2-D or higher-order mat mul
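// e.g. a of shape [2, 3, 4] and b of shape [2, 4, 5] pass the check below: ranks match,
// the inner dimensions (4) agree, and the leading batch dimensions (2) are equal;
// the resulting dot has shape [2, 3, 5].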
if(dim_a != dim_b or *lens_a.rbegin() != *(lens_b.rbegin() + 1) or
not std::equal(lens_a.rbegin() + 2, lens_a.rend(), lens_b.rbegin() + 2, lens_b.rend()))
MIGRAPHX_THROW("QLINEARMATMUL: mismatched input dimensions");
if(migraphx::any_of({args[1], args[2], args[4], args[5]},
[](auto arg) { return not arg->get_shape().scalar(); }))
MIGRAPHX_THROW("QLINEARMATMUL: unsupported row/column quantization");
}
instruction_ref parse(const op_desc& /* opd */,
const onnx_parser& /*parser*/,
const onnx_parser::node_info& info,
const std::vector<instruction_ref>& args) const
{
check_inputs(args);
// A
const auto& in_a = args[0];
const auto& in_scale_a = args[1];
const auto& in_zero_pt_a = args[2];
auto dquant_a = bcast_qdq_instr("dequantizelinear", in_a, in_scale_a, in_zero_pt_a, info);
// B
const auto& in_b = args[3];
const auto& in_scale_b = args[4];
const auto& in_zero_pt_b = args[5];
auto dquant_b = bcast_qdq_instr("dequantizelinear", in_b, in_scale_b, in_zero_pt_b, info);
bool is_a_prepended = false;
bool is_b_appended = false;
// un-squeeze either tensor if 1-D.
if(in_a->get_shape().ndim() == 1)
{
is_a_prepended = true;
dquant_a = info.add_instruction(make_op("unsqueeze", {{"axes", {0}}}), dquant_a);
}
if(in_b->get_shape().ndim() == 1)
{
is_b_appended = true;
dquant_b = info.add_instruction(make_op("unsqueeze", {{"axes", {1}}}), dquant_b);
}
// Y = A * B
auto out_y = info.add_instruction(migraphx::make_op("dot"), dquant_a, dquant_b);
// squeeze at most once if needed, not twice.
if(is_a_prepended)
out_y = info.add_instruction(make_op("squeeze", {{"axes", {0}}}), out_y);
else if(is_b_appended)
out_y = info.add_instruction(make_op("squeeze", {{"axes", {1}}}), out_y);
const auto& scale_y = args[6];
const auto& zero_pt_y = args[7];
return bcast_qdq_instr("quantizelinear", out_y, scale_y, zero_pt_y, info);
}
};
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
......@@ -53,8 +53,7 @@ struct parse_reshape : op_parser<parse_reshape>
s.visit([&](auto v) { copy(v, std::back_inserter(dims)); });
}
auto cont = info.add_instruction(make_op("contiguous"), args[0]);
return info.add_instruction(make_op("reshape", {{"dims", dims}}), cont);
return info.add_instruction(make_op("reshape", {{"dims", dims}}), args[0]);
}
};
......
......@@ -73,8 +73,7 @@ struct parse_spacetodepth : op_parser<parse_spacetodepth>
std::vector<int64_t> perm = {0, 3, 5, 1, 2, 4};
auto temp1 = info.add_instruction(make_op("reshape", {{"dims", trans_lens}}), args[0]);
auto temp2 = info.add_instruction(make_op("transpose", {{"permutation", perm}}), temp1);
return info.add_instruction(make_op("reshape", {{"dims", res_lens}}),
info.make_contiguous(temp2));
return info.add_instruction(make_op("reshape", {{"dims", res_lens}}), temp2);
}
};
......
......@@ -26,13 +26,23 @@
#include <migraphx/env.hpp>
#include <functional>
#include <iostream>
#include <optional>
#ifdef _WIN32
// cppcheck-suppress definePrefix
#define WIN32_LEAN_AND_MEAN
#include <Windows.h>
#else
#include <unistd.h>
#endif
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_TRACE_CMD_EXECUTE)
#ifndef _WIN32
std::function<void(const char*)> redirect_to(std::ostream& os)
{
return [&](const char* x) { os << x; };
......@@ -74,6 +84,155 @@ int exec(const std::string& cmd, std::function<void(process::writer)> std_in)
});
}
#else
constexpr std::size_t MIGRAPHX_PROCESS_BUFSIZE = 4096;
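// RAII wrapper around an anonymous Win32 pipe: read()/write() operate on the two ends,
// the read end is marked non-inheritable, and the destructor closes both handles.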
class pipe
{
public:
explicit pipe(bool inherit_handle = true)
{
SECURITY_ATTRIBUTES attrs;
attrs.nLength = sizeof(SECURITY_ATTRIBUTES);
attrs.bInheritHandle = inherit_handle ? TRUE : FALSE;
attrs.lpSecurityDescriptor = nullptr;
if(CreatePipe(&m_read, &m_write, &attrs, 0) == FALSE)
throw GetLastError();
if(SetHandleInformation(m_read, HANDLE_FLAG_INHERIT, 0) == FALSE)
throw GetLastError();
}
pipe(const pipe&) = delete;
pipe& operator=(const pipe&) = delete;
pipe(pipe&&) = default;
~pipe()
{
CloseHandle(m_read);
m_read = nullptr;
CloseHandle(m_write);
m_write = nullptr;
}
std::optional<std::pair<bool, DWORD>> read(LPVOID buffer, DWORD length) const
{
DWORD bytes_read;
if(ReadFile(m_read, buffer, length, &bytes_read, nullptr) == FALSE)
{
DWORD error{GetLastError()};
if(error != ERROR_MORE_DATA)
{
return std::nullopt;
}
return {{true, bytes_read}};
}
return {{false, bytes_read}};
}
HANDLE get_read_handle() const { return m_read; }
bool write(LPCVOID buffer, DWORD length) const
{
DWORD bytes_written;
return WriteFile(m_write, buffer, length, &bytes_written, nullptr) == TRUE;
}
HANDLE get_write_handle() const { return m_write; }
private:
HANDLE m_write = nullptr, m_read = nullptr;
};
template <typename F>
int exec(const std::string& cmd, F f)
{
try
{
if(enabled(MIGRAPHX_TRACE_CMD_EXECUTE{}))
std::cout << cmd << std::endl;
STARTUPINFO info;
PROCESS_INFORMATION process_info;
pipe in{}, out{};
ZeroMemory(&info, sizeof(STARTUPINFO));
info.cb = sizeof(STARTUPINFO);
info.hStdError = out.get_write_handle();
info.hStdOutput = out.get_write_handle();
info.hStdInput = in.get_read_handle();
info.dwFlags |= STARTF_USESTDHANDLES;
ZeroMemory(&process_info, sizeof(process_info));
if(CreateProcess(nullptr,
const_cast<LPSTR>(cmd.c_str()),
nullptr,
nullptr,
TRUE,
0,
nullptr,
nullptr,
&info,
&process_info) == FALSE)
{
return GetLastError();
}
f(in, out);
WaitForSingleObject(process_info.hProcess, INFINITE);
DWORD status{};
GetExitCodeProcess(process_info.hProcess, &status);
CloseHandle(process_info.hProcess);
CloseHandle(process_info.hThread);
return static_cast<int>(status);
}
// cppcheck-suppress catchExceptionByValue
catch(DWORD last_error)
{
return last_error;
}
}
int exec(const std::string& cmd)
{
TCHAR buffer[MIGRAPHX_PROCESS_BUFSIZE];
HANDLE std_out{GetStdHandle(STD_OUTPUT_HANDLE)};
return (std_out == nullptr or std_out == INVALID_HANDLE_VALUE)
? GetLastError()
: exec(cmd, [&](const pipe&, const pipe& out) {
for(;;)
{
if(auto result = out.read(buffer, MIGRAPHX_PROCESS_BUFSIZE))
{
auto [more_data, bytes_read] = *result;
if(not more_data or bytes_read == 0)
break;
DWORD written;
if(WriteFile(std_out, buffer, bytes_read, &written, nullptr) == FALSE)
break;
}
}
});
}
int exec(const std::string& cmd, std::function<void(process::writer)> std_in)
{
return exec(cmd, [&](const pipe& in, const pipe&) {
std_in([&](const char* buffer, std::size_t n) { in.write(buffer, n); });
});
}
#endif
struct process_impl
{
std::string command{};
......@@ -119,7 +278,14 @@ process& process::cwd(const fs::path& p)
return *this;
}
void process::exec() { impl->check_exec(impl->get_command(), redirect_to(std::cout)); }
void process::exec()
{
#ifndef _WIN32
impl->check_exec(impl->get_command(), redirect_to(std::cout));
#else
impl->check_exec(impl->get_command());
#endif
}
void process::write(std::function<void(process::writer)> pipe_in)
{
......
......@@ -22,27 +22,24 @@
# THE SOFTWARE.
#####################################################################################
option(MIGRAPHX_ENABLE_PYTHON "Enable python bindings" ON)
add_library(migraphx_py py_loader.cpp)
migraphx_generate_export_header(migraphx_py)
target_include_directories(migraphx_py PRIVATE include)
target_link_libraries(migraphx_py PUBLIC migraphx)
rocm_install_targets(TARGETS migraphx_py INCLUDE include)
if(MIGRAPHX_ENABLE_PYTHON)
include(PythonModules)
include(PythonModules)
foreach(PYTHON_VERSION ${PYTHON_VERSIONS})
py_add_module(migraphx_pybind_${PYTHON_VERSION} migraphx_py.cpp PYTHON_VERSION ${PYTHON_VERSION} PYTHON_MODULE migraphx)
target_link_libraries(migraphx_pybind_${PYTHON_VERSION} PRIVATE migraphx migraphx_tf migraphx_onnx migraphx_all_targets)
rocm_install_targets(TARGETS migraphx_pybind_${PYTHON_VERSION})
add_dependencies(migraphx_py migraphx_pybind_${PYTHON_VERSION})
add_library(migraphx_py_${PYTHON_VERSION} py.cpp)
target_include_directories(migraphx_py_${PYTHON_VERSION} PRIVATE include)
target_link_libraries(migraphx_py_${PYTHON_VERSION} PUBLIC migraphx)
target_link_libraries(migraphx_py_${PYTHON_VERSION} PRIVATE pybind11::pybind11 python${PYTHON_VERSION}::runtime)
rocm_install_targets(TARGETS migraphx_py_${PYTHON_VERSION})
add_dependencies(migraphx_py migraphx_py_${PYTHON_VERSION})
endforeach()
endif()
foreach(PYTHON_VERSION ${PYTHON_VERSIONS})
py_add_module(migraphx_pybind_${PYTHON_VERSION} migraphx_py.cpp PYTHON_VERSION ${PYTHON_VERSION} PYTHON_MODULE migraphx)
target_link_libraries(migraphx_pybind_${PYTHON_VERSION} PRIVATE migraphx migraphx_tf migraphx_onnx migraphx_all_targets)
rocm_install_targets(TARGETS migraphx_pybind_${PYTHON_VERSION})
add_dependencies(migraphx_py migraphx_pybind_${PYTHON_VERSION})
add_library(migraphx_py_${PYTHON_VERSION} py.cpp)
target_include_directories(migraphx_py_${PYTHON_VERSION} PRIVATE include)
target_link_libraries(migraphx_py_${PYTHON_VERSION} PUBLIC migraphx)
target_link_libraries(migraphx_py_${PYTHON_VERSION} PRIVATE pybind11::pybind11 python${PYTHON_VERSION}::runtime)
rocm_install_targets(TARGETS migraphx_py_${PYTHON_VERSION})
add_dependencies(migraphx_py migraphx_py_${PYTHON_VERSION})
endforeach()
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -521,6 +521,27 @@ struct find_inner_broadcast
}) < (lens.size() - 1);
}))
return;
if(broadcasts.size() > 1)
{
auto bcast_strides = broadcasts.front()->get_shape().strides().size();
std::vector<size_t> common_axis(bcast_strides, 0);
// go through the strides of each broadcast,
// keep track of values that are equal to 0 in a dimension
for(auto i = 0; i < bcast_strides; i++)
{
for(const auto& broadcast : broadcasts)
{
if(broadcast->get_shape().strides()[i] == 0)
common_axis[i]++;
}
}
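// e.g. two literals broadcast from shape {1, C, 1, 1} onto an NCHW tensor both have
// stride 0 on the N, H and W axes, so common_axis reaches 2 on those axes and the
// rewrite below is worthwhile.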
// if no common broadcast axis, transformation is not useful
if(std::find_if(common_axis.begin(), common_axis.end(), [](auto num_common) {
return num_common > 1;
}) == common_axis.end())
return;
}
std::vector<instruction_ref> inputs;
std::transform(broadcasts.begin(),
broadcasts.end(),
......