Unverified commit eba1e778 authored by Umang Yadav, committed by GitHub

Merge branch 'develop' into perk-kernel

parents 6ee87f92 5bf4dee6
......@@ -40,6 +40,12 @@ static void update_op(const instruction_ref& input, const instruction_ref& ins,
auto val = op.to_value();
auto op_padding = val.at("padding").to_vector<size_t>();
// skip if shape is dynamic
if(input->get_shape().dynamic())
{
return;
}
auto kdims = input->get_shape().lens().size() - 2;
if(std::equal(op_padding.begin(),
op_padding.begin() + kdims,
......
......@@ -445,8 +445,8 @@ operation instruction::normalized_operator() const
operation o = this->get_operator();
if(this->need_normalization())
{
auto lens = this->inputs().front()->get_shape().lens();
if(!normalize_attributes(o, lens))
auto s = this->inputs().front()->get_shape();
if(!normalize_attributes(o, s.max_lens()))
return this->get_operator();
}
return o;
......
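The change above swaps lens() for max_lens() so attribute normalization also works for dynamic shapes: normalizing attributes (e.g. turning a negative axis positive, clipping index ranges) needs only the rank and an upper bound per dimension, both of which max_lens() supplies even when the exact runtime lengths are unknown. A standalone sketch (local names, not the MIGraphX API) of why the upper bound is sufficient:

#include <cassert>
#include <cstdint>
#include <vector>

int64_t normalize_axis(int64_t axis, const std::vector<std::size_t>& max_lens)
{
    // only the rank matters here, so any runtime lengths within the
    // bounds give the same normalized axis
    const auto rank = static_cast<int64_t>(max_lens.size());
    assert(axis >= -rank and axis < rank);
    return axis < 0 ? axis + rank : axis;
}

int main()
{
    std::vector<std::size_t> max_lens = {4, 64, 64}; // upper bounds of a dynamic shape
    assert(normalize_axis(-1, max_lens) == 2);
}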
......@@ -43,9 +43,9 @@ void normalize_ops::apply(module& m) const
if(inputs.empty())
continue;
auto lens = inputs[0]->get_shape().lens();
auto s = inputs[0]->get_shape();
migraphx::operation tuned_op = ins->get_operator();
if(normalize_attributes(tuned_op, lens))
if(normalize_attributes(tuned_op, s.max_lens()))
{
m.replace_instruction(ins, tuned_op, inputs);
ins->set_normalized();
......
......@@ -93,9 +93,10 @@ struct onnx_parser
onnx_parser&, const node_info&, std::vector<instruction_ref>)>;
node_map nodes;
std::unordered_map<std::string, instruction_ref> instructions;
program prog = program();
std::size_t default_dim_value = 1;
program prog = program();
shape::dynamic_dimension default_dyn_dim_value = {1, 1, 0};
std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims;
std::unordered_map<std::string, std::vector<shape::dynamic_dimension>> map_dyn_input_dims;
bool skip_unknown_operators = false;
int64_t max_loop_iterations = 10;
int64_t opset_version = 13;
......@@ -118,6 +119,7 @@ struct onnx_parser
};
shape::type_t get_type(int dtype);
bool is_type_float(shape::type_t dtype);
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
......
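The new parser default, default_dyn_dim_value = {1, 1, 0}, encodes min = 1, max = 1, opt = 0: a fixed dimension of length 1 with no optimal-value hint, which matches the old default_dim_value = 1. A sketch with a local mirror of shape::dynamic_dimension (not the real type) showing what the triple encodes:

#include <cassert>
#include <cstddef>

struct dyn_dim // local mirror of shape::dynamic_dimension {min, max, opt}
{
    std::size_t min, max, opt;
    bool is_fixed() const { return min == max; }
    bool has_optimal() const { return opt != 0; }
};

int main()
{
    dyn_dim d{1, 1, 0};
    assert(d.is_fixed() and not d.has_optimal()); // fixed at 1, no opt hint
}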
......@@ -41,8 +41,25 @@ template <class... Ts>
program parse_onnx_from(const onnx_options& options, Ts&&... xs)
{
onnx::onnx_parser parser;
parser.map_input_dims = options.map_input_dims;
parser.default_dim_value = options.default_dim_value;
parser.map_input_dims = options.map_input_dims;
parser.map_dyn_input_dims = options.map_dyn_input_dims;
auto dim_val = options.default_dim_value;
if(dim_val != 0)
{
if(options.default_dyn_dim_value != shape::dynamic_dimension{1, 1, 0})
{
MIGRAPHX_THROW("PARSE_ONNX_FROM: both default_dim_value and default_dyn_dim_value"
"set to non-default value");
}
else
{
parser.default_dyn_dim_value = {dim_val, dim_val, 0};
}
}
else
{
parser.default_dyn_dim_value = options.default_dyn_dim_value;
}
parser.skip_unknown_operators = options.skip_unknown_operators;
parser.max_loop_iterations = options.max_loop_iterations;
......
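The branch above implements a resolution rule for the two defaults: a non-zero static default_dim_value is promoted into a fixed dynamic_dimension, setting both options away from their defaults is an error, and otherwise the dynamic default is taken as-is. A standalone sketch of that rule (local names, not the MIGraphX API):

#include <cassert>
#include <cstddef>
#include <stdexcept>

struct dyn_dim { std::size_t min, max, opt; };

bool operator!=(const dyn_dim& a, const dyn_dim& b)
{
    return a.min != b.min or a.max != b.max or a.opt != b.opt;
}

dyn_dim resolve_default(std::size_t dim_val, dyn_dim dyn_val)
{
    if(dim_val != 0)
    {
        // both options set away from their defaults is ambiguous -> error
        if(dyn_val != dyn_dim{1, 1, 0})
            throw std::invalid_argument("both defaults set to non-default values");
        return {dim_val, dim_val, 0}; // promote the static default to a fixed range
    }
    return dyn_val;
}

int main()
{
    auto d = resolve_default(64, {1, 1, 0});
    assert(d.min == 64 and d.max == 64 and d.opt == 0);
}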
......@@ -28,16 +28,17 @@
#include <migraphx/stringutils.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/pad_calc.hpp>
#include <migraphx/common.hpp>
#include <migraphx/type_traits.hpp>
#include <migraphx/float_equal.hpp>
#include <migraphx/file_buffer.hpp>
#include <migraphx/filesystem.hpp>
#include <migraphx/op/unknown.hpp>
#include <migraphx/env.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
static onnx_parser::attribute_map get_attributes(const onnx::NodeProto& node)
......@@ -58,7 +59,7 @@ create_literal(shape::type_t shape_type, const std::vector<size_t>& dims, const
std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies<std::size_t>());
if(elem_num == 0)
{
return {};
return literal{shape_type};
}
// in case of scalar constants in onnx file, use dims=1 to fill initializer data
......@@ -75,7 +76,7 @@ static literal create_literal(shape::type_t shape_type, const std::vector<size_t
std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies<std::size_t>());
if(elem_num == 0)
{
return {};
return literal{shape_type};
}
// scalar input
......@@ -255,6 +256,11 @@ int64_t onnx_parser::get_opset_version(const onnx::ModelProto& model)
void onnx_parser::parse_graph(module* mod, const onnx::GraphProto& graph)
{
if(not map_input_dims.empty() and not map_dyn_input_dims.empty())
{
MIGRAPHX_THROW("PARSE_GRAPH: both map_input_dims and map_dyn_input_dims non-empty, only"
"one should be used");
}
std::unordered_map<std::string, instruction_ref> mod_insts;
for(auto&& f : graph.initializer())
{
......@@ -268,7 +274,7 @@ void onnx_parser::parse_graph(module* mod, const onnx::GraphProto& graph)
// input not in initializer_data, so it is a real input
if(!contains(mod_insts, name))
{
// ONNX specification does not specify hwo to deal with the
// ONNX specification does not specify how to deal with the
// scenario where a nested subgraph contains a parameter whose
// name already exists in its parent graph.
// In the current implementation, MIGraphX throws an exception for that.
......@@ -278,13 +284,22 @@ void onnx_parser::parse_graph(module* mod, const onnx::GraphProto& graph)
"\" existing in parent graph!");
}
shape s;
std::vector<std::size_t> dims;
if(map_input_dims.count(name) > 0)
{
dims = map_input_dims.at(name);
s = parse_type(input.type(), dims);
}
else if(map_dyn_input_dims.count(name) > 0)
{
shape::type_t shape_type = get_type(input.type().tensor_type().elem_type());
s = {shape_type, map_dyn_input_dims.at(name)};
}
else
{
s = parse_type(input.type(), dims);
}
shape s = parse_type(input.type(), dims);
mod_insts[name] = mod->add_parameter(name, s);
}
}
......@@ -439,30 +454,41 @@ shape onnx_parser::parse_type(const onnx::TypeProto& t,
return {shape_type, input_dims};
}
std::vector<std::size_t> dims;
std::vector<shape::dynamic_dimension> dynamic_dims;
auto&& tensor_dims = t.tensor_type().shape().dim();
std::transform(tensor_dims.begin(),
tensor_dims.end(),
std::back_inserter(dims),
[&](auto&& d) -> std::size_t {
std::back_inserter(dynamic_dims),
[&](auto&& d) -> shape::dynamic_dimension {
if(d.has_dim_value())
{
if(static_cast<int>(d.dim_value()) <= 0)
{
return default_dim_value;
return default_dyn_dim_value;
}
return d.dim_value();
std::size_t tmp = d.dim_value();
return {tmp, tmp, 0};
}
else
{
return default_dim_value;
return default_dyn_dim_value;
}
});
if(dims.empty())
if(dynamic_dims.empty())
{
return {shape_type};
return {shape_type, dims};
}
if(std::all_of(dynamic_dims.begin(), dynamic_dims.end(), [](auto dd) { return dd.is_fixed(); }))
{
std::vector<std::size_t> dims;
std::transform(dynamic_dims.begin(),
dynamic_dims.end(),
std::back_inserter(dims),
[](auto d) { return d.max; });
return {shape_type, dims};
}
return {shape_type, dynamic_dims};
}
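parse_type now builds a vector of dynamic_dimensions and collapses it back to a plain static shape when every dimension turns out fixed (min == max). A sketch of that collapse rule with local types:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <vector>

struct dyn_dim
{
    std::size_t min, max, opt;
    bool is_fixed() const { return min == max; }
};

std::vector<std::size_t> collapse_if_fixed(const std::vector<dyn_dim>& dds)
{
    assert(std::all_of(dds.begin(), dds.end(), [](auto d) { return d.is_fixed(); }));
    std::vector<std::size_t> lens;
    std::transform(dds.begin(), dds.end(), std::back_inserter(lens), [](auto d) {
        return d.max; // max == min for fixed dimensions
    });
    return lens;
}

int main()
{
    auto lens = collapse_if_fixed({{2, 2, 0}, {3, 3, 0}});
    assert((lens == std::vector<std::size_t>{2, 3})); // back to a static shape
}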
shape::type_t get_type(int dtype)
......@@ -487,6 +513,16 @@ shape::type_t get_type(int dtype)
}
}
bool is_type_float(shape::type_t dtype)
{
bool r = false;
if(dtype == shape::float_type || dtype == shape::double_type || dtype == shape::half_type)
{
r = true;
}
return r;
}
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
......@@ -43,7 +43,7 @@ struct parse_constant : op_parser<parse_constant>
// return an empty literal with the correct type
if(v.get_shape().elements() == 0)
{
return info.add_literal(literal{});
return info.add_literal(literal{v.get_shape().type()});
}
auto dim_size = info.attributes.at("value").t().dims_size();
......
......@@ -47,15 +47,17 @@ struct parse_convolution : op_parser<parse_convolution>
onnx_parser::node_info info,
std::vector<instruction_ref> args) const
{
auto op = make_op(opd.op_name);
auto values = op.to_value();
auto l0 = args[0];
auto weights = args[1];
auto in_lens = l0->get_shape().lens();
auto op = make_op(opd.op_name);
auto values = op.to_value();
auto l0 = args[0];
auto weights = args[1];
auto l0_shape = l0->get_shape();
auto w_shape = weights->get_shape();
auto in_lens = l0_shape.max_lens();
assert(in_lens.size() > 2);
auto kdims = in_lens.size() - 2;
// ensure pads availabe only when auto_pad is "NOT_SET"
// ensure pads available only when auto_pad is "NOT_SET"
check_padding_mode(info, "CONV");
if(contains(info.attributes, "strides"))
......@@ -79,21 +81,65 @@ struct parse_convolution : op_parser<parse_convolution>
copy(info.attributes["pads"].ints(), std::back_inserter(padding));
check_attr_sizes(kdims, padding.size() / 2, "PARSE_CONV: inconsistent paddings");
}
if(contains(info.attributes, "auto_pad"))
{
auto weight_lens = weights->get_shape().lens();
std::vector<std::size_t> k_lens(weight_lens.begin() + 2, weight_lens.end());
cal_auto_padding_size(info,
values,
k_lens,
values["dilation"].to_vector<std::size_t>(),
in_lens,
padding);
auto auto_pad = info.attributes["auto_pad"].s();
bool is_same_padding = false;
auto auto_pad = info.attributes["auto_pad"].s();
if(auto_pad.find("SAME") != std::string::npos)
{
values["padding_mode"] = to_value(op::padding_mode_t::same);
is_same_padding = true;
}
// check if image shape is dynamic
bool image_shape_dynamic = false;
if(l0_shape.dynamic())
{
auto dyn_dims = l0_shape.dyn_dims();
std::for_each(dyn_dims.begin() + 2, dyn_dims.end(), [&](auto dyn_dim) {
if(not dyn_dim.is_fixed())
{
image_shape_dynamic = true;
}
});
}
// check if kernel shape is dynamic
bool kernel_shape_dynamic = false;
if(w_shape.dynamic())
{
auto dyn_dims = w_shape.dyn_dims();
std::for_each(dyn_dims.begin() + 2, dyn_dims.end(), [&](auto dyn_dim) {
if(not dyn_dim.is_fixed())
{
kernel_shape_dynamic = true;
}
});
}
if(is_same_padding)
{
if(image_shape_dynamic or kernel_shape_dynamic)
{
// must calculate "same" padding with input shape data
bool is_same_upper = (auto_pad.find("SAME_UPPER") != std::string::npos);
values["padding_mode"] = is_same_upper
? to_value(op::padding_mode_t::same_upper)
: to_value(op::padding_mode_t::same_lower);
values["use_dynamic_same_auto_pad"] = true;
}
else
{
values["padding_mode"] = to_value(op::padding_mode_t::same);
// kernel shape will be fixed, so max_lens() == min_lens() for kernel lengths
auto weight_lens = weights->get_shape().max_lens();
std::vector<std::size_t> k_lens(weight_lens.begin() + 2, weight_lens.end());
cal_auto_padding_size(info,
values,
k_lens,
values["dilation"].to_vector<std::size_t>(),
in_lens,
padding);
}
}
}
values["padding"] = std::vector<size_t>(padding.begin(), padding.end());
......
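The SAME-padding branch above splits on whether any spatial dimension of the image or kernel is non-fixed: fixed shapes get concrete pad values computed at parse time via cal_auto_padding_size, while dynamic spatial dims defer the calculation to run time by recording a same_upper/same_lower padding mode. A sketch of the dynamic-spatial check (local names, not the parser itself):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

struct dyn_dim { std::size_t min, max, opt; bool is_fixed() const { return min == max; } };

bool spatial_dims_dynamic(const std::vector<dyn_dim>& dims)
{
    // dims[0] is batch and dims[1] is channels; only spatial dims matter here
    return std::any_of(dims.begin() + 2, dims.end(),
                       [](auto d) { return not d.is_fixed(); });
}

int main()
{
    // dynamic batch but fixed spatial dims: parse-time padding still works
    std::vector<dyn_dim> fixed_img = {{1, 4, 0}, {3, 3, 0}, {224, 224, 0}, {224, 224, 0}};
    assert(not spatial_dims_dynamic(fixed_img));
    // dynamic spatial dims: padding must be computed at run time
    std::vector<dyn_dim> dyn_img = {{1, 1, 0}, {3, 3, 0}, {64, 224, 0}, {64, 224, 0}};
    assert(spatial_dims_dynamic(dyn_img));
}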
......@@ -47,7 +47,8 @@ struct parse_if : op_parser<parse_if>
if(args.front()->get_shape().elements() != 1)
{
MIGRAPHX_THROW("PARSE_IF: condition input can have only one element!");
MIGRAPHX_THROW("PARSE_IF: " + info.name +
" condition input can have only one element!");
}
std::string then_name = info.name + "_if";
......@@ -69,7 +70,8 @@ struct parse_if : op_parser<parse_if>
else_out_shapes.begin(),
else_out_shapes.end()))
{
MIGRAPHX_THROW("PARSE_IF: then and else sub_grahps must have same output shapes!");
MIGRAPHX_THROW("PARSE_IF: " + info.name +
" then and else sub_grahps must have same output shapes!");
}
auto if_ret = info.add_instruction(make_op("if"), args, {then_mdl, else_mdl});
......
......@@ -32,9 +32,12 @@ namespace onnx {
struct parse_instancenorm : op_parser<parse_instancenorm>
{
const std::set<shape::type_t> valid_types = {
shape::float_type, shape::half_type, shape::double_type};
std::vector<op_desc> operators() const { return {{"InstanceNormalization"}}; }
instruction_ref parse(const op_desc& /*opd*/,
instruction_ref parse(const op_desc& opd,
const onnx_parser& parser,
onnx_parser::node_info info,
std::vector<instruction_ref> args) const
......@@ -52,6 +55,11 @@ struct parse_instancenorm : op_parser<parse_instancenorm>
auto scale = args[1];
auto bias = args[2];
auto dims = x->get_shape().lens();
auto dtype = x->get_shape().type();
if(not contains(valid_types, dtype))
MIGRAPHX_THROW(opd.op_name + ": invalid output type: " + std::to_string(dtype) +
". Valid types are 1 (float), 10 (half), and 11 (double).");
auto ndims = dims.size();
assert(ndims >= 2);
auto kdims = ndims - 2;
......@@ -65,7 +73,7 @@ struct parse_instancenorm : op_parser<parse_instancenorm>
auto l0 = info.add_instruction(make_op("sqdiff"), x, mean_bcast);
auto variance = info.add_instruction(make_op("reduce_mean", {{"axes", axes}}), l0);
auto l1 = info.add_instruction(make_op("sub"), x, mean_bcast);
auto epsilon_literal = info.add_literal(epsilon);
auto epsilon_literal = info.add_literal(literal{shape{dtype}, {epsilon}});
auto epsilon_bcast =
info.add_instruction(make_op("multibroadcast", {{"out_lens", dims}}), epsilon_literal);
auto variance_bcast =
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/make_op.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
struct parse_mod : op_parser<parse_mod>
{
std::vector<op_desc> operators() const { return {{"Mod"}}; }
instruction_ref parse(const op_desc& /*opd*/,
const onnx_parser& parser,
onnx_parser::node_info info,
std::vector<instruction_ref> args) const
{
std::string mod = "mod";
if(is_type_float(args[0]->get_shape().type()) || is_type_float(args[1]->get_shape().type()))
{
if(!contains(info.attributes, "fmod"))
{
MIGRAPHX_THROW("Mod operator with float args and fmod=0 invalid");
}
}
if(contains(info.attributes, "fmod"))
{
if(parser.parse_value(info.attributes.at("fmod")).at<int>() == 1)
{
mod = "fmod";
}
}
return info.add_common_op(mod, args[0], args[1]);
}
};
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
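A worked example of the distinction the parser enforces: integer "mod" takes the sign of the divisor (and is the only valid reading of fmod=0, hence the throw for float inputs), while "fmod" follows C's std::fmod and takes the sign of the dividend:

#include <cassert>
#include <cmath>

int python_mod(int a, int b) { return ((a % b) + b) % b; } // sign of divisor

int main()
{
    assert(python_mod(-4, 3) == 2);       // mod: result follows the divisor's sign
    assert(std::fmod(-4.0, 3.0) == -1.0); // fmod: result follows the dividend's sign
}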
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/pad_calc.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
void calculate_padding(int64_t idx,
std::vector<int64_t>& pads,
int64_t input_dim,
int64_t stride,
int64_t dilation,
int64_t weight_dim,
bool is_same_upper)
{
int64_t output_dim = (input_dim + stride - 1) / stride; // round up result
int64_t new_weight_dim = weight_dim + (weight_dim - 1) * (dilation - 1);
int64_t pad =
std::max(static_cast<int64_t>(0), (output_dim - 1) * stride + new_weight_dim - input_dim);
auto pad_ndims = pads.size() / 2;
if(is_same_upper)
{
pads[idx] = pad / 2;
pads[idx + pad_ndims] = pad - pad / 2;
}
else
{
pads[idx + pad_ndims] = pad / 2;
pads[idx] = pad - pad / 2;
}
}
std::vector<std::size_t> calc_dyn_auto_pad(std::vector<std::size_t> tensor_lens,
std::vector<std::size_t> k_lens,
std::vector<std::size_t> strides,
std::vector<std::size_t> dilations,
bool use_upper)
{
std::vector<std::size_t> padding;
padding.resize(2 * k_lens.size());
for(size_t i = 0; i < padding.size() / 2; i++)
{
std::ptrdiff_t input_dim = tensor_lens[i];
std::ptrdiff_t stride = strides[i];
std::ptrdiff_t weight_dim = k_lens[i];
std::ptrdiff_t dilation = dilations[i];
std::ptrdiff_t output_dim = (input_dim + stride - 1) / stride; // round up result
std::ptrdiff_t new_weight_dim = weight_dim + (weight_dim - 1) * (dilation - 1);
std::size_t pad = std::max(static_cast<std::ptrdiff_t>(0),
(output_dim - 1) * stride + new_weight_dim - input_dim);
auto pad_ndims = padding.size() / 2;
if(use_upper)
{
padding[i] = pad / 2;
padding[i + pad_ndims] = pad - pad / 2;
}
else
{
padding[i + pad_ndims] = pad / 2;
padding[i] = pad - pad / 2;
}
}
return padding;
}
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
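A worked check of the formula in calc_dyn_auto_pad for a single dimension (standalone, mirroring the code above): input_dim = 5, stride = 2, kernel = 3, dilation = 1 gives output_dim = ceil(5/2) = 3 and total pad max(0, (3 - 1) * 2 + 3 - 5) = 2, split 1 before / 1 after:

#include <algorithm>
#include <cassert>
#include <cstdint>

int main()
{
    int64_t input_dim = 5, stride = 2, weight_dim = 3, dilation = 1;
    int64_t output_dim = (input_dim + stride - 1) / stride; // round up
    int64_t eff_kernel = weight_dim + (weight_dim - 1) * (dilation - 1);
    int64_t pad = std::max<int64_t>(0, (output_dim - 1) * stride + eff_kernel - input_dim);
    assert(output_dim == 3 and pad == 2); // pads to {1, 1} around this dimension
}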
......@@ -159,6 +159,25 @@ instruction_ref program::validate() const
return mm->validate();
}
target_assignments program::get_target_assignments(const std::vector<target>& targets,
assignment_options options)
{
const auto m = options.metric;
target_assignments p;
const auto* mod = get_main_module();
for(auto it : iterator_for(*mod))
{
auto t = std::max_element(
targets.begin(), targets.end(), [it, m](const target& lhs, const target& rhs) {
return lhs.is_supported(it, m) < rhs.is_supported(it, m);
});
p.add_assignment(it, t->name());
}
return p;
}
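get_target_assignments picks, per instruction, the target reporting the best is_supported score via std::max_element; ties resolve to the first of the equally-scored targets. A standalone sketch of that selection pattern (fake_target is a hypothetical stand-in, not migraphx::target):

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

struct fake_target // hypothetical stand-in, not the real target interface
{
    std::string name;
    int score; // stand-in for is_supported(ins, metric)
};

int main()
{
    std::vector<fake_target> targets = {{"cpu", 1}, {"gpu", 2}};
    auto t = std::max_element(targets.begin(), targets.end(),
                              [](const auto& a, const auto& b) { return a.score < b.score; });
    assert(t->name == "gpu"); // highest-scoring target wins the assignment
}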
bool program::is_compiled() const { return not this->impl->target_name.empty(); }
void program::compile(const target& t, compile_options options)
......@@ -288,9 +307,12 @@ std::vector<argument> generic_eval(const module* mod,
if(not contains(params, param_name))
MIGRAPHX_THROW("Parameter not found: " + param_name);
auto param = params[param_name];
if(param.get_shape() != ins->get_shape())
// TODO: may want to check for the correct number of dimensions and/or that the
// dimensions are within bounds
if(not ins->get_shape().dynamic() and param.get_shape() != ins->get_shape())
{
MIGRAPHX_THROW("Incorrect shape {" + to_string(param.get_shape()) +
"} for parameter: " + param_name);
}
return param;
}));
}
......@@ -333,7 +355,10 @@ std::vector<argument> generic_eval(const module* mod,
}));
}
assert(results.find(ins) != results.end());
assert(results.at(ins).get_shape() == ins->get_shape());
if(not ins->get_shape().dynamic())
{
assert(results.at(ins).get_shape() == ins->get_shape());
}
}
return {results.at(std::prev(mod->end()))};
}
......@@ -683,11 +708,13 @@ void program::perf_report(std::ostream& os,
double overhead_percent = overhead_time * 100.0 / total_time;
double total_instruction_time = 0.0;
std::unordered_map<std::string, double> op_times;
std::unordered_map<std::string, std::size_t> op_n;
for(auto&& p : ins_vec)
{
double avg = common_average(p.second);
op_times[perf_group(p.first->get_operator())] += avg;
total_instruction_time += avg;
op_n[perf_group(p.first->get_operator())]++;
}
double calculate_overhead_time = total_time - total_instruction_time;
double calculate_overhead_percent = calculate_overhead_time * 100.0 / total_time;
......@@ -708,18 +735,19 @@ void program::perf_report(std::ostream& os,
os << std::endl;
os << "Summary:" << std::endl;
std::vector<std::pair<double, std::string>> op_times_sorted;
std::transform(op_times.begin(),
op_times.end(),
std::back_inserter(op_times_sorted),
[](auto p) { return std::make_pair(p.second, p.first); });
std::vector<std::tuple<double, std::size_t, std::string>> op_times_sorted;
std::transform(
op_times.begin(), op_times.end(), std::back_inserter(op_times_sorted), [&](auto p) {
auto&& name = p.first;
return std::make_tuple(p.second, op_n.at(name), name);
});
std::sort(op_times_sorted.begin(), op_times_sorted.end(), std::greater<>{});
for(auto&& p : op_times_sorted)
for(auto&& [avg, nn, name] : op_times_sorted)
{
auto&& name = p.second;
double avg = p.first;
double percent = std::ceil(100.0 * avg / total_instruction_time);
os << name << ": " << avg << "ms, " << percent << "%" << std::endl;
double per_ins = avg / nn;
os << name << ": " << avg << "ms / " << nn << " = " << per_ins << "ms, " << percent << "%"
<< std::endl;
}
os << std::endl;
......
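With the per-op instruction counts added above, each summary line now prints the op's total time, its instruction count, and the per-instruction average. A hypothetical line (numbers invented purely for illustration):

gpu::convolution: 12.4ms / 31 = 0.4ms, 42%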
......@@ -26,6 +26,7 @@
#include <migraphx/stringutils.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/permutation.hpp>
#include <migraphx/ranges.hpp>
#include <numeric>
#include <algorithm>
#include <functional>
......@@ -65,13 +66,21 @@ struct shape_impl
std::is_sorted(m_strides.rbegin(), m_strides.rend());
}
shape_impl(shape::type_t t, std::vector<shape::dynamic_dimension> dims)
: m_type(t), m_dyn_dims(std::move(dims))
{
}
shape_impl(const std::vector<shape>& subs) : m_type(shape::tuple_type), m_shapes(subs) {}
shape::type_t m_type;
std::vector<std::size_t> m_lens = {};
std::vector<std::size_t> m_strides = {};
std::vector<shape> m_shapes = {};
bool m_standard = false;
std::vector<shape::dynamic_dimension> m_dyn_dims = {};
void calculate_strides()
{
m_strides.clear();
......@@ -87,6 +96,12 @@ struct shape_impl
std::size_t element_space() const
{
if(not m_dyn_dims.empty())
{
auto maxes = max_lens();
return std::accumulate(maxes.begin(), maxes.end(), std::size_t{1}, std::multiplies<>());
}
assert(m_lens.size() == m_strides.size());
if(m_lens.empty())
return 0;
......@@ -101,6 +116,11 @@ struct shape_impl
std::size_t elements() const
{
if(not m_dyn_dims.empty())
{
MIGRAPHX_THROW("SHAPE: elements() called on dynamic shape");
}
assert(m_lens.size() == m_strides.size());
if(m_lens.empty())
return 0;
......@@ -108,6 +128,35 @@ struct shape_impl
m_lens.begin(), m_lens.end(), std::size_t{1}, std::multiplies<std::size_t>());
}
std::vector<std::size_t> min_lens() const
{
std::vector<std::size_t> ret(m_dyn_dims.size());
std::transform(m_dyn_dims.cbegin(),
m_dyn_dims.cend(),
ret.begin(),
[](shape::dynamic_dimension x) { return x.min; });
return ret;
}
std::vector<std::size_t> max_lens() const
{
std::vector<std::size_t> ret(m_dyn_dims.size());
std::transform(m_dyn_dims.cbegin(),
m_dyn_dims.cend(),
ret.begin(),
[](shape::dynamic_dimension x) { return x.max; });
return ret;
}
std::vector<std::size_t> opt_lens() const
{
std::vector<std::size_t> ret(m_dyn_dims.size());
std::transform(m_dyn_dims.cbegin(),
m_dyn_dims.cend(),
ret.begin(),
[](shape::dynamic_dimension x) { return x.opt; });
return ret;
}
// Does the shape skip over elements?
bool skips() const
{
......@@ -165,6 +214,16 @@ shape::shape(type_t t, std::vector<std::size_t> l, std::vector<std::size_t> s)
{
}
shape::shape(type_t t, std::initializer_list<std::size_t> d)
: shape::shape(t, std::vector<std::size_t>{d.begin(), d.end()})
{
}
shape::shape(type_t t, std::vector<shape::dynamic_dimension> dims)
: impl(std::make_shared<shape_impl>(t, std::move(dims)))
{
}
shape::shape(const std::vector<shape>& subs) : impl(std::make_shared<shape_impl>(subs)) {}
shape::shape(std::shared_ptr<shape_impl> pimpl) : impl(std::move(pimpl)) {}
......@@ -180,9 +239,13 @@ shape shape::from_permutation(type_t t,
}
shape::type_t shape::type() const { return impl->m_type; }
const std::vector<std::size_t>& shape::lens() const { return impl->m_lens; }
const std::vector<std::size_t>& shape::strides() const { return impl->m_strides; }
std::size_t shape::elements() const { return impl->elements(); }
std::size_t shape::bytes() const
{
if(this->sub_shapes().empty())
......@@ -199,6 +262,7 @@ std::size_t shape::bytes() const
[&](auto x, auto y) { return x + y.bytes(); });
}
}
std::size_t shape::type_size() const
{
std::size_t n = 0;
......@@ -206,20 +270,35 @@ std::size_t shape::type_size() const
this->visit_type([&](auto as) { n = as.size(); });
return n;
}
std::size_t shape::index(std::initializer_list<std::size_t> l) const
{
if(this->dynamic())
{
MIGRAPHX_THROW("SHAPE: index() called on dynamic shape");
}
assert(l.size() <= this->lens().size());
assert(this->lens().size() == this->strides().size());
return std::inner_product(l.begin(), l.end(), this->strides().begin(), std::size_t{0});
}
std::size_t shape::index(const std::vector<std::size_t>& l) const
{
if(this->dynamic())
{
MIGRAPHX_THROW("SHAPE: index() called on dynamic shape");
}
assert(l.size() <= this->lens().size());
assert(this->lens().size() == this->strides().size());
return std::inner_product(l.begin(), l.end(), this->strides().begin(), std::size_t{0});
}
std::size_t shape::index(std::size_t i) const
{
if(this->dynamic())
{
MIGRAPHX_THROW("SHAPE: index() called on dynamic shape");
}
assert(this->lens().size() == this->strides().size());
if(this->standard())
return i;
......@@ -267,12 +346,20 @@ void shape::multi_copy(std::size_t i, std::size_t* start, const std::size_t* end
bool shape::packed() const
{
if(this->dynamic())
{
return false;
}
return this->sub_shapes().empty() and not impl->skips() and
this->elements() == this->element_space();
}
bool shape::transposed() const
{
if(this->dynamic())
{
return false;
}
if(this->broadcasted())
{
// TODO: Use a filter_iterator instead
......@@ -292,6 +379,10 @@ bool shape::transposed() const
bool shape::broadcasted() const
{
if(this->dynamic())
{
return false;
}
assert(this->lens().size() == this->strides().size());
return std::any_of(
this->strides().begin(), this->strides().end(), [](auto x) { return x == 0; });
......@@ -299,6 +390,10 @@ bool shape::broadcasted() const
bool shape::scalar() const
{
if(this->dynamic())
{
return false;
}
assert(this->lens().size() == this->strides().size());
// if any stride > 0, then accumulate will return false
return this->sub_shapes().empty() and
......@@ -317,6 +412,10 @@ shape shape::normalize_standard() const
shape shape::with_lens(type_t t, const std::vector<std::size_t>& l) const
{
if(this->dynamic())
{
MIGRAPHX_THROW("SHAPE: with_lens() called on dynamic shape");
}
assert(l.size() == this->lens().size());
auto perm = find_permutation(*this);
return shape::from_permutation(t, l, perm);
......@@ -324,6 +423,10 @@ shape shape::with_lens(type_t t, const std::vector<std::size_t>& l) const
shape shape::with_lens(const std::vector<std::size_t>& l) const
{
if(this->dynamic())
{
MIGRAPHX_THROW("SHAPE: with_lens() called on dynamic shape");
}
return this->with_lens(this->type(), l);
}
......@@ -338,20 +441,80 @@ std::size_t shape::element_space() const { return impl->element_space(); }
std::string shape::type_string() const { return name(this->type()); }
bool shape::dynamic() const { return not impl->m_dyn_dims.empty(); }
const std::vector<shape::dynamic_dimension>& shape::dyn_dims() const { return impl->m_dyn_dims; }
std::vector<std::size_t> shape::min_lens() const
{
return this->dynamic() ? impl->min_lens() : this->lens();
}
std::vector<std::size_t> shape::max_lens() const
{
return this->dynamic() ? impl->max_lens() : this->lens();
}
std::vector<std::size_t> shape::opt_lens() const
{
return this->dynamic() ? impl->opt_lens() : this->lens();
}
bool shape::dynamic_dimension::is_fixed() const { return this->min == this->max; }
bool shape::dynamic_dimension::has_optimal() const { return opt != 0; }
template <class Self, class F>
auto shape::dynamic_dimension::reflect(Self& self, F f)
{
return pack(f(self.min, "min"), f(self.max, "max"), f(self.opt, "opt"));
}
bool operator==(const shape::dynamic_dimension& x, const shape::dynamic_dimension& y)
{
return (x.min == y.min and x.max == y.max and x.opt == y.opt);
}
bool operator!=(const shape::dynamic_dimension& x, const shape::dynamic_dimension& y)
{
return !(x == y);
}
std::ostream& operator<<(std::ostream& os, const shape::dynamic_dimension& x)
{
os << "[" << x.min << ", " << x.max << ", " << x.opt << "]";
return os;
}
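min_lens()/max_lens()/opt_lens() project the per-dimension bounds of a dynamic shape, and the product of max_lens() is the worst-case element count that element_space() returns for dynamic shapes. A sketch with local types:

#include <cassert>
#include <cstddef>
#include <numeric>
#include <vector>

struct dyn_dim { std::size_t min, max, opt; };

int main()
{
    std::vector<dyn_dim> dds = {{1, 4, 2}, {3, 3, 0}, {224, 224, 0}};
    // upper bound on elements: the buffer size a dynamic shape must allow for
    std::size_t upper = std::accumulate(dds.begin(), dds.end(), std::size_t{1},
                                        [](std::size_t a, dyn_dim d) { return a * d.max; });
    assert(upper == 4 * 3 * 224);
}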
bool operator==(const shape& x, const shape& y)
{
return x.impl == y.impl or (x.type() == y.type() and x.lens() == y.lens() and
x.strides() == y.strides() and x.sub_shapes() == y.sub_shapes());
if(x.dynamic() and y.dynamic())
{
return x.impl == y.impl or (x.type() == y.type() and x.dyn_dims() == y.dyn_dims() and
x.sub_shapes() == y.sub_shapes());
}
return x.impl == y.impl or
(x.dynamic() == y.dynamic() and x.type() == y.type() and x.lens() == y.lens() and
x.strides() == y.strides() and x.sub_shapes() == y.sub_shapes());
}
bool operator!=(const shape& x, const shape& y) { return !(x == y); }
std::ostream& operator<<(std::ostream& os, const shape& x)
{
if(x.sub_shapes().empty())
{
os << x.type_string() << ", ";
os << "{" << to_string_range(x.lens()) << "}, ";
os << "{" << to_string_range(x.strides()) << "}";
if(x.dynamic())
{
os << "dynamic, ";
os << x.type_string() << ", ";
os << "{" << to_string_range(x.dyn_dims()) << "}";
}
else
{
os << x.type_string() << ", ";
os << "{" << to_string_range(x.lens()) << "}, ";
os << "{" << to_string_range(x.strides()) << "}";
}
}
else
{
......@@ -375,12 +538,14 @@ const std::vector<shape>& shape::sub_shapes() const { return impl->m_shapes; }
void migraphx_to_value(value& v, const shape& s)
{
value result;
result["type"] = migraphx::to_value(s.type_string());
result["lens"] = migraphx::to_value(s.lens());
result["strides"] = migraphx::to_value(s.strides());
result["sub_shapes"] = migraphx::to_value(s.sub_shapes());
v = result;
result["type"] = migraphx::to_value(s.type_string());
result["lens"] = migraphx::to_value(s.lens());
result["strides"] = migraphx::to_value(s.strides());
result["sub_shapes"] = migraphx::to_value(s.sub_shapes());
result["dynamic_dimensions"] = migraphx::to_value(s.dyn_dims());
v = result;
}
void migraphx_from_value(const value& v, shape& s)
{
auto t = v.at("type").get_string();
......@@ -390,9 +555,25 @@ void migraphx_from_value(const value& v, shape& s)
}
else
{
s = shape{shape::parse_type(t),
v.at("lens").to_vector<std::size_t>(),
v.at("strides").to_vector<std::size_t>()};
if(v.at("dynamic_dimensions").empty())
{
s = shape{shape::parse_type(t),
v.at("lens").to_vector<std::size_t>(),
v.at("strides").to_vector<std::size_t>()};
}
else
{
auto v_dd = v.at("dynamic_dimensions");
std::vector<shape::dynamic_dimension> dyn_dims(v.at("dynamic_dimensions").size());
std::transform(v_dd.begin(), v_dd.end(), dyn_dims.begin(), [](migraphx::value x) {
auto x_min = x.at("min").template to<size_t>();
auto x_max = x.at("max").template to<size_t>();
auto x_opt = x.at("opt").template to<size_t>();
return shape::dynamic_dimension{x_min, x_max, x_opt};
});
s = shape{shape::parse_type(t), dyn_dims};
}
}
}
......
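For reference, the serialized value written by migraphx_to_value above carries the keys shown below; on load, a non-empty "dynamic_dimensions" entry is what routes migraphx_from_value into the dynamic branch. The rendering here is an illustrative JSON-style sketch with invented values, only the key names are taken from the code:

{
  "type": "float_type",
  "lens": [], "strides": [], "sub_shapes": [],
  "dynamic_dimensions": [{"min": 1, "max": 4, "opt": 2},
                         {"min": 3, "max": 3, "opt": 0}]
}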
......@@ -272,7 +272,7 @@ struct find_concat_transpose
{
auto matcher() const
{
return match::name("concat")(match::all_of[match::inputs()](match::transpose_shape()));
return match::name("concat")(match::all_of[match::inputs()](match::name("transpose")));
}
void apply(module& m, const match::matcher_result& mr) const
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/sqlite.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/errors.hpp>
#include <sqlite3.h>
#include <algorithm>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
using sqlite3_ptr = MIGRAPHX_MANAGE_PTR(sqlite3*, sqlite3_close);
struct sqlite_impl
{
sqlite3* get() const { return ptr.get(); }
void open(const fs::path& p, int flags)
{
sqlite3* ptr_tmp = nullptr;
int rc = sqlite3_open_v2(p.string().c_str(), &ptr_tmp, flags, nullptr);
ptr = sqlite3_ptr{ptr_tmp};
if(rc != 0)
MIGRAPHX_THROW("error opening " + p.string() + ": " + error_message());
}
template <class F>
void exec(const char* sql, F f)
{
auto callback = [](void* obj, auto... xs) -> int {
try
{
const auto* g = static_cast<const F*>(obj);
(*g)(xs...);
return 0;
}
catch(...)
{
return -1;
}
};
int rc = sqlite3_exec(get(), sql, callback, &f, nullptr);
if(rc != 0)
MIGRAPHX_THROW(error_message());
}
std::string error_message() const
{
std::string msg = "sqlite3: ";
return msg + sqlite3_errmsg(get());
}
sqlite3_ptr ptr;
};
sqlite sqlite::read(const fs::path& p)
{
sqlite r;
r.impl = std::make_shared<sqlite_impl>();
r.impl->open(p, SQLITE_OPEN_READONLY);
return r;
}
sqlite sqlite::write(const fs::path& p)
{
sqlite r;
r.impl = std::make_shared<sqlite_impl>();
// Using '+' instead of bitwise '|' to avoid compilation warning
r.impl->open(p, SQLITE_OPEN_READWRITE + SQLITE_OPEN_CREATE);
return r;
}
std::vector<std::unordered_map<std::string, std::string>> sqlite::execute(const std::string& s)
{
std::vector<std::unordered_map<std::string, std::string>> result;
impl->exec(s.c_str(), [&](int n, char** texts, char** names) {
std::unordered_map<std::string, std::string> row;
row.reserve(n);
std::transform(
names,
names + n,
texts,
std::inserter(row, row.begin()),
[&](const char* name, const char* text) { return std::make_pair(name, text); });
result.push_back(row);
});
return result;
}
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
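A hedged usage sketch for the wrapper above, using only the API defined in this file; the database filename "tuning.db" and table name "perf_db" are illustrative assumptions. Each returned row maps a column name to its text value:

#include <iostream>
#include <migraphx/sqlite.hpp>

int main()
{
    auto db   = migraphx::sqlite::read("tuning.db"); // assumed to exist
    auto rows = db.execute("SELECT * FROM perf_db;"); // assumed table
    for(const auto& row : rows)
        for(const auto& [col, text] : row)
            std::cout << col << " = " << text << '\n';
}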
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/target_assignments.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
void target_assignments::add_assignment(instruction_ref ins, const std::string& target)
{
assignments.emplace(ins, target);
}
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
add_library(migraphx_fpga
target.cpp
lowering.cpp
subgraph.cpp
vitis_ai_adapter.cpp
)
set_target_properties(migraphx_fpga PROPERTIES EXPORT_NAME fpga)
rocm_set_soversion(migraphx_fpga ${MIGRAPHX_SO_VERSION})
rocm_clang_tidy_check(migraphx_fpga)
target_link_libraries(migraphx_fpga migraphx)
rocm_install_targets(
TARGETS migraphx_fpga
INCLUDE
${CMAKE_CURRENT_SOURCE_DIR}/include
)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_FPGA_CONTEXT_HPP
#define MIGRAPHX_GUARD_FPGA_CONTEXT_HPP
#include <migraphx/config.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace fpga {
struct context
{
int id = 0;
void finish() const {}
};
} // namespace fpga
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif // MIGRAPHX_GUARD_FPGA_CONTEXT_HPP
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_FPGA_LOWERING_HPP
#define MIGRAPHX_GUARD_FPGA_LOWERING_HPP
#include <migraphx/program.hpp>
#include <migraphx/config.hpp>
#include <migraphx/fpga/context.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace fpga {
struct lowering
{
context* ctx = nullptr;
std::string name() const { return "fpga::lowering"; }
void apply(module& m) const;
};
} // namespace fpga
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif // MIGRAPHX_GUARD_FPGA_LOWERING_HPP