Commit 3a4d36cf authored by charlie

Merge branch 'develop' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into dyn_model_test

parents 6bec381f e19f78ae
@@ -64,5 +64,10 @@ operation make_op_from_value(const std::string& name, const value& v)
     });
 }
 
+operation make_json_op(const std::string& name, const std::string& s)
+{
+    return make_op(name, from_json_string(convert_to_json(s)));
+}
+
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
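The new helper composes from_json_string(convert_to_json(s)), so an op can be constructed from a compact attribute string. A minimal usage sketch (the op name and attributes are illustrative, not taken from this diff):

    // value-based construction, as before
    auto op1 = migraphx::make_op("slice", {{"axes", {0}}, {"starts", {0}}, {"ends", {1}}});
    // string-based construction via the new helper; convert_to_json also accepts
    // relaxed spellings such as unquoted keys
    auto op2 = migraphx::make_json_op("slice", "{\"axes\":[0],\"starts\":[0],\"ends\":[1]}");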
@@ -141,12 +141,12 @@ void module::set_bypass(bool b) { impl->bypass = b; }
 void module::assign(const module& m)
 {
     // copy the impl
-    if(!impl)
+    if(not impl)
         impl = std::make_unique<module_impl>();
     *impl = *m.impl;
 
     // clear instructions
-    if(!impl->instructions.empty())
+    if(not impl->instructions.empty())
     {
         impl->clear();
     }
@@ -346,7 +346,7 @@ instruction_ref module::replace_instruction(instruction_ref ins, instruction_ref
         assert(out->valid(begin()));
     }
     // Replacement should not be dead code unless it's the last instruction
-    assert(!rep->outputs().empty() or rep == std::prev(end()));
+    assert(not rep->outputs().empty() or rep == std::prev(end()));
     // Output of the original instruction should only be the replacement or empty
     assert(ins->outputs().empty() or std::all_of(ins->outputs().begin(),
                                                  ins->outputs().end(),
@@ -385,9 +385,13 @@ instruction_ref module::move_instruction(instruction_ref src, instruction_ref ds
 instruction_ref module::move_instructions(instruction_ref src, instruction_ref dst)
 {
-    this->move_instruction(src, dst);
     for(auto ins : src->inputs())
-        this->move_instruction(ins, src);
+    {
+        if(not contains(this->impl->instructions, ins))
+            continue;
+        this->move_instructions(ins, dst);
+    }
+    this->move_instruction(src, dst);
     return src;
 }
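The rewrite changes move_instructions from hoisting only the direct inputs above src to recursively relocating src's whole producer chain ahead of dst, skipping inputs owned by another module, and moving src itself last so producers always land before their consumer. A standalone sketch of the same pattern on plain lists (not the MIGraphX types; all names here are invented):

    #include <algorithm>
    #include <list>
    #include <vector>

    struct node
    {
        std::vector<node*> inputs;
    };

    // place n immediately before dst in seq
    void move_before(std::list<node*>& seq, node* n, node* dst)
    {
        seq.remove(n);
        seq.insert(std::find(seq.begin(), seq.end(), dst), n);
    }

    // recursively hoist the producer chain of src, then src itself
    void move_chain(std::list<node*>& seq, node* src, node* dst)
    {
        for(node* in : src->inputs)
        {
            if(std::find(seq.begin(), seq.end(), in) == seq.end())
                continue; // producer owned elsewhere, as in the containment check above
            move_chain(seq, in, dst);
        }
        move_before(seq, src, dst);
    }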
@@ -598,7 +602,7 @@ instruction_ref module::validate() const
         auto inputs      = i.inputs();
         bool check_order = std::all_of(
             inputs.begin(), inputs.end(), [&](auto in) { return has_instruction(in); });
-        return !i.valid(impl->instructions.begin(), check_order);
+        return not i.valid(impl->instructions.begin(), check_order);
     });
 }
...@@ -754,7 +758,7 @@ void module::print_graph(std::ostream& os, bool brief) const ...@@ -754,7 +758,7 @@ void module::print_graph(std::ostream& os, bool brief) const
label = to_string(ins->get_operator()); label = to_string(ins->get_operator());
os << "\t" << enclose_name(ins_names.at(ins)) << "[label=" << enclose_name(label) << "]"; os << "\t" << enclose_name(ins_names.at(ins)) << "[label=" << enclose_name(label) << "]";
os << ";" << std::endl; os << ";" << std::endl;
if(!ins->inputs().empty()) if(not ins->inputs().empty())
{ {
for(auto&& arg : ins->inputs()) for(auto&& arg : ins->inputs())
{ {
@@ -788,12 +792,15 @@ static std::string cpp_var_name(const std::string& name)
 static void print_make_op(std::ostream& os, const operation& op)
 {
-    os << "migraphx::make_op(" << enclose_name(op.name());
     auto v = op.to_value();
     if(not v.empty())
     {
-        os << ", "
-           << "migraphx::from_json_string(" << enclose_name(to_json_string(v)) << ")";
+        os << "migraphx::make_json_op(" << enclose_name(op.name());
+        os << ", " << enclose_name(to_json_string(v));
+    }
+    else
+    {
+        os << "migraphx::make_op(" << enclose_name(op.name());
     }
     os << ")";
 }
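With this change, the generated C++ for an op with attributes round-trips through the make_json_op helper added above. An illustrative before/after (the op and its attributes are made up for the example):

    // before: migraphx::make_op("slice", migraphx::from_json_string("{\"axes\":[0]}"))
    // after:  migraphx::make_json_op("slice", "{\"axes\":[0]}")
    // an attribute-free op still prints as: migraphx::make_op("identity")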
@@ -905,7 +912,7 @@ module& module::sort()
         this->move_instruction(ins, this->begin());
         for(auto child : ins->inputs())
         {
-            if(!contains(this->impl->instructions, child))
+            if(not contains(this->impl->instructions, child))
             {
                 continue;
             }
...
@@ -79,14 +79,14 @@ auto tune_attribute(const std::vector<int64_t>& vec,
 {
     if(contains(vec_attrs, op::normalize_attribute::include_max))
     {
-        if(!std::equal(result.begin(), result.end(), max_vals.begin(), std::less_equal<>{}))
+        if(not std::equal(result.begin(), result.end(), max_vals.begin(), std::less_equal<>{}))
         {
             MIGRAPHX_THROW("TUNE_VECTOR: value out of range!");
         }
     }
     else
     {
-        if(!std::equal(result.begin(), result.end(), max_vals.begin(), std::less<>{}))
+        if(not std::equal(result.begin(), result.end(), max_vals.begin(), std::less<>{}))
         {
             MIGRAPHX_THROW("TUNE_VECTOR: value out of range!");
         }
@@ -118,14 +118,15 @@ auto tune_attribute(const std::vector<int64_t>& vec,
 {
     if(contains(vec_attrs, op::normalize_attribute::include_min))
     {
-        if(!std::equal(min_vals.begin(), min_vals.end(), result.begin(), std::less_equal<>{}))
+        if(not std::equal(
+               min_vals.begin(), min_vals.end(), result.begin(), std::less_equal<>{}))
         {
             MIGRAPHX_THROW("TUNE_VECTOR: attribute out of range!");
         }
     }
     else
     {
-        if(!std::equal(result.begin(), result.end(), min_vals.begin(), std::less<>{}))
+        if(not std::equal(result.begin(), result.end(), min_vals.begin(), std::less<>{}))
         {
             MIGRAPHX_THROW("TUNE_VECTOR: attribute out of range!");
         }
@@ -174,7 +175,7 @@ bool normalize_attributes(operation& op, const std::vector<std::size_t>& lens)
             tuned = true;
         }
     }
-    if(!attrs.contains("normalize_axes"))
+    if(not attrs.contains("normalize_axes"))
    {
         return tuned;
     }
...
@@ -97,6 +97,7 @@ struct onnx_parser
     shape::dynamic_dimension default_dyn_dim_value = {1, 1, 0};
     std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims;
     std::unordered_map<std::string, std::vector<shape::dynamic_dimension>> map_dyn_input_dims;
+    bool use_dyn_output         = false;
     bool skip_unknown_operators = false;
     int64_t max_loop_iterations = 10;
     int64_t opset_version       = 13;
...
@@ -60,8 +60,14 @@ program parse_onnx_from(const onnx_options& options, Ts&&... xs)
     {
         parser.default_dyn_dim_value = options.default_dyn_dim_value;
     }
+    if(not options.map_input_dims.empty() and not options.map_dyn_input_dims.empty())
+    {
+        MIGRAPHX_THROW("PARSE_ONNX_FROM: both map_input_dims and map_dyn_input_dims non-empty, "
+                       "only one should be used");
+    }
     parser.skip_unknown_operators = options.skip_unknown_operators;
     parser.max_loop_iterations    = options.max_loop_iterations;
+    parser.use_dyn_output         = options.use_dyn_output;
 
     if(options.print_program_on_error)
     {
@@ -80,6 +86,7 @@ program parse_onnx_from(const onnx_options& options, Ts&&... xs)
     {
         parser.parse_from(std::forward<Ts>(xs)...);
     }
+
     return std::move(parser.prog);
 }
...
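Hoisting the mutual-exclusion check from parse_graph (removed below) into parse_onnx_from means it now runs once per parse call, directly against the user-supplied options. A hedged usage sketch (onnx_options and parse_onnx are from the public migraphx/onnx.hpp API; the file name is made up):

    migraphx::onnx_options options;
    options.map_input_dims["input"] = {1, 3, 224, 224}; // pin one input to a static shape
    // also filling options.map_dyn_input_dims["input"] would now make parsing throw,
    // since static and dynamic input dimensions are mutually exclusive
    auto prog = migraphx::parse_onnx("model.onnx", options);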
@@ -187,7 +187,7 @@ operation onnx_parser::load(const std::string& name, const node_info& info) cons
 void onnx_parser::parse_undefined(module* mod, const std::string& name)
 {
-    if(!contains(instructions, name))
+    if(not contains(instructions, name))
     {
         auto ins           = mod->add_instruction(make_op("undefined"));
         instructions[name] = ins;
@@ -256,11 +256,6 @@ int64_t onnx_parser::get_opset_version(const onnx::ModelProto& model)
 void onnx_parser::parse_graph(module* mod, const onnx::GraphProto& graph)
 {
-    if(not map_input_dims.empty() and not map_dyn_input_dims.empty())
-    {
-        MIGRAPHX_THROW("PARSE_GRAPH: both map_input_dims and map_dyn_input_dims non-empty, only"
-                       "one should be used");
-    }
     std::unordered_map<std::string, instruction_ref> mod_insts;
     for(auto&& f : graph.initializer())
     {
@@ -272,7 +267,7 @@ void onnx_parser::parse_graph(module* mod, const onnx::GraphProto& graph)
     {
         const std::string& name = input.name();
         // input not in initializer_data, so it is a real input
-        if(!contains(mod_insts, name))
+        if(not contains(mod_insts, name))
         {
             // ONNX specification does not specify how to deal with the
             // scenario that a nested subgraph contains a parameter with the
@@ -359,7 +354,7 @@ void onnx_parser::parse_graph(module* mod, const onnx::GraphProto& graph)
                  all_output_names.begin(),
                  all_output_names.end(),
                  std::back_inserter(prog_output_names),
-                 [&](const auto& name) { return !(name.empty() or instructions.count(name) == 0); });
+                 [&](const auto& name) { return not(name.empty() or instructions.count(name) == 0); });
 
     std::vector<instruction_ref> output_ins;
     std::transform(prog_output_names.begin(),
@@ -449,7 +444,7 @@ shape onnx_parser::parse_type(const onnx::TypeProto& t,
                               const std::vector<std::size_t>& input_dims) const
 {
     shape::type_t shape_type = get_type(t.tensor_type().elem_type());
-    if(!input_dims.empty())
+    if(not input_dims.empty())
     {
         return {shape_type, input_dims};
     }
@@ -516,7 +511,7 @@ shape::type_t get_type(int dtype)
 bool is_type_float(shape::type_t dtype)
 {
     bool r = false;
-    if(dtype == shape::float_type || dtype == shape::double_type || dtype == shape::half_type)
+    if(dtype == shape::float_type or dtype == shape::double_type or dtype == shape::half_type)
     {
         r = true;
     }
...
@@ -42,7 +42,7 @@ void cal_auto_padding_size(onnx_parser::node_info info,
     size_t kdims = in_lens.size() - 2;
     assert(k_lens.size() == kdims and dilation.size() == kdims);
 
-    if(!contains(info.attributes, "auto_pad"))
+    if(not contains(info.attributes, "auto_pad"))
     {
         return;
     }
@@ -124,7 +124,7 @@ void tune_padding_size(const value& v,
     }
 
     // if padding is symmetric, return directly
-    if(!is_asym_padding(padding))
+    if(not is_asym_padding(padding))
     {
         return;
     }
...
@@ -24,7 +24,7 @@
 #include <migraphx/onnx/op_parser.hpp>
 #include <migraphx/ranges.hpp>
 #include <migraphx/make_op.hpp>
-#include <migraphx/op/batch_norm_inference.hpp>
+#include <migraphx/instruction.hpp>
 
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
@@ -36,28 +36,63 @@ struct parse_batchnorm : op_parser<parse_batchnorm>
     instruction_ref parse(const op_desc& /*opd*/,
                           const onnx_parser& parser,
-                          onnx_parser::node_info info,
-                          const std::vector<instruction_ref>& args) const
+                          const onnx_parser::node_info& info,
+                          std::vector<instruction_ref> args) const
     {
         float epsilon = 1e-5f;
-        float momentum = 0.9f;
-        op::batch_norm_inference::bn_infer_mode_t bn_mode = op::batch_norm_inference::spatial;
         if(contains(info.attributes, "epsilon"))
         {
             epsilon = parser.parse_value(info.attributes.at("epsilon")).at<float>();
         }
-        if(contains(info.attributes, "momentum"))
+        auto x_lens = args[0]->get_shape().lens();
+        auto x_type = args[0]->get_shape().type();
+        if(std::any_of(args.cbegin() + 1, args.cend(), [](auto a) {
+               return a->get_shape().lens().size() != 1;
+           }))
+        {
+            MIGRAPHX_THROW("PARSE_BATCHNORM: argument scale, bias, mean, or var rank != 1");
+        }
+        if(x_lens.size() == 1)
+        {
+            auto rt   = info.add_literal(migraphx::literal{migraphx::shape{x_type}, {0.5}});
+            auto eps  = info.add_literal(migraphx::literal{migraphx::shape{x_type}, {epsilon}});
+            auto n0   = info.add_broadcastable_binary_op("sub", args[0], args[3]);
+            auto d0   = info.add_broadcastable_binary_op("add", args[4], eps);
+            auto d1   = info.add_broadcastable_binary_op("pow", d0, rt);
+            auto div0 = info.add_broadcastable_binary_op("div", n0, d1);
+            auto r0   = info.add_broadcastable_binary_op("mul", div0, args[1]);
+            return info.add_broadcastable_binary_op("add", r0, args[2]);
+        }
+        else if(x_lens.size() > 2)
         {
-            momentum = parser.parse_value(info.attributes.at("momentum")).at<float>();
+            // unsqueeze tensors of shape (C) to broadcast correctly
+            std::vector<int64_t> unsqueeze_axes(x_lens.size() - 2);
+            std::iota(unsqueeze_axes.begin(), unsqueeze_axes.end(), 1);
+            auto rt  = info.add_literal(migraphx::literal{migraphx::shape{x_type}, {0.5}});
+            auto eps = info.add_literal(migraphx::literal{migraphx::shape{x_type}, {epsilon}});
+            auto scale_unsqueeze = info.add_instruction(
+                migraphx::make_op("unsqueeze", {{"axes", unsqueeze_axes}}), args[1]);
+            auto bias_unsqueeze = info.add_instruction(
+                migraphx::make_op("unsqueeze", {{"axes", unsqueeze_axes}}), args[2]);
+            auto mean_unsqueeze = info.add_instruction(
+                migraphx::make_op("unsqueeze", {{"axes", unsqueeze_axes}}), args[3]);
+            auto var_unsqueeze = info.add_instruction(
+                migraphx::make_op("unsqueeze", {{"axes", unsqueeze_axes}}), args[4]);
+            auto numer   = info.add_broadcastable_binary_op("sub", args[0], mean_unsqueeze);
+            auto var_eps = info.add_broadcastable_binary_op("add", var_unsqueeze, eps);
+            auto denom   = info.add_broadcastable_binary_op("pow", var_eps, rt);
+            auto div0    = info.add_broadcastable_binary_op("div", numer, denom);
+            auto r0      = info.add_broadcastable_binary_op("mul", div0, scale_unsqueeze);
+            return info.add_broadcastable_binary_op("add", r0, bias_unsqueeze);
         }
-        if(contains(info.attributes, "spatial"))
+        else
        {
-            bn_mode = (parser.parse_value(info.attributes.at("spatial")).at<uint64_t>() > 0)
-                          ? op::batch_norm_inference::spatial
-                          : op::batch_norm_inference::per_activation;
+            // num dims either 0 or 2
+            MIGRAPHX_THROW("PARSE_BATCHNORM: rank " + std::to_string(x_lens.size()) +
+                           " input tensor, unhandled data format");
         }
-        op::batch_norm_inference op{epsilon, momentum, bn_mode};
-        return info.add_instruction(op, args);
     }
 };
...
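The parser now lowers BatchNormalization to elementwise primitives instead of a dedicated batch_norm_inference op; both branches above compute the standard inference-mode formula, with pow(var + eps, 0.5) standing in for the square root. A standalone scalar reference of that arithmetic (plain C++, not the MIGraphX API):

    #include <cmath>

    // y = scale * (x - mean) / (var + eps)^0.5 + bias
    float batch_norm_ref(float x, float scale, float bias, float mean, float var,
                         float eps = 1e-5f)
    {
        return scale * (x - mean) / std::pow(var + eps, 0.5f) + bias;
    }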
@@ -38,7 +38,7 @@ struct parse_cast : op_parser<parse_cast>
                           onnx_parser::node_info info,
                           const std::vector<instruction_ref>& args) const
     {
-        if(!contains(info.attributes, "to"))
+        if(not contains(info.attributes, "to"))
         {
             MIGRAPHX_THROW("PARSE_CAST: missing to type attribute!");
         }
...
@@ -93,7 +93,7 @@ struct parse_constant_fill : op_parser<parse_constant_fill>
         }
         else if(input_as_shape == 0)
         {
-            if(!contains(info.attributes, "shape"))
+            if(not contains(info.attributes, "shape"))
             {
                 MIGRAPHX_THROW("ConstantFill: attribute output shape is needed");
             }
...
@@ -94,7 +94,7 @@ struct parse_gemm : op_parser<parse_gemm>
             out_lens.back() = l2->get_shape().lens().back();
             auto l3         = args[2];
             auto l3_lens    = l3->get_shape().lens();
-            if(!std::equal(out_lens.begin(), out_lens.end(), l3_lens.begin(), l3_lens.end()))
+            if(not std::equal(out_lens.begin(), out_lens.end(), l3_lens.begin(), l3_lens.end()))
             {
                 l3 = info.add_instruction(make_op("multibroadcast", {{"out_lens", out_lens}}),
                                           args[2]);
...
@@ -58,7 +58,6 @@ struct parse_generic_op : op_parser<parse_generic_op>
         {"Log", "log"},
         {"LRN", "lrn"},
         {"Neg", "neg"},
-        {"NonMaxSuppression", "nonmaxsuppression"},
         {"Reciprocal", "recip"},
         {"Relu", "relu"},
         {"Round", "round"},
@@ -75,7 +74,7 @@ struct parse_generic_op : op_parser<parse_generic_op>
     bool needs_contiguous(const std::string& op_name) const
     {
-        return contains({"flatten", "gather", "nonmaxsuppression", "scatter"}, op_name);
+        return contains({"flatten", "gather", "scatter"}, op_name);
     }
 
     instruction_ref parse(const op_desc& opd,
...
@@ -31,7 +31,7 @@ namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace onnx {
 
-//! Parser for LpNormalization ONNX operator.
+// Parser for LpNormalization ONNX operator.
 /*!
 Normalizes a tensor by the L1 or L2 norms along a given axis.
 Norms that evaluate to 0 are changed to 1 to prevent division by zero.
...
@@ -67,7 +67,8 @@ struct parse_matmul : op_parser<parse_matmul>
         instruction_ref bl0 = l0;
         instruction_ref bl1 = l1;
-        if(!std::equal(l0_lens.rbegin() + 2, l0_lens.rend(), l1_lens.rbegin() + 2, l1_lens.rend()))
+        if(not std::equal(
+               l0_lens.rbegin() + 2, l0_lens.rend(), l1_lens.rbegin() + 2, l1_lens.rend()))
         {
             auto l0_it = l0_lens.begin() + l0_lens.size() - 2;
             std::vector<std::size_t> l0_broadcasted_lens(l0_lens.begin(), l0_it);
...
@@ -40,9 +40,9 @@ struct parse_mod : op_parser<parse_mod>
                           std::vector<instruction_ref> args) const
     {
         std::string mod = "mod";
-        if(is_type_float(args[0]->get_shape().type()) || is_type_float(args[1]->get_shape().type()))
+        if(is_type_float(args[0]->get_shape().type()) or is_type_float(args[1]->get_shape().type()))
         {
-            if(!contains(info.attributes, "fmod"))
+            if(not contains(info.attributes, "fmod"))
             {
                 MIGRAPHX_THROW("Mod operator with float args and fmod=0 invalid");
             }
...
@@ -21,22 +21,29 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  * THE SOFTWARE.
  */
-#ifndef MIGRAPHX_GUARD_RTGLIB_ACOS_HPP
-#define MIGRAPHX_GUARD_RTGLIB_ACOS_HPP
-
-#include <migraphx/gpu/oper.hpp>
-#include <migraphx/gpu/device/acos.hpp>
+#include <migraphx/onnx/op_parser.hpp>
+#include <migraphx/ranges.hpp>
+#include <migraphx/make_op.hpp>
 
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
-namespace gpu {
+namespace onnx {
 
-struct hip_acos : unary_device<hip_acos, device::acos>
+struct parse_nonmaxsuppression : op_parser<parse_nonmaxsuppression>
 {
+    std::vector<op_desc> operators() const { return {{"NonMaxSuppression", "nonmaxsuppression"}}; }
+
+    instruction_ref parse(const op_desc& opd,
+                          const onnx_parser& parser,
+                          const onnx_parser::node_info& info,
+                          const std::vector<instruction_ref>& args) const
+    {
+        auto op = parser.load(opd.op_name, info);
+        op.from_value({{"use_dyn_output", parser.use_dyn_output}});
+        return info.add_instruction(op, args);
+    }
 };
 
-} // namespace gpu
+} // namespace onnx
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
-
-#endif
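NonMaxSuppression moves out of the generic-op table (see the removal above) into this dedicated parser so the parser-level use_dyn_output option can be forwarded into the operator's value. Roughly, the parsed op carries the flag as if built like this (a sketch; the rest of the attribute set is assumed, not shown in this diff):

    auto op = migraphx::make_op("nonmaxsuppression",
                                {{"center_point_box", 0}, {"use_dyn_output", true}});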
@@ -37,7 +37,7 @@ static std::vector<std::size_t> nonzero_indices(const std::vector<T>& data)
     std::vector<std::size_t> indices;
     for(std::size_t i = 0; i < data.size(); ++i)
     {
-        if(!float_equal(data[i], 0))
+        if(not float_equal(data[i], 0))
             indices.push_back(i);
     }
...
@@ -160,7 +160,7 @@ struct parse_pad : op_parser<parse_pad>
         if(args.size() == 3)
         {
             auto val_ins = args.at(2);
-            if(!val_ins->can_eval())
+            if(not val_ins->can_eval())
             {
                 MIGRAPHX_THROW("PARSE_PAD: input value must be constant");
             }
...
@@ -157,7 +157,7 @@ struct parse_pooling : op_parser<parse_pooling>
         std::vector<int64_t> slice_end;
         tune_padding_size(values, paddings, count_include_pad, slice_start);
 
-        if(!slice_start.empty())
+        if(not slice_start.empty())
         {
             // calculate expected output shape
             orig_padding.insert(orig_padding.begin() + kdims, 2, 0);
@@ -180,7 +180,7 @@ struct parse_pooling : op_parser<parse_pooling>
         op.from_value(values);
         auto l1 = info.add_instruction(op, l0);
-        if(!slice_start.empty())
+        if(not slice_start.empty())
         {
             std::vector<int64_t> axes(kdims);
             std::iota(axes.begin(), axes.end(), 2);
...
@@ -46,7 +46,7 @@ auto compute_type(shape::type_t t1, shape::type_t t2)
     int it1 = t1;
     int it2 = t2;
 
-    if(!contains(op_order, it1) or !contains(op_order, it2))
+    if(not contains(op_order, it1) or not contains(op_order, it2))
     {
         MIGRAPHX_THROW("PARSE_POW: Input data type not supported!");
     }
...