Commit 5ec8f913 authored by Ted Themistokleous's avatar Ted Themistokleous Committed by Ted Themistokleous
Browse files

Merge branch 'develop' into simplify_1_mul_div_ops

parents 32d69e8e d78bcdfb
...@@ -42,7 +42,7 @@ void cal_auto_padding_size(onnx_parser::node_info info, ...@@ -42,7 +42,7 @@ void cal_auto_padding_size(onnx_parser::node_info info,
size_t kdims = in_lens.size() - 2; size_t kdims = in_lens.size() - 2;
assert(k_lens.size() == kdims and dilation.size() == kdims); assert(k_lens.size() == kdims and dilation.size() == kdims);
if(!contains(info.attributes, "auto_pad")) if(not contains(info.attributes, "auto_pad"))
{ {
return; return;
} }
...@@ -124,7 +124,7 @@ void tune_padding_size(const value& v, ...@@ -124,7 +124,7 @@ void tune_padding_size(const value& v,
} }
// if padding is symmetric, return directly // if padding is symmetric, return directly
if(!is_asym_padding(padding)) if(not is_asym_padding(padding))
{ {
return; return;
} }
......
...@@ -38,7 +38,7 @@ struct parse_cast : op_parser<parse_cast> ...@@ -38,7 +38,7 @@ struct parse_cast : op_parser<parse_cast>
onnx_parser::node_info info, onnx_parser::node_info info,
const std::vector<instruction_ref>& args) const const std::vector<instruction_ref>& args) const
{ {
if(!contains(info.attributes, "to")) if(not contains(info.attributes, "to"))
{ {
MIGRAPHX_THROW("PARSE_CAST: missing to type attribute!"); MIGRAPHX_THROW("PARSE_CAST: missing to type attribute!");
} }
......
...@@ -93,7 +93,7 @@ struct parse_constant_fill : op_parser<parse_constant_fill> ...@@ -93,7 +93,7 @@ struct parse_constant_fill : op_parser<parse_constant_fill>
} }
else if(input_as_shape == 0) else if(input_as_shape == 0)
{ {
if(!contains(info.attributes, "shape")) if(not contains(info.attributes, "shape"))
{ {
MIGRAPHX_THROW("ConstantFill: attribute output shape is needed"); MIGRAPHX_THROW("ConstantFill: attribute output shape is needed");
} }
......
...@@ -94,7 +94,7 @@ struct parse_gemm : op_parser<parse_gemm> ...@@ -94,7 +94,7 @@ struct parse_gemm : op_parser<parse_gemm>
out_lens.back() = l2->get_shape().lens().back(); out_lens.back() = l2->get_shape().lens().back();
auto l3 = args[2]; auto l3 = args[2];
auto l3_lens = l3->get_shape().lens(); auto l3_lens = l3->get_shape().lens();
if(!std::equal(out_lens.begin(), out_lens.end(), l3_lens.begin(), l3_lens.end())) if(not std::equal(out_lens.begin(), out_lens.end(), l3_lens.begin(), l3_lens.end()))
{ {
l3 = info.add_instruction(make_op("multibroadcast", {{"out_lens", out_lens}}), l3 = info.add_instruction(make_op("multibroadcast", {{"out_lens", out_lens}}),
args[2]); args[2]);
......
...@@ -58,7 +58,6 @@ struct parse_generic_op : op_parser<parse_generic_op> ...@@ -58,7 +58,6 @@ struct parse_generic_op : op_parser<parse_generic_op>
{"Log", "log"}, {"Log", "log"},
{"LRN", "lrn"}, {"LRN", "lrn"},
{"Neg", "neg"}, {"Neg", "neg"},
{"NonMaxSuppression", "nonmaxsuppression"},
{"Reciprocal", "recip"}, {"Reciprocal", "recip"},
{"Relu", "relu"}, {"Relu", "relu"},
{"Round", "round"}, {"Round", "round"},
...@@ -75,7 +74,7 @@ struct parse_generic_op : op_parser<parse_generic_op> ...@@ -75,7 +74,7 @@ struct parse_generic_op : op_parser<parse_generic_op>
bool needs_contiguous(const std::string& op_name) const bool needs_contiguous(const std::string& op_name) const
{ {
return contains({"flatten", "gather", "nonmaxsuppression", "scatter"}, op_name); return contains({"flatten", "gather", "scatter"}, op_name);
} }
instruction_ref parse(const op_desc& opd, instruction_ref parse(const op_desc& opd,
......
...@@ -31,7 +31,7 @@ namespace migraphx { ...@@ -31,7 +31,7 @@ namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS { inline namespace MIGRAPHX_INLINE_NS {
namespace onnx { namespace onnx {
//! Parser for LpNormalization ONNX operator. // Parser for LpNormalization ONNX operator.
/*! /*!
Normalizes a tensor by the L1 or L2 norms along a given axis. Normalizes a tensor by the L1 or L2 norms along a given axis.
Norms that evaluate to 0 are changed to 1 to prevent division by zero. Norms that evaluate to 0 are changed to 1 to prevent division by zero.
......
...@@ -67,7 +67,8 @@ struct parse_matmul : op_parser<parse_matmul> ...@@ -67,7 +67,8 @@ struct parse_matmul : op_parser<parse_matmul>
instruction_ref bl0 = l0; instruction_ref bl0 = l0;
instruction_ref bl1 = l1; instruction_ref bl1 = l1;
if(!std::equal(l0_lens.rbegin() + 2, l0_lens.rend(), l1_lens.rbegin() + 2, l1_lens.rend())) if(not std::equal(
l0_lens.rbegin() + 2, l0_lens.rend(), l1_lens.rbegin() + 2, l1_lens.rend()))
{ {
auto l0_it = l0_lens.begin() + l0_lens.size() - 2; auto l0_it = l0_lens.begin() + l0_lens.size() - 2;
std::vector<std::size_t> l0_broadcasted_lens(l0_lens.begin(), l0_it); std::vector<std::size_t> l0_broadcasted_lens(l0_lens.begin(), l0_it);
......
...@@ -40,9 +40,9 @@ struct parse_mod : op_parser<parse_mod> ...@@ -40,9 +40,9 @@ struct parse_mod : op_parser<parse_mod>
std::vector<instruction_ref> args) const std::vector<instruction_ref> args) const
{ {
std::string mod = "mod"; std::string mod = "mod";
if(is_type_float(args[0]->get_shape().type()) || is_type_float(args[1]->get_shape().type())) if(is_type_float(args[0]->get_shape().type()) or is_type_float(args[1]->get_shape().type()))
{ {
if(!contains(info.attributes, "fmod")) if(not contains(info.attributes, "fmod"))
{ {
MIGRAPHX_THROW("Mod operator with float args and fmod=0 invalid"); MIGRAPHX_THROW("Mod operator with float args and fmod=0 invalid");
} }
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/make_op.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
//! Parser for the ONNX NonMaxSuppression operator.
struct parse_nonmaxsuppression : op_parser<parse_nonmaxsuppression>
{
    //! Maps the ONNX op name to the MIGraphX operator name.
    std::vector<op_desc> operators() const
    {
        return {{"NonMaxSuppression", "nonmaxsuppression"}};
    }

    //! Builds a nonmaxsuppression instruction, propagating the parser's
    //! dynamic-output setting into the operator's value.
    instruction_ref parse(const op_desc& opd,
                          const onnx_parser& parser,
                          const onnx_parser::node_info& info,
                          const std::vector<instruction_ref>& args) const
    {
        // Load the registered operator by name, then override its
        // use_dyn_output attribute from the parser configuration.
        auto nms_op = parser.load(opd.op_name, info);
        nms_op.from_value({{"use_dyn_output", parser.use_dyn_output}});
        return info.add_instruction(nms_op, args);
    }
};
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
...@@ -37,7 +37,7 @@ static std::vector<std::size_t> nonzero_indices(const std::vector<T>& data) ...@@ -37,7 +37,7 @@ static std::vector<std::size_t> nonzero_indices(const std::vector<T>& data)
std::vector<std::size_t> indices; std::vector<std::size_t> indices;
for(std::size_t i = 0; i < data.size(); ++i) for(std::size_t i = 0; i < data.size(); ++i)
{ {
if(!float_equal(data[i], 0)) if(not float_equal(data[i], 0))
indices.push_back(i); indices.push_back(i);
} }
......
...@@ -160,7 +160,7 @@ struct parse_pad : op_parser<parse_pad> ...@@ -160,7 +160,7 @@ struct parse_pad : op_parser<parse_pad>
if(args.size() == 3) if(args.size() == 3)
{ {
auto val_ins = args.at(2); auto val_ins = args.at(2);
if(!val_ins->can_eval()) if(not val_ins->can_eval())
{ {
MIGRAPHX_THROW("PARSE_PAD: input value must be constant"); MIGRAPHX_THROW("PARSE_PAD: input value must be constant");
} }
......
...@@ -157,7 +157,7 @@ struct parse_pooling : op_parser<parse_pooling> ...@@ -157,7 +157,7 @@ struct parse_pooling : op_parser<parse_pooling>
std::vector<int64_t> slice_end; std::vector<int64_t> slice_end;
tune_padding_size(values, paddings, count_include_pad, slice_start); tune_padding_size(values, paddings, count_include_pad, slice_start);
if(!slice_start.empty()) if(not slice_start.empty())
{ {
// calculate expected output shape // calculate expected output shape
orig_padding.insert(orig_padding.begin() + kdims, 2, 0); orig_padding.insert(orig_padding.begin() + kdims, 2, 0);
...@@ -180,7 +180,7 @@ struct parse_pooling : op_parser<parse_pooling> ...@@ -180,7 +180,7 @@ struct parse_pooling : op_parser<parse_pooling>
op.from_value(values); op.from_value(values);
auto l1 = info.add_instruction(op, l0); auto l1 = info.add_instruction(op, l0);
if(!slice_start.empty()) if(not slice_start.empty())
{ {
std::vector<int64_t> axes(kdims); std::vector<int64_t> axes(kdims);
std::iota(axes.begin(), axes.end(), 2); std::iota(axes.begin(), axes.end(), 2);
......
...@@ -46,7 +46,7 @@ auto compute_type(shape::type_t t1, shape::type_t t2) ...@@ -46,7 +46,7 @@ auto compute_type(shape::type_t t1, shape::type_t t2)
int it1 = t1; int it1 = t1;
int it2 = t2; int it2 = t2;
if(!contains(op_order, it1) or !contains(op_order, it2)) if(not contains(op_order, it1) or not contains(op_order, it2))
{ {
MIGRAPHX_THROW("PARSE_POW: Input data type not supported!"); MIGRAPHX_THROW("PARSE_POW: Input data type not supported!");
} }
......
...@@ -56,7 +56,7 @@ const auto& get_nearest_op(const std::string& mode) ...@@ -56,7 +56,7 @@ const auto& get_nearest_op(const std::string& mode)
return static_cast<std::size_t>(std::ceil((val))); return static_cast<std::size_t>(std::ceil((val)));
}}}; }}};
if(!contains(nearest_ops, mode)) if(not contains(nearest_ops, mode))
{ {
MIGRAPHX_THROW("PARSE_RESIZE: nearest_mode " + mode + " not supported!"); MIGRAPHX_THROW("PARSE_RESIZE: nearest_mode " + mode + " not supported!");
} }
...@@ -86,7 +86,7 @@ const auto& get_original_idx_op(const std::string& mode) ...@@ -86,7 +86,7 @@ const auto& get_original_idx_op(const std::string& mode)
return (idx + 0.5) / scale; return (idx + 0.5) / scale;
}}}; }}};
if(!contains(idx_ops, mode)) if(not contains(idx_ops, mode))
{ {
MIGRAPHX_THROW("PARSE_RESIZE: coordinate_transformation_mode " + mode + " not supported!"); MIGRAPHX_THROW("PARSE_RESIZE: coordinate_transformation_mode " + mode + " not supported!");
} }
......
...@@ -31,7 +31,7 @@ namespace migraphx { ...@@ -31,7 +31,7 @@ namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS { inline namespace MIGRAPHX_INLINE_NS {
namespace onnx { namespace onnx {
//! Parser for ReverseSequence ONNX operator. // Parser for ReverseSequence ONNX operator.
/*! /*!
Reverses the data along the time axis for the batches along the batch axis. Reverses the data along the time axis for the batches along the batch axis.
The sequence lengths can be given to reverse up to the given length for each batch, keeping the The sequence lengths can be given to reverse up to the given length for each batch, keeping the
......
...@@ -29,7 +29,7 @@ inline namespace MIGRAPHX_INLINE_NS { ...@@ -29,7 +29,7 @@ inline namespace MIGRAPHX_INLINE_NS {
void memory_coloring::apply(module& m) const void memory_coloring::apply(module& m) const
{ {
if(!enabled(MIGRAPHX_DISABLE_MEMORY_COLORING{})) if(not enabled(MIGRAPHX_DISABLE_MEMORY_COLORING{}))
{ {
memory_coloring_impl opt(&m, allocation_op, verify); memory_coloring_impl opt(&m, allocation_op, verify);
opt.run(); opt.run();
......
...@@ -42,7 +42,7 @@ void memory_coloring_impl::run() ...@@ -42,7 +42,7 @@ void memory_coloring_impl::run()
{ {
MIGRAPHX_DEBUG(dump_intervals()); MIGRAPHX_DEBUG(dump_intervals());
// Coloring // Coloring
while(!alloc_queue.empty()) while(not alloc_queue.empty())
{ {
interval_ptr interval = alloc_queue.top(); interval_ptr interval = alloc_queue.top();
allocate(interval); allocate(interval);
...@@ -96,7 +96,7 @@ bool memory_coloring_impl::allocate(interval_ptr interval) ...@@ -96,7 +96,7 @@ bool memory_coloring_impl::allocate(interval_ptr interval)
} }
std::size_t offset = 0; std::size_t offset = 0;
while(!conflict_queue.empty()) while(not conflict_queue.empty())
{ {
live_range* range = conflict_queue.top(); live_range* range = conflict_queue.top();
std::size_t iter_offset = range->offset; std::size_t iter_offset = range->offset;
...@@ -149,7 +149,7 @@ void memory_coloring_impl::build() ...@@ -149,7 +149,7 @@ void memory_coloring_impl::build()
{ {
def_interval = instr2_live[p_iter]; def_interval = instr2_live[p_iter];
bool is_lit = is_literal(iter); bool is_lit = is_literal(iter);
if(is_allocate(iter) || is_lit) if(is_allocate(iter) or is_lit)
{ {
live_range& range = def_interval->segment; live_range& range = def_interval->segment;
def_interval->result = iter->get_shape(); def_interval->result = iter->get_shape();
...@@ -157,12 +157,12 @@ void memory_coloring_impl::build() ...@@ -157,12 +157,12 @@ void memory_coloring_impl::build()
range.begin = cur_points; range.begin = cur_points;
def_interval->def_point = cur_points; def_interval->def_point = cur_points;
range.size = (iter->get_shape()).bytes(); range.size = (iter->get_shape()).bytes();
if(!is_lit || unify_literals) if(not is_lit or unify_literals)
alloc_queue.push(def_interval); alloc_queue.push(def_interval);
live_set.erase(range.vn); live_set.erase(range.vn);
} }
} }
else if(!is_param(iter) && !is_outline(iter) && !is_check_context(iter)) else if(not is_param(iter) && not is_outline(iter) && not is_check_context(iter))
{ {
is_dead = true; is_dead = true;
} }
...@@ -179,7 +179,7 @@ void memory_coloring_impl::build() ...@@ -179,7 +179,7 @@ void memory_coloring_impl::build()
if(not p_mod->has_instruction(arg)) if(not p_mod->has_instruction(arg))
continue; continue;
if(is_param(arg) || is_outline(arg)) if(is_param(arg) or is_outline(arg))
{ {
if(is_output_param(arg)) if(is_output_param(arg))
is_dead = false; is_dead = false;
...@@ -235,7 +235,7 @@ void memory_coloring_impl::rewrite() ...@@ -235,7 +235,7 @@ void memory_coloring_impl::rewrite()
if(interval->get_begin() == invalid_offset) if(interval->get_begin() == invalid_offset)
continue; continue;
if(!unify_literals && interval->is_literal) if(not unify_literals && interval->is_literal)
continue; continue;
std::size_t offset = 0; std::size_t offset = 0;
...@@ -272,7 +272,7 @@ void memory_coloring_impl::verify() ...@@ -272,7 +272,7 @@ void memory_coloring_impl::verify()
if(segment.begin == invalid_offset) if(segment.begin == invalid_offset)
{ {
// if(!interval.is_live_on_entry) // if(not interval.is_live_on_entry)
// MIGRAPHX_THROW("interval is not live on entry"); // MIGRAPHX_THROW("interval is not live on entry");
continue; continue;
} }
...@@ -290,7 +290,7 @@ void memory_coloring_impl::verify() ...@@ -290,7 +290,7 @@ void memory_coloring_impl::verify()
live_range* range = live_ranges[iter]; live_range* range = live_ranges[iter];
if(range->offset == invalid_offset) if(range->offset == invalid_offset)
continue; continue;
if(!is_disjoin(*range, segment)) if(not is_disjoin(*range, segment))
MIGRAPHX_THROW("range and segment is not disjoined"); MIGRAPHX_THROW("range and segment is not disjoined");
} }
} }
......
...@@ -125,11 +125,11 @@ struct memory_coloring_impl ...@@ -125,11 +125,11 @@ struct memory_coloring_impl
static bool is_disjoin(const live_range& range1, const live_range& range2) static bool is_disjoin(const live_range& range1, const live_range& range2)
{ {
if((range1.size == 0) || (range2.size == 0)) if((range1.size == 0) or (range2.size == 0))
return false; return false;
auto end1 = range1.offset + range1.size - 1; auto end1 = range1.offset + range1.size - 1;
auto end2 = range2.offset + range2.size - 1; auto end2 = range2.offset + range2.size - 1;
return ((end1 < range2.offset) || (end2 < range1.offset)); return ((end1 < range2.offset) or (end2 < range1.offset));
} }
void verify(); void verify();
#ifdef MIGRAPHX_DEBUG_OPT #ifdef MIGRAPHX_DEBUG_OPT
......
...@@ -60,7 +60,7 @@ std::vector<std::size_t> calc_dyn_auto_pad(std::vector<std::size_t> tensor_lens, ...@@ -60,7 +60,7 @@ std::vector<std::size_t> calc_dyn_auto_pad(std::vector<std::size_t> tensor_lens,
{ {
std::vector<std::size_t> padding; std::vector<std::size_t> padding;
padding.resize(2 * k_lens.size()); padding.resize(2 * k_lens.size());
for(size_t i = 0; i < padding.size() / 2; i++) for(std::size_t i = 0; i < padding.size() / 2; i++)
{ {
std::ptrdiff_t input_dim = tensor_lens[i]; std::ptrdiff_t input_dim = tensor_lens[i];
std::ptrdiff_t stride = strides[i]; std::ptrdiff_t stride = strides[i];
......
...@@ -50,7 +50,7 @@ int exec(const std::string& cmd, const std::function<void(const char*)>& std_out ...@@ -50,7 +50,7 @@ int exec(const std::string& cmd, const std::function<void(const char*)>& std_out
{ {
// TODO: Use execve instead of popen // TODO: Use execve instead of popen
std::unique_ptr<FILE, decltype(closer)> pipe(popen(cmd.c_str(), "r"), closer); // NOLINT std::unique_ptr<FILE, decltype(closer)> pipe(popen(cmd.c_str(), "r"), closer); // NOLINT
if(!pipe) if(not pipe)
MIGRAPHX_THROW("popen() failed: " + cmd); MIGRAPHX_THROW("popen() failed: " + cmd);
std::array<char, 128> buffer; std::array<char, 128> buffer;
while(fgets(buffer.data(), buffer.size(), pipe.get()) != nullptr) while(fgets(buffer.data(), buffer.size(), pipe.get()) != nullptr)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment