Commit 9d2cdf25 authored by Khalique Ahmed

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into develop

parents 66d50268 ed2c73ac
@@ -25,6 +25,7 @@
 #define MIGRAPHX_GUARD_MIGRAPHX_ASSIGNMENT_HPP
 #include <unordered_map>
+#include <string>
 #include <migraphx/instruction_ref.hpp>
...
@@ -67,7 +67,7 @@ struct tensor_view
     const shape& get_shape() const { return this->m_shape; }
-    bool empty() const { return m_data == nullptr || m_shape.lens().empty(); }
+    bool empty() const { return m_data == nullptr or m_shape.lens().empty(); }
     std::size_t size() const { return m_shape.elements(); }
@@ -109,37 +109,37 @@ struct tensor_view
     T& operator[](std::size_t i)
     {
-        assert(!this->empty() && i < this->size());
+        assert(not this->empty() && i < this->size());
         return m_data[m_shape.index(i)];
     }

     const T& operator[](std::size_t i) const
     {
-        assert(!this->empty() && i < this->size());
+        assert(not this->empty() && i < this->size());
         return m_data[m_shape.index(i)];
     }

     T& front()
     {
-        assert(!this->empty());
+        assert(not this->empty());
         return m_data[0];
     }

     const T& front() const
     {
-        assert(!this->empty());
+        assert(not this->empty());
         return m_data[0];
     }

     T& back()
     {
-        assert(!this->empty());
+        assert(not this->empty());
         return m_data[m_shape.index(this->size() - 1)];
     }

     const T& back() const
     {
-        assert(!this->empty());
+        assert(not this->empty());
         return m_data[m_shape.index(this->size() - 1)];
     }
@@ -159,7 +159,7 @@ struct tensor_view
     friend std::ostream& operator<<(std::ostream& os, const tensor_view<T>& x)
     {
-        if(!x.empty())
+        if(not x.empty())
         {
             os << as_number(x.front());
             for(std::size_t i = 1; i < x.m_shape.elements(); i++)
@@ -182,7 +182,7 @@ bool operator==(const tensor_view<T>& x, const tensor_view<U>& y)
 {
     for(std::size_t i = 0; i < x.get_shape().elements(); i++)
     {
-        if(!float_equal(x[i], y[i]))
+        if(not float_equal(x[i], y[i]))
             return false;
     }
     return true;
@@ -193,7 +193,7 @@ bool operator==(const tensor_view<T>& x, const tensor_view<U>& y)
 template <class T, class U>
 bool operator!=(const tensor_view<T>& x, const tensor_view<U>& y)
 {
-    return !(x == y);
+    return not(x == y);
 }

 template <class T>
...
@@ -34,7 +34,7 @@ inline namespace MIGRAPHX_INLINE_NS {
 inline int tune_axis(const int n_dim, const int axis, const std::string& op_name = "OPERATOR")
 {
-    if(axis >= n_dim || std::abs(axis) > n_dim)
+    if(axis >= n_dim or std::abs(axis) > n_dim)
     {
         MIGRAPHX_THROW(to_upper(op_name) + ": axis is out of range.");
     }
...
@@ -176,13 +176,13 @@ bool operator==(const instruction& x, const instruction& y)
     return true;
 }

-bool operator!=(const instruction& x, const instruction& y) { return !(x == y); }
+bool operator!=(const instruction& x, const instruction& y) { return not(x == y); }

 bool operator==(instruction_ref ref, const instruction& i) { return i == ref; }

-bool operator!=(const instruction& i, instruction_ref ref) { return !(i == ref); }
+bool operator!=(const instruction& i, instruction_ref ref) { return not(i == ref); }

-bool operator!=(instruction_ref ref, const instruction& i) { return !(i == ref); }
+bool operator!=(instruction_ref ref, const instruction& i) { return not(i == ref); }

 void instruction::add_output(instruction_ref ins)
 {
@@ -361,7 +361,7 @@ void instruction::print(std::ostream& os,
         os << "{" << ins->get_literal() << "}";
     }

-    if(!ins->inputs().empty())
+    if(not ins->inputs().empty())
     {
         char delim = '(';
         for(auto&& arg : ins->inputs())
@@ -374,7 +374,7 @@ void instruction::print(std::ostream& os,
     }

     // print module inputs
-    if(!ins->module_inputs().empty())
+    if(not ins->module_inputs().empty())
     {
         std::string delim = ", [";
         for(auto&& mod_arg : ins->module_inputs())
@@ -446,7 +446,7 @@ operation instruction::normalized_operator() const
     if(this->need_normalization())
     {
         auto s = this->inputs().front()->get_shape();
-        if(!normalize_attributes(o, s.max_lens()))
+        if(not normalize_attributes(o, s.max_lens()))
             return this->get_operator();
     }
     return o;
...
@@ -141,12 +141,12 @@ void module::set_bypass(bool b) { impl->bypass = b; }

 void module::assign(const module& m)
 {
     // copy the impl
-    if(!impl)
+    if(not impl)
         impl = std::make_unique<module_impl>();
     *impl = *m.impl;
     // clear instructions
-    if(!impl->instructions.empty())
+    if(not impl->instructions.empty())
     {
         impl->clear();
     }
@@ -346,7 +346,7 @@ instruction_ref module::replace_instruction(instruction_ref ins, instruction_ref
         assert(out->valid(begin()));
     }
     // Replacement should not be dead code unless its the last instruction
-    assert(!rep->outputs().empty() or rep == std::prev(end()));
+    assert(not rep->outputs().empty() or rep == std::prev(end()));
     // Output of the original instruction should only be the replacement or empty
     assert(ins->outputs().empty() or std::all_of(ins->outputs().begin(),
                                                  ins->outputs().end(),
@@ -598,7 +598,7 @@ instruction_ref module::validate() const
         auto inputs = i.inputs();
         bool check_order = std::all_of(
             inputs.begin(), inputs.end(), [&](auto in) { return has_instruction(in); });
-        return !i.valid(impl->instructions.begin(), check_order);
+        return not i.valid(impl->instructions.begin(), check_order);
     });
 }
@@ -754,7 +754,7 @@ void module::print_graph(std::ostream& os, bool brief) const
             label = to_string(ins->get_operator());
         os << "\t" << enclose_name(ins_names.at(ins)) << "[label=" << enclose_name(label) << "]";
         os << ";" << std::endl;
-        if(!ins->inputs().empty())
+        if(not ins->inputs().empty())
         {
             for(auto&& arg : ins->inputs())
             {
@@ -908,7 +908,7 @@ module& module::sort()
         this->move_instruction(ins, this->begin());
         for(auto child : ins->inputs())
         {
-            if(!contains(this->impl->instructions, child))
+            if(not contains(this->impl->instructions, child))
             {
                 continue;
             }
...
@@ -79,14 +79,14 @@ auto tune_attribute(const std::vector<int64_t>& vec,
 {
     if(contains(vec_attrs, op::normalize_attribute::include_max))
     {
-        if(!std::equal(result.begin(), result.end(), max_vals.begin(), std::less_equal<>{}))
+        if(not std::equal(result.begin(), result.end(), max_vals.begin(), std::less_equal<>{}))
         {
             MIGRAPHX_THROW("TUNE_VECTOR: value out of range!");
         }
     }
     else
     {
-        if(!std::equal(result.begin(), result.end(), max_vals.begin(), std::less<>{}))
+        if(not std::equal(result.begin(), result.end(), max_vals.begin(), std::less<>{}))
         {
             MIGRAPHX_THROW("TUNE_VECTOR: value out of range!");
         }
@@ -118,14 +118,15 @@ auto tune_attribute(const std::vector<int64_t>& vec,
 {
     if(contains(vec_attrs, op::normalize_attribute::include_min))
     {
-        if(!std::equal(min_vals.begin(), min_vals.end(), result.begin(), std::less_equal<>{}))
+        if(not std::equal(
+               min_vals.begin(), min_vals.end(), result.begin(), std::less_equal<>{}))
         {
             MIGRAPHX_THROW("TUNE_VECTOR: attribute out of range!");
         }
     }
     else
     {
-        if(!std::equal(result.begin(), result.end(), min_vals.begin(), std::less<>{}))
+        if(not std::equal(result.begin(), result.end(), min_vals.begin(), std::less<>{}))
         {
             MIGRAPHX_THROW("TUNE_VECTOR: attribute out of range!");
         }
@@ -174,7 +175,7 @@ bool normalize_attributes(operation& op, const std::vector<std::size_t>& lens)
             tuned = true;
         }
     }
-    if(!attrs.contains("normalize_axes"))
+    if(not attrs.contains("normalize_axes"))
     {
         return tuned;
     }
...
@@ -187,7 +187,7 @@ operation onnx_parser::load(const std::string& name, const node_info& info) cons

 void onnx_parser::parse_undefined(module* mod, const std::string& name)
 {
-    if(!contains(instructions, name))
+    if(not contains(instructions, name))
     {
         auto ins = mod->add_instruction(make_op("undefined"));
         instructions[name] = ins;
@@ -267,7 +267,7 @@ void onnx_parser::parse_graph(module* mod, const onnx::GraphProto& graph)
     {
         const std::string& name = input.name();
         // input not in initializer_data, so it is a real input
-        if(!contains(mod_insts, name))
+        if(not contains(mod_insts, name))
         {
             // ONNX specification does not specify how to deal with the
             // scenario that a nested subgraph contains a parameter with the
@@ -354,7 +354,7 @@ void onnx_parser::parse_graph(module* mod, const onnx::GraphProto& graph)
                  all_output_names.begin(),
                  all_output_names.end(),
                  std::back_inserter(prog_output_names),
-                 [&](const auto& name) { return !(name.empty() or instructions.count(name) == 0); });
+                 [&](const auto& name) { return not(name.empty() or instructions.count(name) == 0); });

     std::vector<instruction_ref> output_ins;
     std::transform(prog_output_names.begin(),
@@ -444,7 +444,7 @@ shape onnx_parser::parse_type(const onnx::TypeProto& t,
                               const std::vector<std::size_t>& input_dims) const
 {
     shape::type_t shape_type = get_type(t.tensor_type().elem_type());
-    if(!input_dims.empty())
+    if(not input_dims.empty())
     {
         return {shape_type, input_dims};
     }
@@ -511,7 +511,7 @@ shape::type_t get_type(int dtype)
 bool is_type_float(shape::type_t dtype)
 {
     bool r = false;
-    if(dtype == shape::float_type || dtype == shape::double_type || dtype == shape::half_type)
+    if(dtype == shape::float_type or dtype == shape::double_type or dtype == shape::half_type)
     {
         r = true;
     }
...
@@ -42,7 +42,7 @@ void cal_auto_padding_size(onnx_parser::node_info info,
     size_t kdims = in_lens.size() - 2;
     assert(k_lens.size() == kdims and dilation.size() == kdims);

-    if(!contains(info.attributes, "auto_pad"))
+    if(not contains(info.attributes, "auto_pad"))
     {
         return;
     }
@@ -124,7 +124,7 @@ void tune_padding_size(const value& v,
     }

     // if padding is symmetric, return directly
-    if(!is_asym_padding(padding))
+    if(not is_asym_padding(padding))
     {
         return;
     }
...
@@ -38,7 +38,7 @@ struct parse_cast : op_parser<parse_cast>
                           onnx_parser::node_info info,
                           const std::vector<instruction_ref>& args) const
     {
-        if(!contains(info.attributes, "to"))
+        if(not contains(info.attributes, "to"))
         {
             MIGRAPHX_THROW("PARSE_CAST: missing to type attribute!");
         }
...
@@ -93,7 +93,7 @@ struct parse_constant_fill : op_parser<parse_constant_fill>
         }
         else if(input_as_shape == 0)
         {
-            if(!contains(info.attributes, "shape"))
+            if(not contains(info.attributes, "shape"))
             {
                 MIGRAPHX_THROW("ConstantFill: attribute output shape is needed");
             }
...
@@ -94,7 +94,7 @@ struct parse_gemm : op_parser<parse_gemm>
             out_lens.back() = l2->get_shape().lens().back();
             auto l3 = args[2];
             auto l3_lens = l3->get_shape().lens();
-            if(!std::equal(out_lens.begin(), out_lens.end(), l3_lens.begin(), l3_lens.end()))
+            if(not std::equal(out_lens.begin(), out_lens.end(), l3_lens.begin(), l3_lens.end()))
             {
                 l3 = info.add_instruction(make_op("multibroadcast", {{"out_lens", out_lens}}),
                                           args[2]);
...
@@ -31,7 +31,7 @@ namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace onnx {

-//! Parser for LpNormalization ONNX operator.
+// Parser for LpNormalization ONNX operator.
 /*!
 Normalizes a tensor by the L1 or L2 norms along a given axis.
 Norms that evaluate to 0 are changed to 1 to prevent division by zero.
...
@@ -67,7 +67,8 @@ struct parse_matmul : op_parser<parse_matmul>
         instruction_ref bl0 = l0;
         instruction_ref bl1 = l1;
-        if(!std::equal(l0_lens.rbegin() + 2, l0_lens.rend(), l1_lens.rbegin() + 2, l1_lens.rend()))
+        if(not std::equal(
+               l0_lens.rbegin() + 2, l0_lens.rend(), l1_lens.rbegin() + 2, l1_lens.rend()))
         {
             auto l0_it = l0_lens.begin() + l0_lens.size() - 2;
             std::vector<std::size_t> l0_broadcasted_lens(l0_lens.begin(), l0_it);
...
@@ -40,9 +40,9 @@ struct parse_mod : op_parser<parse_mod>
                           std::vector<instruction_ref> args) const
     {
         std::string mod = "mod";
-        if(is_type_float(args[0]->get_shape().type()) || is_type_float(args[1]->get_shape().type()))
+        if(is_type_float(args[0]->get_shape().type()) or is_type_float(args[1]->get_shape().type()))
         {
-            if(!contains(info.attributes, "fmod"))
+            if(not contains(info.attributes, "fmod"))
             {
                 MIGRAPHX_THROW("Mod operator with float args and fmod=0 invalid");
             }
...
@@ -37,7 +37,7 @@ static std::vector<std::size_t> nonzero_indices(const std::vector<T>& data)
     std::vector<std::size_t> indices;
     for(std::size_t i = 0; i < data.size(); ++i)
     {
-        if(!float_equal(data[i], 0))
+        if(not float_equal(data[i], 0))
             indices.push_back(i);
     }
...
@@ -160,7 +160,7 @@ struct parse_pad : op_parser<parse_pad>
         if(args.size() == 3)
         {
             auto val_ins = args.at(2);
-            if(!val_ins->can_eval())
+            if(not val_ins->can_eval())
             {
                 MIGRAPHX_THROW("PARSE_PAD: input value must be constant");
             }
...
@@ -157,7 +157,7 @@ struct parse_pooling : op_parser<parse_pooling>
         std::vector<int64_t> slice_end;
         tune_padding_size(values, paddings, count_include_pad, slice_start);

-        if(!slice_start.empty())
+        if(not slice_start.empty())
         {
             // calculate expected output shape
             orig_padding.insert(orig_padding.begin() + kdims, 2, 0);
@@ -180,7 +180,7 @@ struct parse_pooling : op_parser<parse_pooling>
         op.from_value(values);
         auto l1 = info.add_instruction(op, l0);

-        if(!slice_start.empty())
+        if(not slice_start.empty())
         {
             std::vector<int64_t> axes(kdims);
             std::iota(axes.begin(), axes.end(), 2);
...
@@ -46,7 +46,7 @@ auto compute_type(shape::type_t t1, shape::type_t t2)
     int it1 = t1;
     int it2 = t2;

-    if(!contains(op_order, it1) or !contains(op_order, it2))
+    if(not contains(op_order, it1) or not contains(op_order, it2))
     {
         MIGRAPHX_THROW("PARSE_POW: Input data type not supported!");
     }
...
@@ -56,7 +56,7 @@ const auto& get_nearest_op(const std::string& mode)
          return static_cast<std::size_t>(std::ceil((val)));
      }}};

-    if(!contains(nearest_ops, mode))
+    if(not contains(nearest_ops, mode))
     {
         MIGRAPHX_THROW("PARSE_RESIZE: nearest_mode " + mode + " not supported!");
     }
@@ -86,7 +86,7 @@ const auto& get_original_idx_op(const std::string& mode)
          return (idx + 0.5) / scale;
      }}};

-    if(!contains(idx_ops, mode))
+    if(not contains(idx_ops, mode))
     {
         MIGRAPHX_THROW("PARSE_RESIZE: coordinate_transformation_mode " + mode + " not supported!");
     }
...
@@ -31,7 +31,7 @@ namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace onnx {

-//! Parser for ReverseSequence ONNX operator.
+// Parser for ReverseSequence ONNX operator.
 /*!
 Reverses the data along the time axis for the batches along the batch axis.
 The sequence lengths can be given to reverse up to the given length for each batch, keeping the
...