"git@developer.sourcefind.cn:gaoqiong/migraphx.git" did not exist on "6d86ff54da8572f44e09cd16d21d7a9d3b5fd4c9"
Unverified commit d37a4df9 authored by Paul Fultz II, committed by GitHub

Enable cppcheck rule for 'not', 'or' keywords (#1361)

Using 'not' and 'or' improves readability. The cppcheck rule will help ensure we use them consistently.
parent 794a4335
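Background for the change: 'not', 'and', and 'or' are standard C++ alternative tokens for '!', '&&', and '||' ([lex.digraph]), so every rewrite in this diff is a pure spelling change with identical semantics. A minimal standalone sketch of the equivalence (illustration only, not part of the patch):

// equivalence.cpp -- illustration only; not part of this commit.
// 'not', 'and', 'or' are alternative tokens for '!', '&&', '||' in
// standard C++ (MSVC needs /permissive- or <ciso646> to accept them).
#include <cassert>

int main()
{
    bool a = true;
    bool b = false;
    assert((not a) == (!a));       // unary logical not
    assert((a and b) == (a && b)); // logical and
    assert((a or b) == (a || b));  // logical or
    return 0;
}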
@@ -47,12 +47,12 @@ void rewrite_pooling::apply(module& m) const
         if(not s.standard())
             continue;
         auto&& op = any_cast<op::pooling>(ins->get_operator());
-        if(!std::all_of(op.padding.begin(), op.padding.end(), [](auto i) { return i == 0; }))
+        if(not std::all_of(op.padding.begin(), op.padding.end(), [](auto i) { return i == 0; }))
             continue;
-        if(!std::all_of(op.stride.begin(), op.stride.end(), [](auto i) { return i == 1; }))
+        if(not std::all_of(op.stride.begin(), op.stride.end(), [](auto i) { return i == 1; }))
             continue;
         auto lens = s.lens();
-        if(!std::equal(lens.begin() + 2, lens.end(), op.lengths.begin(), op.lengths.end()))
+        if(not std::equal(lens.begin() + 2, lens.end(), op.lengths.begin(), op.lengths.end()))
             continue;
         std::int64_t n = s.lens()[0];
         std::int64_t c = s.lens()[1];
...
@@ -214,7 +214,7 @@ void rewrite_rnn::apply_vanilla_rnn(module& m, instruction_ref ins) const
         ih = m.add_literal(migraphx::literal{ih_shape, data});
     }
-    if(!is_forward and variable_seq_len)
+    if(not is_forward and variable_seq_len)
     {
         args[0] =
             m.insert_instruction(ins, make_op("rnn_var_sl_shift_sequence"), args[0], seq_lens);
@@ -520,7 +520,7 @@ void rewrite_rnn::apply_gru(module& m, instruction_ref ins) const
         ih = m.add_literal(migraphx::literal{ih_shape, data});
     }
-    if(!is_forward and variable_seq_len)
+    if(not is_forward and variable_seq_len)
     {
         args[0] =
             m.insert_instruction(ins, make_op("rnn_var_sl_shift_sequence"), args[0], seq_lens);
@@ -977,7 +977,7 @@ void rewrite_rnn::apply_lstm(module& m, instruction_ref ins) const
         pph = args[7];
     }
-    if(!is_forward and variable_seq_len)
+    if(not is_forward and variable_seq_len)
     {
         args[0] =
             m.insert_instruction(ins, make_op("rnn_var_sl_shift_sequence"), args[0], seq_lens);
@@ -1294,11 +1294,11 @@ bool rewrite_rnn::is_variable_seq_lens(const module& m, instruction_ref seq_lens
     std::vector<int64_t> vec_lens;
     arg_lens.visit([&](auto l) { vec_lens.assign(l.begin(), l.end()); });
     int64_t l = 0;
-    if(!vec_lens.empty())
+    if(not vec_lens.empty())
     {
         l = vec_lens[0];
     }
-    if(!std::all_of(vec_lens.begin(), vec_lens.end(), [&](auto v) { return v == l; }))
+    if(not std::all_of(vec_lens.begin(), vec_lens.end(), [&](auto v) { return v == l; }))
     {
         is_var_lens = true;
     }
@@ -1318,7 +1318,7 @@ rewrite_rnn::get_seq_len(const module& m, instruction_ref input, instruction_ref
     bool is_var_lens = is_variable_seq_lens(m, seq_lens);
     auto input_shape = input->get_shape();
     auto length = input_shape.lens()[0];
-    if(!is_var_lens and seq_lens != m.end())
+    if(not is_var_lens and seq_lens != m.end())
     {
         auto arg_len = seq_lens->eval();
         std::vector<std::size_t> vec_lens;
@@ -1387,7 +1387,7 @@ void rewrite_rnn::replace_last_cell_output(module& m,
     if(variable_seq_len)
     {
-        if(!ins_outputs.empty())
+        if(not ins_outputs.empty())
         {
             cell_outputs = m.insert_instruction(
                 std::next(ins),
...
@@ -477,7 +477,7 @@ bool operator==(const shape::dynamic_dimension& x, const shape::dynamic_dimensio
 bool operator!=(const shape::dynamic_dimension& x, const shape::dynamic_dimension& y)
 {
-    return !(x == y);
+    return not(x == y);
 }
 std::ostream& operator<<(std::ostream& os, const shape::dynamic_dimension& x)
 {
@@ -497,7 +497,7 @@ bool operator==(const shape& x, const shape& y)
            x.strides() == y.strides() and x.sub_shapes() == y.sub_shapes());
 }
-bool operator!=(const shape& x, const shape& y) { return !(x == y); }
+bool operator!=(const shape& x, const shape& y) { return not(x == y); }
 std::ostream& operator<<(std::ostream& os, const shape& x)
 {
...
@@ -787,7 +787,7 @@ MIGRAPHX_PRED_MATCHER(horiz_conv_dot, instruction_ref ins)
     };
     auto dots = std::count_if(ins->outputs().begin(), ins->outputs().end(), pred("dot"));
     auto convs = std::count_if(ins->outputs().begin(), ins->outputs().end(), pred("convolution"));
-    return !(dots < 2 and convs < 2);
+    return not(dots < 2 and convs < 2);
 }
 struct find_conv_dot_horiz_fusion
@@ -969,7 +969,7 @@ struct find_split_reshape
         // all outputs are reshape and of the same shape
         auto dims = any_cast<op::reshape>(rsp->get_operator()).dims;
-        if(!same_ops(vec_rsp))
+        if(not same_ops(vec_rsp))
         {
             return;
         }
@@ -1052,7 +1052,7 @@ struct find_split_transpose
         // all transpose are the same
         auto perm = any_cast<op::transpose>(trans->get_operator()).dims;
-        if(!same_ops(vec_trans))
+        if(not same_ops(vec_trans))
         {
             return;
         }
...
@@ -99,7 +99,7 @@ struct find_reshaper
         std::vector<instruction_ref> reshapes{ins};
         while(is_reshaper(reshapes.back()))
         {
-            assert(!reshapes.back()->inputs().empty());
+            assert(not reshapes.back()->inputs().empty());
             assert(m.has_instruction(reshapes.back()->inputs().front()));
             auto input = reshapes.back()->inputs().front();
             reshapes.push_back(input);
@@ -288,7 +288,7 @@ struct find_concat_transpose
         auto permutation = find_permutation(s);
         // permutation should be the same for all inputs
-        if(!std::all_of(trans_inputs.begin(), trans_inputs.end(), [&](auto in) {
+        if(not std::all_of(trans_inputs.begin(), trans_inputs.end(), [&](auto in) {
               return (find_permutation(in->get_shape()) == permutation);
           }))
        {
...
@@ -49,7 +49,7 @@ struct dnnl_binary : dnnl_op<dnnl_binary, dnnl::binary>
         auto s0 = inputs.at(0);
         auto s1 = inputs.at(1);
         auto r = s0;
-        if(s0 != s1 or !s0.packed())
+        if(s0 != s1 or not s0.packed())
         {
             r = shape{s0.type(), s0.lens()};
         }
...
@@ -95,7 +95,7 @@ void subgraph::apply(module_pass_manager& mpm) const
     for(auto it : iterator_for(mod))
     {
         // assuming we want all the params/literals as inputs to the FPGA submodule
-        if(migraphx::starts_with(it->name(), "@param") ||
+        if(migraphx::starts_with(it->name(), "@param") or
            migraphx::starts_with(it->name(), "@literal"))
         {
             literal_inputs.push_back(it);
...
@@ -131,7 +131,7 @@ struct hip_array
     friend MIGRAPHX_DEVICE_CONSTEXPR bool operator!=(const hip_array& x, const hip_array& y)
     {
-        return !(x == y);
+        return not(x == y);
     }
     // This uses the product order rather than lexical order
     friend MIGRAPHX_DEVICE_CONSTEXPR bool operator<(const hip_array& x, const hip_array& y)
...
@@ -117,12 +117,13 @@ template <class V, class F, class... Ts>
 void hip_visit_all_impl(const shape& s, F f, V&& v, Ts&&... xs)
 {
     std::initializer_list<migraphx::shape::type_t> types = {get_shape(xs).type()...};
-    if(!std::all_of(
+    if(not std::all_of(
           types.begin(), types.end(), [&](migraphx::shape::type_t t) { return t == s.type(); }))
         MIGRAPHX_THROW("Types must be the same");
     std::initializer_list<index_int> ranks = {
         static_cast<index_int>(get_shape(xs).lens().size())...};
-    if(!std::all_of(ranks.begin(), ranks.end(), [&](index_int r) { return r == s.lens().size(); }))
+    if(not std::all_of(
+          ranks.begin(), ranks.end(), [&](index_int r) { return r == s.lens().size(); }))
         MIGRAPHX_THROW("Ranks must be the same");
     visit_tensor_size(s.lens().size(), [&](auto ndim) {
         s.visit_type(hip_visitor([&](auto as) { v(f(xs, ndim, as)...); }));
@@ -134,7 +135,8 @@ void hip_visit_views_impl(const shape& s, F f, V&& v, Ts&&... xs)
 {
     std::initializer_list<index_int> ranks = {
         static_cast<index_int>(get_shape(xs).lens().size())...};
-    if(!std::all_of(ranks.begin(), ranks.end(), [&](index_int r) { return r == s.lens().size(); }))
+    if(not std::all_of(
+          ranks.begin(), ranks.end(), [&](index_int r) { return r == s.lens().size(); }))
         MIGRAPHX_THROW("Ranks must be the same");
     visit_tensor_size(s.lens().size(), [&](auto ndim) { v(f(xs, ndim)...); });
 }
...
@@ -47,7 +47,7 @@ constexpr Iterator upper_bound(Iterator first, Iterator last, const T& value)
         it = first;
         step = count / 2;
         std::advance(it, step);
-        if(!(value < *it))
+        if(not(value < *it))
         {
             first = ++it;
             count -= step + 1;
...
@@ -112,7 +112,7 @@ void gemm_impl(context& ctx,
               bool compute_fp32)
 {
     const bool is_3inputs = (args.size() == 4);
-    if(!is_3inputs)
+    if(not is_3inputs)
     {
         beta = 0;
     }
...
@@ -163,7 +163,7 @@ constexpr Iterator1 search(Iterator1 first, Iterator1 last, Iterator2 s_first, I
         {
             return last;
         }
-        if(!(*it == *s_it))
+        if(not(*it == *s_it))
        {
             break;
         }
...
@@ -153,7 +153,7 @@ struct array
         return true;
     }
-    friend constexpr bool operator!=(const array& x, const array& y) { return !(x == y); }
+    friend constexpr bool operator!=(const array& x, const array& y) { return not(x == y); }
     // This uses the product order rather than lexical order
     friend constexpr bool operator<(const array& x, const array& y)
     {
...
@@ -73,10 +73,10 @@ MIGRAPHX_INTEGRAL_CONSTANT_BINARY_OP(!=)
 MIGRAPHX_INTEGRAL_CONSTANT_BINARY_OP(&)
 MIGRAPHX_INTEGRAL_CONSTANT_BINARY_OP(^)
 MIGRAPHX_INTEGRAL_CONSTANT_BINARY_OP(|)
-MIGRAPHX_INTEGRAL_CONSTANT_BINARY_OP(&&)
-MIGRAPHX_INTEGRAL_CONSTANT_BINARY_OP(||)
-MIGRAPHX_INTEGRAL_CONSTANT_UNARY_OP(!)
+MIGRAPHX_INTEGRAL_CONSTANT_BINARY_OP(and)
+MIGRAPHX_INTEGRAL_CONSTANT_BINARY_OP(or)
+MIGRAPHX_INTEGRAL_CONSTANT_UNARY_OP(not )
 MIGRAPHX_INTEGRAL_CONSTANT_UNARY_OP(~)
 MIGRAPHX_INTEGRAL_CONSTANT_UNARY_OP(+)
 MIGRAPHX_INTEGRAL_CONSTANT_UNARY_OP(-)
...
@@ -341,7 +341,7 @@ struct miopen_apply
         catch(migraphx::exception&)
         {
             // In case no solver supports the default format, retry using the other format.
-            compile_quant_conv_with_format(!int8_x4_format);
+            compile_quant_conv_with_format(not int8_x4_format);
         }
         auto args = ins->inputs();
...
@@ -78,7 +78,7 @@ struct mlir_handle
         friend bool operator==(ptr x, ptr y) { return x.get_value() == y.get_value(); }
-        friend bool operator!=(ptr x, ptr y) { return !(x == y); }
+        friend bool operator!=(ptr x, ptr y) { return not(x == y); }
         T obj{};
     };
@@ -503,7 +503,7 @@ struct mlir_program
             pp =
                 problem_params{ins->get_operator(), to_shapes(ins->inputs()), ins->get_shape()};
             std::string tuned = get_tune_params();
-            if(!tuned.empty())
+            if(not tuned.empty())
                 ops.add_attributes({{"perf_config", tuned}});
             // check if HW supports xdlops
             if(contains(get_xdlops_archs(), target_name))
...
@@ -154,7 +154,7 @@ void pack_int8_args::apply(module& m) const
     bool transa = inputs[0]->get_shape().transposed();
     bool transb = inputs[1]->get_shape().transposed();
-    if(!transb)
+    if(not transb)
     {
         auto packed_b = m.insert_instruction(
             ins, make_op("hip::allocate", {{"shape", to_value(inputs[1]->get_shape())}}));
...
@@ -100,7 +100,7 @@ struct parse_conv : op_parser<parse_conv>
         {
             MIGRAPHX_THROW("padding should have 4 values");
         }
-        if(padding[0] != padding[2] || padding[1] != padding[3])
+        if(padding[0] != padding[2] or padding[1] != padding[3])
         {
             MIGRAPHX_THROW("migraphx does not support asymetric padding");
         }
...
@@ -90,7 +90,7 @@ struct parse_depthwiseconv : op_parser<parse_depthwiseconv>
         calculate_padding(0, pads, input_dims[2], op.stride[0], op.dilation[0], weight_h);
         calculate_padding(1, pads, input_dims[3], op.stride[1], op.dilation[1], weight_w);
-        if(pads[0] != pads[2] || pads[1] != pads[3])
+        if(pads[0] != pads[2] or pads[1] != pads[3])
         {
             std::vector<int64_t> padding = {0, 0, pads[0], pads[1], 0, 0, pads[2], pads[3]};
             l0 = info.add_instruction(migraphx::make_op("pad", {{"pads", padding}}), l0);
...
@@ -42,7 +42,7 @@ struct parse_pooling : op_parser<parse_pooling>
                   tf_parser::node_info info,
                   std::vector<instruction_ref> args) const
     {
-        if(!starts_with(opd.tf_name, "Max") && !starts_with(opd.tf_name, "Av"))
+        if(not starts_with(opd.tf_name, "Max") and not starts_with(opd.tf_name, "Av"))
         {
             MIGRAPHX_THROW("tf pooling mode must be Max or Average");
         }
...
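A note on the one non-mechanical hunk above: MIGRAPHX_INTEGRAL_CONSTANT_UNARY_OP(not ) passes the keyword itself as the macro argument. This works because alternative tokens can also spell operator names: 'operator not' is the same function as 'operator!'. A sketch of the idea using a hypothetical int_const wrapper (the real MIGRAPHX_INTEGRAL_CONSTANT_UNARY_OP definition is not shown in this diff and may differ):

// not_operator.cpp -- illustration only; int_const is a simplified
// stand-in for MIGraphX's integral_constant.
template <class T, T V>
struct int_const
{
    static constexpr T value = V;
};

// Same style of macro: 'op' can be a symbolic operator (~, +, -) or an
// alternative token (not), since 'operator not' is another spelling of
// 'operator!'.
#define INT_CONST_UNARY_OP(op)                                               \
    template <class T, T V>                                                  \
    constexpr int_const<decltype(op V), (op V)> operator op(int_const<T, V>) \
    {                                                                        \
        return {};                                                           \
    }

INT_CONST_UNARY_OP(not )
INT_CONST_UNARY_OP(~)

int main()
{
    constexpr int_const<bool, false> f{};
    static_assert(decltype(not f)::value, "operator not is overloaded");
    return 0;
}

The trailing space in 'not )' is harmless, since leading and trailing whitespace in a macro argument is ignored during expansion.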