Unverified commit a05113aa, authored by Paul Fultz II and committed by GitHub

Remove redundant cast (#982)

Enable a cppcheck rule to catch these redundant casts in the future
parent 4a71ec8c
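
All of the source hunks below follow one pattern: an explicit static_cast to the declared type is dropped wherever the initializer already converts implicitly. A minimal standalone sketch of the before/after (illustrative values only, not code from this repository):

    #include <cstdint>
    #include <cstddef>

    int main()
    {
        std::size_t n = 4;                        // e.g. a dimension from shape::lens()
        int64_t before = static_cast<int64_t>(n); // the cast merely restates the declared type
        int64_t after  = n;                       // the implicit conversion does the same work
        return before == after ? 0 : 1;           // exits 0
    }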
@@ -80,10 +80,10 @@ jobs:
        uses: pat-s/always-upload-cache@v2.1.3
        with:
          path: cppcheck-cache
-         key: cppcheck-cache-${{ steps.cache_timestamp.outputs.timestamp }}
+         key: cppcheck-cache-${{ hashFiles('cppcheck.rules', 'CMakeLists.txt') }}-${{ steps.cache_timestamp.outputs.timestamp }}
          restore-keys: |
-           cppcheck-cache-${{ steps.cache_timestamp.outputs.timestamp }}
-           cppcheck-cache-
+           cppcheck-cache-${{ hashFiles('cppcheck.rules', 'CMakeLists.txt') }}-${{ steps.cache_timestamp.outputs.timestamp }}
+           cppcheck-cache-${{ hashFiles('cppcheck.rules', 'CMakeLists.txt') }}-
      - name: Build the Docker image
        run: docker build . --file hip-clang.docker --tag migraphx
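One note on the workflow hunk above: adding hashFiles('cppcheck.rules', 'CMakeLists.txt') to the cache key means any edit to the rules file (such as the pattern fix below) yields a new key, so a stale cppcheck cache is not restored over the updated rules; the timestamp suffix and the trailing prefix-only restore-key keep the existing fallback behavior.
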
@@ -154,7 +154,7 @@
</rule>
<rule>
    <tokenlist>normal</tokenlist>
-   <pattern><![CDATA[((?:(?:\w+|<|>|::) )*(?:\w+|>)(?: &|\*)*) (\w) ; \2 = static_cast < \1 > (\([^()]*(?-1)*[^()]*\)) ;]]></pattern>
+   <pattern><![CDATA[((?:(?:\w+|<|>|::) )*(?:\w+|>)(?: &|\*)*) (\w+) ; \2 = static_cast < \1 > (\([^()]*(?-1)*[^()]*\)) ;]]></pattern>
    <message>
        <id>RedundantCast</id>
        <severity>style</severity>
@@ -163,7 +163,7 @@
</rule>
<rule>
    <tokenlist>normal</tokenlist>
-   <pattern><![CDATA[auto (\w) ; \1 = static_cast < (?:(?:\w+|<|>|::) )*(?:\w+|>)(?: &|\*)* > (\([^()]*(?-1)*[^()]*\)) ;]]></pattern>
+   <pattern><![CDATA[auto (\w+) ; \1 = static_cast < (?:(?:\w+|<|>|::) )*(?:\w+|>)(?: &|\*)* > (\([^()]*(?-1)*[^()]*\)) ;]]></pattern>
    <message>
        <id>RedundantCast</id>
        <severity>style</severity>
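The only change in both rules is (\w) becoming (\w+): a lone \w matches a single word character, so the old patterns only caught variables with one-character names. Against cppcheck's declare-then-assign token stream, the widened group now flags ordinary identifiers too. A minimal sketch of the kind of statement the first rule targets (hypothetical helper, assuming cppcheck's usual token simplification):

    #include <cstdint>
    #include <vector>

    int64_t example(const std::vector<std::size_t>& lens)
    {
        // Tokenized roughly as: int64_t max_len ; max_len = static_cast < int64_t > ( lens [ 0 ] ) ;
        // With (\w+) the name "max_len" now matches; with (\w) only a name like "n" would.
        int64_t max_len = static_cast<int64_t>(lens[0]); // reported as RedundantCast
        return max_len;
    }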
@@ -37,7 +37,7 @@ struct rnn_var_sl_shift_output
    argument compute(const shape& output_shape, std::vector<argument> args) const
    {
        argument result{output_shape};
-       int64_t max_len = static_cast<int64_t>(output_shape.lens()[0]);
+       int64_t max_len = output_shape.lens()[0];
        visit_all(result, args[0])([&](auto output, auto input) {
            using value_type = typename decltype(output)::value_type;
            args[1].visit([&](auto seq_lens) {
@@ -76,7 +76,7 @@ struct rnn_var_sl_shift_sequence
    argument compute(const shape& output_shape, std::vector<argument> args) const
    {
        argument result{output_shape};
-       int64_t max_len = static_cast<int64_t>(output_shape.lens()[0]);
+       int64_t max_len = output_shape.lens()[0];
        visit_all(result, args[0])([&](auto output, auto input) {
            using value_type = typename decltype(output)::value_type;
            args[1].visit([&](auto seq_lens) {
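These two hunks show the commit's most common case: shape::lens() holds std::size_t values, and a std::size_t initializer converts to int64_t implicitly, so spelling out static_cast<int64_t> added nothing. The same reasoning covers the lens.size(), data_s.elements(), and dim_lens[0] changes below.
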
@@ -20,7 +20,7 @@ auto tune_attribute(const std::vector<int64_t>& vec,
                    const std::vector<std::size_t>& lens)
{
    std::vector<int64_t> result(vec);
-   int64_t n_rank = static_cast<int64_t>(lens.size());
+   int64_t n_rank = lens.size();
    std::vector<op::normalize_attribute> vec_attrs = val.to_vector<op::normalize_attribute>();
    if(contains(vec_attrs, op::normalize_attribute::use_output))
    {
@@ -39,7 +39,7 @@ struct parse_gather_elements : op_parser<parse_gather_elements>
        int tuned_axis = tune_axis(n_rank, axis, opd.op_name);
        auto axis_stride = data_s.strides()[tuned_axis];
-       int64_t data_elem_num = static_cast<int64_t>(data_s.elements());
+       int64_t data_elem_num = data_s.elements();
        // reshape the input data as one dimension and used as input data
        // to the gather operator
        arg_data = info.add_instruction(make_op("reshape", {{"dims", {data_elem_num}}}), arg_data);
@@ -9,21 +9,20 @@ namespace onnx {
auto compute_type(shape::type_t t1, shape::type_t t2)
{
-   const static std::unordered_map<int, int> op_order = {
-       {static_cast<int>(shape::int8_type), 1},
-       {static_cast<int>(shape::uint8_type), 2},
-       {static_cast<int>(shape::int16_type), 3},
-       {static_cast<int>(shape::uint16_type), 4},
-       {static_cast<int>(shape::int32_type), 5},
-       {static_cast<int>(shape::uint32_type), 6},
-       {static_cast<int>(shape::int64_type), 7},
-       {static_cast<int>(shape::uint64_type), 8},
-       {static_cast<int>(shape::half_type), 9},
-       {static_cast<int>(shape::float_type), 10},
-       {static_cast<int>(shape::double_type), 11}};
+   const static std::unordered_map<int, int> op_order = {{shape::int8_type, 1},
+                                                         {shape::uint8_type, 2},
+                                                         {shape::int16_type, 3},
+                                                         {shape::uint16_type, 4},
+                                                         {shape::int32_type, 5},
+                                                         {shape::uint32_type, 6},
+                                                         {shape::int64_type, 7},
+                                                         {shape::uint64_type, 8},
+                                                         {shape::half_type, 9},
+                                                         {shape::float_type, 10},
+                                                         {shape::double_type, 11}};
-   int it1 = static_cast<int>(t1);
-   int it2 = static_cast<int>(t2);
+   int it1 = t1;
+   int it2 = t2;
    if(!contains(op_order, it1) or !contains(op_order, it2))
    {
        MIGRAPHX_THROW("PARSE_POW: Input data type not supported!");
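The hunk above leans on a C++ language rule worth naming: shape::type_t is evidently an unscoped enum, and unscoped enumerators convert to int implicitly, both as unordered_map keys and in the it1/it2 initializations, so every static_cast<int> here was redundant. A minimal sketch of that rule with a toy enum (not the real shape::type_t):

    #include <unordered_map>

    enum toy_type { t_int8, t_half, t_float }; // unscoped: converts to int implicitly

    int main()
    {
        const std::unordered_map<int, int> order = {{t_int8, 1}, {t_half, 9}, {t_float, 10}};
        int it = t_float;                  // no static_cast<int> required
        return order.at(it) == 10 ? 0 : 1; // exits 0
    }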
@@ -334,7 +334,7 @@ struct parse_resize : op_parser<parse_resize>
        auto ins_delta = info.add_literal(dim_s, delta_data);
        // slice the data
-       int64_t slc_stride = static_cast<int64_t>(dim_lens[0]);
+       int64_t slc_stride = dim_lens[0];
        auto low = info.add_instruction(
            make_op("slice", {{"axes", {0}}, {"starts", {0}}, {"ends", {slc_stride}}}),
            data);
@@ -24,7 +24,7 @@ struct parse_split : op_parser<parse_split>
        }
        auto lens = args[0]->get_shape().lens();
-       int64_t n_rank = static_cast<int64_t>(lens.size());
+       int64_t n_rank = lens.size();
        int64_t tuned_axis = tune_axis(n_rank, axis, opd.op_name);
        std::vector<int64_t> vec_splits;
@@ -269,7 +269,7 @@ std::vector<instruction_ref> rewrite_rnn::vanilla_rnn_cell(bool is_forward,
    instruction_ref hidden_out = prog.end();
    instruction_ref last_out{};
    last_out = prog.insert_instruction(ins, make_op("unsqueeze", {{"axes", {0, 1}}}), sih);
-   long seq_len = static_cast<long>(get_seq_len(prog, seq, seq_lens));
+   long seq_len = get_seq_len(prog, seq, seq_lens);
    for(long i = 0; i < seq_len; i++)
    {
        long seq_index = is_forward ? i : (seq_len - 1 - i);
@@ -556,7 +556,7 @@ std::vector<instruction_ref> rewrite_rnn::gru_cell(bool is_forward,
    instruction_ref last_output{};
    migraphx::shape seq_shape = seq->get_shape();
    migraphx::shape r_shape = r->get_shape();
-   long hs = static_cast<long>(r_shape.lens()[2]);
+   long hs = r_shape.lens()[2];
    migraphx::shape ss(seq_shape.type(), {seq_shape.lens()[1], r_shape.lens()[2]});
    std::vector<float> data(ss.elements(), 1.0f);
@@ -613,7 +613,7 @@ std::vector<instruction_ref> rewrite_rnn::gru_cell(bool is_forward,
            rb_h);
    }
-   long seq_len = static_cast<long>(get_seq_len(prog, seq, seq_lens));
+   long seq_len = get_seq_len(prog, seq, seq_lens);
    for(long i = 0; i < seq_len; i++)
    {
        long seq_index = is_forward ? i : (seq_len - 1 - i);
@@ -1032,7 +1032,7 @@ std::vector<instruction_ref> rewrite_rnn::lstm_cell(bool is_forward,
    instruction_ref last_cell_output{};
    migraphx::shape r_shape = r->get_shape();
-   long hs = static_cast<long>(r_shape.lens()[2]);
+   long hs = r_shape.lens()[2];
    auto bs = ih->get_shape().lens()[1];
    std::vector<int64_t> perm{1, 0};
@@ -1094,7 +1094,7 @@ std::vector<instruction_ref> rewrite_rnn::lstm_cell(bool is_forward,
            ins, make_op("broadcast", {{"axis", 1}, {"out_lens", ic_lens}}), pphf);
    }
-   long seq_len = static_cast<long>(get_seq_len(prog, seq, seq_lens));
+   long seq_len = get_seq_len(prog, seq, seq_lens);
    for(long i = 0; i < seq_len; ++i)
    {
        long seq_index = is_forward ? i : (seq_len - 1 - i);
@@ -45,7 +45,7 @@ TEST_CASE(if_pl_test)
    auto ys = param_shapes["y"];
    std::vector<float> yd(ys.bytes() / sizeof(float), 2.0);
    pp.add("y", migraphx::argument(ys, yd.data()));
-   char ccond = static_cast<char>(cond);
+   char ccond = cond;
    pp.add("cond", migraphx::argument(param_shapes["cond"], &ccond));
    auto outputs = p.eval(pp);
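Last, the test change: cond is presumably a bool (or small integral flag), and bool converts to char implicitly (true becomes 1), so the cast was redundant here as well. The local ccond is still needed, since migraphx::argument is handed a pointer to it as the backing storage for the scalar parameter.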