Commit e5f07268 authored by Paul's avatar Paul
Browse files

Update cppcheck version

parent fef8086c
...@@ -125,6 +125,8 @@ rocm_enable_cppcheck( ...@@ -125,6 +125,8 @@ rocm_enable_cppcheck(
functionConst:*program.* functionConst:*program.*
shadowFunction shadowFunction
shadowVar shadowVar
shadowVariable
unsafeClassDivZero
definePrefix:*test/include/test.hpp definePrefix:*test/include/test.hpp
FORCE FORCE
INCONCLUSIVE INCONCLUSIVE
......
pfultz2/rocm-recipes pfultz2/rocm-recipes
danmar/cppcheck@8aa68ee297c2d9ebadf5bcfd00c66ea8d9291e35 -DHAVE_RULES=1 danmar/cppcheck,pfultz2/cppcheck@e597c29ba -DHAVE_RULES=1
ROCm-Developer-Tools/HIP@2490e42baa7d90458f0632fd9fbead2d395f41b9 ROCm-Developer-Tools/HIP@2490e42baa7d90458f0632fd9fbead2d395f41b9
python/cpython@v3.6.6 -X autotools -H sha256:92aa914572c695c0aeb01b0a214813f414da4b51a371234df514a74761f2bb36 python/cpython@v3.6.6 -X autotools -H sha256:92aa914572c695c0aeb01b0a214813f414da4b51a371234df514a74761f2bb36
-f requirements.txt -f requirements.txt
...@@ -353,14 +353,14 @@ MIGRAPHX_PRED_MATCHER(same_input_shapes, instruction_ref ins) ...@@ -353,14 +353,14 @@ MIGRAPHX_PRED_MATCHER(same_input_shapes, instruction_ref ins)
ins->inputs().begin(), ins->inputs().end(), [&](auto x) { return x->get_shape() == s; }); ins->inputs().begin(), ins->inputs().end(), [&](auto x) { return x->get_shape() == s; });
} }
MIGRAPHX_BASIC_MATCHER(output, matcher_context& ctx, instruction_ref ins) MIGRAPHX_BASIC_MATCHER(output, const matcher_context& ctx, instruction_ref ins)
{ {
if(ins->outputs().size() == 1) if(ins->outputs().size() == 1)
return ins->outputs().front(); return ins->outputs().front();
return ctx.not_found(); return ctx.not_found();
} }
MIGRAPHX_BASIC_MATCHER(used_once, matcher_context& ctx, instruction_ref ins) MIGRAPHX_BASIC_MATCHER(used_once, const matcher_context& ctx, instruction_ref ins)
{ {
if(ins->outputs().size() == 1) if(ins->outputs().size() == 1)
return ins; return ins;
...@@ -411,7 +411,7 @@ inline auto nargs(std::size_t n) ...@@ -411,7 +411,7 @@ inline auto nargs(std::size_t n)
inline auto arg(std::size_t i) inline auto arg(std::size_t i)
{ {
return make_basic_fun_matcher([=](matcher_context& ctx, instruction_ref ins) { return make_basic_fun_matcher([=](const matcher_context& ctx, instruction_ref ins) {
if(i < ins->inputs().size()) if(i < ins->inputs().size())
return ins->inputs()[i]; return ins->inputs()[i];
return ctx.not_found(); return ctx.not_found();
......
...@@ -168,6 +168,7 @@ bool verify_range(R1&& r1, R2&& r2, double tolerance = 80, double* out_error = n ...@@ -168,6 +168,7 @@ bool verify_range(R1&& r1, R2&& r2, double tolerance = 80, double* out_error = n
{ {
double threshold = std::numeric_limits<range_value<R1>>::epsilon() * tolerance; double threshold = std::numeric_limits<range_value<R1>>::epsilon() * tolerance;
auto error = rms_range(r1, r2); auto error = rms_range(r1, r2);
// cppcheck-suppress uninitvar
if(out_error != nullptr) if(out_error != nullptr)
*out_error = error; *out_error = error;
return error <= threshold; return error <= threshold;
......
...@@ -1011,7 +1011,7 @@ struct onnx_parser ...@@ -1011,7 +1011,7 @@ struct onnx_parser
} }
std::vector<operation> vec_actv_funcs(vec_names.size()); std::vector<operation> vec_actv_funcs(vec_names.size());
std::transform(vec_names.begin(), vec_names.end(), vec_actv_funcs.begin(), [&](auto& fn) { std::transform(vec_names.begin(), vec_names.end(), vec_actv_funcs.begin(), [&](const auto& fn) {
return map_actv_funcs[fn]; return map_actv_funcs[fn];
}); });
...@@ -1127,7 +1127,7 @@ struct onnx_parser ...@@ -1127,7 +1127,7 @@ struct onnx_parser
} }
std::vector<operation> vec_actv_funcs(vec_names.size()); std::vector<operation> vec_actv_funcs(vec_names.size());
std::transform(vec_names.begin(), vec_names.end(), vec_actv_funcs.begin(), [&](auto& name) { std::transform(vec_names.begin(), vec_names.end(), vec_actv_funcs.begin(), [&](const auto& name) {
return map_actv_funcs[name]; return map_actv_funcs[name];
}); });
...@@ -1299,7 +1299,7 @@ struct onnx_parser ...@@ -1299,7 +1299,7 @@ struct onnx_parser
} }
std::vector<operation> vec_actv_funcs(vec_names.size()); std::vector<operation> vec_actv_funcs(vec_names.size());
std::transform(vec_names.begin(), vec_names.end(), vec_actv_funcs.begin(), [&](auto& name) { std::transform(vec_names.begin(), vec_names.end(), vec_actv_funcs.begin(), [&](const auto& name) {
return map_actv_funcs[name]; return map_actv_funcs[name];
}); });
......
...@@ -107,7 +107,7 @@ struct memory_coloring_impl ...@@ -107,7 +107,7 @@ struct memory_coloring_impl
return ins->name() == "check_context"; return ins->name() == "check_context";
} }
static bool is_disjoin(live_range& range1, live_range& range2) static bool is_disjoin(const live_range& range1, const live_range& range2)
{ {
if((range1.size == 0) || (range2.size == 0)) if((range1.size == 0) || (range2.size == 0))
return false; return false;
......
...@@ -241,7 +241,7 @@ instruction_ref program::remove_instructions(instruction_ref first, instruction_ ...@@ -241,7 +241,7 @@ instruction_ref program::remove_instructions(instruction_ref first, instruction_
// TODO: Check every element // TODO: Check every element
assert(has_instruction(first)); assert(has_instruction(first));
std::for_each(first, last, [&](instruction& ins) { ins.clear_arguments(); }); std::for_each(first, last, [&](instruction& ins) { ins.clear_arguments(); });
assert(std::all_of(first, last, [&](instruction& ins) { return ins.outputs().empty(); })); assert(std::all_of(first, last, [&](const instruction& ins) { return ins.outputs().empty(); }));
return impl->instructions.erase(first, last); return impl->instructions.erase(first, last);
} }
......
...@@ -674,7 +674,6 @@ void rewrite_rnn::apply_lstm(program& prog, instruction_ref ins) const ...@@ -674,7 +674,6 @@ void rewrite_rnn::apply_lstm(program& prog, instruction_ref ins) const
std::vector<float> ihc_data(ihc_shape.elements(), 0.0); std::vector<float> ihc_data(ihc_shape.elements(), 0.0);
migraphx::shape pph_shape{type, {1, 3 * hidden_size}}; migraphx::shape pph_shape{type, {1, 3 * hidden_size}};
std::vector<float> pph_data(pph_shape.elements(), 0.0);
auto actv_funcs = lstm_actv_funcs(ins); auto actv_funcs = lstm_actv_funcs(ins);
auto lstm_op = any_cast<op::lstm>(ins->get_operator()); auto lstm_op = any_cast<op::lstm>(ins->get_operator());
......
...@@ -245,17 +245,17 @@ struct cpu_im2col ...@@ -245,17 +245,17 @@ struct cpu_im2col
const std::size_t& stride_h = op.stride[0]; const std::size_t& stride_h = op.stride[0];
const std::size_t& stride_w = op.stride[1]; const std::size_t& stride_w = op.stride[1];
auto kdiv2_h = kernel_h / 2; long kdiv2_h = kernel_h / 2;
auto kdiv2_w = kernel_w / 2; long kdiv2_w = kernel_w / 2;
// calculate output sizes // calculate output sizes
const std::size_t col_height = (height - kernel_h + 2 * pad_h) / stride_h + 1; const std::size_t col_height = (height - kernel_h + 2 * pad_h) / stride_h + 1;
const std::size_t col_width = (width - kernel_w + 2 * pad_w) / stride_w + 1; const std::size_t col_width = (width - kernel_w + 2 * pad_w) / stride_w + 1;
// account for padding for the starting position of the input pixels // account for padding for the starting position of the input pixels
std::size_t iinput = kdiv2_h - pad_h; long iinput = kdiv2_h - pad_h;
// loop over output pixels (ioutput, joutput) // loop over output pixels (ioutput, joutput)
for(std::size_t ioutput = 0; ioutput < col_height; ioutput++, iinput += stride_h) for(std::size_t ioutput = 0; ioutput < col_height; ioutput++, iinput += stride_h)
{ {
std::size_t jinput = kdiv2_w - pad_w; long jinput = kdiv2_w - pad_w;
for(std::size_t joutput = 0; joutput < col_width; joutput++, jinput += stride_w) for(std::size_t joutput = 0; joutput < col_width; joutput++, jinput += stride_w)
{ {
// compute linear index for output // compute linear index for output
...@@ -264,8 +264,8 @@ struct cpu_im2col ...@@ -264,8 +264,8 @@ struct cpu_im2col
dfor(channels, dfor(channels,
kernel_h, kernel_h,
kernel_w)([&](std::size_t c, std::size_t koffset, std::size_t loffset) { kernel_w)([&](std::size_t c, std::size_t koffset, std::size_t loffset) {
auto idx = iinput + koffset - kdiv2_h; auto idx = iinput + long(koffset) - kdiv2_h;
auto jdx = jinput + loffset - kdiv2_w; auto jdx = jinput + long(loffset) - kdiv2_w;
col(ldx, p) = ((idx >= 0) && (idx < height) && (jdx >= 0) && (jdx < width)) col(ldx, p) = ((idx >= 0) && (idx < height) && (jdx >= 0) && (jdx < width))
? input(0, c, idx, jdx) ? input(0, c, idx, jdx)
: 0; : 0;
...@@ -466,7 +466,7 @@ struct leaky_relu_op ...@@ -466,7 +466,7 @@ struct leaky_relu_op
std::string name() const { return "cpu::leaky_relu"; } std::string name() const { return "cpu::leaky_relu"; }
auto fcn() const auto fcn() const
{ {
auto& a = op.alpha; auto a = op.alpha;
return [a](auto x) { return x > 0 ? x : x * a; }; return [a](auto x) { return x > 0 ? x : x * a; };
} }
}; };
...@@ -477,7 +477,7 @@ struct elu_op ...@@ -477,7 +477,7 @@ struct elu_op
std::string name() const { return "cpu::elu"; } std::string name() const { return "cpu::elu"; }
auto fcn() const auto fcn() const
{ {
auto& a = op.alpha; auto a = op.alpha;
return [a](auto x) { return x > 0 ? x : a * std::expm1(x); }; return [a](auto x) { return x > 0 ? x : a * std::expm1(x); };
} }
}; };
......
...@@ -155,8 +155,8 @@ __device__ void dpp_reduce(T& in, Op op) ...@@ -155,8 +155,8 @@ __device__ void dpp_reduce(T& in, Op op)
__device__ inline void dpp_reduce(float& x, sum) __device__ inline void dpp_reduce(float& x, sum)
{ {
#ifdef MIGRAPHX_USE_CLANG_TIDY #if defined (MIGRAPHX_USE_CLANG_TIDY) || defined(CPPCHECK)
(void)x; x = 1;
#else #else
__asm__ volatile("s_nop 4\n" __asm__ volatile("s_nop 4\n"
"v_add_f32 %0 %0 %0 row_shr:1\n" "v_add_f32 %0 %0 %0 row_shr:1\n"
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment