Unverified Commit a359d2c8 authored by Paul Fultz II's avatar Paul Fultz II Committed by GitHub
Browse files

Update to Cppcheck 2.11 (#1914)

parent 6dfdf24a
...@@ -61,7 +61,7 @@ namespace cpu { ...@@ -61,7 +61,7 @@ namespace cpu {
std::string target::name() const { return "cpu"; } std::string target::name() const { return "cpu"; }
// cppcheck-suppress constParameter // cppcheck-suppress constParameterReference
std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_options&) const std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_options&) const
{ {
auto& ctx = any_cast<context>(gctx); auto& ctx = any_cast<context>(gctx);
......
...@@ -124,7 +124,7 @@ void nary_broadcast_vec_impl( ...@@ -124,7 +124,7 @@ void nary_broadcast_vec_impl(
buffer[i] = binput.data()[i]; buffer[i] = binput.data()[i];
} }
__syncthreads(); __syncthreads();
auto* bp = as_pointer(buffer); const auto* bp = as_pointer(buffer);
// Process the data // Process the data
for(size_t i = idx.global; i < nelements; i += nglobal) for(size_t i = idx.global; i < nelements; i += nglobal)
{ {
...@@ -219,7 +219,7 @@ void nary_double_broadcast_vec_impl( ...@@ -219,7 +219,7 @@ void nary_double_broadcast_vec_impl(
buffer[i + bdim_vec_len] = binput2.data()[i]; buffer[i + bdim_vec_len] = binput2.data()[i];
} }
__syncthreads(); __syncthreads();
auto* bp = as_pointer(buffer); const auto* bp = as_pointer(buffer);
// Process the data // Process the data
for(size_t i = idx.global; i < nelements; i += nglobal) for(size_t i = idx.global; i < nelements; i += nglobal)
{ {
......
...@@ -72,12 +72,12 @@ struct hip_heap_vector ...@@ -72,12 +72,12 @@ struct hip_heap_vector
index_int l = 2 * index + 1; index_int l = 2 * index + 1;
index_int r = 2 * index + 2; index_int r = 2 * index + 2;
if(l < n && compare(data[data_index(l)], data[data_index(index)])) if(l < n and compare(data[data_index(l)], data[data_index(index)]))
{ {
index = l; index = l;
} }
if(r < n && compare(data[data_index(r)], data[data_index(index)])) if(r < n and compare(data[data_index(r)], data[data_index(index)]))
{ {
index = r; index = r;
if(compare(data[data_index(l)], data[data_index(r)])) if(compare(data[data_index(l)], data[data_index(r)]))
......
...@@ -239,13 +239,13 @@ struct find_mlir_op ...@@ -239,13 +239,13 @@ struct find_mlir_op
bool is_float = contains({type_t::float_type, type_t::half_type}, result_type); bool is_float = contains({type_t::float_type, type_t::half_type}, result_type);
if(contains(any_type_ops, name)) if(contains(any_type_ops, name))
return true; return true;
if(result_type != type_t::bool_type && contains(no_bool_ops, name)) if(result_type != type_t::bool_type and contains(no_bool_ops, name))
return true; return true;
if(is_float && contains(fp_only_ops, name)) if(is_float and contains(fp_only_ops, name))
return true; return true;
// Only conversions between floating types are known to be unambiguously // Only conversions between floating types are known to be unambiguously
// supported. // supported.
if(is_float && name == "convert") if(is_float and name == "convert")
{ {
return std::all_of(i.inputs().begin(), i.inputs().end(), [](const auto& arg) { return std::all_of(i.inputs().begin(), i.inputs().end(), [](const auto& arg) {
return contains({type_t::float_type, type_t::half_type}, arg->get_shape().type()); return contains({type_t::float_type, type_t::half_type}, arg->get_shape().type());
......
...@@ -92,7 +92,7 @@ struct hip_sync_stream ...@@ -92,7 +92,7 @@ struct hip_sync_stream
return inputs.front(); return inputs.front();
} }
argument compute(context& ctx, const shape&, const std::vector<argument>& args) const argument compute(const context& ctx, const shape&, const std::vector<argument>& args) const
{ {
gpu_sync(ctx); gpu_sync(ctx);
if(args.empty()) if(args.empty())
......
...@@ -300,7 +300,8 @@ struct ck_gemm_compiler : compiler<ck_gemm_compiler> ...@@ -300,7 +300,8 @@ struct ck_gemm_compiler : compiler<ck_gemm_compiler>
const auto& b_shape = inputs[1]; const auto& b_shape = inputs[1];
const auto& c_shape = inputs.back(); const auto& c_shape = inputs.back();
auto rank = a_shape.lens().size(); // cppcheck-suppress unreadVariable
auto rank = a_shape.ndim();
auto batch_count = get_batch_count(c_shape); auto batch_count = get_batch_count(c_shape);
auto m = c_shape.lens()[rank - 2]; auto m = c_shape.lens()[rank - 2];
......
...@@ -37,7 +37,7 @@ struct mlir_compiler : compiler<mlir_compiler> ...@@ -37,7 +37,7 @@ struct mlir_compiler : compiler<mlir_compiler>
operation compile_op(context&, const std::vector<shape>&, const value&) const { return {}; } operation compile_op(context&, const std::vector<shape>&, const value&) const { return {}; }
compiler_replace compiler_replace
compile(context& ctx, instruction_ref ins, const operation&, const value& solution) const compile(const context& ctx, instruction_ref ins, const operation&, const value& solution) const
{ {
auto* smod = ins->module_inputs().front(); auto* smod = ins->module_inputs().front();
assert(smod->get_parameter_names().size() == ins->inputs().size() - 1); assert(smod->get_parameter_names().size() == ins->inputs().size() - 1);
......
...@@ -36,7 +36,10 @@ ...@@ -36,7 +36,10 @@
#include <mutex> #include <mutex>
#if !defined(MLIR_MIGRAPHX_DIALECT_API_VERSION) || MLIR_MIGRAPHX_DIALECT_API_VERSION != 3 #if !defined(MLIR_MIGRAPHX_DIALECT_API_VERSION) || MLIR_MIGRAPHX_DIALECT_API_VERSION != 3
#warning "Incompatible version of rocMLIR library used, disabling" #warning "Incompatible version of rocMLIR library used, disabling"
// Only undefine when not using cppcheck
#ifndef CPPCHECK
#undef MIGRAPHX_MLIR #undef MIGRAPHX_MLIR
#endif
#else #else
#include <mlir-c/RegisterRocMLIR.h> #include <mlir-c/RegisterRocMLIR.h>
#endif #endif
...@@ -703,7 +706,7 @@ struct mlir_program ...@@ -703,7 +706,7 @@ struct mlir_program
void dump_tuning_cfg(const char* prob_config) const void dump_tuning_cfg(const char* prob_config) const
{ {
std::string tuning_cfg_path = string_value_of(MIGRAPHX_MLIR_TUNING_CFG{}); std::string tuning_cfg_path = string_value_of(MIGRAPHX_MLIR_TUNING_CFG{});
if(!tuning_cfg_path.empty()) if(not tuning_cfg_path.empty())
{ {
std::vector<std::string> tokens = split_string(prob_config, '\t'); std::vector<std::string> tokens = split_string(prob_config, '\t');
std::string prob = tokens[1]; std::string prob = tokens[1];
...@@ -724,7 +727,7 @@ struct mlir_program ...@@ -724,7 +727,7 @@ struct mlir_program
{ {
mlir_tuning_table tuning_table{mlirRockTuningTableCreate()}; mlir_tuning_table tuning_table{mlirRockTuningTableCreate()};
std::string tuning_db_path = string_value_of(MIGRAPHX_MLIR_TUNING_DB{}); std::string tuning_db_path = string_value_of(MIGRAPHX_MLIR_TUNING_DB{});
if(!tuning_db_path.empty()) if(not tuning_db_path.empty())
{ {
std::ifstream tuning_db_tsv(tuning_db_path); std::ifstream tuning_db_tsv(tuning_db_path);
if(tuning_db_tsv) if(tuning_db_tsv)
...@@ -759,7 +762,7 @@ struct mlir_program ...@@ -759,7 +762,7 @@ struct mlir_program
// stick a mutex around all tuning table interaction. // stick a mutex around all tuning table interaction.
static std::mutex lock; static std::mutex lock;
std::lock_guard<std::mutex> guard(lock); std::lock_guard<std::mutex> guard(lock);
if(!mlirRockTuningSetFromTable(tuning_table.get(), mmodule.get())) if(not mlirRockTuningSetFromTable(tuning_table.get(), mmodule.get()))
{ {
const char* prob_config = mlirRockTuningGetKey(tuning_table.get(), mmodule.get()); const char* prob_config = mlirRockTuningGetKey(tuning_table.get(), mmodule.get());
std::stringstream key(prob_config); std::stringstream key(prob_config);
...@@ -903,6 +906,7 @@ instruction_ref ...@@ -903,6 +906,7 @@ instruction_ref
insert_mlir(module& m, instruction_ref, code_object_op co, const std::vector<instruction_ref>&) insert_mlir(module& m, instruction_ref, code_object_op co, const std::vector<instruction_ref>&)
{ {
use(co); use(co);
use(m);
return m.end(); return m.end();
} }
......
...@@ -34,7 +34,7 @@ namespace gpu { ...@@ -34,7 +34,7 @@ namespace gpu {
std::vector<argument> generate_arguments(const std::vector<shape>& shapes, unsigned long seed = 0) std::vector<argument> generate_arguments(const std::vector<shape>& shapes, unsigned long seed = 0)
{ {
std::vector<argument> args; std::vector<argument> args;
std::transform(shapes.begin(), shapes.end(), std::back_inserter(args), [&](auto& s) { std::transform(shapes.begin(), shapes.end(), std::back_inserter(args), [&](const auto& s) {
return to_gpu(generate_argument(s, seed++)); return to_gpu(generate_argument(s, seed++));
}); });
return args; return args;
......
...@@ -338,7 +338,7 @@ void tf_parser::parse_node(const std::string& name) ...@@ -338,7 +338,7 @@ void tf_parser::parse_node(const std::string& name)
std::string input_name = input; std::string input_name = input;
// if input has trailing `:0` index then remove it // if input has trailing `:0` index then remove it
auto multi_out_idx = input.find(':'); auto multi_out_idx = input.find(':');
if(multi_out_idx != std::string::npos && input.substr(multi_out_idx + 1) == "0") if(multi_out_idx != std::string::npos and input.substr(multi_out_idx + 1) == "0")
{ {
input_name = input.substr(0, multi_out_idx); input_name = input.substr(0, multi_out_idx);
} }
......
...@@ -285,7 +285,7 @@ bool value::contains(const std::string& pkey) const ...@@ -285,7 +285,7 @@ bool value::contains(const std::string& pkey) const
} }
std::size_t value::size() const std::size_t value::size() const
{ {
auto* a = if_array_impl(x); const auto* a = if_array_impl(x);
if(a == nullptr) if(a == nullptr)
return 0; return 0;
return a->size(); return a->size();
......
...@@ -89,17 +89,13 @@ bool is_overlap_load(migraphx::instruction_ref a, migraphx::instruction_ref b) ...@@ -89,17 +89,13 @@ bool is_overlap_load(migraphx::instruction_ref a, migraphx::instruction_ref b)
bool is_disjoint(const std::vector<migraphx::instruction_ref>& inss) bool is_disjoint(const std::vector<migraphx::instruction_ref>& inss)
{ {
for(auto ins1 : inss) return std::none_of(inss.begin(), inss.end(), [&](auto ins1) {
{ return std::none_of(inss.begin(), inss.end(), [&](auto ins2) {
for(auto ins2 : inss)
{
if(ins1 == ins2) if(ins1 == ins2)
continue; return true;
if(is_overlap_load(ins1, ins2)) return is_overlap_load(ins1, ins2);
return false; });
} });
}
return true;
} }
TEST_CASE(test1) TEST_CASE(test1)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment