Commit ab86abd8 authored by Paul
Browse files

Fixes

parent 44b343e1
...@@ -262,6 +262,7 @@ rocm_enable_cppcheck( ...@@ -262,6 +262,7 @@ rocm_enable_cppcheck(
useSmartPointer:*src/api/api.cpp useSmartPointer:*src/api/api.cpp
useSmartPointer:*make_shared_array.hpp useSmartPointer:*make_shared_array.hpp
migraphx-RedundantLocalVariable:*src/api/api.cpp migraphx-RedundantLocalVariable:*src/api/api.cpp
migraphx-UseSmartPointer:*src/api/api.cpp
FORCE FORCE
INCONCLUSIVE INCONCLUSIVE
RULE_FILE RULE_FILE
......
...@@ -65,7 +65,7 @@ struct square_custom_op final : migraphx::experimental_custom_op_base ...@@ -65,7 +65,7 @@ struct square_custom_op final : migraphx::experimental_custom_op_base
MIGRAPHX_HIP_ASSERT(hipSetDevice(0)); MIGRAPHX_HIP_ASSERT(hipSetDevice(0));
const unsigned blocks = 512; const unsigned blocks = 512;
const unsigned threads_per_block = 256; const unsigned threads_per_block = 256;
// cppcheck-suppress UseDeviceLaunch // cppcheck-suppress migraphx-UseDeviceLaunch
hipLaunchKernelGGL(vector_square, hipLaunchKernelGGL(vector_square,
dim3(blocks), dim3(blocks),
dim3(threads_per_block), dim3(threads_per_block),
......
...@@ -33,6 +33,7 @@ inline namespace MIGRAPHX_INLINE_NS { ...@@ -33,6 +33,7 @@ inline namespace MIGRAPHX_INLINE_NS {
template <typename T> template <typename T>
std::shared_ptr<T> make_shared_array(size_t size) std::shared_ptr<T> make_shared_array(size_t size)
{ {
// cppcheck-suppress migraphx-UseSmartPointer
return std::shared_ptr<T>(new T[size](), std::default_delete<T[]>()); // NOLINT return std::shared_ptr<T>(new T[size](), std::default_delete<T[]>()); // NOLINT
} }
......
...@@ -191,8 +191,7 @@ double rms_range(const R1& r1, const R2& r2) ...@@ -191,8 +191,7 @@ double rms_range(const R1& r1, const R2& r2)
template <class R> template <class R>
double get_rms_tol(const R&, std::size_t tolerance = 80) double get_rms_tol(const R&, std::size_t tolerance = 80)
{ {
double threshold = std::numeric_limits<range_value<R>>::epsilon() * tolerance; return std::numeric_limits<range_value<R>>::epsilon() * tolerance;
return threshold;
} }
/* /*
......
...@@ -1052,13 +1052,10 @@ void module::calc_implicit_deps(const module& smod, ...@@ -1052,13 +1052,10 @@ void module::calc_implicit_deps(const module& smod,
} }
const auto& mod_args = ii->module_inputs(); const auto& mod_args = ii->module_inputs();
if(not mod_args.empty())
{
for(const auto* ssmod : mod_args) for(const auto* ssmod : mod_args)
{ {
calc_implicit_deps(*ssmod, pmod, ins, deps); calc_implicit_deps(*ssmod, pmod, ins, deps);
} }
}
} }
} }
......
...@@ -66,9 +66,8 @@ instruction_ref bcast_scalar_instr(const migraphx::shape& shape_out, ...@@ -66,9 +66,8 @@ instruction_ref bcast_scalar_instr(const migraphx::shape& shape_out,
instruction_ref arg_in, instruction_ref arg_in,
const onnx_parser::node_info& info) const onnx_parser::node_info& info)
{ {
auto bcast_instr_out = info.add_instruction( return info.add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", shape_out.lens()}}), arg_in); migraphx::make_op("multibroadcast", {{"out_lens", shape_out.lens()}}), arg_in);
return bcast_instr_out;
} }
} // namespace onnx } // namespace onnx
......
...@@ -408,7 +408,7 @@ onnx_parser::parse_graph(module* mod, const onnx::GraphProto& graph, bool inlini ...@@ -408,7 +408,7 @@ onnx_parser::parse_graph(module* mod, const onnx::GraphProto& graph, bool inlini
} }
std::vector<instruction_ref> result; std::vector<instruction_ref> result;
std::size_t output_num = static_cast<std::size_t>(node.output().size()); std::size_t output_num = node.output().size();
if(ops.count(node.op_type()) == 0) if(ops.count(node.op_type()) == 0)
{ {
if(skip_unknown_operators) if(skip_unknown_operators)
......
...@@ -120,8 +120,7 @@ struct parse_groupnorm : op_parser<parse_groupnorm> ...@@ -120,8 +120,7 @@ struct parse_groupnorm : op_parser<parse_groupnorm>
info.add_instruction(make_op("broadcast", {{"axis", 1}, {"out_lens", dims}}), bias); info.add_instruction(make_op("broadcast", {{"axis", 1}, {"out_lens", dims}}), bias);
auto scaled = info.add_instruction(make_op("mul"), result, scale_bcast); auto scaled = info.add_instruction(make_op("mul"), result, scale_bcast);
auto y = info.add_instruction(make_op("add"), scaled, bias_bcast); auto y = info.add_instruction(make_op("add"), scaled, bias_bcast);
auto y_reshaped = info.add_instruction(make_op("reshape", {{"dims", x_dims}}), y); return info.add_instruction(make_op("reshape", {{"dims", x_dims}}), y);
return y_reshaped;
} }
}; };
......
...@@ -230,9 +230,7 @@ struct parse_qlinearconv : op_parser<parse_qlinearconv> ...@@ -230,9 +230,7 @@ struct parse_qlinearconv : op_parser<parse_qlinearconv>
if(args.size() > 8) if(args.size() > 8)
conv_x_w = add_bias_to_conv(args[8], conv_x_w, info); conv_x_w = add_bias_to_conv(args[8], conv_x_w, info);
auto quant_conv = return bcast_qdq_instr("quantizelinear", conv_x_w, in_scale_y, in_zero_pt_y, info);
bcast_qdq_instr("quantizelinear", conv_x_w, in_scale_y, in_zero_pt_y, info);
return quant_conv;
} }
}; };
......
...@@ -57,8 +57,8 @@ struct parse_range : op_parser<parse_range> ...@@ -57,8 +57,8 @@ struct parse_range : op_parser<parse_range>
auto limit_val = limit.front(); auto limit_val = limit.front();
auto delta_val = delta.front(); auto delta_val = delta.front();
size_t num_elements = static_cast<size_t>( size_t num_elements =
ceil(static_cast<double>(limit_val - start_val) / static_cast<double>(delta_val))); ceil(static_cast<double>(limit_val - start_val) / static_cast<double>(delta_val));
assert(num_elements > 0); assert(num_elements > 0);
......
...@@ -89,7 +89,7 @@ inline auto launch(hipStream_t stream, index_int global, index_int local) ...@@ -89,7 +89,7 @@ inline auto launch(hipStream_t stream, index_int global, index_int local)
*/ */
hipError_t flush_call = hipGetLastError(); hipError_t flush_call = hipGetLastError();
(void)(flush_call); (void)(flush_call);
// cppcheck-suppress UseDeviceLaunch // cppcheck-suppress migraphx-UseDeviceLaunch
hipLaunchKernelGGL((launcher<f_type>), nblocks, nthreads, 0, stream, f); hipLaunchKernelGGL((launcher<f_type>), nblocks, nthreads, 0, stream, f);
hipError_t kernel_launch_status = hipGetLastError(); hipError_t kernel_launch_status = hipGetLastError();
if(kernel_launch_status != hipSuccess) if(kernel_launch_status != hipSuccess)
......
...@@ -94,6 +94,7 @@ inline auto mi_nglobal(const hip_shape<N>& s, index_int nlocal) ...@@ -94,6 +94,7 @@ inline auto mi_nglobal(const hip_shape<N>& s, index_int nlocal)
// //
// assert(std::any_of(nglobal_multi.begin(), nglobal_multi.end(), [](auto x){return x>0;})); // assert(std::any_of(nglobal_multi.begin(), nglobal_multi.end(), [](auto x){return x>0;}));
// cppcheck-suppress migraphx-RedundantLocalVariable
return nglobal_multi; return nglobal_multi;
} }
......
...@@ -40,7 +40,7 @@ struct parse_onehot : op_parser<parse_onehot> ...@@ -40,7 +40,7 @@ struct parse_onehot : op_parser<parse_onehot>
tf_parser::node_info info, tf_parser::node_info info,
std::vector<instruction_ref> args) const std::vector<instruction_ref> args) const
{ {
size_t depth = static_cast<size_t>(args[1]->eval().at<int32_t>()); size_t depth = args[1]->eval().at<size_t>();
int64_t axis = -1; int64_t axis = -1;
float on_value = args[2]->eval().at<float>(); float on_value = args[2]->eval().at<float>();
......
...@@ -601,7 +601,7 @@ struct driver ...@@ -601,7 +601,7 @@ struct driver
failures() = 0; failures() = 0;
f(); f();
} }
// cppcheck-suppress EmptyCatchStatement // cppcheck-suppress migraphx-EmptyCatchStatement
catch(const failure_error&) catch(const failure_error&)
{ {
} }
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment