Commit 14a2464b authored by Paul

Upgrade to hcc 2.0

parent 3040b5e6
@@ -5,6 +5,10 @@ CheckOptions:
value: risky
- key: modernize-loop-convert.NamingStyle
value: lower_case
+- key: performance-unnecessary-copy-initialization.AllowedTypes
+  value: 'shape'
+- key: performance-unnecessary-value-param.AllowedTypes
+  value: 'shape'
- key: readability-function-size.BranchThreshold
value: '15'
- key: readability-function-size.LineThreshold
......
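A note on the two new AllowedTypes entries: they whitelist migraphx's shape for the copy-related performance checks, i.e. copying a shape is treated as cheap enough that clang-tidy should not demand a const reference. A rough sketch of what the exemption permits (count_elements is a hypothetical function; elements() is the real accessor on migraphx::shape):

    // With performance-unnecessary-value-param.AllowedTypes = 'shape',
    // clang-tidy stops suggesting `const shape&` for this parameter:
    std::size_t count_elements(shape s) { return s.elements(); }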
@@ -52,7 +52,9 @@ rocm_enable_clang_tidy(
-clang-analyzer-optin.performance.Padding
-clang-diagnostic-deprecated-declarations
-clang-diagnostic-extern-c-compat
+-clang-diagnostic-disabled-macro-expansion
+-clang-diagnostic-unused-command-line-argument
-cppcoreguidelines-macro-usage
-cppcoreguidelines-pro-bounds-array-to-pointer-decay
-cppcoreguidelines-pro-bounds-constant-array-index
-cppcoreguidelines-pro-bounds-pointer-arithmetic
@@ -70,13 +72,12 @@ rocm_enable_clang_tidy(
-hicpp-explicit-conversions
-hicpp-no-array-decay
-hicpp-special-member-functions
-hicpp-uppercase-literal-suffix
-hicpp-use-override
# This check is broken
-hicpp-use-auto
-llvm-header-guard
-llvm-include-order
-misc-macro-parentheses
-modernize-use-auto
-modernize-use-override
-modernize-pass-by-value
-modernize-use-default-member-init
@@ -84,7 +85,12 @@ rocm_enable_clang_tidy(
-readability-braces-around-statements
-readability-else-after-return
-readability-named-parameter
-readability-uppercase-literal-suffix
-*-avoid-c-arrays
-*-explicit-constructor
-*-magic-numbers
-*-non-private-member-variables-in-classes
-*-use-auto
-*-use-emplace
-*-use-equals-default
ERRORS
@@ -94,6 +100,7 @@ rocm_enable_clang_tidy(
".*hpp"
EXTRA_ARGS
-DMIGRAPHX_USE_CLANG_TIDY
"-Dmain\\\\(...\\\\)=main\\\\(__VA_ARGS__\\\\) // NOLINT"
)
include(ROCMCppCheck)
......
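The new EXTRA_ARGS define deserves a note: it applies only to the clang-tidy invocation and rewrites every definition of main so that, as far as clang-tidy is concerned, the signature carries a NOLINT marker, keeping checks quiet on the many test drivers. A sketch of the apparent effect (the expansion shown is an interpretation, not part of the diff; run is a hypothetical helper):

    // As written in a test file:
    int main(int argc, char* argv[])
    {
        return run(argc, argv);
    }

    // Roughly what clang-tidy sees with -Dmain(...)=main(__VA_ARGS__) // NOLINT:
    int main(int argc, char* argv[]) // NOLINT
    {
        return run(argc, argv);
    }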
@@ -13,7 +13,7 @@ struct match_const_add
return match::name("add")(match::args(match::name("@literal"), match::name("@literal")));
}
-void apply(program& p, match::matcher_result r) const
+void apply(program& p, const match::matcher_result& r) const
{
auto ins = r.result;
auto arg1 = ins->inputs().at(0)->get_literal();
......
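This is the first of many hunks applying the same fix (clang-tidy's performance-unnecessary-value-param): parameters of non-trivial types become const references so each call stops paying for a copy. A generic sketch of the pattern, independent of the MIGraphX types:

    #include <cstddef>
    #include <string>
    #include <vector>

    // By value: the vector and every string in it are copied per call.
    std::size_t total_size_by_value(std::vector<std::string> v);

    // By const reference: read-only access, no copy.
    std::size_t total_size(const std::vector<std::string>& v)
    {
        std::size_t n = 0;
        for(const auto& s : v)
            n += s.size();
        return n;
    }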
@@ -10,7 +10,7 @@ inline namespace MIGRAPHX_INLINE_NS {
template <typename T>
std::shared_ptr<T> make_shared_array(size_t size)
{
-return std::shared_ptr<T>(new T[size], std::default_delete<T[]>());
+return std::shared_ptr<T>(new T[size], std::default_delete<T[]>()); // NOLINT
}
} // namespace MIGRAPHX_INLINE_NS
......
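The NOLINT here keeps a deliberate idiom, new T[] paired with std::default_delete<T[]> (a shared array in pre-C++17 code), from tripping the stricter checks in the new clang-tidy; exactly which check fires (likely cppcoreguidelines-owning-memory) is an assumption. Hypothetical usage for context:

    // A shared buffer of 256 floats; the array deleter guarantees
    // delete[] runs when the last owner goes away.
    auto buf = make_shared_array<float>(256);
    buf.get()[0] = 1.0f;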
@@ -289,7 +289,7 @@ struct transpose
}
std::vector<size_t> output_lens(input_lens.size());
std::vector<size_t> output_strides(input_lens.size());
-for(int i = 0; i < output_lens.size(); i++)
+for(std::size_t i = 0; i < output_lens.size(); i++)
{
output_lens[i] = input_lens[dims[i]];
output_strides[i] = input_strides[dims[i]];
@@ -326,7 +326,7 @@ struct concat
std::size_t axis = 0;
std::string name() const { return "concat"; }
std::vector<std::size_t> compute_offsets(const shape& output_shape,
-const std::vector<argument> args) const
+const std::vector<argument>& args) const
{
std::vector<std::size_t> offsets;
std::vector<std::size_t> offset(args[0].get_shape().lens().size(), 0);
......
@@ -164,7 +164,7 @@ bool operator!=(const tensor_view<T>& x, const tensor_view<U>& y)
}
template <class T>
-tensor_view<T> make_view(shape s, T* data)
+tensor_view<T> make_view(const shape& s, T* data)
{
return {s, data};
}
......
@@ -18,7 +18,7 @@ const std::string& get_type_name()
name = typeid(PrivateMigraphTypeNameProbe).name();
name = name.substr(7);
#else
-const char parameter_name[] = "PrivateMigraphTypeNameProbe =";
+const char parameter_name[] = "PrivateMigraphTypeNameProbe =";// NOLINT
name = __PRETTY_FUNCTION__;
......
@@ -14,7 +14,10 @@
auto reverse_int(unsigned int i)
{
-unsigned char c1, c2, c3, c4;
+unsigned char c1;
+unsigned char c2;
+unsigned char c3;
+unsigned char c4;
c1 = i & 255u;
c2 = (i >> 8u) & 255u;
c3 = (i >> 16u) & 255u;
@@ -32,7 +35,9 @@ read_mnist_images(const std::string& full_path, int& number_of_images, int& imag
if(file.is_open())
{
-int magic_number = 0, n_rows = 0, n_cols = 0;
+int magic_number = 0;
+int n_rows = 0;
+int n_cols = 0;
file.read(reinterpret_cast<char*>(&magic_number), sizeof(magic_number));
magic_number = reverse_int(magic_number);
......
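For context, reverse_int is the usual byte-swap for the big-endian MNIST header fields, and the hunk above cuts off before the bytes are reassembled. A sketch of the complete function under that assumption:

    // Byte-swap a 32-bit value read from the big-endian MNIST header.
    auto reverse_int(unsigned int i)
    {
        unsigned char c1;
        unsigned char c2;
        unsigned char c3;
        unsigned char c4;
        c1 = i & 255u;
        c2 = (i >> 8u) & 255u;
        c3 = (i >> 16u) & 255u;
        c4 = (i >> 24u) & 255u;
        return (static_cast<unsigned int>(c1) << 24u) | (static_cast<unsigned int>(c2) << 16u) |
               (static_cast<unsigned int>(c3) << 8u) | static_cast<unsigned int>(c4);
    }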
@@ -116,7 +116,7 @@ void verify_reduced_program(F f, double tolerance = 80)
{
migraphx::program p = f();
auto n = std::distance(p.begin(), p.end());
-for(int i = 0; i < n; i++)
+for(std::size_t i = 0; i < n; i++)
{
verify_reduced(f, i, tolerance);
}
......
@@ -166,7 +166,8 @@ struct cpu_im2col
const std::size_t& stride_h = op.stride[0];
const std::size_t& stride_w = op.stride[1];
-int kdiv2_h, kdiv2_w;
+int kdiv2_h;
+int kdiv2_w;
kdiv2_h = kernel_h / 2;
kdiv2_w = kernel_w / 2;
// calculate output sizes
......
@@ -18,7 +18,8 @@ argument miopen_abs::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
-float alpha = 1, beta = 0;
+float alpha = 1;
+float beta = 0;
auto x_desc = make_tensor(args[0].get_shape());
auto y_desc = make_tensor(output_shape);
miopenActivationForward(ctx.get_stream().get_miopen(),
......
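The alpha/beta split here recurs in every MIOpen hunk below; it matches clang-tidy's one-declaration-per-statement style (readability-isolate-declaration, presumably enabled by the newer clang-tidy shipped with hcc 2.0; that attribution is an assumption). The values themselves are MIOpen's blending factors:

    float alpha = 1; // scale applied to the freshly computed result
    float beta  = 0; // scale applied to the existing output (0 = overwrite)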
@@ -22,7 +22,8 @@ argument miopen_batch_norm_inference::compute(context& ctx,
auto y_desc = make_tensor(output_shape);
auto bn_desc = make_tensor(args[3].get_shape());
-float alpha = 1.0, beta = 0.0f;
+float alpha = 1.0;
+float beta = 0.0f;
miopenBatchNormalizationForwardInference(ctx.get_stream().get_miopen(),
miopenBatchNormMode_t(op.bn_mode),
......
@@ -21,7 +21,8 @@ argument miopen_convolution::compute(context& ctx,
auto w_desc = make_tensor(args[1].get_shape());
auto y_desc = make_tensor(output_shape);
-float alpha = 1, beta = 0;
+float alpha = 1;
+float beta = 0;
miopenConvolutionForward(ctx.get_stream().get_miopen(),
&alpha,
x_desc.get(),
......
@@ -18,7 +18,8 @@ argument miopen_elu::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
-float alpha = 1, beta = 0;
+float alpha = 1;
+float beta = 0;
auto x_desc = make_tensor(args[0].get_shape());
auto y_desc = make_tensor(output_shape);
miopenActivationForward(ctx.get_stream().get_miopen(),
......
@@ -265,7 +265,8 @@ struct miopen_conv_bias
argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
auto fargs = make_fused_args();
-float alpha = 1, beta = 0;
+float alpha = 1;
+float beta = 0;
miopenSetOpArgsConvForward(fargs.get(), conv, &alpha, &beta, args[1].implicit());
miopenSetOpArgsBiasForward(fargs.get(), bias, &alpha, &beta, args[3].implicit());
return f.execute(ctx, fargs, args[0], args[4]);
@@ -308,7 +309,8 @@ struct miopen_conv_bias_relu
argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
auto fargs = make_fused_args();
-float alpha = 1, beta = 0;
+float alpha = 1;
+float beta = 0;
miopenSetOpArgsConvForward(fargs.get(), conv, &alpha, &beta, args[1].implicit());
miopenSetOpArgsBiasForward(fargs.get(), bias, &alpha, &beta, args[3].implicit());
miopenSetOpArgsActivForward(fargs.get(), relu, &alpha, &beta, 0, 0, 0);
......
@@ -16,7 +16,8 @@ std::string hip_error(int error) { return hipGetErrorString(static_cast<hipError
std::size_t get_available_gpu_memory()
{
-size_t free, total;
+size_t free;
+size_t total;
auto status = hipMemGetInfo(&free, &total);
if(status != hipSuccess)
MIGRAPHX_THROW("Failed getting available memory: " + hip_error(status));
@@ -72,13 +73,13 @@ argument allocate_gpu(const shape& s, bool host)
return {s, [p]() mutable { return reinterpret_cast<char*>(p.get()); }};
}
-argument to_gpu(argument arg, bool host)
+argument to_gpu(const argument& arg, bool host)
{
auto p = share(write_to_gpu(arg.data(), arg.get_shape().bytes(), host));
return {arg.get_shape(), [p]() mutable { return reinterpret_cast<char*>(p.get()); }};
}
-argument from_gpu(argument arg)
+argument from_gpu(const argument& arg)
{
argument result;
arg.visit([&](auto x) {
@@ -98,7 +99,7 @@ void set_device(std::size_t id)
void gpu_sync() { hipDeviceSynchronize(); }
-void copy_to_gpu(argument src, argument dst)
+void copy_to_gpu(const argument& src, const argument& dst)
{
std::size_t src_size = src.get_shape().bytes();
std::size_t dst_size = dst.get_shape().bytes();
......
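A note on the pattern in to_gpu and from_gpu: the returned argument owns its storage through a shared pointer captured in the data closure, so passing arg by const reference only avoids the copy on the way in; lifetime is unaffected. A stripped-down sketch of the idiom (simplified types, not the real MIGraphX API):

    #include <cstddef>
    #include <functional>
    #include <memory>

    // The lambda co-owns the buffer via the captured shared_ptr, so the
    // storage lives as long as any copy of the closure does.
    std::function<char*()> make_owning_view(std::size_t bytes)
    {
        std::shared_ptr<char> p(new char[bytes], std::default_delete<char[]>()); // NOLINT
        return [p]() mutable { return p.get(); };
    }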
@@ -9,17 +9,17 @@ namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
-migraphx::argument allocate_gpu(const migraphx::shape& s, bool host = false);
+argument allocate_gpu(const shape& s, bool host = false);
-migraphx::argument to_gpu(migraphx::argument arg, bool host = false);
+argument to_gpu(const argument& arg, bool host = false);
-migraphx::argument from_gpu(migraphx::argument arg);
+argument from_gpu(const argument& arg);
void set_device(std::size_t id);
void gpu_sync();
-void copy_to_gpu(argument src, argument dst);
+void copy_to_gpu(const argument& src, const argument& dst);
struct hip_allocate
{
......
@@ -18,7 +18,8 @@ argument miopen_leaky_relu::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
-float alpha = 1, beta = 0;
+float alpha = 1;
+float beta = 0;
auto x_desc = make_tensor(args[0].get_shape());
auto y_desc = make_tensor(output_shape);
miopenActivationForward(ctx.get_stream().get_miopen(),
......
@@ -20,7 +20,8 @@ argument miopen_pooling::compute(context& ctx,
auto x_desc = make_tensor(args[0].get_shape());
auto y_desc = make_tensor(output_shape);
-float alpha = 1, beta = 0;
+float alpha = 1;
+float beta = 0;
miopenPoolingForward(ctx.get_stream().get_miopen(),
pd.get(),
......
@@ -18,7 +18,8 @@ argument miopen_relu::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
-float alpha = 1, beta = 0;
+float alpha = 1;
+float beta = 0;
auto x_desc = make_tensor(args[0].get_shape());
auto y_desc = make_tensor(output_shape);
miopenActivationForward(ctx.get_stream().get_miopen(),
......