"examples/dreambooth/train_dreambooth_flax.py" did not exist on "4bf675f4652759b42280103cb84ab0101cf23382"
Unverified Commit 180e5720 authored by Illia Silin, committed by GitHub

Fixing most of the cppcheck errors. (#1142)

* fix cppcheck errors, first pass

* fix format

* fix returned value in examples

* add macro definitions for cppcheck

* fix the profile_gemm logic

* update the gemm profiler logic

* add more definitions to cppcheck, fix a couple more errors

* replace runtime error with message in device function

* fix a couple of int4 issues

* no return for fill function

* fix errors in data_types.hpp

* fix format

* fix a few remaining errors

* fix errors in data_types.hpp

* fix the last couple of errors in data_types.hpp
parent 6169fbbd
@@ -304,7 +304,7 @@ def buildHipClangJob(Map conf=[:]){
 gitStatusWrapper(credentialsId: "${status_wrapper_creds}", gitHubContext: "Jenkins - ${variant}", account: 'ROCm', repo: 'composable_kernel') {
 withDockerContainer(image: image, args: dockerOpts + ' -v=/var/jenkins/:/var/jenkins') {
-timeout(time: 20, unit: 'HOURS')
+timeout(time: 48, unit: 'HOURS')
 {
 cmake_build(conf)
 }
@@ -755,7 +755,11 @@ pipeline {
 -o -iname \'*.cl\' \
 | grep -v 'build/' \
 | xargs -n 1 -P 1 -I{} -t sh -c \'clang-format-12 -style=file {} | diff - {}\' && \
-/cppcheck/build/bin/cppcheck ../* -v -j \$(nproc) -I ../include -I ../profiler/include -I ../library/include --file-filter=*.cpp --enable=all --output-file=ck_cppcheck.log"
+/cppcheck/build/bin/cppcheck ../* -v -j \$(nproc) -I ../include -I ../profiler/include -I ../library/include \
+-D CK_ENABLE_FP64 -D CK_ENABLE_FP32 -D CK_ENABLE_FP16 -D CK_ENABLE_FP8 -D CK_ENABLE_BF16 -D CK_ENABLE_BF8 -D CK_ENABLE_INT8 -D DL_KERNELS \
+-D __gfx908__ -D __gfx90a__ -D __gfx940__ -D __gfx941__ -D __gfx942__ -D __gfx1030__ -D __gfx1100__ -D __gfx1101__ -D __gfx1102__ \
+-U __gfx803__ -U __gfx900__ -U __gfx906__ -U CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4 \
+--file-filter=*.cpp --force --enable=all --output-file=ck_cppcheck.log"
 }
 steps{
 buildHipClangJobAndReboot(setup_cmd: "", build_cmd: "", execute_cmd: execute_cmd, no_reboot:true)
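Note on the added flags (reviewer context, not part of the diff): cppcheck only analyzes preprocessor branches it can evaluate, so code gated behind the CK_ENABLE_* or __gfx*__ macros is invisible to it unless those macros are supplied with -D; -U pins a branch off, so the experimental int4 paths are analyzed as disabled, and --force asks cppcheck to check all remaining macro configurations. A minimal sketch of the effect, using a hypothetical check_fp16.cpp:

// check_fp16.cpp (hypothetical sketch): without -D CK_ENABLE_FP16 on the
// cppcheck command line, the analyzer never enters the block below, so a
// defect in the fp16 path would go unreported.
#include <vector>

#ifdef CK_ENABLE_FP16
int first_element(const std::vector<int>& v)
{
    return v[0]; // possible out-of-bounds access, visible to the analyzer
                 // only when CK_ENABLE_FP16 is defined for the run
}
#endif

int main() { return 0; }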
 // SPDX-License-Identifier: MIT
 // Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
-#ifndef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
-#error Should compile this file with ck::int4_t support
-#endif
+#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
 #include "common.hpp"
@@ -43,3 +41,4 @@ using ReferenceGemmInstance = ck::tensor_operation::host::
 #include "run_gemm_example.inc"
 int main(int argc, char* argv[]) { return !run_gemm_example(argc, argv); }
+#endif
\ No newline at end of file
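Note on the pattern above, which repeats in several example files below (reviewer context, not part of the diff): the old guard turned the file into a hard #error whenever ck::int4_t support was off, which also breaks analysis runs that undefine the macro (see -U CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4 in the Jenkins hunk). Wrapping the entire translation unit in #ifdef instead lets it preprocess to nothing in that configuration. The examples' return value idiom is also worth noting: run_gemm_example returns true on success, and the negation maps that to the conventional 0 exit code. A minimal sketch with hypothetical names, assuming the build only links such a target when the extension is enabled:

// example_int4.cpp (hypothetical sketch)
#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
static bool run_example() { return true; } // stands in for run_gemm_example

int main() { return !run_example(); } // true (success) -> exit code 0
#endif
// with the macro undefined, the file compiles to an empty translation unit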
 // SPDX-License-Identifier: MIT
 // Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
-#ifndef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
-#error Should compile this file with ck::int4_t support
-#endif
+#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
 #include "common.hpp"
@@ -44,3 +42,4 @@ using ReferenceGemmInstance = ck::tensor_operation::host::
 #include "run_gemm_example.inc"
 int main(int argc, char* argv[]) { return !run_gemm_example(argc, argv); }
+#endif
\ No newline at end of file
 // SPDX-License-Identifier: MIT
 // Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
-#ifndef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
-#error Should compile this file with ck::int4_t support
-#endif
+#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
 #include "common.hpp"
@@ -58,3 +56,4 @@ using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataTyp
 #include "run_gemm_add_add_fastgelu_example.inc"
 int main(int argc, char* argv[]) { return !run_gemm_add_add_fastgelu_example(argc, argv); }
+#endif
 // SPDX-License-Identifier: MIT
 // Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
-#ifndef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
-#error Should compile this file with ck::int4_t support
-#endif
+#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
 #define BUILD_INT4_EXAMPLE
@@ -24,3 +22,4 @@ using RsDataType = ck::Tuple<R0DataType>;
 #include "run_convnd_fwd_max_example.inc"
 int main(int argc, char* argv[]) { return !run_convnd_fwd_max_example(argc, argv); }
+#endif
@@ -274,13 +274,12 @@ int main(int argc, char* argv[])
 {
 auto reduce0_acc = reduce0_op.GetIdentityValue<ReduceAccDataType>();
 auto reduce1_acc = reduce1_op.GetIdentityValue<ReduceAccDataType>();
+ReduceAccDataType d0_val = 0;
+ReduceAccDataType d1_val = 0;
 for(int n = 0; n < N; ++n)
 {
 auto c_val =
 ck::type_convert<ReduceAccDataType>(c_g_m_n_host_result(batch, m, n));
-ReduceAccDataType d0_val;
-ReduceAccDataType d1_val;
 UnaryIdenticElementOp{}(d0_val, c_val);
 UnarySquareElementOp{}(d1_val, c_val);
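Context for the reduce hunk above and the ReferenceGemm hunk further down (reviewer note, not part of the diff): the element ops write their result through a reference parameter, and cppcheck cannot see through operator() to prove the output gets assigned, so an uninitialized declaration inside the loop trips an uninitialized-variable warning; hoisting the declaration and zero-initializing it is behavior-preserving and silences the analyzer. A minimal sketch with a hypothetical functor:

#include <iostream>

// writes its result through the first (reference) parameter, like the
// UnarySquareElementOp above
struct Square
{
    void operator()(float& y, float x) const { y = x * x; }
};

int main()
{
    float acc = 0.f;
    float d = 0.f; // was 'float d;' inside the loop: flagged as uninitialized
    for(int n = 0; n < 4; ++n)
    {
        Square{}(d, static_cast<float>(n));
        acc += d;
    }
    std::cout << acc << '\n'; // 0 + 1 + 4 + 9 = 14
    return 0;
}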
 // SPDX-License-Identifier: MIT
 // Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
-#ifndef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
-#error Should compile this file with ck::int4_t support
-#endif
+#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
 #include "common.hpp"
@@ -29,3 +27,4 @@ using OutElementOp = ck::tensor_operation::element_wise::AddReluAdd;
 #include "run_grouped_conv_fwd_bias_relu_add_example.inc"
 int main(int argc, char* argv[]) { return !run_grouped_conv_fwd_bias_relu_add_example(argc, argv); }
+#endif
@@ -9,9 +9,7 @@ Gemm + Gemm fused operation. Computes C_m_o = A_m_k * B0_k_n * B1_n_o
 Gemm1
 */
-#ifndef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
-#error Should compile this file with ck::int4_t support
-#endif
+#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
 #include <iostream>
 #include <numeric>
@@ -144,3 +142,4 @@ static_assert(sizeof(ck::int4_t) == sizeof(int8_t));
 #endif
 int main(int argc, char* argv[]) { return run_batched_gemm_gemm_example(argc, argv) ? 0 : 1; }
+#endif
 // SPDX-License-Identifier: MIT
 // Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
-#ifndef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
-#error Should compile this file with ck::int4_t support
-#endif
+#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
 #include <cstdlib>
 #include <iostream>
@@ -120,3 +118,4 @@ static_assert(sizeof(ck::int4_t) == sizeof(int8_t));
 #endif
 int main(int argc, char* argv[]) { return run_grouped_conv_conv_fwd_example(argc, argv) ? 0 : 1; }
+#endif
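Context on the static_assert visible in the two hunks above (reviewer note, not part of the diff): ck::int4_t is storage-compatible with int8_t, so packed int4 and int8 buffers have identical sizes, which the examples rely on. A minimal sketch of the same invariant, assuming a Clang build where the 4-bit type is a _BitInt (hypothetical alias, not CK's actual definition):

// int4_storage.cpp (hypothetical sketch): a 4-bit _BitInt still occupies a
// full byte of storage, which is what the examples' static_assert checks.
#include <cstdint>

#if defined(__clang__)
using int4_t = _BitInt(4);
static_assert(sizeof(int4_t) == sizeof(int8_t), "int4 occupies one byte");
#endif

int main() { return 0; }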
@@ -32,6 +32,8 @@ std::vector<ck::index_t> f_tensor_strides_ncdhw(ck::index_t N_,
 return {C_ * D * H * W, D * H * W, H * W, W, 1_uz};
 else if constexpr(ck::is_same<decltype(layout), ck::tensor_layout::convolution::NDHWC>::value)
 return {D * C_ * H * W, 1_uz, C_ * H * W, W * C_, C_};
+throw std::runtime_error("Pool3d_fwd: problem with layout. ");
+return {0, 0, 0, 0, 0};
 };
 template <typename TensorLayout>
@@ -53,6 +55,8 @@ HostTensorDescriptor f_host_tensor_descriptor(std::size_t N_,
 return HostTensorDescriptor({N_, C_, D, H, W},
 {D * C_ * H * W, 1_uz, C_ * H * W, W * C_, C_});
 }
+throw std::runtime_error("Pool3d_fwd: problem with layout. ");
+return HostTensorDescriptor({0, 0, 0, 0, 0}, {0, 0, 0, 0, 0});
 };
 template <typename DevicePoolFwdInstance,
@@ -26,6 +26,8 @@ std::vector<ck::index_t> f_tensor_strides_ncdhw(ck::index_t N_,
 return {C_ * D * H * W, D * H * W, H * W, W, 1_uz};
 else if constexpr(ck::is_same<decltype(layout), ck::tensor_layout::convolution::NDHWC>::value)
 return {D * C_ * H * W, 1_uz, C_ * H * W, W * C_, C_};
+throw std::runtime_error("Avgpool3d_bwd: problem with layout. ");
+return {0, 0, 0, 0, 0};
 };
 template <typename TensorLayout>
@@ -47,6 +49,8 @@ HostTensorDescriptor f_host_tensor_descriptor(std::size_t N_,
 return HostTensorDescriptor({N_, C_, D, H, W},
 {D * C_ * H * W, 1_uz, C_ * H * W, W * C_, C_});
 }
+throw std::runtime_error("Avgpool3d_bwd: problem with layout. ");
+return HostTensorDescriptor({0, 0, 0, 0, 0}, {0, 0, 0, 0, 0});
 };
 template <typename DevicePoolBwdInstance,
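The four pooling hunks above, and the reference-operator hunks further down, apply one pattern (reviewer note, not part of the diff): when every return sits inside an if constexpr chain, cppcheck reports a missing return on the fall-through path, so the fix throws a descriptive runtime_error and follows it with an unreachable dummy return that gives every control path a value. A minimal sketch, assuming a layout-tag dispatch like the one above:

#include <stdexcept>
#include <type_traits>
#include <vector>

struct NCDHW {};
struct NDHWC {};

template <typename Layout>
std::vector<int> strides(int C, int D, int H, int W)
{
    if constexpr(std::is_same_v<Layout, NCDHW>)
        return {C * D * H * W, D * H * W, H * W, W, 1};
    else if constexpr(std::is_same_v<Layout, NDHWC>)
        return {D * C * H * W, 1, C * H * W, W * C, C};
    throw std::runtime_error("unsupported layout");
    return {0, 0, 0, 0, 0}; // unreachable; satisfies the analyzer
}

int main() { return static_cast<int>(strides<NCDHW>(3, 4, 5, 6).size()) != 5; }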
@@ -189,6 +189,7 @@ struct vector_type<T, 1>
 }
 };
+int static err = 0;
 template <typename T>
 struct vector_type<T, 2>
 {
@@ -221,6 +222,10 @@ struct vector_type<T, 2>
 {
 return data_.d2x1_;
 }
+else
+{
+return err;
+}
 }
 template <typename X>
@@ -236,6 +241,10 @@ struct vector_type<T, 2>
 {
 return data_.d2x1_;
 }
+else
+{
+return err;
+}
 }
 };
@@ -278,6 +287,10 @@ struct vector_type<T, 4>
 {
 return data_.d4x1_;
 }
+else
+{
+return err;
+}
 }
 template <typename X>
@@ -298,6 +311,10 @@ struct vector_type<T, 4>
 {
 return data_.d4x1_;
 }
+else
+{
+return err;
+}
 }
 };
@@ -347,6 +364,10 @@ struct vector_type<T, 8>
 {
 return data_.d8x1_;
 }
+else
+{
+return err;
+}
 }
 template <typename X>
@@ -372,6 +393,10 @@ struct vector_type<T, 8>
 {
 return data_.d8x1_;
 }
+else
+{
+return err;
+}
 }
 };
@@ -428,6 +453,10 @@ struct vector_type<T, 16>
 {
 return data_.d16x1_;
 }
+else
+{
+return err;
+}
 }
 template <typename X>
@@ -458,6 +487,10 @@ struct vector_type<T, 16>
 {
 return data_.d16x1_;
 }
+else
+{
+return err;
+}
 }
 };
@@ -520,6 +553,10 @@ struct vector_type<T, 32>
 {
 return data_.d32x1_;
 }
+else
+{
+return err;
+}
 }
 template <typename X>
@@ -554,6 +591,10 @@ struct vector_type<T, 32>
 {
 return data_.d32x1_;
 }
+else
+{
+return err;
+}
 }
 };
@@ -623,6 +664,10 @@ struct vector_type<T, 64>
 {
 return data_.d64x1_;
 }
+else
+{
+return err;
+}
 }
 template <typename X>
@@ -662,6 +707,10 @@ struct vector_type<T, 64>
 {
 return data_.d64x1_;
 }
+else
+{
+return err;
+}
 }
 };
@@ -737,6 +786,10 @@ struct vector_type<T, 128>
 {
 return data_.d128x1_;
 }
+else
+{
+return err;
+}
 }
 template <typename X>
@@ -780,6 +833,10 @@ struct vector_type<T, 128>
 {
 return data_.d128x1_;
 }
+else
+{
+return err;
+}
 }
 };
@@ -861,6 +918,10 @@ struct vector_type<T, 256>
 {
 return data_.d256x1_;
 }
+else
+{
+return err;
+}
 }
 template <typename X>
@@ -908,6 +969,10 @@ struct vector_type<T, 256>
 {
 return data_.d256x1_;
 }
+else
+{
+return err;
+}
 }
 };
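Context for the long data_types.hpp diff above (reviewer note, not part of the diff): the AsType accessors return only from inside if constexpr branches, which cppcheck again reports as a possibly missing return. Because the discarded branch of an if constexpr inside a template is never instantiated, the file-scope static err gives the impossible else branch something to return without changing any instantiated code path. A minimal sketch with simplified types:

#include <type_traits>

static int err = 0; // fallback referent, mirroring the 'int static err' above

struct two_floats
{
    float d2x1_[2];

    template <typename X>
    const auto& as_type() const
    {
        if constexpr(std::is_same_v<X, float>)
        {
            return d2x1_[0];
        }
        else
        {
            return err; // never instantiated for supported X; calms cppcheck
        }
    }
};

int main()
{
    two_floats v{{1.f, 2.f}};
    return v.as_type<float>() == 1.f ? 0 : 1;
}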
@@ -265,6 +265,8 @@ struct ReferenceColumnToImage : public device::BaseOperator
 return 0;
 }
+throw std::runtime_error("Col2Img: number of dimensions should be between 1 and 3.");
+return 1;
 }
 float Run(const device::BaseArgument* p_arg,
@@ -313,6 +313,9 @@ struct ReferenceConvBwdData : public device::BaseOperator
 return 0;
 }
+throw std::runtime_error(
+"Conv_bwd_data: number of dimensions must be between 1 and 3.");
+return 1;
 }
 float Run(const device::BaseArgument* p_arg,
@@ -265,6 +265,8 @@ struct ReferenceConvBwdWeight : public device::BaseOperator
 return 0;
 }
+throw std::runtime_error("Conv_bwd: number of dimensions must be between 1 and 3.");
+return 1;
 }
 float Run(const device::BaseArgument* p_arg,
@@ -360,6 +360,8 @@ struct ReferenceConvFwd : public device::BaseOperator
 return 0;
 }
+throw std::runtime_error("Conv_fwd: number of dimensions must be between 1 and 3.");
+return 1;
 }
 float Run(const device::BaseArgument* p_arg,
@@ -63,12 +63,11 @@ struct ReferenceGemm : public device::BaseOperator
 const int K = arg.a_m_k_.mDesc.GetLengths()[1];
 AccDataType v_acc = 0;
+ComputeTypeA v_a = 0;
+ComputeTypeB v_b = 0;
 for(int k = 0; k < K; ++k)
 {
-ComputeTypeA v_a;
-ComputeTypeB v_b;
 // use PassThrough instead of ConvertBF16RTN for reference calculation
 if constexpr(is_same_v<AElementwiseOperation,
 ck::tensor_operation::element_wise::ConvertBF16RTN>)
@@ -94,7 +93,7 @@ struct ReferenceGemm : public device::BaseOperator
 ck::type_convert<AccDataType>(v_a) * ck::type_convert<AccDataType>(v_b);
 }
-CDataType v_c;
+CDataType v_c = 0;
 arg.c_element_op_(v_c, v_acc);
@@ -230,6 +230,8 @@ struct ReferenceImageToColumn : public device::BaseOperator
 return 0;
 }
+throw std::runtime_error("Img2Col: number of dimensions should be between 1 and 3.");
+return 1;
 }
 float Run(const device::BaseArgument* p_arg,
@@ -106,9 +106,8 @@ struct DeviceOperationInstanceFactory<
 return op_ptrs;
 }
 };
-#endif
 } // namespace instance
 } // namespace device
 } // namespace tensor_operation
 } // namespace ck
 #endif
@@ -114,9 +114,8 @@ struct DeviceOperationInstanceFactory<ck::tensor_operation::device::DeviceGemmSt
 return op_ptrs;
 }
 };
-#endif
 } // namespace instance
 } // namespace device
 } // namespace tensor_operation
 } // namespace ck
 #endif
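Context on the last two hunks (reviewer note, not part of the diff): each factory header ends with a preprocessor conditional that should close exactly once, at the end of the file, after the namespace closers; the stray #endif between the struct and the namespaces unbalanced the conditional stack, which cppcheck flags. A minimal sketch of the intended layout, with a hypothetical guard macro:

// factory_layout.hpp (hypothetical sketch): one guard, closed once at the
// end of the file, after every namespace it encloses.
#ifdef ENABLE_THIS_FACTORY // hypothetical feature macro
namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

struct DeviceOperationInstanceFactoryStub
{
    // instance registration would live here
};

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
#endif // ENABLE_THIS_FACTORY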