Commit 57f2d3c3 authored by myamlak

Review remarks

parent c82abef1
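The review remarks addressed here follow two recurring patterns in the hunks below: loop counters compared against a container's size() are changed from int (narrowed through ck::type_convert<int>) to std::size_t, and std::vector parameters are changed from pass-by-value to pass-by-const-reference. A minimal sketch of the counter change, using a plain std::vector in place of the ck tensor containers:

#include <cstddef>
#include <vector>

int main()
{
    std::vector<float> data{1.0f, 2.0f, 3.0f};

    // Before: size() returns an unsigned value that had to be narrowed to int
    // to compare against a signed counter:
    //     for(int i = 0; i < ck::type_convert<int>(data.size()); ++i)

    // After: the counter has the same unsigned type as size(), so no
    // conversion is needed and -Wsign-compare stays quiet.
    for(std::size_t i = 0; i < data.size(); ++i)
    {
        data[i] *= 2.0f;
    }
    return 0;
}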
@@ -131,7 +131,7 @@ int main(int argc, char* argv[])
     std::size_t flop = 0, num_btype = 0;
-    for(int i = 0; i < ck::type_convert<int>(gemm_shapes.size()); i++)
+    for(std::size_t i = 0; i < gemm_shapes.size(); i++)
     {
         a_tensors.push_back(Tensor<ADataType>(f_host_tensor_descriptor(
             gemm_shapes[i].M, gemm_shapes[i].K, gemm_shapes[i].StrideA, ALayout{})));
@@ -168,7 +168,7 @@ int main(int argc, char* argv[])
         }
     }
-    for(int i = 0; i < ck::type_convert<int>(gemm_shapes.size()); i++)
+    for(std::size_t i = 0; i < gemm_shapes.size(); i++)
     {
         a_tensors_device.emplace_back(
             std::make_unique<DeviceMem>(sizeof(ADataType) * a_tensors[i].mDesc.GetElementSpace()));
@@ -213,7 +213,7 @@ int main(int argc, char* argv[])
     if(do_verification)
     {
-        for(int i = 0; i < ck::type_convert<int>(gemm_shapes.size()); i++)
+        for(std::size_t i = 0; i < gemm_shapes.size(); i++)
         {
             c_tensors_device[i]->FromDevice(c_device_tensors[i].mData.data());
             auto ref_gemm = ReferenceGemmInstance{};
......
@@ -698,7 +698,7 @@ struct DeviceConv2dBwdDataXdl_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_N_Ho_Wo_K
         }
         // Gridwise GEMM size
-        for(int i = 0; i < ck::type_convert<int>(arg.a_grid_desc_k0_m_k1_container_.size()); i++)
+        for(std::size_t i = 0; i < arg.a_grid_desc_k0_m_k1_container_.size(); i++)
         {
             if(!GridwiseGemm::CheckValidity(arg.a_grid_desc_k0_m_k1_container_[i],
                                             arg.b_grid_desc_k0_n_k1_container_[i],
......
@@ -1413,7 +1413,7 @@ struct DeviceConvndBwdDataXdl_Input_N_Di_Hi_Wi_C_Weight_K_Z_Y_X_C_Output_N_Do_Ho
         }
         // Gridwise GEMM size
-        for(int i = 0; i < ck::type_convert<int>(arg.a_grid_desc_k0_m_k1_container_.size()); i++)
+        for(std::size_t i = 0; i < arg.a_grid_desc_k0_m_k1_container_.size(); i++)
         {
             if(!GridwiseGemm::CheckValidity(arg.a_grid_desc_k0_m_k1_container_[i],
                                             arg.b_grid_desc_k0_n_k1_container_[i],
......
@@ -301,7 +301,7 @@ struct DeviceGroupedGemmXdl
         gemm_desc_kernel_arg_.reserve(group_count_);
-        for(index_t i = 0; i < ck::type_convert<index_t>(gemm_shapes.size()); i++)
+        for(std::size_t i = 0; i < gemm_shapes.size(); i++)
         {
             const index_t M = gemm_shapes[i].M;
             const index_t N = gemm_shapes[i].N;
......
@@ -316,7 +316,7 @@ float check_error(const Tensor<T>& ref, const Tensor<T>& result)
     constexpr float eps = 1e-10;
-    for(int i = 0; i < ck::type_convert<int>(ref.mData.size()); ++i)
+    for(std::size_t i = 0; i < ref.mData.size(); ++i)
     {
         float ref_v = ck::type_convert<float>(ref.mData[i]);
         float result_v = ck::type_convert<float>(result.mData[i]);
......
@@ -25,7 +25,7 @@ std::size_t HostTensorDescriptor::GetElementSize() const
 std::size_t HostTensorDescriptor::GetElementSpace() const
 {
     std::size_t space = 1;
-    for(int i = 0; i < ck::type_convert<int>(mLens.size()); ++i)
+    for(std::size_t i = 0; i < mLens.size(); ++i)
     {
         space += (mLens[i] - 1) * mStrides[i];
     }
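For context, GetElementSpace computes the index span of a strided layout: one plus the largest reachable linear offset, i.e. 1 + the sum over dimensions of (mLens[d] - 1) * mStrides[d]. A standalone sketch of the same computation (the free function and its names are illustrative, not the ck API):

#include <cstddef>
#include <vector>

// Largest linear offset reachable in the layout, plus one for the element
// sitting at that offset.
std::size_t element_space(const std::vector<std::size_t>& lens,
                          const std::vector<std::size_t>& strides)
{
    std::size_t space = 1;
    for(std::size_t i = 0; i < lens.size(); ++i)
        space += (lens[i] - 1) * strides[i];
    return space;
}

// Example: lens {2, 3} with row-major strides {3, 1} gives
// 1 + (2 - 1) * 3 + (3 - 1) * 1 = 6 elements.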
@@ -68,7 +68,7 @@ void ostream_HostTensorDescriptor(const HostTensorDescriptor& desc, std::ostream
 // FIXME: remove
 void bf16_to_f32_(const Tensor<ck::bhalf_t>& src, Tensor<float>& dst)
 {
-    for(int i = 0; i < ck::type_convert<int>(src.mData.size()); ++i)
+    for(std::size_t i = 0; i < src.mData.size(); ++i)
         dst.mData[i] = ck::type_convert<float>(src.mData[i]);
 }
 #endif
@@ -222,7 +222,7 @@ static bool check_out(const Tensor<T>& ref, const Tensor<T>& result)
 {
     float max_diff = 1e-6;
-    for(int i = 0; i < ck::type_convert<int>(ref.mData.size()); ++i)
+    for(std::size_t i = 0; i < ref.mData.size(); ++i)
     {
         float diff = std::abs(double(ref.mData[i]) - double(result.mData[i]));
         if(max_diff < diff)
......
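The check_out hunk above compares element-wise differences against a tolerance, but the diff cuts off inside the loop body. The following standalone sketch completes the logic under an assumed convention (failing as soon as any element exceeds the initial tolerance); it is an illustration, not the file's actual tail:

#include <cmath>
#include <cstddef>
#include <vector>

static bool check_out(const std::vector<float>& ref, const std::vector<float>& result)
{
    float max_diff = 1e-6;
    for(std::size_t i = 0; i < ref.size(); ++i)
    {
        float diff = std::abs(double(ref[i]) - double(result[i]));
        if(max_diff < diff)
            return false; // assumption: exceeding the tolerance fails the check
    }
    return true;
}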
@@ -50,12 +50,12 @@ void profile_grouped_gemm_impl(int do_verification,
                                int init_method,
                                bool do_log,
                                int nrepeat,
-                               std::vector<int> Ms,
-                               std::vector<int> Ns,
-                               std::vector<int> Ks,
-                               std::vector<int> StrideAs,
-                               std::vector<int> StrideBs,
-                               std::vector<int> StrideCs)
+                               const std::vector<int>& Ms,
+                               const std::vector<int>& Ns,
+                               const std::vector<int>& Ks,
+                               const std::vector<int>& StrideAs,
+                               const std::vector<int>& StrideBs,
+                               const std::vector<int>& StrideCs)
 {
     auto f_host_tensor_descriptor =
         [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
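Besides the counter changes, the hunk above switches the six shape/stride vectors from pass-by-value to pass-by-const-reference, so a call to profile_grouped_gemm_impl no longer copies each vector. A minimal sketch of the difference, with a hypothetical reduced signature:

#include <cstddef>
#include <vector>

// By value, every call would copy the caller's vector:
//     void profile_sketch(std::vector<int> Ms);

// By const reference, the function reads the caller's vector in place,
// and const guarantees it is not modified.
void profile_sketch(const std::vector<int>& Ms)
{
    for(std::size_t i = 0; i < Ms.size(); ++i)
    {
        // use Ms[i] ...
    }
}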
@@ -83,7 +83,7 @@ void profile_grouped_gemm_impl(int do_verification,
     std::vector<Tensor<BDataType>> b_k_n;
     std::vector<Tensor<CDataType>> c_m_n_device_results;
-    for(int i = 0; i < ck::type_convert<int>(Ms.size()); i++)
+    for(std::size_t i = 0; i < Ms.size(); i++)
     {
         a_m_k.push_back(
             Tensor<ADataType>(f_host_tensor_descriptor(Ms[i], Ks[i], StrideAs[i], ALayout{})));
@@ -144,7 +144,7 @@ void profile_grouped_gemm_impl(int do_verification,
     gemm_shapes.reserve(group_count);
-    for(int i = 0; i < ck::type_convert<int>(group_count); i++)
+    for(std::size_t i = 0; i < group_count; i++)
     {
         a_device_buf.emplace_back(
             std::make_unique<DeviceMem>(sizeof(ADataType) * a_m_k[i].mDesc.GetElementSpace()));
@@ -234,7 +234,7 @@ void profile_grouped_gemm_impl(int do_verification,
     float ave_time = invoker_ptr->Run(argument_ptr.get(), nrepeat);
     std::size_t flop = 0, num_btype = 0;
-    for(int i = 0; i < ck::type_convert<int>(gemm_shapes.size()); i++)
+    for(std::size_t i = 0; i < gemm_shapes.size(); i++)
     {
         flop += std::size_t(2) * Ms[i] * Ns[i] * Ks[i];
@@ -258,7 +258,7 @@ void profile_grouped_gemm_impl(int do_verification,
     if(do_verification)
     {
-        for(int i = 0; i < ck::type_convert<int>(gemm_shapes.size()); i++)
+        for(std::size_t i = 0; i < gemm_shapes.size(); i++)
         {
             c_device_buf[i]->FromDevice(c_m_n_device_results[i].mData.data());
......
@@ -45,7 +45,7 @@ static bool check_out(const Tensor<T>& ref, const Tensor<T>& result)
 {
     float max_diff = 1e-6;
-    for(int i = 0; i < ck::type_convert<int>(ref.mData.size()); ++i)
+    for(std::size_t i = 0; i < ref.mData.size(); ++i)
     {
         float diff = std::abs(double(ref.mData[i]) - double(result.mData[i]));
         if(max_diff < diff)
......
@@ -104,7 +104,7 @@ bool TestGroupedGemm(DeviceGroupedGemmPtr_& groupedGemmPtr)
     b_tensors_device.reserve(group_count);
     c_tensors_device.reserve(group_count);
-    for(int i = 0; i < ck::type_convert<int>(gemm_shapes.size()); i++)
+    for(std::size_t i = 0; i < gemm_shapes.size(); i++)
     {
         a_tensors.emplace_back(Tensor<ADataType>(f_host_tensor_descriptor(
             gemm_shapes[i].M, gemm_shapes[i].K, gemm_shapes[i].StrideA, ALayout{})));
@@ -119,7 +119,7 @@ bool TestGroupedGemm(DeviceGroupedGemmPtr_& groupedGemmPtr)
         b_tensors[i].GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5});
     }
-    for(int i = 0; i < ck::type_convert<int>(gemm_shapes.size()); i++)
+    for(std::size_t i = 0; i < gemm_shapes.size(); i++)
     {
         a_tensors_device.emplace_back(
             std::make_unique<DeviceMem>(sizeof(ADataType) * a_tensors[i].mDesc.GetElementSize()));
@@ -147,7 +147,7 @@ bool TestGroupedGemm(DeviceGroupedGemmPtr_& groupedGemmPtr)
     invoker_ptr->Run(argument_ptr.get());
-    for(int i = 0; i < ck::type_convert<int>(gemm_shapes.size()); i++)
+    for(std::size_t i = 0; i < gemm_shapes.size(); i++)
     {
         c_tensors_device[i]->FromDevice(c_device_tensors[i].mData.data());
......
@@ -9,7 +9,7 @@ namespace reduce_util {
 template <typename T>
 void to_f32_vector(const Tensor<T>& src, Tensor<float>& dst)
 {
-    for(int i = 0; i < ck::type_convert<int>(src.mData.size()); ++i)
+    for(std::size_t i = 0; i < src.mData.size(); ++i)
         dst.mData[i] = type_convert<float>(src.mData[i]);
 }
......