Unverified Commit 886d9eeb authored by Illia Silin's avatar Illia Silin Committed by GitHub
Browse files

Add an option to change the number of warm-up cycles and iterations. (#1124)

* allow setting the number of warmup cycles and iterations for profiler

* fix the gemm_splitk and grouped_gemm examples
parent e699dbd8
......@@ -30,7 +30,7 @@ float launch_and_time_kernel(const StreamConfig& stream_config,
block_dim.y,
block_dim.z);
printf("Warm up 1 time\n");
printf("Warm up %d times\n", stream_config.cold_niters_);
#endif
// warm up
for(int i = 0; i < stream_config.cold_niters_; ++i)
......@@ -103,14 +103,17 @@ float launch_and_time_kernel_with_preprocess(const StreamConfig& stream_config,
block_dim.y,
block_dim.z);
printf("Warm up 1 time\n");
printf("Warm up %d times\n", stream_config.cold_niters_);
#endif
// warm up
preprocess();
kernel<<<grid_dim, block_dim, lds_byte, stream_config.stream_id_>>>(args...);
hip_check_error(hipGetLastError());
for(int i = 0; i < stream_config.cold_niters_; ++i)
{
kernel<<<grid_dim, block_dim, lds_byte, stream_config.stream_id_>>>(args...);
hip_check_error(hipGetLastError());
}
const int nrepeat = 10;
const int nrepeat = stream_config.nrepeat_;
#if DEBUG_LOG
printf("Start running %d times...\n", nrepeat);
#endif
......
......@@ -42,7 +42,9 @@ int profile_gemm_impl(int do_verification,
int K,
int StrideA,
int StrideB,
int StrideC)
int StrideC,
int n_warmup,
int n_iter)
{
bool pass = true;
......@@ -165,8 +167,8 @@ int profile_gemm_impl(int do_verification,
std::string op_name = op_ptr->GetTypeString();
float avg_time =
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel, 0, 10, 50});
float avg_time = invoker_ptr->Run(
argument_ptr.get(), StreamConfig{nullptr, time_kernel, 0, n_warmup, n_iter});
std::size_t flop = std::size_t(2) * M * N * K;
......
......@@ -42,7 +42,9 @@ bool profile_gemm_splitk_impl(int do_verification,
int StrideA,
int StrideB,
int StrideC,
int KBatch)
int KBatch,
int n_warmup,
int n_iter)
{
bool pass = true;
......@@ -177,7 +179,8 @@ bool profile_gemm_splitk_impl(int do_verification,
// re-init C to zero before profiling next kernel
c_device_buf.SetZero();
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false});
invoker_ptr->Run(argument_ptr.get(),
StreamConfig{nullptr, false, 0, n_warmup, n_iter});
if(do_verification)
{
......@@ -200,8 +203,8 @@ bool profile_gemm_splitk_impl(int do_verification,
std::string op_name = op_ptr->GetTypeString();
float ave_time =
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
float ave_time = invoker_ptr->Run(
argument_ptr.get(), StreamConfig{nullptr, time_kernel, 0, n_warmup, n_iter});
std::size_t flop = std::size_t(2) * M * N * K;
......
......@@ -42,7 +42,9 @@ bool profile_grouped_gemm_impl(int do_verification,
const std::vector<int>& StrideAs,
const std::vector<int>& StrideBs,
const std::vector<int>& StrideCs,
int kbatch = 1)
int kbatch = 1,
int n_warmup = 1,
int n_iter = 10)
{
bool pass = true;
......@@ -261,7 +263,8 @@ bool profile_grouped_gemm_impl(int do_verification,
for(std::size_t i = 0; i < gemm_descs.size(); i++)
c_device_buf[i]->SetZero();
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false});
invoker_ptr->Run(argument_ptr.get(),
StreamConfig{nullptr, false, 0, n_warmup, n_iter});
if(do_verification)
{
......@@ -307,8 +310,8 @@ bool profile_grouped_gemm_impl(int do_verification,
pass = pass && instance_pass;
}
float ave_time =
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
float ave_time = invoker_ptr->Run(
argument_ptr.get(), StreamConfig{nullptr, time_kernel, 0, n_warmup, n_iter});
if(time_kernel)
{
......
......@@ -42,12 +42,15 @@ static void print_helper_msg()
<< "arg6: print tensor value (0: no; 1: yes)\n"
<< "arg7: time kernel (0: no, 1: yes)\n"
<< "arg8 to 13: M, N, K, StrideA, StrideB, StrideC\n"
<< "optional:\n"
<< "arg14: number of warm-up cycles (default 1)\n"
<< "arg15: number of iterations (default 10)\n"
<< std::endl;
}
int profile_gemm(int argc, char* argv[])
{
if(argc != 14)
if(argc != 14 && argc != 16)
{
print_helper_msg();
exit(1);
......@@ -68,6 +71,13 @@ int profile_gemm(int argc, char* argv[])
const int StrideB = std::stoi(argv[12]);
const int StrideC = std::stoi(argv[13]);
int n_warmup = 1;
int n_iter = 10;
if(argc == 16)
{
n_warmup = std::stoi(argv[14]);
n_iter = std::stoi(argv[15]);
}
using F32 = float;
using F16 = ck::half_t;
#ifdef CK_ENABLE_BF16
......@@ -120,7 +130,9 @@ int profile_gemm(int argc, char* argv[])
K,
(StrideA < 0) ? DefaultStrideA : StrideA,
(StrideB < 0) ? DefaultStrideB : StrideB,
(StrideC < 0) ? DefaultStrideC : StrideC);
(StrideC < 0) ? DefaultStrideC : StrideC,
n_warmup,
n_iter);
return pass ? 0 : 1;
};
......
......@@ -33,7 +33,7 @@ enum struct GemmDataType
int profile_gemm_splitk(int argc, char* argv[])
{
if(argc != 15)
if(argc != 15 && argc != 17)
{
printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
printf("arg2: data type (0: fp32; 1: fp16; 2: bf16; 3: int8; 4: f8@f16; 5: f16@f8; 6: f16, "
......@@ -48,6 +48,9 @@ int profile_gemm_splitk(int argc, char* argv[])
printf("arg7: time kernel (0=no, 1=yes)\n");
printf("arg8 to 13: M, N, K, StrideA, StrideB, StrideC\n");
printf("arg14: split k into multiple batches\n");
printf("optional:\n");
printf("arg15: number of warm-up cycles (default 1)\n");
printf("arg16: number of iterations (default 10)\n");
exit(1);
}
......@@ -67,6 +70,14 @@ int profile_gemm_splitk(int argc, char* argv[])
const int StrideC = std::stoi(argv[13]);
const int KBatch = std::stoi(argv[14]);
int n_warmup = 1;
int n_iter = 10;
if(argc == 17)
{
n_warmup = std::stoi(argv[15]);
n_iter = std::stoi(argv[16]);
}
using F32 = float;
using F16 = ck::half_t;
#if defined CK_ENABLE_FP8
......@@ -117,7 +128,9 @@ int profile_gemm_splitk(int argc, char* argv[])
(StrideA < 0) ? DefaultStrideA : StrideA,
(StrideB < 0) ? DefaultStrideB : StrideB,
(StrideC < 0) ? DefaultStrideC : StrideC,
KBatch);
KBatch,
n_warmup,
n_iter);
return pass ? 0 : 1;
};
......
......@@ -69,7 +69,10 @@ int profile_grouped_gemm(int argc, char* argv[])
<< "arg7: time kernel (0=no, 1=yes)\n"
<< "arg8 to 13: Ms, Ns, Ks, StrideAs, StrideBs, StrideCs (e.g., 256,256 128,128 64,64 "
"64,64 64,64 128,128)\n"
<< "arg15: kbatch value (default 4)\n"
<< "arg15: kbatch value (default 1)\n"
<< "optional:\n"
<< "arg16: number of warm-up cycles (default 1)\n"
<< "arg17: number of iterations (default 10)\n"
<< std::endl;
exit(1);
......@@ -90,6 +93,15 @@ int profile_grouped_gemm(int argc, char* argv[])
const auto StrideBs = argToIntArray(argv[12]);
const auto StrideCs = argToIntArray(argv[13]);
const int kbatch = argc >= 15 ? std::stoi(argv[14]) : 1;
int n_warmup = 1;
int n_iter = 10;
if(argc == 17)
{
n_warmup = std::stoi(argv[15]);
n_iter = std::stoi(argv[16]);
}
#ifdef CK_ENABLE_FP16
if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::MK_KN_MN)
{
......@@ -109,7 +121,9 @@ int profile_grouped_gemm(int argc, char* argv[])
StrideAs,
StrideBs,
StrideCs,
kbatch);
kbatch,
n_warmup,
n_iter);
}
else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::MK_NK_MN)
{
......@@ -129,7 +143,9 @@ int profile_grouped_gemm(int argc, char* argv[])
StrideAs,
StrideBs,
StrideCs,
kbatch);
kbatch,
n_warmup,
n_iter);
}
else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::KM_KN_MN)
{
......@@ -149,7 +165,9 @@ int profile_grouped_gemm(int argc, char* argv[])
StrideAs,
StrideBs,
StrideCs,
kbatch);
kbatch,
n_warmup,
n_iter);
}
else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::KM_NK_MN)
{
......@@ -169,7 +187,9 @@ int profile_grouped_gemm(int argc, char* argv[])
StrideAs,
StrideBs,
StrideCs,
kbatch);
kbatch,
n_warmup,
n_iter);
}
else if(data_type == GemmDataType::F8_F16_F16 && layout == GemmMatrixLayout::MK_KN_MN)
{
......@@ -189,7 +209,9 @@ int profile_grouped_gemm(int argc, char* argv[])
StrideAs,
StrideBs,
StrideCs,
kbatch);
kbatch,
n_warmup,
n_iter);
}
else if(data_type == GemmDataType::F16_F8_F16 && layout == GemmMatrixLayout::MK_KN_MN)
{
......@@ -209,7 +231,9 @@ int profile_grouped_gemm(int argc, char* argv[])
StrideAs,
StrideBs,
StrideCs,
kbatch);
kbatch,
n_warmup,
n_iter);
}
else
{
......
......@@ -60,7 +60,9 @@ class TestGemmSplitK : public testing::Test
const int StrideA,
const int StrideB,
const int StrideC,
int kbatch = 1)
int kbatch = 1,
int n_warmup = 1,
int n_iter = 10)
{
bool pass = ck::profiler::profile_gemm_splitk_impl<ADataType,
BDataType,
......@@ -68,8 +70,19 @@ class TestGemmSplitK : public testing::Test
CDataType,
ALayout,
BLayout,
CLayout>(
verify_, init_method_, log_, bench_, M, N, K, StrideA, StrideB, StrideC, kbatch);
CLayout>(verify_,
init_method_,
log_,
bench_,
M,
N,
K,
StrideA,
StrideB,
StrideC,
kbatch,
n_warmup,
n_iter);
EXPECT_TRUE(pass);
}
};
......
......@@ -63,7 +63,9 @@ class TestGroupedGemm : public testing::TestWithParam<int>
const std::vector<int>& StrideAs,
const std::vector<int>& StrideBs,
const std::vector<int>& StrideCs,
int kbatch = 1)
int kbatch = 1,
int n_warmup = 1,
int n_iter = 10)
{
bool pass = ck::profiler::profile_grouped_gemm_impl<ADataType,
BDataType,
......@@ -71,8 +73,19 @@ class TestGroupedGemm : public testing::TestWithParam<int>
float,
ALayout,
BLayout,
ELayout>(
verify_, init_method_, log_, bench_, Ms, Ns, Ks, StrideAs, StrideBs, StrideCs, kbatch);
ELayout>(verify_,
init_method_,
log_,
bench_,
Ms,
Ns,
Ks,
StrideAs,
StrideBs,
StrideCs,
kbatch,
n_warmup,
n_iter);
EXPECT_TRUE(pass);
}
};
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment