Commit e18cd249 authored by aska-0096

example gemm: multiple instances support

time_kernel: increase warm-up/profiling repeats from 1/10 to 50/200
parent e1fa0091
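
The new DeviceGemmFactory groups several DeviceGemm_Xdl_CShuffle configurations into a single std::tuple. The diff does not show how the example's driver consumes that tuple, so the following is only a minimal, self-contained sketch of the pattern in plain C++17 (InstanceA/InstanceB with name()/run() are hypothetical stand-ins, not the CK device API): expand the tuple with std::apply and a fold expression, profile each instance, and keep the fastest.

#include <cstdio>
#include <limits>
#include <string>
#include <tuple>

// Hypothetical stand-ins for device GEMM instances; a real instance would
// build kernel arguments and launch on the GPU inside run().
struct InstanceA { std::string name() const { return "cshuffle_pipeline_v1"; } float run() const { return 1.20f; } };
struct InstanceB { std::string name() const { return "cshuffle_pipeline_v2"; } float run() const { return 0.95f; } };

using GemmFactory = std::tuple<InstanceA, InstanceB>;

template <typename Instance>
void profile_one(const Instance& instance, float& best_ms, std::string& best_name)
{
    const float ms = instance.run(); // placeholder for "launch and time this instance"
    std::printf("%-24s : %.3f ms\n", instance.name().c_str(), ms);
    if(ms < best_ms)
    {
        best_ms   = ms;
        best_name = instance.name();
    }
}

int main()
{
    GemmFactory factory;
    float best_ms = std::numeric_limits<float>::max();
    std::string best_name;

    // Visit every instance in the factory tuple.
    std::apply([&](const auto&... instance) { (profile_one(instance, best_ms, best_name), ...); },
               factory);

    std::printf("fastest instance: %s (%.3f ms)\n", best_name.c_str(), best_ms);
    return 0;
}

In the real example, each DeviceGemm_Xdl_CShuffle specialization in the factory would take the place of InstanceA/InstanceB, so adding a configuration to the tuple is enough to have it profiled.
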
@@ -5,17 +5,20 @@
#include "ck/tensor_operation/gpu/device/impl/device_gemm_xdl.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_gemm_xdl_cshuffle.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_gemm_xdl_waveletmodel_cshuffle.hpp"
using ADataType = ck::half_t;
using BDataType = ck::half_t;
using AccDataType = float;
-using CShuffleDataType = float;
using CShuffleDataType = ck::half_t;
using CDataType = ck::half_t;
using F16 = ck::half_t;
using F32 = float;
using ALayout = Row;
-using BLayout = Col;
using BLayout = Row;
using CLayout = Row;
using AElementOp = PassThrough;
@@ -25,24 +28,228 @@ using CElementOp = PassThrough;
static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;
// clang-format off
-using DeviceGemmInstance0 = ck::tensor_operation::device::DeviceGemmXdl
-// ######| AData| BData| CData| AccData| ALayout| BLayout| CLayout| A| B| C| GEMM| Block| MPer| NPer| K0Per| K1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CThreadTransfer| CThreadTransfer|
-// ######| Type| Type| Type| Type| | | | Elementwise| Elementwise| Elementwise|Spacialization| Size| Block| Block| Block| | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| SrcDstVectorDim| DstScalar|
-// ######| | | | | | | | Operation| Operation| Operation| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | | PerVector|
-// ######| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
-< ADataType, BDataType, CDataType, AccDataType, ALayout, BLayout, CLayout, AElementOp, BElementOp, CElementOp, GemmDefault, 256, 256, 128, 4, 8, 32, 32, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, 7, 1>;
-// clang-format on
using DeviceGemmFactory =
std::tuple<
#if 0
ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle<
Row, Row, Row,
F16, F16, F16, F32, F16,
PassThrough, PassThrough, PassThrough, GemmDefault,
1, 256,
256, 256,
32, 8, 8,
32, 32,
4, 4,
S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>,
2, 8, 8, 1,
S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>,
1, 4, 8, 1,
1, 1, S<1, 32, 1, 8>, 8,
ck::LoopScheduler::Default, ck::PipelineVersion::v1>,
ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle<
Row, Row, Row,
F16, F16, F16, F32, F16,
PassThrough, PassThrough, PassThrough, GemmDefault,
1, 256,
256, 256,
32, 8, 2,
32, 32,
4, 4,
S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>,
2, 8, 8, 1,
S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>,
1, 4, 2, 1,
1, 1, S<1, 32, 1, 8>, 8,
ck::LoopScheduler::Default, ck::PipelineVersion::v1>,
ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle<
Row, Col, Row,
F16, F16, F16, F32, F16,
PassThrough, PassThrough, PassThrough, GemmDefault,
1, 256,
256, 256,
32, 8, 8,
32, 32,
4, 4,
S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>,
2, 8, 8, 1,
S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>,
2, 8, 8, 1,
1, 1, S<1, 32, 1, 8>, 8,
ck::LoopScheduler::Default, ck::PipelineVersion::v1>,
ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle<
Row, Col, Row,
F16, F16, F16, F32, F16,
PassThrough, PassThrough, PassThrough, GemmDefault,
2, 256,
256, 256,
32, 8, 8,
32, 32,
4, 4,
S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>,
2, 8, 8, 1,
S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>,
2, 8, 8, 1,
1, 1, S<1, 32, 1, 8>, 8,
ck::LoopScheduler::Default, ck::PipelineVersion::v1>,
ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle<
Row, Row, Row,
F16, F16, F16, F32, F16,
PassThrough, PassThrough, PassThrough, GemmDefault,
1, 256,
256, 256,
32, 8, 8,
32, 32,
4, 4,
S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>,
2, 8, 8, 1,
S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>,
1, 4, 8, 1,
1, 1, S<1, 32, 1, 8>, 8,
ck::LoopScheduler::Default, ck::PipelineVersion::v2>,
ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle<
Row, Row, Row,
F16, F16, F16, F32, F16,
PassThrough, PassThrough, PassThrough, GemmDefault,
1, 256,
256, 256,
32, 8, 2,
32, 32,
4, 4,
S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>,
2, 8, 8, 1,
S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>,
1, 4, 2, 1,
1, 1, S<1, 32, 1, 8>, 8,
ck::LoopScheduler::Default, ck::PipelineVersion::v2>,
ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle<
Row, Col, Row,
F16, F16, F16, F32, F16,
PassThrough, PassThrough, PassThrough, GemmDefault,
1, 256,
256, 256,
32, 8, 8,
32, 32,
4, 4,
S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>,
2, 8, 8, 1,
S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>,
2, 8, 8, 1,
1, 1, S<1, 32, 1, 8>, 8,
ck::LoopScheduler::Default, ck::PipelineVersion::v2>,
#endif
#if 0
// interwave best:
ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle<
Row, Row, Row,
F16, F16, F16, F32, F16,
PassThrough, PassThrough, PassThrough, GemmDefault,
1, 256,
256, 128,
32, 8, 2,
32, 32,
4, 2,
S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>,
2, 8, 8, 1,
S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>,
1, 4, 2, 0,
1, 1, S<1, 32, 1, 8>, 8,
ck::LoopScheduler::Interwave, ck::PipelineVersion::v1>,
ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle<
Row, Row, Row,
F16, F16, F16, F32, F16,
PassThrough, PassThrough, PassThrough, GemmDefault,
1, 256,
256, 128,
32, 8, 2,
32, 32,
4, 2,
S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>,
2, 8, 8, 1,
S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>,
1, 4, 2, 1,
1, 1, S<1, 32, 1, 8>, 8,
ck::LoopScheduler::Interwave, ck::PipelineVersion::v1>,
ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle<
Row, Row, Row,
F16, F16, F16, F32, F16,
PassThrough, PassThrough, PassThrough, GemmDefault,
1, 256,
256, 128,
32, 8, 4,
32, 32,
4, 2,
S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>,
2, 8, 8, 1,
S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>,
1, 4, 4, 0,
1, 1, S<1, 32, 1, 8>, 8,
ck::LoopScheduler::Interwave, ck::PipelineVersion::v1>,
ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle<
Row, Row, Row,
F16, F16, F16, F32, F16,
PassThrough, PassThrough, PassThrough, GemmDefault,
1, 256,
256, 128,
32, 8, 4,
32, 32,
4, 2,
S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>,
2, 8, 8, 1,
S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>,
1, 4, 4, 1,
1, 1, S<1, 32, 1, 8>, 8,
ck::LoopScheduler::Interwave, ck::PipelineVersion::v1>,
ck::tensor_operation::device::DeviceGemm_Xdl_WaveletModel_CShuffle<
ALayout, BLayout, CLayout,
ADataType, BDataType, AccDataType, F16, CDataType,
AElementOp, BElementOp, CElementOp, GemmDefault,
1, 256, 256, 256, 128,
32, 8, 2,
32, 32,
4, 2,
S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>,
2, 8, 8, 1,
S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>,
1, 4, 2, 0,
1, 1, S<1, 32, 1, 8>, 8>,
#endif
ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle<
Row, Row, Row,
F16, F16, F16, F32, F16,
PassThrough, PassThrough, PassThrough, GemmDefault,
2, 256,
256, 256,
32, 8, 4,
32, 32,
4, 4,
S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>,
2, 8, 8, 1,
S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>,
1, 4, 4, 1,
1, 1, S<1, 32, 1, 8>, 8,
ck::LoopScheduler::Default, ck::PipelineVersion::v1>,
ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle<
Row, Row, Row,
F16, F16, F16, F32, F16,
PassThrough, PassThrough, PassThrough, GemmDefault,
1, 256,
256, 256,
32, 8, 4,
32, 32,
4, 4,
S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>,
2, 8, 8, 1,
S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>,
1, 4, 4, 1,
1, 1, S<1, 32, 1, 8>, 8,
ck::LoopScheduler::Default, ck::PipelineVersion::v1>
>;
// clang-format off
using DeviceGemmInstance1 = ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle
// ######| ALayout| BLayout| CLayout| AData| BData| CData| AccData| CShuffle| A| B| C| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer|
// ######| | | | Type| Type| Type| Type| DataType| Elementwise| Elementwise| Elementwise| Spacialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector|
// ######| | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl|
// ######| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
< ALayout, BLayout, CLayout, ADataType, BDataType, CDataType, AccDataType, CShuffleDataType, AElementOp, BElementOp, CElementOp, GemmDefault, 1, 256, 256, 128, 32, 8, 8, 32, 32, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>;
// clang-format on
-using DeviceGemmInstance = DeviceGemmInstance1;
// using DeviceGemmInstance = DeviceGemmFactory;
using ReferenceGemmInstance = ck::tensor_operation::host::
ReferenceGemm<ADataType, BDataType, CDataType, AccDataType, AElementOp, BElementOp, CElementOp>;
...
@@ -33,10 +33,14 @@ float launch_and_time_kernel(const StreamConfig& stream_config,
printf("Warm up 1 time\n");
#endif
// warm up
-kernel<<<grid_dim, block_dim, lds_byte, stream_config.stream_id_>>>(args...);
-hip_check_error(hipGetLastError());
-const int nrepeat = 10;
const int warmup_repeat = 50;
const int profile_repeat = 200;
for(int i = 0; i < warmup_repeat; ++i)
{
kernel<<<grid_dim, block_dim, lds_byte, stream_config.stream_id_>>>(args...);
hip_check_error(hipGetLastError());
}
#if DEBUG_LOG
printf("Start running %d times...\n", nrepeat);
#endif
@@ -48,7 +52,7 @@ float launch_and_time_kernel(const StreamConfig& stream_config,
hip_check_error(hipDeviceSynchronize());
hip_check_error(hipEventRecord(start, stream_config.stream_id_));
-for(int i = 0; i < nrepeat; ++i)
for(int i = 0; i < profile_repeat; ++i)
{
kernel<<<grid_dim, block_dim, lds_byte, stream_config.stream_id_>>>(args...);
hip_check_error(hipGetLastError());
@@ -61,7 +65,7 @@ float launch_and_time_kernel(const StreamConfig& stream_config,
hip_check_error(hipEventElapsedTime(&total_time, start, stop));
-return total_time / nrepeat;
return total_time / profile_repeat;
}
else
{
...
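
For the time_kernel change, the pattern is: warm up for warmup_repeat launches so one-time costs (code object load, cache warm-up) do not skew the measurement, then average profile_repeat launches between two hipEvent records. A self-contained HIP sketch of that pattern, with a hypothetical dummy_kernel and the new 50/200 repeat counts, might look like:

#include <cstdio>
#include <hip/hip_runtime.h>

__global__ void dummy_kernel(float* out, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n) { out[i] = out[i] * 2.0f + 1.0f; }
}

#define HIP_CHECK(cmd)                                              \
    do                                                              \
    {                                                               \
        hipError_t e = (cmd);                                       \
        if(e != hipSuccess)                                         \
        {                                                           \
            std::printf("HIP error: %s\n", hipGetErrorString(e));   \
            return 1;                                               \
        }                                                           \
    } while(0)

int main()
{
    const int n  = 1 << 20;
    float* d_out = nullptr;
    HIP_CHECK(hipMalloc(reinterpret_cast<void**>(&d_out), n * sizeof(float)));

    const int warmup_repeat  = 50;  // hide one-time launch/setup costs
    const int profile_repeat = 200; // average many launches for a stable number

    dim3 grid((n + 255) / 256), block(256);

    for(int i = 0; i < warmup_repeat; ++i)
        dummy_kernel<<<grid, block>>>(d_out, n);

    hipEvent_t start, stop;
    HIP_CHECK(hipEventCreate(&start));
    HIP_CHECK(hipEventCreate(&stop));

    HIP_CHECK(hipDeviceSynchronize());
    HIP_CHECK(hipEventRecord(start, nullptr));
    for(int i = 0; i < profile_repeat; ++i)
        dummy_kernel<<<grid, block>>>(d_out, n);
    HIP_CHECK(hipEventRecord(stop, nullptr));
    HIP_CHECK(hipEventSynchronize(stop));

    float total_ms = 0.0f;
    HIP_CHECK(hipEventElapsedTime(&total_ms, start, stop));
    std::printf("avg kernel time: %f ms\n", total_ms / profile_repeat);

    HIP_CHECK(hipFree(d_out));
    return 0;
}

The diff applies this structure inside launch_and_time_kernel, replacing the single warm-up launch and nrepeat = 10 with warmup_repeat = 50 and profile_repeat = 200.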