Commit 8cc53111 authored by aska-0096

tempsave, failed to generate ideal code based on iglp

parent 29087570
@@ -7,7 +7,6 @@
 #include "ck/tensor_operation/gpu/device/impl/device_gemm_xdl_cshuffle.hpp"
 #include "ck/tensor_operation/gpu/device/impl/device_gemm_xdl_waveletmodel_cshuffle.hpp"
 using ADataType = ck::half_t;
 using BDataType = ck::half_t;
 using AccDataType = float;
@@ -18,7 +17,7 @@ using F16 = ck::half_t;
 using F32 = float;
 using ALayout = Row;
-using BLayout = Row;
+using BLayout = Col;
 using CLayout = Row;
 using AElementOp = PassThrough;
@@ -213,12 +212,11 @@ using DeviceGemmFactory =
 S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>,
 1, 4, 2, 0,
 1, 1, S<1, 32, 1, 8>, 8>,
-#endif
 ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle<
 Row, Row, Row,
 F16, F16, F16, F32, F16,
 PassThrough, PassThrough, PassThrough, GemmDefault,
-2, 256,
+1, 256,
 256, 256,
 32, 8, 4,
 32, 32,
@@ -228,24 +226,25 @@ using DeviceGemmFactory =
 S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>,
 1, 4, 4, 1,
 1, 1, S<1, 32, 1, 8>, 8,
-ck::LoopScheduler::Default, ck::PipelineVersion::v1>,
+ck::LoopScheduler::Default, ck::PipelineVersion::v1>
+#endif
 ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle<
-Row, Row, Row,
+Row, Col, Row,
 F16, F16, F16, F32, F16,
 PassThrough, PassThrough, PassThrough, GemmDefault,
-1, 256,
+2, 256,
 256, 256,
-32, 8, 4,
+32, 8, 8,
 32, 32,
 4, 4,
 S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>,
-2, 8, 8, 1,
-S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>,
-1, 4, 4, 1,
+2, 8, 8, 0,
+S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>,
+2, 8, 8, 0,
 1, 1, S<1, 32, 1, 8>, 8,
 ck::LoopScheduler::Default, ck::PipelineVersion::v1>
 >;
 // clang-format on
...
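Editor's note: the hunk above replaces a single kernel alias with a `DeviceGemmFactory` tuple of instance types. A minimal, library-independent sketch of that shape (the placeholder structs below are illustrative stand-ins for the `DeviceGemm_Xdl_CShuffle` instantiations in the hunk):

```cpp
#include <tuple>

// Placeholder instance types; the real entries are the DeviceGemm_Xdl_CShuffle
// instantiations listed in the hunk above.
struct GemmInstanceRRR { /* Row/Row/Row tuning config */ };
struct GemmInstanceRCR { /* Row/Col/Row tuning config */ };

using DeviceGemmFactory = std::tuple<GemmInstanceRRR, GemmInstanceRCR>;

static_assert(std::tuple_size_v<DeviceGemmFactory> == 2,
              "run_gemm later visits every entry with ck::static_for");
```

Packing the configurations into a tuple type lets the runner below iterate them at compile time and instantiate each one in turn.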
@@ -7,12 +7,14 @@
 using ADataType = ck::f8_t;
 using BDataType = ck::f8_t;
-using CDataType = ck::half_t;
+using CDataType = ck::f8_t;
 using AccDataType = float;
 using CShuffleDataType = float;
+using F8  = ck::f8_t;
+using F32 = float;
 using ALayout = Row;
-using BLayout = Col;
+using BLayout = Row;
 using CLayout = Row;
 using AElementOp = PassThrough;
@@ -21,15 +23,138 @@ using CElementOp = PassThrough;
 static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;
-// clang-format off
-using DeviceGemmInstance = ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle
-// ######| ALayout| BLayout| CLayout| AData| BData| CData| AccData| CShuffle| A| B| C| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer|
-// ######| | | | Type| Type| Type| Type| DataType| Elementwise| Elementwise| Elementwise| Spacialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector|
-// ######| | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl|
-// ######| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
-    < ALayout, BLayout, CLayout, ADataType, BDataType, CDataType, AccDataType, CShuffleDataType, AElementOp, BElementOp, CElementOp, GemmDefault, 1, 256, 256, 128, 64, 16, 16, 32, 32, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 64, 1, 4>, 8>;
-// clang-format on
+using DeviceGemmFactory = std::tuple<
+#if 1
+ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle<Row,
+Row,
+Row,
+F8,
+F8,
+F8,
+F32,
+F8,
+PassThrough,
+PassThrough,
+PassThrough,
+GemmDefault,
+1,
+256,
+256,
+128,
+64,
+16,
+4,
+32,
+32,
+4,
+2,
+S<4, 64, 1>,
+S<1, 0, 2>,
+S<1, 0, 2>,
+2,
+16,
+16,
+1,
+S<8, 32, 1>,
+S<0, 2, 1>,
+S<0, 2, 1>,
+1,
+4,
+4,
+0,
+1,
+1,
+S<1, 64, 1, 4>,
+16,
+ck::LoopScheduler::Interwave,
+ck::PipelineVersion::v1>,
+ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle<Row,
+Row,
+Row,
+F8,
+F8,
+F8,
+F32,
+F8,
+PassThrough,
+PassThrough,
+PassThrough,
+GemmDefault,
+1,
+256,
+256,
+128,
+64,
+16,
+16,
+32,
+32,
+4,
+2,
+S<4, 64, 1>,
+S<1, 0, 2>,
+S<1, 0, 2>,
+2,
+16,
+16,
+1,
+S<4, 64, 1>,
+S<0, 2, 1>,
+S<0, 2, 1>,
+1,
+2,
+16,
+1,
+1,
+1,
+S<1, 64, 1, 4>,
+16,
+ck::LoopScheduler::Interwave,
+ck::PipelineVersion::v1>,
+#endif
+ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle<Row,
+Row,
+Row,
+F8,
+F8,
+F8,
+F32,
+F8,
+PassThrough,
+PassThrough,
+PassThrough,
+GemmDefault,
+1,
+256,
+256,
+128,
+64,
+16,
+8,
+32,
+32,
+4,
+2,
+S<4, 64, 1>,
+S<1, 0, 2>,
+S<1, 0, 2>,
+2,
+16,
+16,
+1,
+S<8, 32, 1>,
+S<0, 2, 1>,
+S<0, 2, 1>,
+1,
+4,
+8,
+1,
+1,
+1,
+S<1, 64, 1, 4>,
+16,
+ck::LoopScheduler::Interwave,
+ck::PipelineVersion::v1>>;
 using ReferenceGemmInstance = ck::tensor_operation::host::
     ReferenceGemm<ADataType, BDataType, CDataType, AccDataType, AElementOp, BElementOp, CElementOp>;
...
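Editor's note: `ReferenceGemmInstance` is the host-side oracle used for verification. A self-contained sketch of the computation a reference GEMM of this form performs, with the higher-precision accumulator (`AccDataType` = `float`) that matters for the f8 instances above; the element-wise operations are omitted here:

```cpp
#include <cstddef>
#include <vector>

// Naive host GEMM: C[m][n] = sum_k A[m][k] * B[k][n], row-major, accumulated
// in AccDataType (float here), then narrowed to CDataType on store.
template <typename ADataType, typename BDataType, typename CDataType, typename AccDataType>
void reference_gemm(const std::vector<ADataType>& a, // M x K
                    const std::vector<BDataType>& b, // K x N
                    std::vector<CDataType>& c,       // M x N
                    std::size_t M, std::size_t N, std::size_t K)
{
    for(std::size_t m = 0; m < M; ++m)
        for(std::size_t n = 0; n < N; ++n)
        {
            AccDataType acc = 0; // accumulate in float even for f8 inputs
            for(std::size_t k = 0; k < K; ++k)
                acc += static_cast<AccDataType>(a[m * K + k]) *
                       static_cast<AccDataType>(b[k * N + n]);
            c[m * N + n] = static_cast<CDataType>(acc);
        }
}
```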
@@ -3,8 +3,6 @@
 #pragma once
-#include "ck/tensor_operation/gpu/device/device_gemm_streamk.hpp"
 template <typename ProblemType>
 bool run_gemm(const ProblemType& problem_size, const ExecutionConfig& config)
 {
@@ -68,6 +66,26 @@ bool run_gemm(const ProblemType& problem_size, const ExecutionConfig& config)
     ck::utils::FillUniformDistributionIntegerValue<ADataType>{-5.f, 5.f}(a_m_k);
     ck::utils::FillUniformDistributionIntegerValue<BDataType>{-5.f, 5.f}(b_k_n);
     break;
+case 2:
+    ck::utils::FillUniformDistribution<ADataType>{-5.f, 5.f}(a_m_k);
+    ck::utils::FillUniformDistribution<BDataType>{-5.f, 5.f}(b_k_n);
+    break;
+case 3:
+    ck::utils::FillUniformDistribution<ADataType>{-1.f, 1.f}(a_m_k);
+    ck::utils::FillUniformDistribution<BDataType>{-1.f, 1.f}(b_k_n);
+    break;
+case 4:
+    ck::utils::FillUniformDistribution<ADataType>{0.0f, 0.1f}(a_m_k);
+    ck::utils::FillUniformDistribution<BDataType>{-0.01f, 0.01f}(b_k_n);
+    break;
+case 5:
+    ck::utils::FillConstant<ADataType>{static_cast<ADataType>(1.f)}(a_m_k);
+    ck::utils::FillUniformDistributionIntegerValue<BDataType>{-5.f, 5.f}(b_k_n);
+    break;
+case 6:
+    ck::utils::FillUniformDistributionIntegerValue<ADataType>{-5.f, 5.f}(a_m_k);
+    ck::utils::FillConstant<BDataType>{static_cast<BDataType>(1.f)}(b_k_n);
+    break;
 default:
     ck::utils::FillUniformDistribution<ADataType>{-1.f, 1.f}(a_m_k);
     ck::utils::FillUniformDistribution<BDataType>{-1.f, 1.f}(b_k_n);
@@ -99,62 +117,24 @@ bool run_gemm(const ProblemType& problem_size, const ExecutionConfig& config)
 a_m_k_device_buf.ToDevice(a_m_k.mData.data());
 b_k_n_device_buf.ToDevice(b_k_n.mData.data());
 #endif
-DeviceMem workspace;
 auto a_element_op = AElementOp{};
 auto b_element_op = BElementOp{};
 auto c_element_op = CElementOp{};
-using BaseStreamK = ck::tensor_operation::device::DeviceGemmStreamK<ALayout,
-                                                                    BLayout,
-                                                                    CLayout,
-                                                                    ADataType,
-                                                                    BDataType,
-                                                                    CDataType,
-                                                                    AElementOp,
-                                                                    BElementOp,
-                                                                    CElementOp>;
-// do GEMM
-auto gemm = DeviceGemmInstance{};
-auto invoker = gemm.MakeInvoker();
-float ave_time = 0;
-if constexpr(std::is_same<ProblemType, ProblemSize>::value &&
-             !std::is_base_of<BaseStreamK, DeviceGemmInstance>::value)
-{
-    auto argument = gemm.MakeArgument(
-#ifdef BUILD_INT4_EXAMPLE
-        static_cast<KernelADataType*>(a_m_k_device_buf.GetDeviceBuffer()),
-        static_cast<KernelBDataType*>(b_k_n_device_buf.GetDeviceBuffer()),
-        static_cast<KernelCDataType*>(c_m_n_device_buf.GetDeviceBuffer()),
-#else
-        static_cast<ADataType*>(a_m_k_device_buf.GetDeviceBuffer()),
-        static_cast<BDataType*>(b_k_n_device_buf.GetDeviceBuffer()),
-        static_cast<CDataType*>(c_m_n_device_buf.GetDeviceBuffer()),
-#endif
-        M,
-        N,
-        K,
-        StrideA,
-        StrideB,
-        StrideC,
-        a_element_op,
-        b_element_op,
-        c_element_op);
-    if(!gemm.IsSupportedArgument(argument))
-    {
-        std::cerr << gemm.GetTypeString() << " does not support this problem" << std::endl;
-        return true;
-    }
-    ave_time = invoker.Run(argument, StreamConfig{nullptr, config.time_kernel});
-}
-else if constexpr(std::is_same<ProblemType, ProblemSizeStreamK>::value &&
-                  std::is_base_of<BaseStreamK, DeviceGemmInstance>::value)
-{
+float best_perf = .0;
+float best_time = .0;
+std::string best_kernel = "";
+ck::static_for<0, std::tuple_size_v<DeviceGemmFactory>, 1>{}([&](auto i) -> void {
+    const auto device_gemm_instance = std::get<i>(DeviceGemmFactory{});
+    using DeviceGemmInstance = ck::remove_cvref_t<decltype(device_gemm_instance)>;
+    // do GEMM
+    auto gemm = DeviceGemmInstance{};
+    auto invoker = gemm.MakeInvoker();
+    float ave_time = 0;
     auto argument = gemm.MakeArgument(
 #ifdef BUILD_INT4_EXAMPLE
     static_cast<KernelADataType*>(a_m_k_device_buf.GetDeviceBuffer()),
@@ -173,51 +153,47 @@ bool run_gemm(const ProblemType& problem_size, const ExecutionConfig& config)
     StrideC,
     a_element_op,
     b_element_op,
-    c_element_op,
-    problem_size.NumSKBlocks);
+    c_element_op);
+#if 0
     if(!gemm.IsSupportedArgument(argument))
     {
         std::cerr << gemm.GetTypeString() << " does not support this problem" << std::endl;
         return true;
     }
-    std::size_t workspace_size = gemm.GetWorkSpaceSize(&argument);
-    if(workspace_size != 0)
-    {
-        workspace.Realloc(workspace_size);
-        gemm.SetWorkSpacePointer(&argument, workspace.GetDeviceBuffer());
-    }
-    ave_time = invoker.Run(argument, StreamConfig{nullptr, config.time_kernel});
-#if 0
-    // TODO!!!!!
-    if(workspace_size != 0){
-        float * ws_ptr = reinterpret_cast<float*>(malloc(workspace_size));
-        size_t ws_dwords = workspace_size / sizeof(float);
-        workspace.FromDevice(ws_ptr);
-        for(size_t i = 0; i < ws_dwords; i++) {
-            uint32_t rere = reinterpret_cast<uint32_t*>(ws_ptr)[i];
-            printf("%4lu : %f(0x%08x)\n", i, ws_ptr[i], rere);
-        }
-        free(ws_ptr);
-    }
 #endif
-}
-std::size_t flop = 2_uz * M * N * K;
-std::size_t num_btype =
-    sizeof(ADataType) * M * K + sizeof(BDataType) * K * N + sizeof(CDataType) * M * N;
-float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
-float gb_per_sec = num_btype / 1.E6 / ave_time;
-std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s, "
-          << gemm.GetTypeString() << std::endl;
+    ave_time = invoker.Run(argument, StreamConfig{nullptr, config.time_kernel});
+    std::size_t flop = 2_uz * M * N * K;
+    std::size_t num_btype =
+        sizeof(ADataType) * M * K + sizeof(BDataType) * K * N + sizeof(CDataType) * M * N;
+    float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
+    if(tflops > best_perf)
+    {
+        best_perf = tflops;
+        best_time = ave_time;
+        best_kernel = gemm.GetTypeString();
+    }
+    float gb_per_sec = num_btype / 1.E6 / ave_time;
+    std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
+              << " GB/s, " << gemm.GetTypeString() << std::endl;
+});
+std::cout << "---------------------------------------------------------------------------------"
+             "-----------"
+          << std::endl;
+std::cout << "Problem Size: M: " << M << ", N: " << N << ", K: " << K << std::endl;
+std::cout << "---------------------------------------------------------------------------------"
+             "-----------"
+          << std::endl;
+std::cout << "Best kernel: " << best_kernel << " , " << best_perf << " TFlops , " << best_time
+          << " ms" << std::endl;
+std::cout << "---------------------------------------------------------------------------------"
+             "-----------"
+          << std::endl;
 if(config.do_verification)
 {
...
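Editor's note: the reworked `run_gemm` above visits every `DeviceGemmFactory` entry with `ck::static_for`, times it, and reports the best. A standard-C++ sketch of that selection loop, using a fold expression in place of `ck::static_for`; the kernel types, timings, and problem size are placeholders:

```cpp
#include <iostream>
#include <string>
#include <tuple>

// Placeholder "instances" that pretend to run and return a time in ms.
struct KernelA { static float run_ms() { return 1.9f; } static std::string name() { return "A"; } };
struct KernelB { static float run_ms() { return 1.3f; } static std::string name() { return "B"; } };
using Factory = std::tuple<KernelA, KernelB>;

int main()
{
    const double M = 3840, N = 4096, K = 4096;
    const double flop = 2.0 * M * N * K;
    double best_tflops = 0.0;
    std::string best_kernel;
    std::apply(
        [&](auto... instance) {
            auto try_one = [&](auto kernel) {
                const double ms     = kernel.run_ms();  // measured kernel time (ms)
                const double tflops = flop / 1e9 / ms;  // same formula as run_gemm
                if(tflops > best_tflops) { best_tflops = tflops; best_kernel = kernel.name(); }
            };
            (try_one(instance), ...); // visit each tuple entry in order
        },
        Factory{});
    std::cout << "Best kernel: " << best_kernel << " , " << best_tflops << " TFlops" << std::endl;
}
```

Because the tuple is visited at compile time, each instance is a distinct type and the compiler instantiates one fully specialized benchmark body per entry, just as the `ck::static_for` version does.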
@@ -44,9 +44,9 @@ int run_layernorm2d_fwd_example()
 {0, 1},
 std::vector<ck::index_t>{y.mDesc.GetStrides().begin(), y.mDesc.GetStrides().end()},
 std::vector<ck::index_t>{save_mean.mDesc.GetStrides().begin(),
                          save_mean.mDesc.GetStrides().end()},
 std::vector<ck::index_t>{save_mean.mDesc.GetStrides().begin(),
                          save_mean.mDesc.GetStrides().end()},
 {1},
 1e-4,
 x_dev.GetDeviceBuffer(),
...
@@ -65,9 +65,9 @@ int run_groupnorm_fwd_example(int argc, char* argv[])
 {0, 0, 0, C, 1},
 std::vector<ck::index_t>{y.mDesc.GetStrides().begin(), y.mDesc.GetStrides().end()},
 std::vector<ck::index_t>{save_mean.mDesc.GetStrides().begin(),
                          save_mean.mDesc.GetStrides().end()},
 std::vector<ck::index_t>{save_mean.mDesc.GetStrides().begin(),
                          save_mean.mDesc.GetStrides().end()},
 {1, 2, 4}, // reduction dimension: [H, W, C]
 1e-6,
 x_dev.GetDeviceBuffer(),
...
@@ -46,9 +46,9 @@ int run_layernorm4d_fwd_example()
 {0, W * C, C, 1},
 std::vector<ck::index_t>{y.mDesc.GetStrides().begin(), y.mDesc.GetStrides().end()},
 std::vector<ck::index_t>{save_mean.mDesc.GetStrides().begin(),
                          save_mean.mDesc.GetStrides().end()},
 std::vector<ck::index_t>{save_mean.mDesc.GetStrides().begin(),
                          save_mean.mDesc.GetStrides().end()},
 {1, 2, 3},
 1e-4,
 x_dev.GetDeviceBuffer(),
...
@@ -22,7 +22,7 @@
 #ifdef CK_USE_LAUNCH_BOUNDS
 // for most kernels
 #define CK_MAX_THREAD_PER_BLOCK 256
-#define CK_MIN_BLOCK_PER_CU 2
+#define CK_MIN_BLOCK_PER_CU 1
 // for wavelet GEMM kernel
 #define CK_WAVELET_MAX_THREAD_PER_BLOCK 512
...
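Editor's note: `CK_MAX_THREAD_PER_BLOCK` and `CK_MIN_BLOCK_PER_CU` feed the kernels' `__launch_bounds__` annotation; dropping the minimum blocks per CU from 2 to 1 relaxes the occupancy floor, letting the compiler keep more registers per thread. A minimal sketch of the pattern, with an illustrative kernel:

```cpp
#include <hip/hip_runtime.h>

#define CK_MAX_THREAD_PER_BLOCK 256
#define CK_MIN_BLOCK_PER_CU 1

// At most 256 threads per block; target at least 1 resident block per CU.
__global__ __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU) void
scale_kernel(float* p, float s)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    p[i] *= s;
}
```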
@@ -51,7 +51,7 @@ float launch_and_time_kernel(const StreamConfig& stream_config,
 hip_check_error(hipDeviceSynchronize());
 hip_check_error(hipEventRecord(start, stream_config.stream_id_));
-for(int i = 0; i < profile_repeat; ++i)
+for(int i = 0; i < nrepeat; ++i)
 {
     kernel<<<grid_dim, block_dim, lds_byte, stream_config.stream_id_>>>(args...);
     hip_check_error(hipGetLastError());
@@ -64,7 +64,7 @@ float launch_and_time_kernel(const StreamConfig& stream_config,
 hip_check_error(hipEventElapsedTime(&total_time, start, stop));
-return total_time / profile_repeat;
+return total_time / nrepeat;
 }
 else
 {
...
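Editor's note: with the rename, the divisor matches the loop bound again: one start/stop event pair brackets `nrepeat` launches and the elapsed time is averaged. A reduced sketch of that event-based timing pattern, assuming a HIP toolchain, with error checking trimmed:

```cpp
#include <hip/hip_runtime.h>

// Time an average launch of `kernel` over nrepeat back-to-back launches.
template <typename Kernel, typename... Args>
float time_kernel(Kernel kernel, dim3 grid, dim3 block, int nrepeat, Args... args)
{
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipDeviceSynchronize();               // quiesce the device before timing
    hipEventRecord(start, nullptr);
    for(int i = 0; i < nrepeat; ++i)      // same bound as the divisor below
        kernel<<<grid, block, 0, nullptr>>>(args...);
    hipEventRecord(stop, nullptr);
    hipEventSynchronize(stop);
    float total_ms = 0.f;
    hipEventElapsedTime(&total_ms, start, stop);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    return total_ms / nrepeat;            // average per-launch time in ms
}
```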
@@ -12,7 +12,7 @@
 #include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
 #include "ck/tensor_operation/gpu/device/device_gemm.hpp"
 #include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
-#include "ck/tensor_operation/gpu/grid/gridwise_gemm_xdl_cshuffle_v1.hpp"
+#include "ck/tensor_operation/gpu/grid/gridwise_gemm_xdl_cshuffle_v1_doublelds.hpp"
 #include "ck/host_utility/device_prop.hpp"
 #include "ck/host_utility/kernel_launch.hpp"
@@ -155,10 +155,15 @@ struct DeviceGemm_Xdl_CShuffle : public DeviceGemm<ALayout,
 index_t gdx, gdy, gdz;
 std::tie(gdx, gdy, gdz) = GridwiseGemm::CalculateGridSize(arg.M, arg.N);
-const auto K = GridwiseGemm::CalculateAK0(arg.K) * AK1;
 float ave_time = 0;
+#if 1
+const auto kernel = kernel_gemm_xdl_cshuffle_v1<GridwiseGemm, true>;
+ave_time = launch_and_time_kernel(
+    stream_config, kernel, dim3(gdx, gdy, gdz), dim3(BlockSize), 0, arg);
+#endif
+#if 0
+const auto K = GridwiseGemm::CalculateAK0(arg.K) * AK1;
 if(GridwiseGemm::CalculateHasMainKBlockLoop(K))
 {
     const auto kernel = kernel_gemm_xdl_cshuffle_v1<GridwiseGemm, true>;
@@ -173,7 +178,7 @@ struct DeviceGemm_Xdl_CShuffle : public DeviceGemm<ALayout,
     ave_time = launch_and_time_kernel(
         stream_config, kernel, dim3(gdx, gdy, gdz), dim3(BlockSize), 0, arg);
 }
+#endif
 return ave_time;
 }
...
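Editor's note: the `#if 1` block above unconditionally launches the `HasMainKBlockLoop = true` specialization and skips the `CalculateHasMainKBlockLoop(K)` dispatch, which is only safe while the tested K is large enough to have a main loop. A simplified sketch of the dispatch being bypassed; the tile-count predicate here is an assumption, the real check is `GridwiseGemm::CalculateHasMainKBlockLoop`:

```cpp
#include <cstdio>

// Stand-in for kernel_gemm_xdl_cshuffle_v1<GridwiseGemm, HasMainKBlockLoop>.
template <bool HasMainKBlockLoop>
void launch_gemm()
{
    std::printf("launch kernel<HasMainKBlockLoop=%d>\n", HasMainKBlockLoop);
}

void dispatch(int K, int KPerBlock)
{
    // A "main" K loop exists when more than one K tile must be iterated;
    // otherwise the prologue/epilogue path handles the whole reduction.
    if(K / KPerBlock > 1)
        launch_gemm<true>();
    else
        launch_gemm<false>();
}
```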
-#find . -name deps -prune -o -name build -prune -o -iname '*.h' -o -iname '*.hpp' -o -iname '*.cpp' -o -iname '*.h.in' -o -iname '*.hpp.in' -o -iname '*.cpp.in' -o -iname '*.cl' -o -iname '*.cuh' -o -iname '*.cu' -o -iname '*.inc' | xargs -n 1 -P 16 -I{} -t sh -c 'clang-format-12 -i -style=file {}'
+find . -name deps -prune -o -name build -prune -o -iname '*.h' -o -iname '*.hpp' -o -iname '*.cpp' -o -iname '*.h.in' -o -iname '*.hpp.in' -o -iname '*.cpp.in' -o -iname '*.cl' -o -iname '*.cuh' -o -iname '*.cu' -o -iname '*.inc' | xargs -n 1 -P 16 -I{} -t sh -c 'clang-format-12 -i -style=file {}'
-git status --porcelain | awk '$1 != "D" && (match($2, "\\.cpp|hpp|inc")) {print $2}' | xargs -n 1 -P 16 -I{} -t sh -c 'clang-format-12 -i -style=file {}'
+# git status --porcelain | awk '$1 != "D" && (match($2, "\\.cpp|hpp|inc")) {print $2}' | xargs -n 1 -P 16 -I{} -t sh -c 'clang-format-12 -i -style=file {}'
@@ -14,7 +14,6 @@ TYPED_TEST(TestTranspose, Test1)
 this->Run();
 }
-
 TYPED_TEST(TestTranpose, Test2)
 {
 std::vector<int> Ms{127, 255, 312, 799, 1573};
@@ -27,4 +26,3 @@ TYPED_TEST(TestTranpose, Test2)
 this->Run();
 }
-