Commit cec9e840 authored by ltqin

change name from c_thread_buffer to in_thread_buffer

parent c0252636
@@ -51,7 +51,7 @@ using DeviceGemmInstance0 = ck::tensor_operation::device::DeviceGemmXdl
 //######| Type| Type| Type| Type| | | | Elementwise| Elementwise| Elementwise|Spacialization| Size| Block| Block| Block| | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| SrcDstVectorDim| DstScalar|
 //######| | | | | | | | Operation| Operation| Operation| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | | PerVector|
 //######| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
-        < ADataType, BDataType, CDataType, AccDataType, ALayout, BLayout, CLayout, AElementOp, BElementOp, CElementOp, GemmDefault, 64, 32, 32, 4, 8, 32, 32, 1, 1, S<2, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<2, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, 7, 1>;
+        < ADataType, BDataType, CDataType, AccDataType, ALayout, BLayout, CLayout, AElementOp, BElementOp, CElementOp, GemmDefault, 64, 16, 16, 4, 8, 16, 16, 1, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, 7, 1>;
 // clang-format on
 using DeviceGemmInstance1 = ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle

@@ -92,8 +92,8 @@ int main(int argc, char* argv[])
     bool time_kernel = false;

     // GEMM shape
-    ck::index_t M = 32;
-    ck::index_t N = 32;
+    ck::index_t M = 16;
+    ck::index_t N = 16;
     ck::index_t K = 64;
     ck::index_t StrideA = K;
...
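The example's problem size shrinks here to match the smaller 16x16 per-XDL tile of the new instance, so the whole problem is a single output tile over K = 64. As a reference point, a naive host-side sketch of what the instance computes (plain C++; row-major layouts and the StrideB/StrideC values are assumptions for illustration, since only StrideA = K appears in the hunk):

#include <cstdio>
#include <vector>

// Naive reference GEMM for the new problem size: C[MxN] = A[MxK] * B[KxN].
// Row-major A, B, C are assumed purely for illustration; the example's
// ALayout/BLayout/CLayout template arguments decide the actual layouts.
int main()
{
    const int M = 16, N = 16, K = 64;
    const int StrideA = K, StrideB = N, StrideC = N; // row-major leading dimensions (assumed)

    std::vector<float> a(M * K, 1.0f), b(K * N, 1.0f), c(M * N, 0.0f);

    for(int m = 0; m < M; ++m)
        for(int n = 0; n < N; ++n)
        {
            float acc = 0.0f; // plays the role of AccDataType
            for(int k = 0; k < K; ++k)
                acc += a[m * StrideA + k] * b[k * StrideB + n];
            c[m * StrideC + n] = acc;
        }

    std::printf("c[0] = %f (expected %d)\n", c[0], K);
    return 0;
}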
@@ -28,7 +28,7 @@ struct BlockwiseSoftmax_V1
     static constexpr index_t MThreadSliceSize = 1;
     static constexpr index_t WaveSize = 64;

-    constexpr static auto c_thread_desc = make_naive_tensor_descriptor_packed(
+    constexpr static auto in_thread_desc = make_naive_tensor_descriptor_packed(
         make_tuple(Number<MRepeat>{}, Number<NRepeat>{}, Number<RegSizePerXdlops>{}));

     using ThreadReduceSrcDesc_M_K = decltype(

@@ -72,10 +72,10 @@ struct BlockwiseSoftmax_V1
         false, // ignored
         detail::AccumulateWithNanIgnore<reduce::Add, AccDataType>>;

     template <typename CThreadBuffer>
-    __host__ __device__ static void Run(CThreadBuffer& c_thread_buf, void* __restrict__ p_shared)
+    __host__ __device__ static void Run(CThreadBuffer& in_thread_buf, void* __restrict__ p_shared)
     {
-        // printf("c_thread_desc: {%d, %d, %d}", c_thread_desc.GetLength(I0).value,
-        // c_thread_desc.GetLength(I1).value, c_thread_desc.GetLength(I2).value);
+        // printf("in_thread_desc: {%d, %d, %d}", in_thread_desc.GetLength(I0).value,
+        // in_thread_desc.GetLength(I1).value, in_thread_desc.GetLength(I2).value);
         auto reduce_work_buf = make_dynamic_buffer<AddressSpaceEnum::Lds>(
             static_cast<AccDataType*>(p_shared), BlockSize);

@@ -89,8 +89,8 @@ struct BlockwiseSoftmax_V1
         // max value for one thread
         static_for<0, NRepeat, 1>{}([&](auto n) {
-            constexpr index_t c_offset = c_thread_desc.CalculateOffset(make_tuple(0, n, 0));
-            auto& xdlops_out = c_thread_buf.GetVectorTypeReference(Number<c_offset>{});
+            constexpr index_t in_offset = in_thread_desc.CalculateOffset(make_tuple(0, n, 0));
+            auto& xdlops_out = in_thread_buf.GetVectorTypeReference(Number<in_offset>{});
             ThreadwiseMaxReduce::Reduce(xdlops_out.template AsType<float>(), max_value_buf);
         });

@@ -115,8 +115,8 @@ struct BlockwiseSoftmax_V1
         });

         // calculate exp for elements
         static_for<0, NRepeat, 1>{}([&](auto n) {
-            constexpr index_t c_offset = c_thread_desc.CalculateOffset(make_tuple(0, n, 0));
-            auto& xdlops_out = c_thread_buf.GetVectorTypeReference(Number<c_offset>{});
+            constexpr index_t in_offset = in_thread_desc.CalculateOffset(make_tuple(0, n, 0));
+            auto& xdlops_out = in_thread_buf.GetVectorTypeReference(Number<in_offset>{});
             static_for<0, RegSizePerXdlops, 1>{}([&](auto iK) {
                 xdlops_out.template AsType<float>()(iK) =

@@ -125,8 +125,8 @@ struct BlockwiseSoftmax_V1
         });

         // sum data
         static_for<0, NRepeat, 1>{}([&](auto n) {
-            constexpr index_t c_offset = c_thread_desc.CalculateOffset(make_tuple(0, n, 0));
-            auto& xdlops_out = c_thread_buf.GetVectorTypeReference(Number<c_offset>{});
+            constexpr index_t in_offset = in_thread_desc.CalculateOffset(make_tuple(0, n, 0));
+            auto& xdlops_out = in_thread_buf.GetVectorTypeReference(Number<in_offset>{});
             ThreadwiseSumReduce::Reduce(xdlops_out.template AsType<float>(), accu_value_buf);
             block_sync_lds();
         });

@@ -135,10 +135,10 @@ struct BlockwiseSoftmax_V1
         // change elements
         static_for<0, NRepeat, 1>{}([&](auto n) {
-            constexpr index_t c_offset = c_thread_desc.CalculateOffset(make_tuple(0, n, 0));
-            auto& xdlops_out = c_thread_buf.GetVectorTypeReference(Number<c_offset>{});
-            static_for<0, c_thread_desc.GetLength(I2), 1>{}([&](auto iK) {
+            constexpr index_t in_offset = in_thread_desc.CalculateOffset(make_tuple(0, n, 0));
+            auto& xdlops_out = in_thread_buf.GetVectorTypeReference(Number<in_offset>{});
+            static_for<0, in_thread_desc.GetLength(I2), 1>{}([&](auto iK) {
                 xdlops_out.template AsType<float>()(iK) =
                     xdlops_out.template AsType<float>()[iK] / accu_value_buf(I0);
             });
...
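Stepping back, the renamed in_thread_buf is read and rewritten in place through four stages: a per-thread max reduction, exponentiation of the elements, a blockwise sum, and division by that sum. That sequence is the standard numerically stable softmax. A minimal scalar sketch of the same four stages for one row (plain C++; the softmax_row helper is hypothetical, and subtracting the max inside exp is an assumption about the line truncated in the @@ -115 hunk):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Scalar version of the four stages in BlockwiseSoftmax_V1::Run, for one row:
// max -> exp(x - max) -> sum -> divide. Subtracting the max keeps exp() from
// overflowing and does not change the result, because the common factor
// exp(-max) cancels in the final division.
void softmax_row(std::vector<float>& x)
{
    const float m = *std::max_element(x.begin(), x.end()); // "max value for one thread"
    float sum = 0.0f;
    for(float& v : x)
    {
        v = std::exp(v - m); // "calculate exp for elements"
        sum += v;            // "sum data"
    }
    for(float& v : x)
        v /= sum;            // "change elements"
}

int main()
{
    std::vector<float> row{1.0f, 2.0f, 3.0f, 4.0f};
    softmax_row(row);
    for(float v : row)
        std::printf("%f ", v); // probabilities summing to 1
    std::printf("\n");
    return 0;
}

In the kernel the max and sum stages are blockwise reductions through LDS (hence the block_sync_lds() calls); this sketch collapses all of that to a single thread to show only the arithmetic.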