Commit 471830a0 authored by Chao Liu

tidy up

parent 1bd880a6
@@ -381,7 +381,8 @@ __host__ __device__ void print_ConstantTensorDescriptor(TDesc, const char* s)
     constexpr auto I7 = Number<7>{};
     constexpr auto I8 = Number<8>{};

-    printf("%s dim %u, lengths {%u %u %u %u %u %u %u %u %u}, strides {%u %u %u %u %u %u %u %u %u}\n",
+    printf("%s dim %u, lengths {%u %u %u %u %u %u %u %u %u}, strides {%u %u %u %u %u %u %u %u "
+           "%u}\n",
            s,
            desc.GetDimension(),
            desc.GetLength(I0),
@@ -416,7 +417,8 @@ __host__ __device__ void print_ConstantTensorDescriptor(TDesc, const char* s)
     constexpr auto I8 = Number<8>{};
     constexpr auto I9 = Number<9>{};

-    printf("%s dim %u, lengths {%u %u %u %u %u %u %u %u %u %u}, strides {%u %u %u %u %u %u %u %u %u %u}\n",
+    printf("%s dim %u, lengths {%u %u %u %u %u %u %u %u %u %u}, strides {%u %u %u %u %u %u %u "
+           "%u %u %u}\n",
            s,
            desc.GetDimension(),
            desc.GetLength(I0),
...
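Both printf hunks above are pure reflow: the long format string is split at the column limit into adjacent string literals, which the compiler concatenates back into a single string. A minimal standalone sketch (not the kernel code) showing that the wrapped spelling is identical to the one-line version:

#include <cstdio>

int main()
{
    // Adjacent string literals are concatenated at compile time, so this
    // wrapped format string is byte-for-byte the same as the single-line
    // "lengths {%u %u %u}, strides {%u %u %u}\n" it stands in for.
    std::printf("lengths {%u %u %u}, strides {%u %u "
                "%u}\n",
                2u, 3u, 4u, 12u, 4u, 1u);
    return 0;
}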
@@ -577,8 +577,8 @@ struct Blockwise4dTensorCopy3
                             iloop_d3 * thread_per_d3 * DataPerRead);

                     *(reinterpret_cast<vector_t*>(&p_dst[dst_offset + mDstMyThreadOffset])) =
-                        *(reinterpret_cast<const vector_t*>(&p_src[src_offset +
-                                                                   mSrcMyThreadOffset]));
+                        *(reinterpret_cast<const vector_t*>(
+                            &p_src[src_offset + mSrcMyThreadOffset]));
                 }
             }
         }
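The statement being rewrapped is the vector-copy idiom at the heart of Blockwise4dTensorCopy3: DataPerRead consecutive Floats are moved with a single vector load and store. A minimal device-side sketch of the idiom, assuming Float = float and DataPerRead = 4 (the flat offsets stand in for the descriptor-derived ones):

// Clang/HIP-style vector type; CUDA code would typically use float4 instead.
using vector_t = float __attribute__((ext_vector_type(4)));

__device__ void copy_four_floats(const float* __restrict__ p_src,
                                 float* __restrict__ p_dst,
                                 int src_offset,
                                 int dst_offset)
{
    // One 16-byte load and one 16-byte store instead of four scalar pairs.
    // Valid only when both addresses are 16-byte aligned, which is what the
    // *_aligned descriptor helpers in this codebase are there to arrange.
    *(reinterpret_cast<vector_t*>(&p_dst[dst_offset])) =
        *(reinterpret_cast<const vector_t*>(&p_src[src_offset]));
}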
@@ -612,7 +612,8 @@ struct Blockwise4dTensorCopy3
        return DataPerRead * nloop_d0 * nloop_d1 * nloop_d2 * nloop_d3;
    }

-    __device__ void RunLoadRegisterClipboard(const Float* __restrict__ p_src, Float* __restrict__ p_clipboard) const
+    __device__ void RunLoadRegisterClipboard(const Float* __restrict__ p_src,
+                                             Float* __restrict__ p_clipboard) const
    {
        constexpr auto I0 = Number<0>{};
        constexpr auto I1 = Number<1>{};
@@ -670,8 +671,8 @@ struct Blockwise4dTensorCopy3
                             iloop_d3 * thread_per_d3 * DataPerRead);

                     *(reinterpret_cast<vector_t*>(&p_clipboard[dst_offset])) =
-                        *(reinterpret_cast<const vector_t*>(&p_src[src_offset +
-                                                                   mSrcMyThreadOffset]));
+                        *(reinterpret_cast<const vector_t*>(
+                            &p_src[src_offset + mSrcMyThreadOffset]));
                 }
             }
         }
...
@@ -43,7 +43,8 @@ struct GridwiseConvolutionImplicitGemm_v1_chwn_cyxk_khwn
                    Float* const __restrict__ p_out_global) const
    {
        // be careful of this assertion
-        static_assert(NPerThread <= NPerBlock && NPerBlock % NPerThread == 0,
-                      "wrong! should satisfy: NPerThread <= NPerBlock && NPerBlock % NPerThread == 0");
+        static_assert(
+            NPerThread <= NPerBlock && NPerBlock % NPerThread == 0,
+            "wrong! should satisfy: NPerThread <= NPerBlock && NPerBlock % NPerThread == 0");

        constexpr auto I0 = Number<0>{};
@@ -69,7 +70,8 @@ struct GridwiseConvolutionImplicitGemm_v1_chwn_cyxk_khwn
        constexpr index_t WiPerBlock = WoPerBlock + X - 1;

        // divide block work: [K, Ho, Wo, N]
-        static_assert(N % NPerBlock == 0 && K % KPerBlock == 0 && C % CPerBlock == 0 && Ho % HoPerBlock == 0 && Wo % WoPerBlock == 0,
+        static_assert(N % NPerBlock == 0 && K % KPerBlock == 0 && C % CPerBlock == 0 &&
+                          Ho % HoPerBlock == 0 && Wo % WoPerBlock == 0,
                      "wrong! cannot evenly divide work for workgroup ");

        constexpr index_t KBlockWork = (K + KPerBlock - 1) / KPerBlock;
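The reflowed assertion guards the tiling that follows: [K, Ho, Wo, N] must split exactly into KPerBlock x HoPerBlock x WoPerBlock x NPerBlock tiles, one per workgroup. A standalone sketch (example sizes, not this kernel's exact decode) of turning a flat block id into tile coordinates:

#include <cstdio>

using index_t = unsigned int;

int main()
{
    constexpr index_t K = 128, Ho = 28, Wo = 28, N = 64;
    constexpr index_t KPerBlock = 64, HoPerBlock = 4, WoPerBlock = 28, NPerBlock = 16;

    static_assert(N % NPerBlock == 0 && K % KPerBlock == 0 && Ho % HoPerBlock == 0 &&
                      Wo % WoPerBlock == 0,
                  "wrong! cannot evenly divide work for workgroup");

    constexpr index_t KBlockWork = K / KPerBlock;
    constexpr index_t HBlockWork = Ho / HoPerBlock;
    constexpr index_t WBlockWork = Wo / WoPerBlock;
    constexpr index_t NBlockWork = N / NPerBlock;

    const index_t block_id = 13; // stand-in for the HIP/CUDA workgroup index

    // mixed-radix decode, fastest-varying dimension last
    index_t tmp = block_id;
    const index_t n_work = tmp % NBlockWork; tmp /= NBlockWork;
    const index_t w_work = tmp % WBlockWork; tmp /= WBlockWork;
    const index_t h_work = tmp % HBlockWork; tmp /= HBlockWork;
    const index_t k_work = tmp % KBlockWork;

    std::printf("block %u -> tile [k %u, ho %u, wo %u, n %u]\n",
                block_id, k_work, h_work, w_work, n_work);
}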
@@ -101,8 +103,7 @@ struct GridwiseConvolutionImplicitGemm_v1_chwn_cyxk_khwn
            mod_conv::max(index_t(4), InBlockCopyDataPerRead, WeiBlockCopyDataPerRead);

        constexpr auto in_chwn_block_desc = make_ConstantTensorDescriptor_aligned(
-            Sequence<CPerBlock, HiPerBlock, WiPerBlock, NPerBlock>{},
-            Number<max_align>{});
+            Sequence<CPerBlock, HiPerBlock, WiPerBlock, NPerBlock>{}, Number<max_align>{});

        constexpr auto wei_ek_block_desc = make_ConstantTensorDescriptor_aligned(
            Sequence<CPerBlock * Y * X, KPerBlock>{}, Number<max_align>{});
@@ -280,17 +281,8 @@ struct GridwiseConvolutionImplicitGemm_v1_chwn_cyxk_khwn
        constexpr index_t K2 = GemmMPerThreadSubC;
        constexpr index_t K1 = KPerBlock / KPerThread;

-        constexpr auto out_10d_global_desc =
-            make_ConstantTensorDescriptor(Sequence<K / (K1 * K2),
-                                                   K1,
-                                                   K2,
-                                                   Ho,
-                                                   Wo / (W1 * W2),
-                                                   W1,
-                                                   W2,
-                                                   N / (N1 * N2),
-                                                   N1,
-                                                   N2>{});
+        constexpr auto out_10d_global_desc = make_ConstantTensorDescriptor(
+            Sequence<K / (K1 * K2), K1, K2, Ho, Wo / (W1 * W2), W1, W2, N / (N1 * N2), N1, N2>{});

        constexpr auto out_10d_thread_desc = make_ConstantTensorDescriptor(
            Sequence<KPerThread / K2, 1, K2, HoPerThread, 1, W1, 1, 1, 1, N2>{});
...
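The collapsed Sequence is a 10d view of the 4d [K, Ho, Wo, N] output: K is split into (K / (K1 * K2), K1, K2), Wo into (Wo / (W1 * W2), W1, W2), and N into (N / (N1 * N2), N1, N2), so per-thread sub-tiles become dimensions of their own. A standalone sketch of the coordinate arithmetic such a view encodes (example sizes, not this kernel's K1/K2 values):

#include <cstdio>

using index_t = unsigned int;

int main()
{
    constexpr index_t K = 128, Ho = 28, Wo = 28, N = 64;
    constexpr index_t K1 = 4, K2 = 4, W1 = 2, W2 = 2, N1 = 2, N2 = 4;

    // row-major strides of the 4d [K, Ho, Wo, N] view
    constexpr index_t sK = Ho * Wo * N, sH = Wo * N, sW = N, sN = 1;

    // a 10d coordinate, folded back into 4d indices...
    const index_t k0 = 1, k1 = 2, k2 = 3, ho = 5;
    const index_t w0 = 4, w1 = 1, w2 = 0, n0 = 3, n1 = 1, n2 = 2;
    const index_t k = (k0 * K1 + k1) * K2 + k2;
    const index_t w = (w0 * W1 + w1) * W2 + w2;
    const index_t n = (n0 * N1 + n1) * N2 + n2;

    // ...lands on the same element the 10d descriptor would address
    std::printf("10d coord -> 4d offset %u\n", k * sK + ho * sH + w * sW + n * sN);
}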
@@ -43,7 +43,8 @@ struct GridwiseConvolutionImplicitGemm_v1_chwn_cyxk_khwn_lds_double_buffer
                    Float* const __restrict__ p_out_global) const
    {
        // be careful of this assertion
-        static_assert(NPerThread <= NPerBlock && NPerBlock % NPerThread == 0,
-                      "wrong! should satisfy: NPerThread <= NPerBlock && NPerBlock % NPerThread == 0");
+        static_assert(
+            NPerThread <= NPerBlock && NPerBlock % NPerThread == 0,
+            "wrong! should satisfy: NPerThread <= NPerBlock && NPerBlock % NPerThread == 0");

        constexpr auto I0 = Number<0>{};
@@ -72,7 +73,8 @@ struct GridwiseConvolutionImplicitGemm_v1_chwn_cyxk_khwn_lds_double_buffer
        static_assert(C % (2 * CPerBlock) == 0, "C cannot be evenly divided");

        // divide block work: [K, Ho, Wo, N]
-        static_assert(N % NPerBlock == 0 && K % KPerBlock == 0 && C % CPerBlock == 0 && Ho % HoPerBlock == 0 && Wo % WoPerBlock == 0,
+        static_assert(N % NPerBlock == 0 && K % KPerBlock == 0 && C % CPerBlock == 0 &&
+                          Ho % HoPerBlock == 0 && Wo % WoPerBlock == 0,
                      "wrong! cannot evenly divide work for workgroup ");

        constexpr index_t KBlockWork = (K + KPerBlock - 1) / KPerBlock;
@@ -104,8 +106,7 @@ struct GridwiseConvolutionImplicitGemm_v1_chwn_cyxk_khwn_lds_double_buffer
            mod_conv::max(index_t(4), InBlockCopyDataPerRead, WeiBlockCopyDataPerRead);

        constexpr auto in_chwn_block_desc = make_ConstantTensorDescriptor_aligned(
-            Sequence<CPerBlock, HiPerBlock, WiPerBlock, NPerBlock>{},
-            Number<max_align>{});
+            Sequence<CPerBlock, HiPerBlock, WiPerBlock, NPerBlock>{}, Number<max_align>{});

        constexpr auto wei_ek_block_desc = make_ConstantTensorDescriptor_aligned(
            Sequence<CPerBlock * Y * X, KPerBlock>{}, Number<max_align>{});
@@ -250,14 +251,13 @@ struct GridwiseConvolutionImplicitGemm_v1_chwn_cyxk_khwn_lds_double_buffer
            blockwise_wei_copy.RunLoadRegisterClipboard(p_wei_global_block_offset,
                                                        p_wei_register_clipboard);

            // a series of batched GEMM
            for(index_t y = 0; y < Y; ++y)
            {
                for(index_t x = 0; x < X; ++x)
                {
-                    blockwise_batch_gemm.Run(p_wei_block_now +
-                                                 wei_cyxk_block_desc.Get1dIndex(0, y, x, 0),
-                                             p_in_block_now + in_chwn_block_desc.Get1dIndex(0, y, x, 0),
-                                             p_out_thread);
+                    blockwise_batch_gemm.Run(
+                        p_wei_block_now + wei_cyxk_block_desc.Get1dIndex(0, y, x, 0),
+                        p_in_block_now + in_chwn_block_desc.Get1dIndex(0, y, x, 0),
+                        p_out_thread);
                }
@@ -291,8 +291,8 @@ struct GridwiseConvolutionImplicitGemm_v1_chwn_cyxk_khwn_lds_double_buffer
            {
                for(index_t x = 0; x < X; ++x)
                {
-                    blockwise_batch_gemm.Run(p_wei_block_double +
-                                                 wei_cyxk_block_desc.Get1dIndex(0, y, x, 0),
-                                             p_in_block_double + in_chwn_block_desc.Get1dIndex(0, y, x, 0),
-                                             p_out_thread);
+                    blockwise_batch_gemm.Run(
+                        p_wei_block_double + wei_cyxk_block_desc.Get1dIndex(0, y, x, 0),
+                        p_in_block_double + in_chwn_block_desc.Get1dIndex(0, y, x, 0),
+                        p_out_thread);
                }
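These two Run hunks are the two halves of the LDS double-buffer schedule this kernel is named for: the main loop computes on one buffer while the next slab is prefetched through a per-thread register clipboard into the other buffer, and the tail (the second hunk) drains the final buffer. A structural sketch of that pipeline, with Slab, load_slab, store_slab, and block_gemm as stand-ins rather than this kernel's real types:

template <class Slab, class LoadF, class StoreF, class GemmF>
__device__ void double_buffer_pipeline(
    Slab lds_buf[2], int num_slabs, LoadF load_slab, StoreF store_slab, GemmF block_gemm)
{
    Slab reg_clipboard; // per-thread register staging, cf. RunLoadRegisterClipboard

    load_slab(0, reg_clipboard);           // prefetch slab 0: global -> registers
    store_slab(reg_clipboard, lds_buf[0]); // registers -> LDS buffer 0

    for(int i = 0; i < num_slabs; ++i)
    {
        __syncthreads(); // LDS buffer i % 2 is now fully written

        if(i + 1 < num_slabs)
            load_slab(i + 1, reg_clipboard); // overlap next global read with compute

        block_gemm(lds_buf[i % 2]); // consume the current buffer

        if(i + 1 < num_slabs)
            store_slab(reg_clipboard, lds_buf[(i + 1) % 2]); // fill the other buffer
    }
}

The barrier at the top of each iteration is the only one needed: the store targets the buffer the GEMM is not reading, and every thread has left the previous iteration before that store can begin.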
@@ -376,17 +376,8 @@ struct GridwiseConvolutionImplicitGemm_v1_chwn_cyxk_khwn_lds_double_buffer
        constexpr index_t K2 = GemmMPerThreadSubC;
        constexpr index_t K1 = KPerBlock / KPerThread;

-        constexpr auto out_10d_global_desc =
-            make_ConstantTensorDescriptor(Sequence<K / (K1 * K2),
-                                                   K1,
-                                                   K2,
-                                                   Ho,
-                                                   Wo / (W1 * W2),
-                                                   W1,
-                                                   W2,
-                                                   N / (N1 * N2),
-                                                   N1,
-                                                   N2>{});
+        constexpr auto out_10d_global_desc = make_ConstantTensorDescriptor(
+            Sequence<K / (K1 * K2), K1, K2, Ho, Wo / (W1 * W2), W1, W2, N / (N1 * N2), N1, N2>{});

        constexpr auto out_10d_thread_desc = make_ConstantTensorDescriptor(
            Sequence<KPerThread / K2, 1, K2, HoPerThread, 1, W1, 1, 1, 1, N2>{});
...
@@ -43,7 +43,8 @@ struct GridwiseConvolutionImplicitGemm_v1r2_chwn_cyxk_khwn
                    Float* const __restrict__ p_out_global) const
    {
        // be careful of this assertion
-        static_assert(NPerThread <= NPerBlock && NPerBlock % NPerThread == 0,
-                      "wrong! should satisfy: NPerThread <= NPerBlock && NPerBlock % NPerThread == 0");
+        static_assert(
+            NPerThread <= NPerBlock && NPerBlock % NPerThread == 0,
+            "wrong! should satisfy: NPerThread <= NPerBlock && NPerBlock % NPerThread == 0");

        constexpr auto I0 = Number<0>{};
@@ -69,7 +70,8 @@ struct GridwiseConvolutionImplicitGemm_v1r2_chwn_cyxk_khwn
        constexpr index_t WiPerBlock = WoPerBlock + X - 1;

        // divide block work: [K, Ho, Wo, N]
-        static_assert(N % NPerBlock == 0 && K % KPerBlock == 0 && C % CPerBlock == 0 && Ho % HoPerBlock == 0 && Wo % WoPerBlock == 0,
+        static_assert(N % NPerBlock == 0 && K % KPerBlock == 0 && C % CPerBlock == 0 &&
+                          Ho % HoPerBlock == 0 && Wo % WoPerBlock == 0,
                      "wrong! cannot evenly divide work for workgroup ");

        constexpr index_t KBlockWork = (K + KPerBlock - 1) / KPerBlock;
@@ -93,7 +95,8 @@ struct GridwiseConvolutionImplicitGemm_v1r2_chwn_cyxk_khwn
        const index_t wi_block_data_begin = wo_block_data_begin;

        // 2d tensor view of gridwise weight
-        constexpr auto wei_ck_global_desc = make_ConstantTensorDescriptor(Sequence<C, K>{}, Sequence<Y * X * K, 1>{});
+        constexpr auto wei_ck_global_desc =
+            make_ConstantTensorDescriptor(Sequence<C, K>{}, Sequence<Y * X * K, 1>{});

        // tensor view of blockwise input and weight in LDS
        // be careful of alignment
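wei_ck_global_desc is a strided 2d view: with the CYXK weight layout named in the file, lengths {C, K} and strides {Y * X * K, 1} address the (y, x) = (0, 0) element of each filter row, and other taps are reached by offsetting the base pointer. A standalone sketch of that address arithmetic (example sizes):

#include <cstdio>

using index_t = unsigned int;

int main()
{
    constexpr index_t C = 8, Y = 3, X = 3, K = 16;

    // offset of (c, k) through the 2d view with strides {Y * X * K, 1}
    auto view_offset = [](index_t c, index_t k) { return c * (Y * X * K) + k; };

    // the same element through the full row-major [C, Y, X, K] layout
    auto full_offset = [](index_t c, index_t y, index_t x, index_t k) {
        return ((c * Y + y) * X + x) * K + k;
    };

    // both print 437: the 2d view is exactly the y = x = 0 slice
    std::printf("view (3, 5) -> %u, full (3, 0, 0, 5) -> %u\n",
                view_offset(3, 5), full_offset(3, 0, 0, 5));
}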
@@ -124,7 +127,7 @@ struct GridwiseConvolutionImplicitGemm_v1r2_chwn_cyxk_khwn
        // blockwise wei copy
        // format is [CPerBlock, KPerBlock]
        const auto blockwise_wei_copy =
-#if 0//debug
+#if 0 // debug
            Blockwise2dTensorCopy1<BlockSize,
                                   Float,
                                   decltype(wei_ck_global_desc),
@@ -145,8 +148,10 @@ struct GridwiseConvolutionImplicitGemm_v1r2_chwn_cyxk_khwn
        // A_matrix[C,K] is a sub-matrix of wei_block[C,K]
        // B_matrix[C,Wo*N] is a sub-matrix of in_block[C,Hi,Wi,N]
        // C_matrix[K,Wo*N] is a sub-matrix of out_block[K,Ho,Wo,N]
-        constexpr auto a_cxk_block_mtx_desc = make_ConstantMatrixDescriptor(
-            Number<CPerBlock>{}, Number<KPerBlock>{}, Number<wei_ck_block_desc.GetStride(I0)>{});
+        constexpr auto a_cxk_block_mtx_desc =
+            make_ConstantMatrixDescriptor(Number<CPerBlock>{},
+                                          Number<KPerBlock>{},
+                                          Number<wei_ck_block_desc.GetStride(I0)>{});

        constexpr auto b_cxwn_block_mtx_desc =
            make_ConstantMatrixDescriptor(Number<CPerBlock>{},
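The reflowed descriptors spell out the implicit-GEMM view: A[C, K] over the weight tile, B[C, Wo * N] over the input tile, C[K, Wo * N] over the output tile, each with a row stride taken from the underlying block descriptor so a sub-matrix of a wider LDS tile can be addressed in place. A hedged sketch of what such a compile-time matrix descriptor amounts to (a stand-in, not the library's make_ConstantMatrixDescriptor):

template <unsigned NRow, unsigned NCol, unsigned RowStride>
struct MatrixDescSketch
{
    static_assert(RowStride >= NCol, "rows must not overlap");

    // flat offset of element (i, j) in the underlying buffer
    static constexpr unsigned offset(unsigned i, unsigned j) { return i * RowStride + j; }
};

// e.g. a 16 x 64 A-tile stored inside an LDS buffer whose rows are 68 wide
using a_desc = MatrixDescSketch<16, 64, 68>;
static_assert(a_desc::offset(1, 0) == 68, "row 1 starts one full stride later");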
@@ -297,17 +302,8 @@ struct GridwiseConvolutionImplicitGemm_v1r2_chwn_cyxk_khwn
        constexpr index_t K2 = GemmMPerThreadSubC;
        constexpr index_t K1 = KPerBlock / KPerThread;

-        constexpr auto out_10d_global_desc =
-            make_ConstantTensorDescriptor(Sequence<K / (K1 * K2),
-                                                   K1,
-                                                   K2,
-                                                   Ho,
-                                                   Wo / (W1 * W2),
-                                                   W1,
-                                                   W2,
-                                                   N / (N1 * N2),
-                                                   N1,
-                                                   N2>{});
+        constexpr auto out_10d_global_desc = make_ConstantTensorDescriptor(
+            Sequence<K / (K1 * K2), K1, K2, Ho, Wo / (W1 * W2), W1, W2, N / (N1 * N2), N1, N2>{});

        constexpr auto out_10d_thread_desc = make_ConstantTensorDescriptor(
            Sequence<KPerThread / K2, 1, K2, HoPerThread, 1, W1, 1, 1, 1, N2>{});
...
 #pragma once

-template <class Float, class SrcMatrix, class DstMatrix, index_t NRow, index_t NCol, index_t DataPerRead>
+template <class Float,
+          class SrcMatrix,
+          class DstMatrix,
+          index_t NRow,
+          index_t NCol,
+          index_t DataPerRead>
 __device__ void threadwise_matrix_copy(SrcMatrix,
                                        const Float* __restrict__ p_src,
                                        DstMatrix,
...
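Only the template head changes here, wrapped one parameter per line. For context, a hedged sketch of what a threadwise matrix copy with these parameters does (the real body is not shown in this diff; index_t's definition and the row-stride arguments are assumptions, and the scalar inner loop stands in for the DataPerRead-wide vector accesses the parameter implies):

using index_t = unsigned int; // assumption: defined elsewhere in the repo

template <class Float, index_t NRow, index_t NCol, index_t DataPerRead>
__device__ void threadwise_copy_sketch(const Float* __restrict__ p_src,
                                       index_t src_row_stride,
                                       Float* __restrict__ p_dst,
                                       index_t dst_row_stride)
{
    static_assert(NCol % DataPerRead == 0, "row length must be a multiple of the read width");

    // copy the NRow x NCol tile owned by one thread
    for(index_t i = 0; i < NRow; ++i)
    {
        for(index_t j = 0; j < NCol; ++j)
        {
            p_dst[i * dst_row_stride + j] = p_src[i * src_row_stride + j];
        }
    }
}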