Commit 79137e1a authored by Jing Zhang

clean code

parent 63bad606
@@ -413,7 +413,7 @@ struct GridwiseBatchGemmXdlops_gkmkpack_gknkpack_gmn_v2
constexpr auto a_g_k_m_kpack_block_desc = make_native_tensor_descriptor_aligned(
Sequence<1, KPerBlock, MPerBlock, KPack>{}, Number<max_align>{});
-auto a_blockwise_copy = BlockwiseGenericTensorSliceCopy_v4<
+auto a_blockwise_copy = BlockwiseGenericTensorSliceCopy_v5<
BlockSize,
decltype(a_g_k_m_kpack_global_desc),
decltype(a_g_k_m_kpack_block_desc),
@@ -509,14 +509,14 @@ struct GridwiseBatchGemmXdlops_gkmkpack_gknkpack_gmn_v2
for(index_t k_block_data_begin = 0; k_block_data_begin < K - KPerBlock;
k_block_data_begin += KPerBlock)
{
-ABFloat p_a_thread_buffer[a_blockwise_copy.GetThreadBufferSize()];
+// ABFloat p_a_thread_buffer[a_blockwise_copy.GetThreadBufferSize()];
// load next data from device mem
a_blockwise_copy.MoveSrcSliceWindow(blockwise_a_copy_src_step, True);
b_blockwise_copy.MoveSrcSliceWindow(blockwise_b_copy_src_step, True);
-// a_blockwise_copy.RunLoadThreadBuffer(p_a_global);
-a_blockwise_copy.RunLoadThreadBuffer(p_a_global, p_a_thread_buffer);
+a_blockwise_copy.RunLoadThreadBuffer(p_a_global);
+// a_blockwise_copy.RunLoadThreadBuffer(p_a_global, p_a_thread_buffer);
b_blockwise_copy.RunLoadThreadBuffer(p_b_global);
@@ -535,8 +535,8 @@ struct GridwiseBatchGemmXdlops_gkmkpack_gknkpack_gmn_v2
block_sync_lds();
// store next data to LDS
-// a_blockwise_copy.RunStoreThreadBuffer(p_a_block);
-a_blockwise_copy.RunStoreThreadBuffer(p_a_thread_buffer, p_a_block);
+a_blockwise_copy.RunStoreThreadBuffer(p_a_block);
+// a_blockwise_copy.RunStoreThreadBuffer(p_a_thread_buffer, p_a_block);
b_blockwise_copy.RunStoreThreadBuffer(p_b_block);
}
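The three hunks above switch the A-matrix copier from BlockwiseGenericTensorSliceCopy_v4 to v5. Judging only from these call sites, the visible difference is where the staging registers live: the v4 calls pass a caller-owned p_a_thread_buffer to RunLoadThreadBuffer and RunStoreThreadBuffer, while the v5 overloads take a single pointer, suggesting the copier now keeps its thread buffer internally (the convention the B copier already uses). A toy mock of the two calling conventions; the class names, float element type, and buffer size 8 are illustrative assumptions, not the real ck classes:

// Toy mock of the call-site change above, not the real ck copiers.
struct CopierV4 // v4 style: caller owns the staging registers
{
    static constexpr int GetThreadBufferSize() { return 8; }
    void RunLoadThreadBuffer(const float* /*src*/, float* /*buf*/) {}
    void RunStoreThreadBuffer(const float* /*buf*/, float* /*lds*/) {}
};

struct CopierV5 // v5 style: staging registers appear to live in the copier
{
    float thread_buffer[8];
    void RunLoadThreadBuffer(const float* /*src*/) {}
    void RunStoreThreadBuffer(float* /*lds*/) {}
};

void example(const float* p_global, float* p_lds)
{
    CopierV4 v4;
    float p_thread_buffer[CopierV4::GetThreadBufferSize()];
    v4.RunLoadThreadBuffer(p_global, p_thread_buffer); // global -> registers
    v4.RunStoreThreadBuffer(p_thread_buffer, p_lds);   // registers -> LDS

    CopierV5 v5;
    v5.RunLoadThreadBuffer(p_global); // global -> internal registers
    v5.RunStoreThreadBuffer(p_lds);   // internal registers -> LDS
}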
@@ -727,9 +727,6 @@ struct XdlopsGemm_t
#else
const index_t laneId = get_thread_local_1d_id() % mfma_type.wave_size;
FloatA a[K * MRepeats];
FloatB b[K * NRepeats];
-constexpr index_t data_size = sizeof(FloatA) / sizeof(data_type);
-constexpr index_t a_reg_buff_size = K * MRepeats * data_size;
-constexpr index_t b_reg_buff_size = K * NRepeats * data_size;
@@ -748,13 +745,11 @@ struct XdlopsGemm_t
constexpr index_t KRepeats = sizeof(FloatA) / (sizeof(data_type) * mfma_type.k_base);
auto pa = reinterpret_cast<const data_type*>(&a);
auto pb = reinterpret_cast<const data_type*>(&b);
constexpr index_t AStride = K * KRepeats;
constexpr index_t BStride = K * KRepeats;
static_if<!IsKReduction>{}([&](auto) {
+#if 0
for(index_t m_i = 0; m_i < MRepeats; ++m_i)
for(index_t k_i = 0; k_i < K; ++k_i)
a[k_i + m_i * K] = p_a_wave[k_i * M + laneId + MPerXdlops * m_i];
@@ -774,6 +769,7 @@ struct XdlopsGemm_t
BStride>(
&pa[k_i * mfma_type.k_base], &pb[k_i * mfma_type.k_base], p_c_thread);
}
+#endif
})
.Else([&](auto) {
const index_t blk_id = laneId / mfma_type.num_threads_blk;
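As a numeric sanity check on the formula above, KRepeats = sizeof(FloatA) / (sizeof(data_type) * mfma_type.k_base) counts how many k_base-wide MFMA steps one FloatA register holds, which is why pa is advanced in strides of mfma_type.k_base. A self-contained sketch with stand-in types; the 4-float FloatA, float data_type, and k_base of 1 are assumptions for illustration, not values taken from this diff:

#include <cstdio>

// Stand-ins for the kernel's types; the real ones come from mfma_type etc.
using data_type = float;       // assumed scalar MFMA input type
struct FloatA { float x[4]; }; // assumed 4-wide register type (16 bytes)

int main()
{
    constexpr int k_base   = 1; // assumed mfma_type.k_base
    constexpr int KRepeats = sizeof(FloatA) / (sizeof(data_type) * k_base);
    // 16 bytes / (4 bytes * 1) = 4 sub-steps per FloatA register
    printf("KRepeats = %d\n", KRepeats);

    // Mirrors 'auto pa = reinterpret_cast<const data_type*>(&a);' above:
    FloatA a{{0.f, 1.f, 2.f, 3.f}};
    auto pa = reinterpret_cast<const data_type*>(&a);
    for(int k_i = 0; k_i < KRepeats; ++k_i)
        printf("step %d starts at lane value %f\n", k_i, pa[k_i * k_base]);
}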
@@ -36,7 +36,6 @@ union float_vec4_t
StaticallyIndexedArray<float, 4> s1;
StaticallyIndexedArray<float2_t, 2> s2;
StaticallyIndexedArray<float4_t, 1> s4;
-float n[4];
__host__ __device__ constexpr float_vec4_t() { s4(Number<0>{}) = 0; }
template <index_t vs>
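The last hunk drops the raw float n[4] member from float_vec4_t, leaving the StaticallyIndexedArray views (s1/s2/s4) as the only access paths. Going by the constructor's s4(Number<0>{}) = 0 call site, indexed access goes through operator() taking a compile-time Number tag. A toy, self-contained mock of that access pattern; the class below only imitates the interface and is not the real StaticallyIndexedArray:

#include <cstdio>

template <int I>
struct Number {}; // compile-time index tag, as in s4(Number<0>{})

// Toy imitation of StaticallyIndexedArray<float, 4>: operator() maps a
// Number<I> tag to a reference to lane I.
struct toy_indexed_array
{
    float data[4];
    template <int I>
    float& operator()(Number<I>) { return data[I]; }
};

int main()
{
    toy_indexed_array s1{};          // all lanes zero
    s1(Number<2>{}) = 3.0f;          // statically indexed write to lane 2
    printf("%f\n", s1(Number<2>{})); // prints 3.000000
}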