Commit d47bf127 authored by letaoqin

remove passthrough check

parent b5bae9d6
@@ -539,116 +539,107 @@ struct GridwiseGemmDlMultipleD_km_kn_mn
         blockwise_gemm.CalculateCThreadOriginOnBlock_BM0_BM1_BN0_BN1(
             get_thread_local_1d_id());

-        if constexpr(!is_same_v<CDEElementwiseOperation,
-                                ck::tensor_operation::element_wise::PassThrough>)
-        {
         const auto ds_grid_buf = generate_tuple(
             [&](auto i) {
                 return make_dynamic_buffer<AddressSpaceEnum::Global>(
                     p_ds_grid[i], ds_grid_desc_m0_m10_m11_n0_n10_n11[i].GetElementSpaceSize());
             },
             Number<NumDTensor>{});

         auto ds_thread_buf = generate_tuple(
             [&](auto i) {
                 using DDataType = remove_cvref_t<tuple_element_t<i.value, DsDataType>>;

                 return StaticBuffer<AddressSpaceEnum::Vgpr,
                                     DDataType,
                                     c_m10_m11_n10_n11_thread_tensor_lengths[I3],
                                     true>{};
             },
             Number<NumDTensor>{});

         auto ds_threadwise_copy = generate_tuple(
             [&](auto i) {
                 using DDataType = remove_cvref_t<tuple_element_t<i.value, DsDataType>>;

                 return ThreadwiseTensorSliceTransfer_v2<
                     DDataType,
                     DDataType,
                     decltype(ds_grid_desc_m0_m10_m11_n0_n10_n11[i]),
                     decltype(c_thread_desc_m0_m10_m11_n0_n10_n11),
                     Sequence<I1,
                              I1,
                              I1,
                              I1,
                              I1,
                              Number<c_m10_m11_n10_n11_thread_tensor_lengths[I3]>{}>,
                     CThreadTransferSrcDstAccessOrder,
                     CThreadTransferSrcDstVectorDim,
                     CThreadTransferDstScalarPerVector,
                     1,
                     false>(ds_grid_desc_m0_m10_m11_n0_n10_n11[i],
                            make_multi_index(im0,
                                             c_m10_m11_n10_n11_thread_origin_idx_on_block[I0],
                                             c_m10_m11_n10_n11_thread_origin_idx_on_block[I1],
                                             in0,
                                             c_m10_m11_n10_n11_thread_origin_idx_on_block[I2],
                                             c_m10_m11_n10_n11_thread_origin_idx_on_block[I3]));
             },
             Number<NumDTensor>{});

         static_for<0, c_m10_m11_n10_n11_thread_tensor_lengths[I0], 1>{}([&](auto m10) {
             static_for<0, c_m10_m11_n10_n11_thread_tensor_lengths[I1], 1>{}([&](auto m11) {
                 static_for<0, c_m10_m11_n10_n11_thread_tensor_lengths[I2], 1>{}([&](auto n10) {
                     // load d matrix data
                     static_for<0, NumDTensor, 1>{}([&](auto i) {
                         ds_threadwise_copy(i).Run(ds_grid_desc_m0_m10_m11_n0_n10_n11[i],
                                                   ds_grid_buf[i],
                                                   c_thread_desc_m0_m10_m11_n0_n10_n11,
                                                   make_tuple(I0, I0, I0, I0, I0, I0),
                                                   ds_thread_buf(i));
                     });
                     // cal element op
                     static_for<0, c_m10_m11_n10_n11_thread_tensor_lengths[I3], 1>{}(
                         [&](auto i) {
                             // get reference to src data
                             const auto src_data_refs = generate_tie(
                                 // return type should be lvalue
                                 [&](auto iSrc) -> const auto& {
                                     return ds_thread_buf[iSrc][i];
                                 },
                                 Number<NumDTensor>{});

                             // get reference to dst data
                             constexpr index_t c_offset =
                                 c_thread_desc_m0_m10_m11_n0_n10_n11.CalculateOffset(
                                     make_tuple(0, m10, m11, 0, n10, i));
                             auto dst_data_refs = generate_tie(
                                 // return type should be lvalue
                                 [&](auto) -> auto& { return c_thread_buf(Number<c_offset>{}); },
                                 Number<2>{});

                             unpack2(cde_element_op, dst_data_refs, src_data_refs);
                         });
                     static_for<0, NumDTensor, 1>{}([&](auto i) {
                         ds_threadwise_copy(i).MoveSrcSliceWindow(
                             ds_grid_desc_m0_m10_m11_n0_n10_n11[i],
                             make_multi_index(0, 0, 0, 0, 1, 0));
                     });
                 });
                 static_for<0, NumDTensor, 1>{}([&](auto i) {
                     ds_threadwise_copy(i).MoveSrcSliceWindow(
                         ds_grid_desc_m0_m10_m11_n0_n10_n11[i],
                         make_multi_index(
                             0, 0, 1, 0, -c_m10_m11_n10_n11_thread_tensor_lengths[I2], 0));
                 });
             });
             static_for<0, NumDTensor, 1>{}([&](auto i) {
                 ds_threadwise_copy(i).MoveSrcSliceWindow(
                     ds_grid_desc_m0_m10_m11_n0_n10_n11[i],
                     make_multi_index(
                         0, 1, -c_m10_m11_n10_n11_thread_tensor_lengths[I1], 0, 0, 0));
             });
         });
-        }

         ThreadwiseTensorSliceTransfer_v1r3<
             FloatAcc,
...
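For context, the guard deleted by this commit is the usual `if constexpr` / `is_same_v` compile-time branch on the CDE elementwise operation. The sketch below is a minimal, standalone illustration of that pattern and of what changes when it is dropped; `PassThrough`, `Scale`, and the `epilogue_*` helpers are simplified stand-ins invented for this example, not the kernel's real types. With the guard, the elementwise epilogue is compiled out whenever the operation is `PassThrough`; without it, the epilogue always runs and `PassThrough` simply copies each value through unchanged.

// Minimal standalone sketch (assumed types, not the kernel's) of the guard
// pattern removed by this commit.
#include <iostream>
#include <type_traits>
#include <vector>

// Simplified stand-ins for elementwise operations.
struct PassThrough
{
    void operator()(float& y, const float& x) const { y = x; }
};

struct Scale
{
    float alpha;
    void operator()(float& y, const float& x) const { y = alpha * x; }
};

// Old behaviour: the epilogue is compiled out when the op is PassThrough.
template <typename CDEElementwiseOperation>
void epilogue_with_guard(std::vector<float>& c, const CDEElementwiseOperation& op)
{
    if constexpr(!std::is_same_v<CDEElementwiseOperation, PassThrough>)
    {
        for(auto& v : c)
            op(v, v);
    }
}

// New behaviour: the epilogue always runs; for PassThrough it is a plain copy,
// so the numerical result is the same as skipping it.
template <typename CDEElementwiseOperation>
void epilogue_unconditional(std::vector<float>& c, const CDEElementwiseOperation& op)
{
    for(auto& v : c)
        op(v, v);
}

int main()
{
    std::vector<float> c{1.f, 2.f, 3.f};
    epilogue_with_guard(c, PassThrough{});  // branch compiled out, c unchanged
    epilogue_unconditional(c, Scale{2.f});  // epilogue applied, c becomes {2, 4, 6}
    for(float v : c)
        std::cout << v << ' ';
    std::cout << '\n';
}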