gaoqiong / composable_kernel · Commits

Commit 73a665f0
Authored Apr 13, 2021 by Jing Zhang
Parent 285d0de6

    test copy speed

Showing 2 changed files with 11 additions and 201 deletions:

    composable_kernel/include/tensor_operation/gridwise_dynamic_gemm_v2.hpp                   +11 -12
    composable_kernel/include/tensor_operation/threadwise_dynamic_tensor_slice_transfer.hpp   +0 -189
composable_kernel/include/tensor_operation/gridwise_dynamic_gemm_v2.hpp
...
@@ -174,7 +174,7 @@ struct GridwiseDynamicGemm_km_kn_mn_v3
         const index_t wo_thread_data_on_global =
             wo_block_data_on_global + wo_thread_id * WoPerThread;
 
-#if 1
+#if 0
         // A matrix blockwise copy
         auto a_blockwise_copy =
             BlockwiseDynamicTensorSliceTransfer_v4<BlockSize,
...
@@ -353,6 +353,8 @@ struct GridwiseDynamicGemm_km_kn_mn_v3
                           p_c_thread);
         }
 #endif
+
+#if 0
         // output: register to global memory
         {
             constexpr auto HoPerThreadx2 = HoPerThread * 2;
...
@@ -381,7 +383,6 @@ struct GridwiseDynamicGemm_km_kn_mn_v3
             constexpr auto vector_len = CThreadTransferDstScalarPerVector;
 
             constexpr auto c_k_n_ho_wo_global_tensor_iterator_hacks = CGlobalIteratorHacks{};
 
-#if 1
             vector_type<int8_t, vector_len> d_vec;
             for(index_t k_i = 0; k_i < KPerThreadAdd; ++k_i)
...
@@ -390,10 +391,9 @@ struct GridwiseDynamicGemm_km_kn_mn_v3
                 {
                     for(index_t w_i = 0; w_i < WoPerThreadx2; ++w_i)
                     {
-#if 1
                         ThreadwiseDynamicTensorSliceTransfer_v2<
                             FloatAB,
-                            decltype(d_vec),
+                            FloatAB,
                             decltype(d_k_n_hox2_wox2_global_desc),
                             decltype(d_k_n_hox2_wox2_thread_desc),
                             Sequence<1, 1, 1, 1>,
...
@@ -414,16 +414,16 @@ struct GridwiseDynamicGemm_km_kn_mn_v3
                             p_d_global,
                             d_k_n_hox2_wox2_thread_desc,
                             make_tuple(I0, I0, I0, I0),
-                            d_vec,
+                            &(d_vec.Vector()),
                             c_k_n_ho_wo_global_tensor_iterator_hacks);
-#endif
 
                         static_for<0, vector_len, 1>{}([&](auto i) {
-                            d_vec.Scalars()(i) +=
-                                p_c_thread[c_k_n_ho_wo_thread_desc.CalculateOffset(
-                                    make_tuple(k_i * vector_len + i, 0, h_i / 2, w_i / 2))];
+                            // d_vec.Scalars()(i) +=
+                            // p_c_thread[c_k_n_ho_wo_thread_desc.CalculateOffset(
+                            // make_tuple(k_i * vector_len + i, 0, h_i / 2, w_i / 2))];
+                            d_vec.Vector() += 1;
                         });
 
-#if 1
                         ThreadwiseDynamicTensorSliceTransfer_v1r3<
                             FloatAB,
                             FloatAB,
...
@@ -449,12 +449,11 @@ struct GridwiseDynamicGemm_km_kn_mn_v3
                             d_k_n_hox2_wox2_global_desc,
                             p_c_global,
                             c_k_n_ho_wo_global_tensor_iterator_hacks);
-#endif
                     }
                 }
             }
-#endif
         }
+#endif
     }
 
     // pass tensor descriptor by reference
...
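Taken together, the changes above put the blockwise A-copy/GEMM path and the output write-back behind independently toggleable #if guards, and replace the per-scalar accumulation from p_c_thread with a plain whole-vector increment (d_vec.Vector() += 1). Whichever branch is enabled then does little more than stream data through registers, which is consistent with the commit message "test copy speed". As a point of reference only, the following is a minimal, self-contained HIP sketch of that kind of bandwidth measurement; the kernel name (copy_speed_kernel), buffer size, and launch configuration are illustrative assumptions, not code from this repository.

    #include <hip/hip_runtime.h>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Each thread streams 4-byte packets (four int8 values), increments them, and
    // writes them back, mimicking a "load vector, bump it, store vector" output stage.
    __global__ void copy_speed_kernel(const int8_t* __restrict__ src, int8_t* __restrict__ dst, size_t n)
    {
        const size_t stride      = size_t(gridDim.x) * blockDim.x;
        const size_t num_packets = n / 4;

        for(size_t i = size_t(blockIdx.x) * blockDim.x + threadIdx.x; i < num_packets; i += stride)
        {
            int32_t v = reinterpret_cast<const int32_t*>(src)[i];
            v += 1; // stand-in for "d_vec.Vector() += 1"
            reinterpret_cast<int32_t*>(dst)[i] = v;
        }
    }

    int main()
    {
        const size_t n = size_t(1) << 28; // 256 MiB of int8 data

        int8_t* src = nullptr;
        int8_t* dst = nullptr;
        hipMalloc(reinterpret_cast<void**>(&src), n);
        hipMalloc(reinterpret_cast<void**>(&dst), n);

        hipEvent_t start, stop;
        hipEventCreate(&start);
        hipEventCreate(&stop);

        hipEventRecord(start, 0);
        hipLaunchKernelGGL(copy_speed_kernel, dim3(1024), dim3(256), 0, 0, src, dst, n);
        hipEventRecord(stop, 0);
        hipEventSynchronize(stop);

        float ms = 0.0f;
        hipEventElapsedTime(&ms, start, stop);

        // n bytes read plus n bytes written per launch
        std::printf("effective copy bandwidth: %.1f GB/s\n", 2.0 * n / (ms * 1e-3) / 1e9);

        hipFree(src);
        hipFree(dst);
        return 0;
    }

The achievable figure depends on the GPU, the grid size, and the access pattern, so a number from a sketch like this is a rough ceiling for the real kernel rather than a verdict on it.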
composable_kernel/include/tensor_operation/threadwise_dynamic_tensor_slice_transfer.hpp
...
@@ -395,195 +395,6 @@ struct ThreadwiseDynamicTensorSliceTransfer_v2
         src_slice_origin_coord_ = make_dynamic_tensor_coordinate(src_desc, src_slice_origin_idx);
     }
 
-    template <typename DstSliceOriginIdx, typename SrcIteratorHacks>
-    __device__ void Run(const SrcDesc& src_desc,
-                        const SrcData* p_src,
-                        const DstDesc&,
-                        const DstSliceOriginIdx&,
-                        DstData& p_dst,
-                        const SrcIteratorHacks& src_iterator_hacks)
-    {
-        static_assert(DstDesc::IsKnownAtCompileTime(),
-                      "wrong! DstDesc need to known at compile-time");
-
-        static_assert(
-            is_known_at_compile_time<remove_cv_t<remove_reference_t<DstSliceOriginIdx>>>::value,
-            "wrong! DstSliceOrigin need to known at compile-time");
-
-        // DstDesc and dst_slice_origin_idx are known at compile-time
-        constexpr auto dst_desc = remove_cv_t<remove_reference_t<DstDesc>>{};
-        constexpr auto dst_slice_origin_idx = DstSliceOriginIdx{};
-
-        constexpr auto I0 = Number<0>{};
-        constexpr auto I1 = Number<1>{};
-
-        // scalar per access on each dim
-        // TODO: don't use lambda_scalar_per_access
-        constexpr auto src_scalar_per_access = generate_sequence(
-            lambda_scalar_per_access<SrcVectorDim, SrcScalarPerVector>{}, Number<nDim>{});
-
-        constexpr auto src_scalar_step_in_vector =
-            generate_sequence(lambda_scalar_step_in_vector<SrcVectorDim>{}, Number<nDim>{});
-
-        constexpr auto access_lengths = SliceLengths{} / src_scalar_per_access;
-
-        constexpr auto dim_access_order = DimAccessOrder{};
-
-        constexpr auto ordered_access_lengths =
-            container_reorder_given_new2old(access_lengths, dim_access_order);
-
-        // make forward iterators
-        const auto src_forward_iterators = generate_tuple(
-            [&](auto i) {
-                Index forward_step;
-
-                static_for<0, nDim, 1>{}([&](auto j) {
-                    forward_step(j) = (i.value == j.value) ? src_scalar_per_access[i] : 0;
-                });
-
-                return make_dynamic_tensor_coordinate_iterator(
-                    src_desc, forward_step, src_iterator_hacks[I0][i]);
-            },
-            Number<nDim>{});
-
-        // make backward iterators
-        const auto src_backward_iterators = generate_tuple(
-            [&](auto i) {
-                Index backward_step;
-
-                static_for<0, nDim, 1>{}([&](auto j) {
-                    backward_step(j) = (i.value == j.value) ? -src_scalar_per_access[i] : 0;
-                });
-
-                return make_dynamic_tensor_coordinate_iterator(
-                    src_desc, backward_step, src_iterator_hacks[I1][i]);
-            },
-            Number<nDim>{});
-
-        // loop over tensor and copy
-        static_ford<decltype(ordered_access_lengths)>{}([&](auto ordered_access_idx) {
-            // judge move forward or move backward
-            constexpr auto forward_sweep = [&]() {
-                StaticallyIndexedArray<bool, nDim> forward_sweep;
-
-                forward_sweep(I0) = true;
-
-                static_for<1, nDim, 1>{}([&](auto i) {
-                    index_t tmp = ordered_access_idx[I0];
-
-                    static_for<0, i, 1>{}([&](auto j) {
-                        tmp = tmp * ordered_access_lengths[j] + ordered_access_idx[j];
-                    });
-
-                    forward_sweep(i) = tmp % 2 == 0;
-                });
-
-                return forward_sweep;
-            }();
-
-            // calculate src data index
-            constexpr auto src_data_idx = [&]() {
-                Index ordered_idx;
-
-                static_for<0, nDim, 1>{}([&](auto i) {
-                    ordered_idx(i) = forward_sweep[i]
-                                         ? ordered_access_idx[i]
-                                         : ordered_access_lengths[i] - 1 - ordered_access_idx[i];
-                });
-
-                auto src_data_idx =
-                    container_reorder_given_old2new(ordered_idx, dim_access_order) *
-                    src_scalar_per_access;
-
-                return src_data_idx;
-            }();
-
-            // copy data
-            static_assert(DstAddressSpace == AddressSpace::Vgpr, "wrong! hardcode for vgpr dst");
-
-            vector_type<SrcData, SrcScalarPerVector> src_vector;
-
-            using src_vector_t = typename vector_type<SrcData, SrcScalarPerVector>::type;
-
-            const bool is_src_valid = coordinate_has_valid_offset_assuming_visible_index_is_valid(
-                src_desc, src_slice_origin_coord_);
-
-            if constexpr(SrcAddressSpace == AddressSpace::Global)
-            {
-#if CK_USE_AMD_BUFFER_ADDRESSING
-                src_vector.Vector() = amd_buffer_load_v2<SrcData, SrcScalarPerVector>(
-                    p_src,
-                    src_slice_origin_coord_.GetOffset(),
-                    is_src_valid,
-                    src_desc.GetElementSpaceSize());
-#else
-                src_vector.Vector() = is_src_valid
-                                          ? *reinterpret_cast<const src_vector_t*>(
-                                                &p_src[src_slice_origin_coord_.GetOffset()])
-                                          : src_vector_t{0};
-#endif
-            }
-            else
-            {
-                src_vector.Vector() = is_src_valid
-                                          ? *reinterpret_cast<const src_vector_t*>(
-                                                &p_src[src_slice_origin_coord_.GetOffset()])
-                                          : src_vector_t{0};
-            }
-
-            static_for<0, SrcScalarPerVector, 1>{}([&](auto i) {
-                constexpr index_t dst_offset = dst_desc.CalculateOffset(
-                    to_multi_index(dst_slice_origin_idx) + src_data_idx +
-                    i * src_scalar_step_in_vector);
-
-                p_dst.Vectors(Number<SrcScalarPerVector>{})(Number<dst_offset>{}) =
-                    src_vector.Scalars()[i];
-            });
-
-            constexpr auto move_on_dim = [&]() constexpr
-            {
-                StaticallyIndexedArray<bool, nDim> move_on_dim;
-
-                static_for<0, nDim, 1>{}([&](auto i) {
-                    move_on_dim(i) = ordered_access_idx[i] < ordered_access_lengths[i] - 1;
-
-                    static_for<i + 1, nDim, 1>{}([&](auto j) {
-                        move_on_dim(i) &= ordered_access_idx[j] == ordered_access_lengths[j] - 1;
-                    });
-                });
-
-                return move_on_dim;
-            }
-            ();
-
-            // move
-            static_for<0, nDim, 1>{}([&](auto i) {
-                if constexpr(move_on_dim[i])
-                {
-                    if constexpr(forward_sweep[i])
-                    {
-                        move_dynamic_tensor_coordinate(
-                            src_desc,
-                            src_slice_origin_coord_,
-                            src_forward_iterators[dim_access_order[i]]);
-                    }
-                    else
-                    {
-                        move_dynamic_tensor_coordinate(
-                            src_desc,
-                            src_slice_origin_coord_,
-                            src_backward_iterators[dim_access_order[i]]);
-                    }
-                }
-            });
-        });
-
-        // move src coordinate back to slice origin (or not)
-        if constexpr(SrcResetCoordinateAfterRun)
-        {
-            const auto src_reset_iterator =
-                make_dynamic_tensor_coordinate_iterator(src_desc, GetSrcCoordinateResetStep());
-
-            move_dynamic_tensor_coordinate(src_desc, src_slice_origin_coord_, src_reset_iterator);
-        }
-    }
-
     template <typename DstSliceOriginIdx, typename SrcIteratorHacks>
     __device__ void Run(const SrcDesc& src_desc,
                         const SrcData* p_src,
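The heart of the deleted overload is its guarded vector load: read SrcScalarPerVector contiguous scalars in one shot when the source coordinate is valid, otherwise substitute zeros, with amd_buffer_load_v2 providing the same guarantee in hardware when CK_USE_AMD_BUFFER_ADDRESSING is enabled. Below is a minimal host-side sketch of that pattern; the names VecBuf and guarded_vector_load are hypothetical and not part of composable_kernel.

    #include <cstddef>
    #include <cstring>

    // Fixed-size scalar bundle standing in for composable_kernel's vector_type.
    template <typename T, int N>
    struct VecBuf
    {
        T data[N];
    };

    // Load N contiguous scalars starting at p_src + offset if the offset is valid,
    // otherwise return a zero-filled vector, the same guard the removed Run() applied
    // before its *reinterpret_cast<const src_vector_t*>(...) read.
    template <typename T, int N>
    VecBuf<T, N> guarded_vector_load(const T* p_src, std::size_t offset, bool is_src_valid)
    {
        VecBuf<T, N> v{}; // zero-initialized, mirrors src_vector_t{0}
        if(is_src_valid)
        {
            // memcpy sidesteps the alignment and aliasing caveats of the raw cast in a host sketch
            std::memcpy(v.data, p_src + offset, sizeof(T) * N);
        }
        return v;
    }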
...
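The forward_sweep and backward-iterator machinery in the deleted code walks the slice in a snake-like order: dimension i is traversed forward when the flattened index over the slower dimensions is even, backward when it is odd, so consecutive accesses differ in exactly one coordinate and each per-dimension iterator only ever moves by a single step. A tiny standalone illustration of that ordering rule for two dimensions, with made-up names and sizes:

    #include <cstdio>

    int main()
    {
        constexpr int len0 = 2; // slower dimension
        constexpr int len1 = 3; // faster dimension

        for(int i0 = 0; i0 < len0; ++i0)
        {
            // forward_sweep rule for the faster dimension: forward when the prefix index is even
            const bool forward = (i0 % 2 == 0);

            for(int s = 0; s < len1; ++s)
            {
                const int i1 = forward ? s : len1 - 1 - s;
                std::printf("(%d, %d)\n", i0, i1); // visits (0,0) (0,1) (0,2) (1,2) (1,1) (1,0)
            }
        }
        return 0;
    }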