gaoqiong / composable_kernel · Commits

Commit 285d0de6, authored Apr 13, 2021 by Jing Zhang

    vec load

Parent: 08c00140
Showing 2 changed files with 250 additions and 75 deletions:

    composable_kernel/include/tensor_operation/gridwise_dynamic_gemm_v2.hpp                  +61   -75
    composable_kernel/include/tensor_operation/threadwise_dynamic_tensor_slice_transfer.hpp  +189  -0
composable_kernel/include/tensor_operation/gridwise_dynamic_gemm_v2.hpp
@@ -174,7 +174,7 @@ struct GridwiseDynamicGemm_km_kn_mn_v3
         const index_t wo_thread_data_on_global =
             wo_block_data_on_global + wo_thread_id * WoPerThread;

-#if 0
+#if 1
         // A matrix blockwise copy
         auto a_blockwise_copy =
             BlockwiseDynamicTensorSliceTransfer_v4<BlockSize,
@@ -375,99 +375,85 @@ struct GridwiseDynamicGemm_km_kn_mn_v3
             k_block_data_on_global_add + k_thread_id * KPerThreadAdd;

         constexpr auto d_k_n_hox2_wox2_thread_desc =
-            make_dynamic_naive_tensor_descriptor_packed_v2(make_tuple(Number<KPerThreadAdd>{},
-                                                                      Number<1>{},
-                                                                      Number<HoPerThreadx2>{},
-                                                                      Number<WoPerThreadx2>{}));
-
-        FloatAB p_d_thread[d_k_n_hox2_wox2_thread_desc.GetElementSpaceSize()];
+            make_dynamic_naive_tensor_descriptor_packed_v2(
+                make_tuple(Number<1>{}, Number<1>{}, Number<1>{}, Number<1>{}));

         constexpr auto vector_len = CThreadTransferDstScalarPerVector;

+        static_assert(vector_len == 16);
+
         constexpr auto c_k_n_ho_wo_global_tensor_iterator_hacks = CGlobalIteratorHacks{};

 #if 1
-        ThreadwiseDynamicTensorSliceTransfer_v2<
-            FloatAB,
-            FloatAB,
-            decltype(d_k_n_hox2_wox2_global_desc),
-            decltype(d_k_n_hox2_wox2_thread_desc),
-            Sequence<KPerThreadAdd, 1, HoPerThreadx2, WoPerThreadx2>,
-            CThreadTransferSrcDstAccessOrder,
-            CThreadTransferSrcDstVectorDim,
-            // CThreadTransferDstScalarPerVector,
-            1,
-            AddressSpace::Global,
-            AddressSpace::Vgpr,
-            InMemoryDataOperation::Set,
-            1,
-            true>(d_k_n_hox2_wox2_global_desc,
-                  make_multi_index(k_thread_data_on_global_add,
-                                   0,
-                                   hox2_thread_data_on_global,
-                                   wox2_thread_data_on_global))
-            .Run(d_k_n_hox2_wox2_global_desc,
-                 p_d_global,
-                 d_k_n_hox2_wox2_thread_desc,
-                 make_tuple(I0, I0, I0, I0),
-                 p_d_thread,
-                 c_k_n_ho_wo_global_tensor_iterator_hacks);
-#endif
-
-#if 1
+        vector_type<int8_t, vector_len> d_vec;
+
         for(index_t k_i = 0; k_i < KPerThreadAdd; ++k_i)
         {
             for(index_t h_i = 0; h_i < HoPerThreadx2; ++h_i)
             {
                 for(index_t w_i = 0; w_i < WoPerThreadx2; ++w_i)
                 {
-                    vector_type<int8_t, vector_len> d_vec;
-
-                    d_vec.Vector() = p_d_thread[d_k_n_hox2_wox2_thread_desc.CalculateOffset(
-                        make_tuple(k_i, 0, h_i, w_i))];
+#if 1
+                    ThreadwiseDynamicTensorSliceTransfer_v2<
+                        FloatAB,
+                        decltype(d_vec),
+                        decltype(d_k_n_hox2_wox2_global_desc),
+                        decltype(d_k_n_hox2_wox2_thread_desc),
+                        Sequence<1, 1, 1, 1>,
+                        CThreadTransferSrcDstAccessOrder,
+                        CThreadTransferSrcDstVectorDim,
+                        // CThreadTransferDstScalarPerVector,
+                        1,
+                        AddressSpace::Global,
+                        AddressSpace::Vgpr,
+                        InMemoryDataOperation::Set,
+                        1,
+                        true>(d_k_n_hox2_wox2_global_desc,
+                              make_multi_index(k_thread_data_on_global_add + k_i,
+                                               0,
+                                               hox2_thread_data_on_global + h_i,
+                                               wox2_thread_data_on_global + w_i))
+                        .Run(d_k_n_hox2_wox2_global_desc,
+                             p_d_global,
+                             d_k_n_hox2_wox2_thread_desc,
+                             make_tuple(I0, I0, I0, I0),
+                             d_vec,
+                             c_k_n_ho_wo_global_tensor_iterator_hacks);
+#endif

                     static_for<0, vector_len, 1>{}([&](auto i) {
-                        d_vec.Scalars()(i) += 1;
-                        // p_c_thread[c_k_n_ho_wo_thread_desc.CalculateOffset(
-                        //     make_tuple(k_i * vector_len + i, 0, h_i / 2, w_i / 2))];
+                        d_vec.Scalars()(i) +=
+                            p_c_thread[c_k_n_ho_wo_thread_desc.CalculateOffset(
+                                make_tuple(k_i * vector_len + i, 0, h_i / 2, w_i / 2))];
                     });

-                    p_d_thread[d_k_n_hox2_wox2_thread_desc.CalculateOffset(
-                        make_tuple(k_i, 0, h_i, w_i))] = d_vec.Vector();
+#if 1
+                    ThreadwiseDynamicTensorSliceTransfer_v1r3<
+                        FloatAB,
+                        FloatAB,
+                        decltype(d_k_n_hox2_wox2_thread_desc),
+                        decltype(d_k_n_hox2_wox2_global_desc),
+                        Sequence<1, 1, 1, 1>,
+                        CThreadTransferSrcDstAccessOrder,
+                        CThreadTransferSrcDstVectorDim,
+                        // CThreadTransferDstScalarPerVector,
+                        1,
+                        AddressSpace::Vgpr,
+                        AddressSpace::Global,
+                        CGlobalMemoryDataOperation,
+                        1,
+                        true>(d_k_n_hox2_wox2_global_desc,
+                              make_multi_index(k_thread_data_on_global_add + k_i,
+                                               0,
+                                               hox2_thread_data_on_global + h_i,
+                                               wox2_thread_data_on_global + w_i))
+                        .Run(d_k_n_hox2_wox2_thread_desc,
+                             make_tuple(I0, I0, I0, I0),
+                             &(d_vec.Vector()),
+                             d_k_n_hox2_wox2_global_desc,
+                             p_c_global,
+                             c_k_n_ho_wo_global_tensor_iterator_hacks);
+#endif
                 }
             }
         }
 #endif
-
-#if 1
-        ThreadwiseDynamicTensorSliceTransfer_v1r3<
-            FloatAB,
-            FloatAB,
-            decltype(d_k_n_hox2_wox2_thread_desc),
-            decltype(d_k_n_hox2_wox2_global_desc),
-            Sequence<KPerThreadAdd, 1, HoPerThreadx2, WoPerThreadx2>,
-            CThreadTransferSrcDstAccessOrder,
-            CThreadTransferSrcDstVectorDim,
-            // CThreadTransferDstScalarPerVector,
-            1,
-            AddressSpace::Vgpr,
-            AddressSpace::Global,
-            CGlobalMemoryDataOperation,
-            1,
-            true>(d_k_n_hox2_wox2_global_desc,
-                  make_multi_index(k_thread_data_on_global_add,
-                                   0,
-                                   hox2_thread_data_on_global,
-                                   wox2_thread_data_on_global))
-            .Run(d_k_n_hox2_wox2_thread_desc,
-                 make_tuple(I0, I0, I0, I0),
-                 p_d_thread,
-                 d_k_n_hox2_wox2_global_desc,
-                 p_c_global,
-                 c_k_n_ho_wo_global_tensor_iterator_hacks);
-#endif
     }
 }
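For reference, the path added in this hunk boils down to one pattern per loop iteration: load 16 int8 values from global memory as a single vector, add the matching thread-local C values element by element, and write the vector straight back out, with no per-thread staging buffer. Below is a minimal host-side C++ sketch of that load/accumulate/store round trip. It is only an illustration under simplifying assumptions: Int8Vec, load_vec, store_vec and VEC_LEN are hypothetical names (not composable_kernel APIs), memcpy stands in for the GPU buffer load/store, and the C-accumulator indexing is flattened to a 1:1 mapping instead of the kernel's (k_i * vector_len + i, 0, h_i / 2, w_i / 2) arithmetic.

    #include <array>
    #include <cstdint>
    #include <cstring>
    #include <iostream>
    #include <vector>

    // Hypothetical stand-in for the 16-wide int8 register vector (vector_type<int8_t, 16>).
    constexpr int VEC_LEN = 16;
    using Int8Vec = std::array<int8_t, VEC_LEN>;

    // Load one 16-byte vector from "global" memory (here just a host buffer);
    // memcpy models the single vectorized load issued per iteration.
    Int8Vec load_vec(const int8_t* global, std::size_t offset)
    {
        Int8Vec v{};
        std::memcpy(v.data(), global + offset, VEC_LEN);
        return v;
    }

    // Store the whole vector back with one transfer.
    void store_vec(int8_t* global, std::size_t offset, const Int8Vec& v)
    {
        std::memcpy(global + offset, v.data(), VEC_LEN);
    }

    int main()
    {
        std::vector<int8_t> d(64, 1); // models the D tensor in global memory
        std::vector<int8_t> c(64, 2); // models the per-thread C accumulators

        // One vector per iteration: load, accumulate element-wise, store back.
        for(std::size_t off = 0; off < d.size(); off += VEC_LEN)
        {
            Int8Vec v = load_vec(d.data(), off);
            for(int i = 0; i < VEC_LEN; ++i)
                v[i] = static_cast<int8_t>(v[i] + c[off + i]);
            store_vec(d.data(), off, v);
        }

        std::cout << static_cast<int>(d[0]) << "\n"; // prints 3
    }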
composable_kernel/include/tensor_operation/threadwise_dynamic_tensor_slice_transfer.hpp

@@ -395,6 +395,195 @@ struct ThreadwiseDynamicTensorSliceTransfer_v2
         src_slice_origin_coord_ = make_dynamic_tensor_coordinate(src_desc, src_slice_origin_idx);
     }

+    template <typename DstSliceOriginIdx, typename SrcIteratorHacks>
+    __device__ void Run(const SrcDesc& src_desc,
+                        const SrcData* p_src,
+                        const DstDesc&,
+                        const DstSliceOriginIdx&,
+                        DstData& p_dst,
+                        const SrcIteratorHacks& src_iterator_hacks)
+    {
+        static_assert(DstDesc::IsKnownAtCompileTime(),
+                      "wrong! DstDesc need to known at compile-time");
+
+        static_assert(
+            is_known_at_compile_time<remove_cv_t<remove_reference_t<DstSliceOriginIdx>>>::value,
+            "wrong! DstSliceOrigin need to known at compile-time");
+
+        // DstDesc and dst_slice_origin_idx are known at compile-time
+        constexpr auto dst_desc             = remove_cv_t<remove_reference_t<DstDesc>>{};
+        constexpr auto dst_slice_origin_idx = DstSliceOriginIdx{};
+
+        constexpr auto I0 = Number<0>{};
+        constexpr auto I1 = Number<1>{};
+
+        // scalar per access on each dim
+        // TODO: don't use lambda_scalar_per_access
+        constexpr auto src_scalar_per_access = generate_sequence(
+            lambda_scalar_per_access<SrcVectorDim, SrcScalarPerVector>{}, Number<nDim>{});
+
+        constexpr auto src_scalar_step_in_vector =
+            generate_sequence(lambda_scalar_step_in_vector<SrcVectorDim>{}, Number<nDim>{});
+
+        constexpr auto access_lengths = SliceLengths{} / src_scalar_per_access;
+
+        constexpr auto dim_access_order = DimAccessOrder{};
+
+        constexpr auto ordered_access_lengths =
+            container_reorder_given_new2old(access_lengths, dim_access_order);
+
+        // make forward iterators
+        const auto src_forward_iterators = generate_tuple(
+            [&](auto i) {
+                Index forward_step;
+
+                static_for<0, nDim, 1>{}([&](auto j) {
+                    forward_step(j) = (i.value == j.value) ? src_scalar_per_access[i] : 0;
+                });
+
+                return make_dynamic_tensor_coordinate_iterator(
+                    src_desc, forward_step, src_iterator_hacks[I0][i]);
+            },
+            Number<nDim>{});
+
+        // make backward iterators
+        const auto src_backward_iterators = generate_tuple(
+            [&](auto i) {
+                Index backward_step;
+
+                static_for<0, nDim, 1>{}([&](auto j) {
+                    backward_step(j) = (i.value == j.value) ? -src_scalar_per_access[i] : 0;
+                });
+
+                return make_dynamic_tensor_coordinate_iterator(
+                    src_desc, backward_step, src_iterator_hacks[I1][i]);
+            },
+            Number<nDim>{});
+
+        // loop over tensor and copy
+        static_ford<decltype(ordered_access_lengths)>{}([&](auto ordered_access_idx) {
+            // judge move forward or move backward
+            constexpr auto forward_sweep = [&]() {
+                StaticallyIndexedArray<bool, nDim> forward_sweep;
+
+                forward_sweep(I0) = true;
+
+                static_for<1, nDim, 1>{}([&](auto i) {
+                    index_t tmp = ordered_access_idx[I0];
+
+                    static_for<0, i, 1>{}([&](auto j) {
+                        tmp = tmp * ordered_access_lengths[j] + ordered_access_idx[j];
+                    });
+
+                    forward_sweep(i) = tmp % 2 == 0;
+                });
+
+                return forward_sweep;
+            }();
+
+            // calculate src data index
+            constexpr auto src_data_idx = [&]() {
+                Index ordered_idx;
+
+                static_for<0, nDim, 1>{}([&](auto i) {
+                    ordered_idx(i) = forward_sweep[i]
+                                         ? ordered_access_idx[i]
+                                         : ordered_access_lengths[i] - 1 - ordered_access_idx[i];
+                });
+
+                auto src_data_idx =
+                    container_reorder_given_old2new(ordered_idx, dim_access_order) *
+                    src_scalar_per_access;
+
+                return src_data_idx;
+            }();
+
+            // copy data
+            static_assert(DstAddressSpace == AddressSpace::Vgpr, "wrong! hardcode for vgpr dst");
+
+            vector_type<SrcData, SrcScalarPerVector> src_vector;
+
+            using src_vector_t = typename vector_type<SrcData, SrcScalarPerVector>::type;
+
+            const bool is_src_valid = coordinate_has_valid_offset_assuming_visible_index_is_valid(
+                src_desc, src_slice_origin_coord_);
+
+            if constexpr(SrcAddressSpace == AddressSpace::Global)
+            {
+#if CK_USE_AMD_BUFFER_ADDRESSING
+                src_vector.Vector() = amd_buffer_load_v2<SrcData, SrcScalarPerVector>(
+                    p_src,
+                    src_slice_origin_coord_.GetOffset(),
+                    is_src_valid,
+                    src_desc.GetElementSpaceSize());
+#else
+                src_vector.Vector() = is_src_valid
+                                          ? *reinterpret_cast<const src_vector_t*>(
+                                                &p_src[src_slice_origin_coord_.GetOffset()])
+                                          : src_vector_t{0};
+#endif
+            }
+            else
+            {
+                src_vector.Vector() = is_src_valid
+                                          ? *reinterpret_cast<const src_vector_t*>(
+                                                &p_src[src_slice_origin_coord_.GetOffset()])
+                                          : src_vector_t{0};
+            }
+
+            static_for<0, SrcScalarPerVector, 1>{}([&](auto i) {
+                constexpr index_t dst_offset =
+                    dst_desc.CalculateOffset(to_multi_index(dst_slice_origin_idx) + src_data_idx +
+                                             i * src_scalar_step_in_vector);
+
+                p_dst.Vectors(Number<SrcScalarPerVector>{})(Number<dst_offset>{}) =
+                    src_vector.Scalars()[i];
+            });
+
+            constexpr auto move_on_dim = [&]() constexpr
+            {
+                StaticallyIndexedArray<bool, nDim> move_on_dim;
+
+                static_for<0, nDim, 1>{}([&](auto i) {
+                    move_on_dim(i) = ordered_access_idx[i] < ordered_access_lengths[i] - 1;
+
+                    static_for<i + 1, nDim, 1>{}([&](auto j) {
+                        move_on_dim(i) &= ordered_access_idx[j] == ordered_access_lengths[j] - 1;
+                    });
+                });
+
+                return move_on_dim;
+            }
+            ();
+
+            // move
+            static_for<0, nDim, 1>{}([&](auto i) {
+                if constexpr(move_on_dim[i])
+                {
+                    if constexpr(forward_sweep[i])
+                    {
+                        move_dynamic_tensor_coordinate(src_desc,
+                                                       src_slice_origin_coord_,
+                                                       src_forward_iterators[dim_access_order[i]]);
+                    }
+                    else
+                    {
+                        move_dynamic_tensor_coordinate(src_desc,
+                                                       src_slice_origin_coord_,
+                                                       src_backward_iterators[dim_access_order[i]]);
+                    }
+                }
+            });
+        });
+
+        // move src coordinate back to slice origin (or not)
+        if constexpr(SrcResetCoordinateAfterRun)
+        {
+            const auto src_reset_iterator =
+                make_dynamic_tensor_coordinate_iterator(src_desc, GetSrcCoordinateResetStep());
+
+            move_dynamic_tensor_coordinate(src_desc, src_slice_origin_coord_, src_reset_iterator);
+        }
+    }
+
     template <typename DstSliceOriginIdx, typename SrcIteratorHacks>
     __device__ void Run(const SrcDesc& src_desc,
                         const SrcData* p_src,
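The Run overload added above walks the slice in a zig-zag order: dimension 0 always sweeps forward, and each further dimension flips direction according to the parity of the flattened index built from the dimensions ordered before it, so that the source coordinate typically advances by a single iterator step between accesses. Below is a small host-side restatement of that forward_sweep calculation under the same parity rule; it is a sketch only, with hypothetical names (forward_sweep(), idx, lengths) and plain runtime loops in place of static_for and Number<>.

    #include <array>
    #include <cstdio>

    // Decide, for one access index, which dimensions sweep forward (true) or
    // backward (false). Dimension 0 is always forward; dimension i flips
    // whenever the flattened index of dimensions 0..i-1 is odd.
    template <int nDim>
    std::array<bool, nDim> forward_sweep(const std::array<int, nDim>& idx,
                                         const std::array<int, nDim>& lengths)
    {
        std::array<bool, nDim> sweep{};
        sweep[0] = true;
        for(int i = 1; i < nDim; ++i)
        {
            int tmp = idx[0];
            for(int j = 0; j < i; ++j)
                tmp = tmp * lengths[j] + idx[j];
            sweep[i] = (tmp % 2 == 0);
        }
        return sweep;
    }

    int main()
    {
        // A 2 x 3 slice: row 0 visits columns 0,1,2 and row 1 visits 2,1,0,
        // so consecutive accesses differ by only one step.
        std::array<int, 2> lengths{2, 3};
        for(int r = 0; r < 2; ++r)
        {
            for(int c = 0; c < 3; ++c)
            {
                auto sweep = forward_sweep<2>({r, c}, lengths);
                int col    = sweep[1] ? c : lengths[1] - 1 - c;
                std::printf("access (%d,%d) -> element (%d,%d)\n", r, c, r, col);
            }
        }
    }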