Project: gaoqiong/composable_kernel_ROCM

Commit b24d93a1, authored Sep 28, 2023 by Jun Liu

    Merge branch 'amd-develop' into amd-master

Parents: 742dd3aa, 56c72035

62 changes in this commit; the page shows 20 changed files with 3573 additions and 123 deletions (+3573 -123).
Changed files:

- CHANGELOG.md (+12 -1)
- CMakeLists.txt (+2 -2)
- client_example/22_im2col_col2im/CMakeLists.txt (+5 -0)
- client_example/22_im2col_col2im/column_to_image.cpp (+173 -0)
- client_example/22_im2col_col2im/image_to_column.cpp (+16 -10)
- example/52_im2col_col2im/CMakeLists.txt (+4 -2)
- example/52_im2col_col2im/column_to_image_f32.cpp (+165 -0)
- example/52_im2col_col2im/common.hpp (+3 -1)
- example/52_im2col_col2im/image_to_column_f32.cpp (+12 -11)
- example/60_gemm_multiABD/CMakeLists.txt (+10 -0)
- example/60_gemm_multiABD/gemm_multiABD_xdl_fp16.cpp (+361 -0)
- include/ck/host_utility/kernel_launch.hpp (+8 -0)
- include/ck/tensor_operation/gpu/block/thread_group_tensor_slice_transfer_v7r2.hpp (+214 -0)
- include/ck/tensor_operation/gpu/device/conv_tensor_rearrange_op.hpp (+33 -0)
- include/ck/tensor_operation/gpu/device/device_conv_tensor_rearrange.hpp (+16 -11)
- include/ck/tensor_operation/gpu/device/device_gemm_multiple_abd.hpp (+60 -0)
- include/ck/tensor_operation/gpu/device/impl/device_column_to_image_impl.hpp (+621 -0)
- include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_abd_xdl_cshuffle.hpp (+766 -0)
- include/ck/tensor_operation/gpu/device/impl/device_image_to_column_impl.hpp (+59 -85)
- include/ck/tensor_operation/gpu/grid/gridwise_gemm_multiple_abd_xdl_cshuffle.hpp (+1033 -0)
CHANGELOG.md (+12 -1)

```diff
@@ -2,6 +2,18 @@
 Full documentation for Composable Kernel is not yet available.
 
+## (Unreleased) CK for ROCm 6.0.0
+
+### Fixed
+
+### Optimizations
+
+### Added
+
+- Added image to column (#867) and column to image kernels (#930).
+
+### Changed
+
 ## CK 0.2.0 for ROCm 5.5.0
 
 ### Fixed
@@ -29,4 +41,3 @@ Full documentation for Composable Kernel is not yet available.
 - Added MaxPool backward (#750).
 
 ### Changed
-- Changed ...
```
CMakeLists.txt (+2 -2)

```diff
@@ -446,14 +446,14 @@ if(NOT DEFINED INSTANCES_ONLY)
     rocm_package_setup_component(profiler
         LIBRARY_NAME composablekernel
-        PACKAGE_NAME ckProfiler
+        PACKAGE_NAME ckprofiler
     )
     add_subdirectory(profiler)
 else()
     #When building PROFILER_ONLY, label the package with GPU_ARCH
     rocm_package_setup_component(profiler
         LIBRARY_NAME composablekernel
-        PACKAGE_NAME ckProfiler_${GPU_ARCH}
+        PACKAGE_NAME ckprofiler_${GPU_ARCH}
     )
     add_subdirectory(profiler)
 endif()
```
client_example/20_image_to_column/CMakeLists.txt → client_example/22_im2col_col2im/CMakeLists.txt (+5 -0)

```diff
 add_executable(client_image_to_column image_to_column.cpp)
 target_link_libraries(client_image_to_column PRIVATE composable_kernel::device_operations)
+
+add_executable(client_column_to_image column_to_image.cpp)
+target_link_libraries(client_column_to_image PRIVATE composable_kernel::device_operations)
```
client_example/22_im2col_col2im/column_to_image.cpp (new file, +173)

```cpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.

#include <cstdlib>
#include <iomanip>
#include <iostream>
#include <iterator>
#include <numeric>
#include <vector>

#include "ck/ck.hpp"
#include "ck/library/tensor_operation_instance/gpu/conv_tensor_rearrange.hpp"
#include "ck/tensor_operation/gpu/device/conv_tensor_rearrange_op.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"

using InDataType  = ck::half_t;
using OutDataType = ck::half_t;

using ImageLayout = ck::tensor_layout::convolution::GNHWC;

static constexpr ck::index_t NumDimSpatial = 2;
static constexpr ck::index_t G             = 1;
static constexpr ck::index_t N             = 32; // batch size
static constexpr ck::index_t C             = 32; // input channel (per group)
static constexpr ck::index_t Y             = 3;  // filter H
static constexpr ck::index_t X             = 3;  // filter W
static constexpr ck::index_t Hi            = 28; // input H
static constexpr ck::index_t Wi            = 28; // input W
static constexpr ck::index_t Ho            = 28; // output H
static constexpr ck::index_t Wo            = 28; // output W

struct SimpleDeviceMem
{
    SimpleDeviceMem() = delete;

    SimpleDeviceMem(std::size_t mem_size) : p_mem_{}
    {
        (void)hipMalloc(static_cast<void**>(&p_mem_), mem_size);
    }

    void* GetDeviceBuffer() { return p_mem_; }

    ~SimpleDeviceMem() { (void)hipFree(p_mem_); }

    void* p_mem_;
};

int main()
{
    std::array<ck::index_t, 2> in_spatial_lengths{Hi, Wi};
    std::array<ck::index_t, 2> wei_spatial_lengths{Y, X};
    std::array<ck::index_t, 2> out_spatial_lengths{Ho, Wo};
    // We have NHWGC in memory space (G is dummy)
    // However, CK's API only accept length and stride with order of GNCHW
    // Hence, we need to adjust the order of stride
    std::array<ck::index_t, 5> image_strides{C, Hi * Wi * G * C, 1, Wi * G * C, G * C};
    std::array<ck::index_t, 2> gemm_strides{Y * X * C, 1};
    std::array<ck::index_t, NumDimSpatial> filter_strides{1, 1};
    std::array<ck::index_t, NumDimSpatial> filter_dilations{1, 1};
    std::array<ck::index_t, NumDimSpatial> input_left_pads{1, 1};
    std::array<ck::index_t, NumDimSpatial> input_right_pads{1, 1};

    SimpleDeviceMem in(sizeof(InDataType) * N * Ho * Wo * Y * X * C);
    SimpleDeviceMem out(sizeof(OutDataType) * N * Hi * Wi * G * C);

    using namespace ck::conv_tensor_rearrange_op;

    using DeviceOp = ck::tensor_operation::device::DeviceConvTensorRearrange<NumDimSpatial,
                                                                             ImageLayout,
                                                                             InDataType,
                                                                             OutDataType,
                                                                             ColumnToImage>;
    // get device op instances
    const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
        DeviceOp>::GetInstances();

    std::cout << "found " << op_ptrs.size() << " instances" << std::endl;

    std::string best_op_name;
    int best_op_id        = -1;
    float best_avg_time   = std::numeric_limits<float>::max();
    float best_gb_per_sec = 0;

    // profile device operation instances
    std::cout << "Run all instances and do timing" << std::endl;

    for(int i = 0; i < op_ptrs.size(); ++i)
    {
        auto& op_ptr      = op_ptrs[i];
        auto argument_ptr = op_ptr->MakeArgumentPointer(in.GetDeviceBuffer(),
                                                        out.GetDeviceBuffer(),
                                                        N,
                                                        C,
                                                        in_spatial_lengths,
                                                        out_spatial_lengths,
                                                        wei_spatial_lengths,
                                                        image_strides,
                                                        gemm_strides,
                                                        filter_strides,
                                                        filter_dilations,
                                                        input_left_pads,
                                                        input_right_pads);

        auto invoker_ptr    = op_ptr->MakeInvokerPointer();
        std::string op_name = op_ptr->GetTypeString();

        if(op_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true});

            std::size_t num_bytes = sizeof(InDataType) * N * Hi * Wi * G * C +
                                    sizeof(OutDataType) * N * Ho * Wo * Y * X * C;

            float gb_per_sec = num_bytes / 1.E6 / avg_time;

            std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << gb_per_sec
                      << " GB/s, " << op_name << std::endl;

            if(avg_time < best_avg_time)
            {
                best_op_id      = i;
                best_op_name    = op_name;
                best_avg_time   = avg_time;
                best_gb_per_sec = gb_per_sec;
            }
        }
        else
        {
            std::cerr << op_name << " does not support this problem" << std::endl;
        }
    }

    if(best_op_id < 0)
    {
        std::cerr << "no suitable instance" << std::endl;
        return EXIT_FAILURE;
    }

    std::cout << "Best Perf: " << std::setw(10) << best_avg_time << " ms, " << best_gb_per_sec
              << " GB/s, " << best_op_name << std::endl;

    // run the best instance
    {
        auto& op_ptr = op_ptrs[best_op_id];
        std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()
                  << std::endl;
        auto argument_ptr = op_ptr->MakeArgumentPointer(in.GetDeviceBuffer(),
                                                        out.GetDeviceBuffer(),
                                                        N,
                                                        C,
                                                        in_spatial_lengths,
                                                        out_spatial_lengths,
                                                        wei_spatial_lengths,
                                                        image_strides,
                                                        gemm_strides,
                                                        filter_strides,
                                                        filter_dilations,
                                                        input_left_pads,
                                                        input_right_pads);

        auto invoker_ptr = op_ptr->MakeInvokerPointer();

        if(op_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false});
        }

        std::cout << "Done" << std::endl;
    }
}
```
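The stride comment in the file above is easy to trip over, so here is the reordering spelled out. This is our illustration (the helper name is ours, not CK's): a packed NHWGC image has memory-order strides N: Hi\*Wi\*G\*C, H: Wi\*G\*C, W: G\*C, G: C, C: 1, and CK's GNCHW argument order permutes them into exactly the `image_strides` initializer used above.

```cpp
#include <array>

// Hypothetical helper, for illustration only: packed NHWGC strides permuted
// into the G, N, C, H, W order that the CK rearrange API expects.
constexpr std::array<int, 5> gnchw_strides_from_nhwgc(int G, int C, int Hi, int Wi)
{
    return {C,               // stride of G (adjacent groups are C apart)
            Hi * Wi * G * C, // stride of N
            1,               // stride of C (innermost, packed)
            Wi * G * C,      // stride of H
            G * C};          // stride of W
}

// Matches image_strides{C, Hi*Wi*G*C, 1, Wi*G*C, G*C} for the example's sizes.
static_assert(gnchw_strides_from_nhwgc(1, 32, 28, 28)[1] == 28 * 28 * 1 * 32, "N stride");
static_assert(gnchw_strides_from_nhwgc(1, 32, 28, 28)[3] == 28 * 1 * 32, "H stride");
```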
client_example/20_image_to_column/image_to_column.cpp → client_example/22_im2col_col2im/image_to_column.cpp (+16 -10)

```diff
@@ -9,13 +9,14 @@
 #include <vector>
 
 #include "ck/ck.hpp"
-#include "ck/library/tensor_operation_instance/gpu/image_to_column.hpp"
+#include "ck/library/tensor_operation_instance/gpu/conv_tensor_rearrange.hpp"
+#include "ck/tensor_operation/gpu/device/conv_tensor_rearrange_op.hpp"
 #include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
 
 using InDataType  = ck::half_t;
 using OutDataType = ck::half_t;
 
-using InLayout = ck::tensor_layout::convolution::GNHWC;
+using ImageLayout = ck::tensor_layout::convolution::GNHWC;
 
 static constexpr ck::index_t NumDimSpatial = 2;
 static constexpr ck::index_t G             = 1;
@@ -54,8 +55,8 @@ int main()
     // We have NHWGC in memory space (G is dummy)
     // However, CK's API only accept length and stride with order of GNCHW
     // Hence, we need to adjust the order of stride
-    std::array<ck::index_t, 5> in_strides{C, Hi * Wi * G * C, 1, Wi * G * C, G * C};
-    std::array<ck::index_t, 2> out_strides{Y * X * C, 1};
+    std::array<ck::index_t, 5> image_strides{C, Hi * Wi * G * C, 1, Wi * G * C, G * C};
+    std::array<ck::index_t, 2> gemm_strides{Y * X * C, 1};
     std::array<ck::index_t, NumDimSpatial> filter_strides{1, 1};
     std::array<ck::index_t, NumDimSpatial> filter_dilations{1, 1};
@@ -65,8 +66,13 @@ int main()
     SimpleDeviceMem in(sizeof(InDataType) * N * Hi * Wi * G * C);
     SimpleDeviceMem out(sizeof(OutDataType) * N * Ho * Wo * Y * X * C);
 
-    using DeviceOp = ck::tensor_operation::device::
-        DeviceImageToColumn<NumDimSpatial, InLayout, InDataType, OutDataType>;
+    using namespace ck::conv_tensor_rearrange_op;
+
+    using DeviceOp = ck::tensor_operation::device::DeviceConvTensorRearrange<NumDimSpatial,
+                                                                             ImageLayout,
+                                                                             InDataType,
+                                                                             OutDataType,
+                                                                             ImageToColumn>;
     // get device op instances
     const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
@@ -92,8 +98,8 @@ int main()
             in_spatial_lengths,
             out_spatial_lengths,
             wei_spatial_lengths,
-            in_strides,
-            out_strides,
+            image_strides,
+            gemm_strides,
             filter_strides,
             filter_dilations,
             input_left_pads,
@@ -148,8 +154,8 @@ int main()
             in_spatial_lengths,
             out_spatial_lengths,
             wei_spatial_lengths,
-            in_strides,
-            out_strides,
+            image_strides,
+            gemm_strides,
             filter_strides,
             filter_dilations,
             input_left_pads,
```
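Both client examples follow the same instance-factory pattern: `DeviceOperationInstanceFactory` enumerates the pre-built instances for the requested `DeviceOp`, the loop times every instance that reports `IsSupportedArgument`, and the fastest one is re-run without timing. The diff above only swaps the operation type and the stride names; the selection loop itself is untouched.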
example/52_image_to_column/CMakeLists.txt → example/52_im2col_col2im/CMakeLists.txt (+4 -2)

```diff
@@ -2,9 +2,11 @@ list(APPEND gpu_list gfx908 gfx90a gfx940 gfx941 gfx942)
 set(target 0)
 foreach(gpu IN LISTS GPU_TARGETS)
     if(gpu IN_LIST gpu_list AND target EQUAL 0)
-        add_custom_target(example_image_to_column)
+        add_custom_target(example_im2col_col2im)
         add_example_executable(example_image_to_column_f32 image_to_column_f32.cpp)
-        add_dependencies(example_image_to_column example_image_to_column_f32)
+        add_dependencies(example_im2col_col2im example_image_to_column_f32)
+        add_example_executable(example_column_to_image_f32 column_to_image_f32.cpp)
+        add_dependencies(example_im2col_col2im example_column_to_image_f32)
         set(target 1)
     endif()
 endforeach()
```
example/52_im2col_col2im/column_to_image_f32.cpp (new file, +165)

```cpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.

#include "common.hpp"

using InDataType  = FP32; // ck::bhalf_t;//FP32;
using OutDataType = FP32; // ck::bhalf_t;//FP32;
using ImLayout    = ck::tensor_layout::convolution::GNHWC;

using ColumnToImageOp = ck::conv_tensor_rearrange_op::ColumnToImage;

// clang-format off
using DeviceColToImgInstance = ck::tensor_operation::device::DeviceColumnToImageImpl
//#####################|        Num| ImLayout| InDataType| OutDataType| Block|  MPer|  KPer|   Thread| Scalar|
//#####################|        Dim|         |           |            |  Size| Block| Block|  Cluster|    Per|
//#####################|    Spatial|         |           |            |      |      |      |  Lengths| Vector|
//#####################|           |         |           |            |      |      |      |         |       |
                       <NDimSpatial, ImLayout, InDataType, OutDataType,   256,   128,   128, S<16, 16>,     1>;
// clang-format on

bool RunColumnToImage(const ExecutionConfig& config, const ck::utils::conv::ConvParam& conv_params)
{
    const auto N = conv_params.N_;
    const auto C = conv_params.C_;

    const ck::index_t NDoHoWo =
        N * ck::accumulate_n<ck::index_t>(
                conv_params.output_spatial_lengths_.begin(), NDimSpatial, 1, std::multiplies<>());
    const ck::index_t CZYX =
        C * ck::accumulate_n<ck::index_t>(
                conv_params.filter_spatial_lengths_.begin(), NDimSpatial, 1, std::multiplies<>());

    const auto in_desc = HostTensorDescriptor({NDoHoWo, CZYX});
    const auto out_desc =
        ck::utils::conv::make_input_host_tensor_descriptor_g_n_c_wis_packed<ImLayout>(conv_params);

    std::array<ck::index_t, NDimSpatial> input_spatial_lengths{};
    std::array<ck::index_t, NDimSpatial> filter_spatial_lengths{};
    std::array<ck::index_t, NDimSpatial> output_spatial_lengths{};
    std::array<ck::index_t, NDimSpatial + 3> image_g_n_c_wis_strides{};
    std::array<ck::index_t, 2> gemm_m_k_strides{};
    std::array<ck::index_t, NDimSpatial> conv_filter_strides{};
    std::array<ck::index_t, NDimSpatial> conv_filter_dilations{};
    std::array<ck::index_t, NDimSpatial> input_left_pads{};
    std::array<ck::index_t, NDimSpatial> input_right_pads{};

    auto copy = [](const auto& x, auto& y) { std::copy(x.begin(), x.end(), y.begin()); };

    copy(conv_params.input_spatial_lengths_, input_spatial_lengths);
    copy(conv_params.filter_spatial_lengths_, filter_spatial_lengths);
    copy(conv_params.output_spatial_lengths_, output_spatial_lengths);
    copy(in_desc.GetStrides(), gemm_m_k_strides);
    copy(out_desc.GetStrides(), image_g_n_c_wis_strides);
    copy(conv_params.conv_filter_strides_, conv_filter_strides);
    copy(conv_params.conv_filter_dilations_, conv_filter_dilations);
    copy(conv_params.input_left_pads_, input_left_pads);
    copy(conv_params.input_right_pads_, input_right_pads);

    Tensor<InDataType> in(in_desc);
    Tensor<OutDataType> out_device(out_desc);
    Tensor<OutDataType> out_host(out_desc);

    std::cout << "in: " << in.mDesc << std::endl;
    std::cout << "out: " << out_device.mDesc << std::endl;

    switch(config.init_method)
    {
    case 0: break;
    case 1: in.GenerateTensorValue(GeneratorTensor_2<InDataType>{1, 2}); break;
    default: in.GenerateTensorValue(GeneratorTensor_3<InDataType>{-0.5, 0.5});
    }

    DeviceMem in_device_buf(sizeof(InDataType) * in.mDesc.GetElementSpaceSize());
    DeviceMem out_device_buf(sizeof(OutDataType) * out_device.mDesc.GetElementSpaceSize());

    in_device_buf.ToDevice(in.mData.data());
    // reset output to zero (overlapping windows accumulate into it)
    out_device_buf.SetZero();

    static_assert(std::is_default_constructible_v<DeviceColToImgInstance>);

    // run column-to-image
    auto col2img  = DeviceColToImgInstance{};
    auto invoker  = col2img.MakeInvoker();
    auto argument = col2img.MakeArgument(in_device_buf.GetDeviceBuffer(),
                                         out_device_buf.GetDeviceBuffer(),
                                         N,
                                         C,
                                         input_spatial_lengths,
                                         filter_spatial_lengths,
                                         output_spatial_lengths,
                                         image_g_n_c_wis_strides,
                                         gemm_m_k_strides,
                                         conv_filter_strides,
                                         conv_filter_dilations,
                                         input_left_pads,
                                         input_right_pads);

    if(!col2img.IsSupportedArgument(argument))
    {
        std::cerr << "wrong! device_col2img with the specified compilation parameters does "
                     "not support this col2img problem"
                  << std::endl;
        return false;
    }

    float ave_time = invoker.Run(argument, StreamConfig{nullptr, config.time_kernel});

    std::size_t num_btype = NDoHoWo * CZYX * (sizeof(OutDataType) + sizeof(InDataType));
    float gb_per_sec      = num_btype / 1.E6 / ave_time;
    std::cout << "Perf: " << ave_time << " ms, " << gb_per_sec << " GB/s" << std::endl;

    if(config.do_verification)
    {
        auto ref_column_to_image = ck::tensor_operation::host::
            ReferenceColumnToImage<NDimSpatial, ImLayout, InDataType, OutDataType>();

        auto ref_invoker = ref_column_to_image.MakeInvoker();

        auto ref_argument = ref_column_to_image.MakeArgument(in,
                                                             out_host,
                                                             conv_params.filter_spatial_lengths_,
                                                             conv_params.conv_filter_strides_,
                                                             conv_params.conv_filter_dilations_,
                                                             conv_params.input_left_pads_,
                                                             conv_params.input_right_pads_);

        if(!ref_column_to_image.IsSupportedArgument(&ref_argument))
        {
            std::cerr << "wrong! ref_col2img with the specified compilation parameters does "
                         "not support this col2img problem"
                      << std::endl;
            return false;
        }

        ref_invoker.Run(ref_argument);
        out_device_buf.FromDevice(out_device.mData.data());

        return ck::utils::check_err(out_device.mData, out_host.mData);
    }

    return true;
}

int RunColumnToImageExample(int argc, char* argv[])
{
    ExecutionConfig config;
    ck::utils::conv::ConvParam conv_params = DefaultConvParams;

    if(!parse_cmd_args(argc, argv, config, conv_params))
    {
        return EXIT_FAILURE;
    }

    if(conv_params.num_dim_spatial_ != NDimSpatial)
    {
        std::cerr << "unsupported # of spatial dimensions" << std::endl;
        return EXIT_FAILURE;
    }

    return !RunColumnToImage(config, conv_params);
}

int main(int argc, char* argv[]) { return RunColumnToImageExample(argc, argv); }
```
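As a reading aid, here is a scalar sketch of what column-to-image computes. This is our illustration (the function name and NHWC-packed layout are ours, and dilation is ignored for brevity), not CK's `ReferenceColumnToImage`. It also shows why the example zero-initializes the output buffer: overlapping receptive fields accumulate into the same image element.

```cpp
#include <vector>

// col2im for 2D NHWC data: each row of the column matrix holds one output
// pixel's Y*X*C receptive field; scatter-add it back into the image.
void col2im_nhwc(const std::vector<float>& col, // [N*Ho*Wo, Y*X*C]
                 std::vector<float>& img,       // [N, Hi, Wi, C], pre-zeroed
                 int N, int C, int Hi, int Wi, int Ho, int Wo,
                 int Y, int X, int stride, int pad)
{
    for(int n = 0; n < N; ++n)
        for(int ho = 0; ho < Ho; ++ho)
            for(int wo = 0; wo < Wo; ++wo)
            {
                const long row = (long(n) * Ho + ho) * Wo + wo;
                for(int y = 0; y < Y; ++y)
                    for(int x = 0; x < X; ++x)
                    {
                        const int hi = ho * stride + y - pad;
                        const int wi = wo * stride + x - pad;
                        if(hi < 0 || hi >= Hi || wi < 0 || wi >= Wi)
                            continue; // padded taps have no image element
                        for(int c = 0; c < C; ++c)
                        {
                            const long k = (long(y) * X + x) * C + c;
                            img[((long(n) * Hi + hi) * Wi + wi) * C + c] +=
                                col[row * (long(Y) * X * C) + k];
                        }
                    }
            }
}
```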
example/52_image_to_column/common.hpp → example/52_im2col_col2im/common.hpp (+3 -1)

```diff
@@ -10,6 +10,7 @@
 #include "ck/ck.hpp"
 #include "ck/tensor_operation/gpu/device/impl/device_image_to_column_impl.hpp"
+#include "ck/tensor_operation/gpu/device/impl/device_column_to_image_impl.hpp"
 #include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
 #include "ck/library/utility/algorithm.hpp"
@@ -20,6 +21,7 @@
 #include "ck/library/utility/host_tensor.hpp"
 #include "ck/library/utility/host_tensor_generator.hpp"
 #include "ck/library/reference_tensor_operation/cpu/reference_image_to_column.hpp"
+#include "ck/library/reference_tensor_operation/cpu/reference_column_to_image.hpp"
 
 template <ck::index_t... Is>
 using S = ck::Sequence<Is...>;
@@ -32,7 +34,7 @@ struct ExecutionConfig final
 {
     bool do_verification = true;
     int init_method      = 1;
-    bool time_kernel     = true;
+    bool time_kernel     = false;
 };
 
 #define DefaultConvParams \
```
example/52_image_to_column/image_to_column_f32.cpp → example/52_im2col_col2im/image_to_column_f32.cpp (+12 -11)

```diff
@@ -6,15 +6,16 @@
 using InDataType  = FP32;
 using OutDataType = FP32;
-using InLayout    = ck::tensor_layout::convolution::GNHWC;
+using ImLayout    = ck::tensor_layout::convolution::GNHWC;
+
+using ImageToColumnOp = ck::conv_tensor_rearrange_op::ImageToColumn;
 
 // clang-format off
 using DeviceImgToColInstance = ck::tensor_operation::device::DeviceImageToColumnImpl
-//#####################|        Num| InLayout| InDataType| OutDataType| Block|  MPer|  KPer|   Thread| Scalar|
+//#####################|        Num| ImLayout| InDataType| OutDataType| Block|  MPer|  KPer|   Thread| Scalar|
 //#####################|        Dim|         |           |            |  Size| Block| Block|  Cluster|    Per|
 //#####################|    Spatial|         |           |            |      |      |      |  Lengths| Vector|
 //#####################|           |         |           |            |      |      |      |         |       |
-                       <NDimSpatial, InLayout, InDataType, OutDataType,   256,   128,   128, S<16, 16>,     1>;
+                       <NDimSpatial, ImLayout, InDataType, OutDataType,   256,   128,   128, S<16, 16>,     1>;
 // clang-format on
 
 bool RunImageToColumn(const ExecutionConfig& config, const ck::utils::conv::ConvParam& conv_params)
@@ -31,14 +32,14 @@ bool RunImageToColumn(const ExecutionConfig& config, const ck::utils::conv::Conv
         conv_params.filter_spatial_lengths_.begin(), NDimSpatial, 1, std::multiplies<>());
 
     const auto in_desc =
-        ck::utils::conv::make_input_host_tensor_descriptor_g_n_c_wis_packed<InLayout>(conv_params);
+        ck::utils::conv::make_input_host_tensor_descriptor_g_n_c_wis_packed<ImLayout>(conv_params);
     const auto out_desc = HostTensorDescriptor({NDoHoWo, CZYX});
 
     std::array<ck::index_t, NDimSpatial> input_spatial_lengths{};
     std::array<ck::index_t, NDimSpatial> filter_spatial_lengths{};
     std::array<ck::index_t, NDimSpatial> output_spatial_lengths{};
-    std::array<ck::index_t, NDimSpatial + 3> input_g_n_c_wis_strides{};
-    std::array<ck::index_t, 2> output_m_k_strides{};
+    std::array<ck::index_t, NDimSpatial + 3> image_g_n_c_wis_strides{};
+    std::array<ck::index_t, 2> gemm_m_k_strides{};
     std::array<ck::index_t, NDimSpatial> conv_filter_strides{};
     std::array<ck::index_t, NDimSpatial> conv_filter_dilations{};
     std::array<ck::index_t, NDimSpatial> input_left_pads{};
@@ -49,8 +50,8 @@ bool RunImageToColumn(const ExecutionConfig& config, const ck::utils::conv::Conv
     copy(conv_params.input_spatial_lengths_, input_spatial_lengths);
     copy(conv_params.filter_spatial_lengths_, filter_spatial_lengths);
     copy(conv_params.output_spatial_lengths_, output_spatial_lengths);
-    copy(in_desc.GetStrides(), input_g_n_c_wis_strides);
-    copy(out_desc.GetStrides(), output_m_k_strides);
+    copy(in_desc.GetStrides(), image_g_n_c_wis_strides);
+    copy(out_desc.GetStrides(), gemm_m_k_strides);
     copy(conv_params.conv_filter_strides_, conv_filter_strides);
     copy(conv_params.conv_filter_dilations_, conv_filter_dilations);
     copy(conv_params.input_left_pads_, input_left_pads);
@@ -90,8 +91,8 @@ bool RunImageToColumn(const ExecutionConfig& config, const ck::utils::conv::Conv
         input_spatial_lengths,
         filter_spatial_lengths,
         output_spatial_lengths,
-        input_g_n_c_wis_strides,
-        output_m_k_strides,
+        image_g_n_c_wis_strides,
+        gemm_m_k_strides,
         conv_filter_strides,
         conv_filter_dilations,
         input_left_pads,
@@ -114,7 +115,7 @@ bool RunImageToColumn(const ExecutionConfig& config, const ck::utils::conv::Conv
     if(config.do_verification)
     {
         auto ref_image_to_column = ck::tensor_operation::host::
-            ReferenceImageToColumn<NDimSpatial, InLayout, InDataType, OutDataType>();
+            ReferenceImageToColumn<NDimSpatial, ImLayout, InDataType, OutDataType>();
 
         auto ref_invoker = ref_image_to_column.MakeInvoker();
```
example/60_gemm_multiABD/CMakeLists.txt (new file, +10)

```cmake
if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
    list(APPEND gpu_list2 gfx908 gfx90a gfx940 gfx941 gfx942)
    set(target 0)
    foreach(gpu IN LISTS GPU_TARGETS)
        if(gpu IN_LIST gpu_list2 AND target EQUAL 0)
            add_example_executable(example_gemm_multiABD_xdl_fp16 gemm_multiABD_xdl_fp16.cpp)
            set(target 1)
        endif()
    endforeach()
endif()
```
example/60_gemm_multiABD/gemm_multiABD_xdl_fp16.cpp (new file, +361)

```cpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_gemm_multiple_abd_xdl_cshuffle.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
#include "ck/library/utility/check_err.hpp"

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using F16 = ck::half_t;
using F32 = float;

using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;

using ADataType        = F16;
using BDataType        = F16;
using AccDataType      = F32;
using CShuffleDataType = F32;
using DDataType        = F16;
using EDataType        = F16;

using ALayout = Row;
using BLayout = Col;
using DLayout = Row;
using ELayout = Row;

struct AddScale
{
    static constexpr auto I0 = ck::Number<0>{};
    static constexpr auto I1 = ck::Number<1>{};
    static constexpr auto I2 = ck::Number<2>{};
    static constexpr auto I3 = ck::Number<3>{};

    __host__ __device__ constexpr void
    operator()(ck::half4_t& a, const ck::half4_t& a0, const ck::half4_t& a1) const
    {
        const auto a0_v_t = ck::vector_type<ck::half_t, 4>{a0};
        const auto a1_v_t = ck::vector_type<ck::half_t, 4>{a1};

        auto r_v_t = ck::vector_type<ck::half_t, 4>{};

        r_v_t.AsType<ck::half_t>()(I0) =
            scale * (a0_v_t.AsType<ck::half_t>()[I0] + a1_v_t.AsType<ck::half_t>()[I0]);
        r_v_t.AsType<ck::half_t>()(I1) =
            scale * (a0_v_t.AsType<ck::half_t>()[I1] + a1_v_t.AsType<ck::half_t>()[I1]);
        r_v_t.AsType<ck::half_t>()(I2) =
            scale * (a0_v_t.AsType<ck::half_t>()[I2] + a1_v_t.AsType<ck::half_t>()[I2]);
        r_v_t.AsType<ck::half_t>()(I3) =
            scale * (a0_v_t.AsType<ck::half_t>()[I3] + a1_v_t.AsType<ck::half_t>()[I3]);

        a = r_v_t.AsType<ck::half4_t>()[I0];
    }

    __host__ __device__ constexpr void
    operator()(ck::half_t& a, const ck::half_t& a0, const ck::half_t& a1) const
    {
        a = scale * (a0 + a1);
    }

    static constexpr ck::index_t vec_len = 4;

    float scale = 1.0;
};

struct AlphaBetaAdd
{
    AlphaBetaAdd(float alpha, float beta) : alpha_(alpha), beta_(beta){};

    template <typename E, typename C, typename D>
    __host__ __device__ constexpr void operator()(E& e, const C& c, const D& d) const;

    template <>
    __host__ __device__ constexpr void operator()<ck::half_t, float, ck::half_t>(
        ck::half_t& e, const float& c, const ck::half_t& d) const
    {
        e = ck::type_convert<ck::half_t>(alpha_ * c + beta_ * ck::type_convert<float>(d));
    };

    float alpha_;
    float beta_;
};

using AElementOp   = AddScale;
using BElementOp   = PassThrough;
using CDEElementOp = AlphaBetaAdd;

static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding;

using DeviceOpInstance = ck::tensor_operation::device::DeviceGemmMultipleABD_Xdl_CShuffle<
    ck::Tuple<ALayout, ALayout>,
    ck::Tuple<BLayout>,
    ck::Tuple<DLayout>,
    ELayout,
    ck::Tuple<ADataType, ADataType>,
    ck::Tuple<BDataType>,
    AccDataType,
    CShuffleDataType,
    ck::Tuple<DDataType>,
    EDataType,
    AElementOp,
    BElementOp,
    CDEElementOp,
    GemmSpec,
    1, 256, 256, 128, 32, 8, 8, 32, 32, 4, 2,
    S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1,
    S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1,
    1, 1, S<1, 32, 1, 8>, 8>;

int main(int argc, char* argv[])
{
    bool do_verification = true;
    int init_method      = 1;
    bool time_kernel     = false;

    // GEMM shape
    ck::index_t M = 3840;
    ck::index_t N = 4096;
    ck::index_t K = 4096;

    ck::index_t StrideA = 4096;
    ck::index_t StrideB = 4096;
    ck::index_t StrideD = 4096;
    ck::index_t StrideE = 4096;

    float alpha = 1.0f;
    float beta  = 1.0f;

    if(argc == 1)
    {
        // use default case
    }
    else if(argc == 4)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);
    }
    else if(argc == 6)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);

        alpha = std::stof(argv[4]);
        beta  = std::stof(argv[5]);
    }
    else if(argc == 13)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);

        M = std::stoi(argv[4]);
        N = std::stoi(argv[5]);
        K = std::stoi(argv[6]);

        StrideA = std::stoi(argv[7]);
        StrideB = std::stoi(argv[8]);
        StrideD = std::stoi(argv[9]);
        StrideE = std::stoi(argv[10]);

        alpha = std::stof(argv[11]);
        beta  = std::stof(argv[12]);
    }
    else
    {
        printf("arg1: verification (0=no, 1=yes)\n");
        printf("arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n");
        printf("arg3: time kernel (0=no, 1=yes)\n");
        printf("arg4 to 12: M (256x), N(128x), K(32x), StrideA, StrideB, StrideD, StrideE, alpha, "
               "beta\n");
        exit(0);
    }

    auto f_host_tensor_descriptor =
        [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
            using namespace ck::literals;

            if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
            {
                return HostTensorDescriptor({row, col}, {stride, 1_uz});
            }
            else
            {
                return HostTensorDescriptor({row, col}, {1_uz, stride});
            }
        };

    Tensor<ADataType> a0_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
    Tensor<ADataType> a1_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
    Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
    Tensor<DDataType> d_m_n(f_host_tensor_descriptor(M, N, StrideD, DLayout{}));
    Tensor<EDataType> e_m_n_host_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
    Tensor<EDataType> e_m_n_device_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));

    std::cout << "a0_m_k: " << a0_m_k.mDesc << std::endl;
    std::cout << "a1_m_k: " << a1_m_k.mDesc << std::endl;
    std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
    std::cout << "d_m_n: " << d_m_n.mDesc << std::endl;
    std::cout << "e_m_n: " << e_m_n_host_result.mDesc << std::endl;

    switch(init_method)
    {
    case 0: break;
    case 1:
        a0_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
        a1_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
        b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5});
        d_m_n.GenerateTensorValue(GeneratorTensor_2<DDataType>{-5, 5});
        break;
    default:
        a0_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
        a1_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
        b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
        d_m_n.GenerateTensorValue(GeneratorTensor_3<DDataType>{-0.5, 0.5});
    }

    DeviceMem a0_device_buf(sizeof(ADataType) * a0_m_k.mDesc.GetElementSpaceSize());
    DeviceMem a1_device_buf(sizeof(ADataType) * a1_m_k.mDesc.GetElementSpaceSize());
    DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
    DeviceMem d_device_buf(sizeof(DDataType) * d_m_n.mDesc.GetElementSpaceSize());
    DeviceMem e_device_buf(sizeof(EDataType) * e_m_n_device_result.mDesc.GetElementSpaceSize());

    a0_device_buf.ToDevice(a0_m_k.mData.data());
    a1_device_buf.ToDevice(a1_m_k.mData.data());
    b_device_buf.ToDevice(b_k_n.mData.data());
    d_device_buf.ToDevice(d_m_n.mData.data());
    e_device_buf.ToDevice(e_m_n_device_result.mData.data());

    auto a_element_op   = AElementOp{0.2};
    auto b_element_op   = BElementOp{};
    auto cde_element_op = CDEElementOp{alpha, beta};

    // do GEMM
    auto device_op = DeviceOpInstance{};
    auto invoker   = device_op.MakeInvoker();
    auto argument  = device_op.MakeArgument(
        std::array<const void*, 2>{a0_device_buf.GetDeviceBuffer(),
                                   a1_device_buf.GetDeviceBuffer()},
        std::array<const void*, 1>{b_device_buf.GetDeviceBuffer()},
        std::array<const void*, 1>{d_device_buf.GetDeviceBuffer()},
        e_device_buf.GetDeviceBuffer(),
        M,
        N,
        K,
        std::array<ck::index_t, 2>{StrideA, StrideA},
        std::array<ck::index_t, 1>{StrideB},
        std::array<ck::index_t, 1>{StrideD},
        StrideE,
        a_element_op,
        b_element_op,
        cde_element_op);

    if(!device_op.IsSupportedArgument(argument))
    {
        throw std::runtime_error(
            "wrong! device_gemm with the specified compilation parameters does "
            "not support this GEMM problem");
    }

    float ave_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel});

    std::size_t flop = std::size_t(2) * M * N * K;
    std::size_t num_btype =
        sizeof(ADataType) * M * K + sizeof(BDataType) * K * N + sizeof(EDataType) * M * N;

    float tflops     = static_cast<float>(flop) / 1.E9 / ave_time;
    float gb_per_sec = num_btype / 1.E6 / ave_time;

    std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s"
              << std::endl;

    e_device_buf.FromDevice(e_m_n_device_result.mData.data());

    if(do_verification)
    {
        Tensor<CShuffleDataType> c_m_n({M, N});
        Tensor<ADataType> a_m_k({M, K});

        for(int m = 0; m < M; ++m)
        {
            for(int k = 0; k < K; ++k)
            {
                a_element_op(a_m_k(m, k), a0_m_k(m, k), a1_m_k(m, k));
            }
        }

        using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
                                                                                BDataType,
                                                                                CShuffleDataType,
                                                                                AccDataType,
                                                                                PassThrough,
                                                                                BElementOp,
                                                                                PassThrough>;

        auto ref_gemm    = ReferenceGemmInstance{};
        auto ref_invoker = ref_gemm.MakeInvoker();

        auto ref_argument =
            ref_gemm.MakeArgument(a_m_k, b_k_n, c_m_n, PassThrough{}, b_element_op, PassThrough{});

        ref_invoker.Run(ref_argument);

        for(int m = 0; m < M; ++m)
        {
            for(int n = 0; n < N; ++n)
            {
                cde_element_op(e_m_n_host_result(m, n), c_m_n(m, n), d_m_n(m, n));
            }
        }

        e_device_buf.FromDevice(e_m_n_device_result.mData.data());

        return ck::utils::check_err(e_m_n_device_result, e_m_n_host_result) ? 0 : 1;
    }

    return 0;
}
```
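Per the argument parsing above, the example accepts no arguments (defaults), three (verification, init method, timing), five (plus alpha and beta), or twelve; a full invocation matching the `argc == 13` branch would look like `example_gemm_multiABD_xdl_fp16 1 2 1 3840 4096 4096 4096 4096 4096 4096 1.0 1.0`. Note that verification fuses the two A tensors on the host with the same `AddScale` functor the kernel uses, then runs a plain reference GEMM plus the `AlphaBetaAdd` epilogue.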
include/ck/host_utility/kernel_launch.hpp (+8 -0)

```diff
@@ -34,6 +34,7 @@ float launch_and_time_kernel(const StreamConfig& stream_config,
 #endif
         // warm up
         kernel<<<grid_dim, block_dim, lds_byte, stream_config.stream_id_>>>(args...);
+        hip_check_error(hipGetLastError());
 
         const int nrepeat = 10;
 #if DEBUG_LOG
@@ -50,6 +51,7 @@ float launch_and_time_kernel(const StreamConfig& stream_config,
         for(int i = 0; i < nrepeat; ++i)
         {
             kernel<<<grid_dim, block_dim, lds_byte, stream_config.stream_id_>>>(args...);
+            hip_check_error(hipGetLastError());
         }
 
         hip_check_error(hipEventRecord(stop, stream_config.stream_id_));
@@ -64,11 +66,13 @@ float launch_and_time_kernel(const StreamConfig& stream_config,
     else
     {
         kernel<<<grid_dim, block_dim, lds_byte, stream_config.stream_id_>>>(args...);
+        hip_check_error(hipGetLastError());
 
         return 0;
     }
 #else
     kernel<<<grid_dim, block_dim, lds_byte, stream_config.stream_id_>>>(args...);
+    hip_check_error(hipGetLastError());
 
     return 0;
 #endif
@@ -101,6 +105,7 @@ float launch_and_time_kernel_with_preprocess(const StreamConfig& stream_config,
         // warm up
         preprocess();
         kernel<<<grid_dim, block_dim, lds_byte, stream_config.stream_id_>>>(args...);
+        hip_check_error(hipGetLastError());
 
         const int nrepeat = 10;
 #if DEBUG_LOG
@@ -118,6 +123,7 @@ float launch_and_time_kernel_with_preprocess(const StreamConfig& stream_config,
         {
             preprocess();
             kernel<<<grid_dim, block_dim, lds_byte, stream_config.stream_id_>>>(args...);
+            hip_check_error(hipGetLastError());
         }
 
         hip_check_error(hipEventRecord(stop, stream_config.stream_id_));
@@ -133,11 +139,13 @@ float launch_and_time_kernel_with_preprocess(const StreamConfig& stream_config,
     {
         preprocess();
         kernel<<<grid_dim, block_dim, lds_byte, stream_config.stream_id_>>>(args...);
+        hip_check_error(hipGetLastError());
 
        return 0;
     }
 #else
     kernel<<<grid_dim, block_dim, lds_byte, stream_config.stream_id_>>>(args...);
+    hip_check_error(hipGetLastError());
 
     return 0;
 #endif
```
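The single repeated change in this header is worth calling out: HIP kernel launches return no status, so an invalid launch is only observable after the fact via `hipGetLastError()`. A minimal sketch of the pattern the diff adds, assuming CK's `hip_check_error` helper (which this file already uses) and a placeholder kernel of our own:

```cpp
#include <hip/hip_runtime.h>
#include "ck/host_utility/hip_check_error.hpp" // assumed location of hip_check_error

__global__ void my_kernel(float* p) { p[threadIdx.x] = 1.0f; } // placeholder kernel

void checked_launch(float* p, hipStream_t stream)
{
    my_kernel<<<dim3(1), dim3(64), 0, stream>>>(p);
    // Without this check, a bad grid/block/LDS configuration fails silently
    // and only surfaces at the next synchronizing API call.
    hip_check_error(hipGetLastError());
}
```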
include/ck/tensor_operation/gpu/block/thread_group_tensor_slice_transfer_v7r2.hpp (new file, +214)

```cpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck/utility/common_header.hpp"
#include "ck/tensor_description/tensor_descriptor.hpp"
#include "ck/tensor_description/tensor_descriptor_helper.hpp"
#include "ck/tensor_description/cluster_descriptor.hpp"
#include "ck/tensor_operation/gpu/thread/threadwise_tensor_slice_transfer_v7r2.hpp"
#include "ck/utility/is_detected.hpp"

namespace ck {

// Thread-group level multi-source, multi-destination tensor slice data movement
// Assume:
//   1. All sources and destinations are DynamicBuffer
//   2. Same VectorDim and ScalarPerVector for all sources and destinations
//   3. DstInMemOps are per destination tensor
//   4. ThreadTransferSrcResetCoordinateAfterRunFlags are per source tensor
//   5. ThreadTransferDstResetCoordinateAfterRunFlags are per destination tensor
//
// Does following things to avoid scratch memory issue
//   1. Pass tensor descriptors by reference (or tuple of references)
//   2. Does not keep reference to tensor descriptor
//   3. Does not construct new tensor coordinate when call Run()
template <typename ThreadGroup,
          typename SrcDatas,
          typename DstDatas,
          typename SrcDescs,
          typename DstDescs,
          typename ElementwiseOperation,
          typename DstInMemOps, // Sequence<InMemoryDataOperationEnum ...>
          typename SliceLengths,
          typename ThreadClusterLengths,
          typename ThreadClusterArrangeOrder,
          typename SrcDimAccessOrder,
          typename DstDimAccessOrder,
          index_t SrcVectorDim,
          index_t DstVectorDim,
          index_t SrcScalarPerVector,
          index_t DstScalarPerVector,
          typename ThreadTransferSrcResetCoordinateAfterRunFlags,
          typename ThreadTransferDstResetCoordinateAfterRunFlags>
struct ThreadGroupTensorSliceTransfer_v7r2
{
    static constexpr index_t nDim =
        remove_cvref_t<tuple_element_t<0, SrcDescs>>::GetNumOfDimension();

    static constexpr index_t nSrc = remove_cvref_t<SrcDescs>::Size();
    static constexpr index_t nDst = remove_cvref_t<DstDescs>::Size();

    using Index = MultiIndex<nDim>;

    static constexpr auto thread_slice_lengths = SliceLengths{} / ThreadClusterLengths{};

    __device__ constexpr ThreadGroupTensorSliceTransfer_v7r2(
        const SrcDescs& src_descs,
        const StaticallyIndexedArray<Index, nSrc>& src_block_slice_origins,
        const DstDescs& dst_descs,
        const StaticallyIndexedArray<Index, nDst>& dst_block_slice_origins,
        const ElementwiseOperation& element_op)
        : threadwise_transfer_(src_descs,
                               StaticallyIndexedArray<Index, nSrc>{},
                               dst_descs,
                               StaticallyIndexedArray<Index, nDst>{},
                               element_op)
    {
        static_assert(nSrc == SrcDatas::Size() && nSrc == SrcDescs::Size() &&
                          nSrc == ThreadTransferSrcResetCoordinateAfterRunFlags::Size() &&
                          nDst == DstDatas::Size() && nDst == DstDescs::Size() &&
                          nDst == ThreadTransferDstResetCoordinateAfterRunFlags::Size(),
                      "wrong!");

        static_for<0, nSrc, 1>{}([&](auto i) {
            static_assert(
                nDim == remove_cvref_t<tuple_element_t<i.value, SrcDescs>>::GetNumOfDimension(),
                "wrong!");
        });

        static_for<0, nDst, 1>{}([&](auto i) {
            static_assert(
                nDim == remove_cvref_t<tuple_element_t<i.value, DstDescs>>::GetNumOfDimension(),
                "wrong!");
        });

        static_assert(nDim == ThreadClusterLengths::Size() &&
                          nDim == ThreadClusterArrangeOrder::Size() &&
                          nDim == SrcDimAccessOrder::Size() && nDim == DstDimAccessOrder::Size(),
                      "wrong! nDim not consistent");

        static_assert(
            is_same<SliceLengths, decltype(thread_slice_lengths * ThreadClusterLengths{})>{},
            "wrong! threads should be mapped to cover entire slicing window");

        static_assert(ThreadGroup::GetNumOfThread() >= thread_cluster_desc_.GetElementSize(),
                      "wrong! ThreadGroup::GetNumOfThread() too small");

        if(ThreadGroup::GetNumOfThread() == thread_cluster_desc_.GetElementSize() or
           ThreadGroup::GetThreadId() < thread_cluster_desc_.GetElementSize())
        {
            const auto thread_cluster_idx = thread_cluster_desc_.CalculateBottomIndex(
                make_multi_index(get_thread_local_1d_id()));

            const auto thread_data_idx_begin = thread_cluster_idx * thread_slice_lengths;

            const auto src_thread_slice_origins = generate_tuple(
                [&](auto i) { return src_block_slice_origins[i] + thread_data_idx_begin; },
                Number<nSrc>{});

            const auto dst_thread_slice_origins = generate_tuple(
                [&](auto i) { return dst_block_slice_origins[i] + thread_data_idx_begin; },
                Number<nDst>{});

            threadwise_transfer_.SetSrcSliceOrigins(src_descs, src_thread_slice_origins);
            threadwise_transfer_.SetDstSliceOrigins(dst_descs, dst_thread_slice_origins);
        }
    }

    template <typename SrcBuffers>
    __device__ void RunRead(const SrcDescs& src_descs, const SrcBuffers& src_bufs)
    {
        if(ThreadGroup::GetNumOfThread() == thread_cluster_desc_.GetElementSize() or
           ThreadGroup::GetThreadId() < thread_cluster_desc_.GetElementSize())
        {
            threadwise_transfer_.RunRead(src_descs, src_bufs);
        }
    }

    template <typename T>
    using is_tuple = decltype(std::declval<T&>().IsTuple());

    template <typename DstBuffers>
    __device__ void RunWrite(const DstDescs& dst_descs, DstBuffers dst_bufs)
    {
        if(ThreadGroup::GetNumOfThread() == thread_cluster_desc_.GetElementSize() or
           ThreadGroup::GetThreadId() < thread_cluster_desc_.GetElementSize())
        {
            if constexpr(is_detected<is_tuple, decltype(dst_bufs)>::value)
                threadwise_transfer_.RunWrite(dst_descs, dst_bufs);
            else
                threadwise_transfer_.RunWrite(dst_descs, tie(dst_bufs));
        }
    }

    template <typename SrcBuffers, typename DstBuffers>
    __device__ void Run(const SrcDescs& src_descs,
                        const SrcBuffers& src_bufs,
                        const DstDescs& dst_descs,
                        DstBuffers dst_bufs)
    {
        RunRead(src_descs, src_bufs);
        RunWrite(dst_descs, dst_bufs);
    }

    template <index_t ISrc>
    __device__ void
    MoveSrcSliceWindow(const SrcDescs& src_descs, Number<ISrc> iSrc, const Index& step)
    {
        if(ThreadGroup::GetNumOfThread() == thread_cluster_desc_.GetElementSize() or
           ThreadGroup::GetThreadId() < thread_cluster_desc_.GetElementSize())
        {
            threadwise_transfer_.MoveSrcSliceWindow(src_descs, iSrc, step);
        }
    }

    __device__ void MoveSrcSliceWindow(const SrcDescs& src_descs, const Index& step)
    {
        static_for<0, SrcDescs::Size(), 1>{}(
            [&](auto i) { MoveSrcSliceWindow(src_descs, i, step); });
    }

    template <index_t IDst>
    __device__ void
    MoveDstSliceWindow(const DstDescs& dst_descs, Number<IDst> iDst, const Index& step)
    {
        if(ThreadGroup::GetNumOfThread() == thread_cluster_desc_.GetElementSize() or
           ThreadGroup::GetThreadId() < thread_cluster_desc_.GetElementSize())
        {
            threadwise_transfer_.MoveDstSliceWindow(dst_descs, iDst, step);
        }
    }

    __device__ void MoveDstSliceWindow(const DstDescs& dst_descs, const Index& step)
    {
        static_for<0, DstDescs::Size(), 1>{}(
            [&](auto i) { MoveDstSliceWindow(dst_descs, i, step); });
    }

    private:
    static constexpr auto thread_cluster_desc_ =
        make_cluster_descriptor(ThreadClusterLengths{}, ThreadClusterArrangeOrder{});

    using ThreadwiseTransfer =
        ThreadwiseTensorSliceTransfer_v7r2<SrcDatas,
                                           DstDatas,
                                           SrcDescs,
                                           DstDescs,
                                           ElementwiseOperation,
                                           DstInMemOps,
                                           decltype(thread_slice_lengths),
                                           SrcDimAccessOrder,
                                           DstDimAccessOrder,
                                           SrcVectorDim,
                                           DstVectorDim,
                                           SrcScalarPerVector,
                                           DstScalarPerVector,
                                           ThreadTransferSrcResetCoordinateAfterRunFlags,
                                           ThreadTransferDstResetCoordinateAfterRunFlags>;

    ThreadwiseTransfer threadwise_transfer_;
};

} // namespace ck
```
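A quick sanity check of the cluster arithmetic above, using our example numbers matching the 256/128/128/S<16, 16> configuration the im2col/col2im example instances in this commit use:

```cpp
// Illustration only: per-thread slice = SliceLengths / ThreadClusterLengths.
static_assert(128 / 16 == 8, "a 128x128 slice over a 16x16 cluster gives 8x8 per thread");
static_assert(16 * 16 == 256, "thread cluster size exactly fills BlockSize = 256");
```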
include/ck/tensor_operation/gpu/device/conv_tensor_rearrange_op.hpp (new file, +33)

```cpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

namespace ck {
namespace conv_tensor_rearrange_op {

struct BaseConvTensorRearrangeOp
{
};

struct ImageToColumn : public BaseConvTensorRearrangeOp
{
    static constexpr const char* name = "Image to Column";
};

struct ColumnToImage : public BaseConvTensorRearrangeOp
{
    static constexpr const char* name = "Column to Image";
};

template <typename Op,
          typename std::enable_if<std::is_base_of<BaseConvTensorRearrangeOp, Op>::value,
                                  bool>::type = false>
std::ostream& operator<<(std::ostream& os, const BaseConvTensorRearrangeOp&)
{
    os << Op::name;
    return os;
}

} // namespace conv_tensor_rearrange_op
} // namespace ck
```
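A minimal usage sketch of these tags; the driver snippet is hypothetical (not part of the commit), but it shows the intent: the empty structs select the rearrange direction at compile time, and `name` supplies a printable label.

```cpp
#include <iostream>
#include <type_traits>
#include "ck/tensor_operation/gpu/device/conv_tensor_rearrange_op.hpp"

int main()
{
    using namespace ck::conv_tensor_rearrange_op;

    // Tag types carry no data; they exist purely for compile-time dispatch.
    static_assert(std::is_base_of<BaseConvTensorRearrangeOp, ImageToColumn>::value, "");
    static_assert(std::is_base_of<BaseConvTensorRearrangeOp, ColumnToImage>::value, "");

    std::cout << ImageToColumn::name << ", " << ColumnToImage::name << std::endl;
    // prints: Image to Column, Column to Image
}
```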
include/ck/tensor_operation/gpu/device/device_image_to_column.hpp → include/ck/tensor_operation/gpu/device/device_conv_tensor_rearrange.hpp (+16 -11)

```diff
@@ -12,21 +12,26 @@ namespace tensor_operation {
 namespace device {
 
 /**
- * \brief Image to column.
+ * \brief Convolution Tensor Rearrange.
  *
- * This Device operator converts image ([G, N, Di, Hi, Wi, C]) to the gemm
- * problem([N * Do * Ho * Wo, Z * Y * X * C]). G must be equal to 1.
+ * This Device operator supports conversion image ([G, N, Di, Hi, Wi, C]) to
+ * the gemm problem([N * Do * Ho * Wo, Z * Y * X * C]) (Image to Column) and
+ * conversion gemm form to the image (Column to Image).
+ *
+ * Note that G must be equal to 1.
  *
  * \tparam NDimSpatial Number of spatial dimensions.
- * \tparam InputLayout Input Layout.
+ * \tparam ImageLayout Input Layout.
  * \tparam InputDataType Input Data Type.
  * \tparam OutputDataType Output Data Type.
+ * \tparam ConvTensorRearrangeOp Operation type: ImageToColumn, ColumnToImage.
  */
 template <index_t NDimSpatial,
-          typename InputLayout,
+          typename ImageLayout,
           typename InputDataType,
-          typename OutputDataType>
-struct DeviceImageToColumn : public BaseOperator
+          typename OutputDataType,
+          typename ConvTensorRearrangeOp>
+struct DeviceConvTensorRearrange : public BaseOperator
 {
 /**
@@ -39,8 +44,8 @@ struct DeviceConvTensorRearrange : public BaseOperator
  * \param input_spatial_lengths Input spatial lengths.
  * \param filter_spatial_lengths Filter spatial lengths.
  * \param output_spatial_lengths Output spatial lengths.
- * \param input_g_n_c_wis_strides Input strides in order [G, N, C, D, H, W].
- * \param output_m_k_strides Output strides.
+ * \param image_g_n_c_wis_strides Image strides in order [G, N, C, D, H, W].
+ * \param gemm_m_k_strides Gemm form strides.
  * \param conv_filter_strides Convolution filter strides.
  * \param conv_filter_dilations Convolution filter dilations.
  * \param input_left_pads Convolution left pads.
@@ -55,8 +60,8 @@ struct DeviceConvTensorRearrange : public BaseOperator
         const std::array<index_t, NDimSpatial>& input_spatial_lengths,
         const std::array<index_t, NDimSpatial>& filter_spatial_lengths,
         const std::array<index_t, NDimSpatial>& output_spatial_lengths,
-        const std::array<index_t, NDimSpatial + 3>& input_g_n_c_wis_strides,
-        const std::array<index_t, 2>& output_m_k_strides,
+        const std::array<index_t, NDimSpatial + 3>& image_g_n_c_wis_strides,
+        const std::array<index_t, 2>& gemm_m_k_strides,
         const std::array<index_t, NDimSpatial>& conv_filter_strides,
         const std::array<index_t, NDimSpatial>& conv_filter_dilations,
         const std::array<index_t, NDimSpatial>& input_left_pads,
```
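To make the doc comment concrete, take the sizes from the client example earlier in this commit (our arithmetic, for illustration): an image [G, N, Hi, Wi, C] = [1, 32, 28, 28, 32] with a 3x3 filter, stride 1 and padding 1 keeps the 28x28 spatial extent, so the gemm form is [N * Ho * Wo, Y * X * C] = [32 * 28 * 28, 3 * 3 * 32] = [25088, 288]. ImageToColumn produces that matrix; ColumnToImage scatters (and accumulates) it back into the image.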
include/ck/tensor_operation/gpu/device/device_gemm_multiple_abd.hpp
0 → 100644
View file @
b24d93a1
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <array>

#include "ck/tensor_operation/gpu/device/device_base.hpp"

namespace ck {
namespace tensor_operation {
namespace device {

// GEMM:
//   input : A0[M, K], B0[K, N],
//   input : D0[M, N], D1[M, N], ...
//   output : E[M, N]
// C = a_op(A) * b_op(B)
// E = cde_op(C, D0, D1, ...)
// Assume:
//   D0, D1, ... and E have the same layout
template <typename AsLayout,
          typename BsLayout,
          typename DsLayout,
          typename ELayout,
          typename AsDataType,
          typename BsDataType,
          typename DsDataType,
          typename EDataType,
          typename AElementwiseOperation,
          typename BElementwiseOperation,
          typename CDEElementwiseOperation>
struct DeviceGemmMultipleABD : public BaseOperator
{
    static constexpr index_t NumATensor = AsDataType::Size();
    static constexpr index_t NumBTensor = BsDataType::Size();
    static constexpr index_t NumDTensor = DsDataType::Size();

    virtual std::unique_ptr<BaseArgument>
    MakeArgumentPointer(std::array<const void*, NumATensor> p_as,
                        std::array<const void*, NumBTensor> p_bs,
                        std::array<const void*, NumDTensor> p_ds,
                        void* p_e,
                        ck::index_t M,
                        ck::index_t N,
                        ck::index_t K,
                        std::array<ck::index_t, NumATensor> StrideAs,
                        std::array<ck::index_t, NumBTensor> StrideBs,
                        std::array<ck::index_t, NumDTensor> StrideDs,
                        ck::index_t StrideE,
                        AElementwiseOperation a_element_op,
                        BElementwiseOperation b_element_op,
                        CDEElementwiseOperation cde_element_op) = 0;

    virtual std::unique_ptr<BaseInvoker> MakeInvokerPointer() = 0;
};

} // namespace device
} // namespace tensor_operation
} // namespace ck
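The comment block above fully determines the math of this operator. As a sanity reference, here is a minimal host-side sketch of E = cde_op(a_op(A) * b_op(B), D0) for a single A/B pair and a single D tensor, all row-major; the functor call shapes are illustrative assumptions for this sketch, not the CK elementwise-operation API:

// Naive host reference for E = cde_op(a_op(A) * b_op(B), D0); row-major,
// one A/B/D tensor each for brevity. Functor signatures are assumptions.
#include <vector>

template <typename AOp, typename BOp, typename CDEOp>
void gemm_multi_abd_ref(const std::vector<float>& A,  // [M, K]
                        const std::vector<float>& B,  // [K, N]
                        const std::vector<float>& D0, // [M, N]
                        std::vector<float>& E,        // [M, N]
                        int M, int N, int K,
                        AOp a_op, BOp b_op, CDEOp cde_op)
{
    for(int m = 0; m < M; ++m)
        for(int n = 0; n < N; ++n)
        {
            float c = 0.f;
            for(int k = 0; k < K; ++k)
                c += a_op(A[m * K + k]) * b_op(B[k * N + n]);
            // e.g. bias-add or activation fused into the epilogue
            E[m * N + n] = cde_op(c, D0[m * N + n]);
        }
}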
include/ck/tensor_operation/gpu/device/impl/device_column_to_image_impl.hpp
0 → 100644
View file @ b24d93a1
include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_abd_xdl_cshuffle.hpp
0 → 100644
View file @ b24d93a1
include/ck/tensor_operation/gpu/device/impl/device_image_to_column_impl.hpp
View file @ b24d93a1
...
@@ -5,64 +5,41 @@
#include "ck/tensor_description/tensor_descriptor.hpp"
#include "ck/tensor_description/tensor_descriptor.hpp"
#include "ck/tensor_description/tensor_descriptor_helper.hpp"
#include "ck/tensor_description/tensor_descriptor_helper.hpp"
#include "ck/tensor_operation/gpu/device/device_
image_to_column
.hpp"
#include "ck/tensor_operation/gpu/device/device_
conv_tensor_rearrange
.hpp"
#include "ck/tensor_operation/gpu/grid/gridwise_
image_to_column
.hpp"
#include "ck/tensor_operation/gpu/grid/gridwise_
tensor_rearrange
.hpp"
#include "ck/host_utility/kernel_launch.hpp"
#include "ck/host_utility/kernel_launch.hpp"
#include "ck/tensor_operation/gpu/grid/block_to_ctile_map.hpp"
#include "ck/tensor_operation/gpu/grid/block_to_ctile_map.hpp"
#include "ck/tensor_operation/operator_transform/transform_conv_fwd_to_gemm.hpp"
#include "ck/tensor_operation/operator_transform/transform_conv_fwd_to_gemm.hpp"
#include "ck/tensor_operation/gpu/device/convolution_forward_specialization.hpp"
#include "ck/tensor_operation/gpu/device/convolution_forward_specialization.hpp"
#include "ck/tensor_operation/gpu/device/matrix_padder.hpp"
#include "ck/tensor_operation/gpu/device/matrix_padder.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/conv_tensor_rearrange_op.hpp"
#include "ck/host_utility/io.hpp"
#include "ck/host_utility/io.hpp"
 namespace ck {
 namespace tensor_operation {
 namespace device {
-template <typename InputGridDesc,
-          typename InputDataType,
-          typename OutputGridDesc,
-          typename OutputDataType,
-          typename Block2ETileMap,
-          typename GridwiseImageToColumnKernel>
-__global__ void
-#if CK_USE_LAUNCH_BOUNDS
-    __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU)
-#endif
-        kernel_image_to_column(const InputGridDesc in_grid_desc,
-                               const InputDataType* __restrict__ p_in_global,
-                               const OutputGridDesc out_grid_desc,
-                               OutputDataType* __restrict__ p_out_global,
-                               const Block2ETileMap block_2_tile_map)
-{
-#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx906__) || defined(__gfx908__) || \
-    defined(__gfx90a__) || defined(__gfx940__) || defined(__gfx1030__) || defined(__gfx1100__) || \
-    defined(__gfx1101__) || defined(__gfx1102__) || defined(__gfx941__) || defined(__gfx942__))
-    GridwiseImageToColumnKernel::Run(
-        in_grid_desc, p_in_global, out_grid_desc, p_out_global, block_2_tile_map);
-#else
-    ignore = in_grid_desc;
-    ignore = p_in_global;
-    ignore = out_grid_desc;
-    ignore = p_out_global;
-    ignore = block_2_tile_map;
-#endif
-}
 // Image to column for input layout NDHWC:
 // input : input image [N, Di, Hi, Wi, C]
-// output : output image [N * Do * Ho * Wo, Z * Y * X * C]
+// output : gemm form [N * Do * Ho * Wo, Z * Y * X * C]
 template <index_t NDimSpatial,
-          typename InputLayout,
+          typename ImageLayout,
           typename InputDataType,
           typename OutputDataType,
           index_t BlockSize,
           index_t MPerBlock,
           index_t KPerBlock,
           typename ThreadClusterLengths,
-          index_t ScalarPerVector>
+          index_t ScalarPerVector,
+          typename std::enable_if<NDimSpatial >= 1 && NDimSpatial <= 3, bool>::type = false>
 struct DeviceImageToColumnImpl
-    : public DeviceImageToColumn<NDimSpatial, InputLayout, InputDataType, OutputDataType>
+    : public DeviceConvTensorRearrange<NDimSpatial,
+                                       ImageLayout,
+                                       InputDataType,
+                                       OutputDataType,
+                                       conv_tensor_rearrange_op::ImageToColumn>
 {
     static constexpr auto I0 = Number<0>{};
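As a complement to the scatter-add sketch shown earlier, the forward direction this implementation provides is a plain gather. A minimal host-side im2col sketch for 2D NHWC with G = 1 (names are illustrative, not CK API; padded positions read as zero), producing the [N*Ho*Wo, Y*X*C] gemm form named in the comment above:

// Naive host-side im2col for 2D NHWC (G = 1). Illustrative sketch only.
#include <cstdint>
#include <vector>

void im2col_nhwc(const std::vector<float>& image, // [N, Hi, Wi, C]
                 std::vector<float>& gemm,        // [N*Ho*Wo, Y*X*C], row-major
                 int64_t N, int64_t C, int64_t Hi, int64_t Wi,
                 int64_t Y, int64_t X, int64_t Ho, int64_t Wo,
                 int64_t stride_h, int64_t stride_w,
                 int64_t dilation_h, int64_t dilation_w,
                 int64_t pad_h, int64_t pad_w)
{
    const int64_t K = Y * X * C;
    for(int64_t n = 0; n < N; ++n)
        for(int64_t ho = 0; ho < Ho; ++ho)
            for(int64_t wo = 0; wo < Wo; ++wo)
            {
                const int64_t m = (n * Ho + ho) * Wo + wo;
                for(int64_t y = 0; y < Y; ++y)
                    for(int64_t x = 0; x < X; ++x)
                        for(int64_t c = 0; c < C; ++c)
                        {
                            const int64_t hi = ho * stride_h + y * dilation_h - pad_h;
                            const int64_t wi = wo * stride_w + x * dilation_w - pad_w;
                            const bool in_bounds =
                                hi >= 0 && hi < Hi && wi >= 0 && wi < Wi;
                            // out-of-bounds positions come from padding -> zero
                            gemm[m * K + (y * X + x) * C + c] =
                                in_bounds ? image[((n * Hi + hi) * Wi + wi) * C + c] : 0.f;
                        }
            }
}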
...
@@ -83,7 +60,7 @@ struct DeviceImageToColumnImpl
 const std::array<index_t, NDimSpatial>& input_spatial_lengths,
 const std::array<index_t, NDimSpatial>& filter_spatial_lengths,
 const std::array<index_t, NDimSpatial>& output_spatial_lengths,
-const std::array<index_t, NDimSpatial + 3>& input_g_n_c_wis_strides,
+const std::array<index_t, NDimSpatial + 3>& image_g_n_c_wis_strides,
 const std::array<index_t, NDimSpatial>& conv_filter_strides,
 const std::array<index_t, NDimSpatial>& conv_filter_dilations,
 const std::array<index_t, NDimSpatial>& input_left_pads,
...
@@ -110,9 +87,9 @@ struct DeviceImageToColumnImpl
 c_g_n_k_wos_lengths[I1] = N;
 const auto in_gemmmraw_gemmkraw_desc =
-    conv_to_gemm_transformer.template MakeADescriptor_M_K<InputLayout>(
+    conv_to_gemm_transformer.template MakeADescriptor_M_K<ImageLayout>(
         a_g_n_c_wis_lengths,
-        input_g_n_c_wis_strides,
+        image_g_n_c_wis_strides,
         b_g_k_c_xs_lengths,
         {}, // not needed for A Descriptor
         c_g_n_k_wos_lengths,
...
@@ -132,7 +109,7 @@ struct DeviceImageToColumnImpl
 const ck::index_t C,
 const std::array<index_t, NDimSpatial>& filter_spatial_lengths,
 const std::array<index_t, NDimSpatial>& output_spatial_lengths,
-const std::array<index_t, 2>& output_m_k_strides)
+const std::array<index_t, 2>& gemm_m_k_strides)
 {
     const index_t NDoHoWo =
         N * ck::accumulate_n<index_t>(
...
@@ -141,7 +118,7 @@ struct DeviceImageToColumnImpl
         C * ck::accumulate_n<index_t>(
             filter_spatial_lengths.begin(), NDimSpatial, 1, std::multiplies<>());
     const auto desc_mraw_kraw = make_naive_tensor_descriptor(
-        make_tuple(NDoHoWo, CZYX), make_tuple(output_m_k_strides[I0], output_m_k_strides[I1]));
+        make_tuple(NDoHoWo, CZYX), make_tuple(gemm_m_k_strides[I0], gemm_m_k_strides[I1]));
     const auto desc_m_k = matrix_padder.PadADescriptor_M_K(desc_mraw_kraw);
     return desc_m_k;
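To make the descriptor extents concrete: the raw descriptor above is M x K with M = N * Do * Ho * Wo (NDoHoWo) and K = C * Z * Y * X (CZYX). A quick check with invented numbers for the 2D case, where the Do and Z factors drop out:

// Worked example of the M/K arithmetic (2D case; values invented for illustration).
constexpr int N = 2, Ho = 16, Wo = 16, C = 64, Y = 3, X = 3;
constexpr int M = N * Ho * Wo; // 512 rows of the gemm form
constexpr int K = C * Y * X;   // 576 columns of the gemm form
static_assert(M == 512 && K == 576, "gemm-form extents");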
...
@@ -155,28 +132,29 @@ struct DeviceImageToColumnImpl
 decltype(BlockToCTileMap_M00_N0_M01Adapt<MPerBlock, KPerBlock, OutputGridDesc>(
     OutputGridDesc{}))>;

-using GridwiseImageToColumnKernel = GridwiseImageToColumn<InputGridDesc,
-                                                          InputDataType,
-                                                          OutputGridDesc,
-                                                          OutputDataType,
-                                                          BlockSize,
-                                                          MPerBlock,
-                                                          KPerBlock,
-                                                          ThreadClusterLengths,
-                                                          ScalarPerVector,
-                                                          Block2ETileMap>;
+using GridwiseTensorRearrangeKernel = GridwiseTensorRearrange<InputGridDesc,
+                                                              InputDataType,
+                                                              OutputGridDesc,
+                                                              OutputDataType,
+                                                              BlockSize,
+                                                              MPerBlock,
+                                                              KPerBlock,
+                                                              ThreadClusterLengths,
+                                                              ScalarPerVector,
+                                                              InMemoryDataOperationEnum::Set,
+                                                              Block2ETileMap>;

 struct Argument : public BaseArgument
 {
     Argument(const void* p_in, // input image
-             void* p_out,      // output image
+             void* p_out,      // gemm form
              const ck::index_t N,
              const ck::index_t C,
              const std::array<index_t, NDimSpatial>& input_spatial_lengths,
              const std::array<index_t, NDimSpatial>& filter_spatial_lengths,
              const std::array<index_t, NDimSpatial>& output_spatial_lengths,
-             const std::array<index_t, NDimSpatial + 3>& input_g_n_c_wis_strides,
+             const std::array<index_t, NDimSpatial + 3>& image_g_n_c_wis_strides,
-             const std::array<index_t, 2>& output_m_k_strides,
+             const std::array<index_t, 2>& gemm_m_k_strides,
              const std::array<index_t, NDimSpatial>& conv_filter_strides,
              const std::array<index_t, NDimSpatial>& conv_filter_dilations,
              const std::array<index_t, NDimSpatial>& input_left_pads,
...
@@ -185,7 +163,7 @@ struct DeviceImageToColumnImpl
       X_(filter_spatial_lengths[NDimSpatial - I1]),
       p_in_{static_cast<const InputDataType*>(p_in)},
       p_out_{static_cast<OutputDataType*>(p_out)},
-      input_g_n_c_wis_strides_{input_g_n_c_wis_strides},
+      image_g_n_c_wis_strides_{image_g_n_c_wis_strides},
       conv_filter_strides_{conv_filter_strides},
       conv_filter_dilations_{conv_filter_dilations},
       input_left_pads_{input_left_pads},
...
@@ -197,7 +175,7 @@ struct DeviceImageToColumnImpl
 input_spatial_lengths,
 filter_spatial_lengths,
 output_spatial_lengths,
-input_g_n_c_wis_strides,
+image_g_n_c_wis_strides,
 conv_filter_strides,
 conv_filter_dilations,
...
@@ -205,7 +183,7 @@ struct DeviceImageToColumnImpl
     input_right_pads);
     out_grid_desc_m_k_ = MakeOutDescriptor_M_K(
-        N, C, filter_spatial_lengths, output_spatial_lengths, output_m_k_strides);
+        N, C, filter_spatial_lengths, output_spatial_lengths, gemm_m_k_strides);
 }

 void Print() const
...
@@ -220,7 +198,7 @@ struct DeviceImageToColumnImpl
 const InputDataType* p_in_;
 OutputDataType* p_out_;
-const std::array<index_t, NDimSpatial + 3>& input_g_n_c_wis_strides_;
+const std::array<index_t, NDimSpatial + 3>& image_g_n_c_wis_strides_;
 const std::array<index_t, NDimSpatial>& conv_filter_strides_;
 const std::array<index_t, NDimSpatial>& conv_filter_dilations_;
 const std::array<index_t, NDimSpatial>& input_left_pads_;
...
@@ -243,12 +221,12 @@ struct DeviceImageToColumnImpl
 BlockToCTileMap_M00_N0_M01Adapt<MPerBlock, KPerBlock, OutputGridDesc>(
     arg.out_grid_desc_m_k_);
 const index_t grid_size = block_2_tile_map.CalculateGridSize(arg.out_grid_desc_m_k_);

-const auto kernel = kernel_image_to_column<InputGridDesc,
-                                           InputDataType,
-                                           OutputGridDesc,
-                                           OutputDataType,
-                                           Block2ETileMap,
-                                           GridwiseImageToColumnKernel>;
+const auto kernel = kernel_tensor_rearrange<InputGridDesc,
+                                            InputDataType,
+                                            OutputGridDesc,
+                                            OutputDataType,
+                                            Block2ETileMap,
+                                            GridwiseTensorRearrangeKernel>;

 float elapsed_time = launch_and_time_kernel(stream_config,
                                             kernel,
...
@@ -273,12 +251,8 @@ struct DeviceImageToColumnImpl
 bool IsSupportedArgument(const Argument& arg)
 {
     using namespace tensor_layout::convolution;
-    if(!(std::is_same_v<InputLayout, GNWC> || std::is_same_v<InputLayout, GNHWC> ||
-         std::is_same_v<InputLayout, GNDHWC>))
-    {
-        return false;
-    }
-    if(!(NDimSpatial >= 1 && NDimSpatial <= 3))
+    if constexpr(!(std::is_same_v<ImageLayout, GNWC> || std::is_same_v<ImageLayout, GNHWC> ||
+                   std::is_same_v<ImageLayout, GNDHWC>))
     {
         return false;
     }
...
@@ -287,8 +261,8 @@ struct DeviceImageToColumnImpl
 const auto w_pad_right = arg.input_right_pads_[NDimSpatial - I1];
 const auto dilation_x  = arg.conv_filter_dilations_[NDimSpatial - I1];
 const auto stride_x    = arg.conv_filter_strides_[NDimSpatial - I1];
-bool is_w_packed = arg.input_g_n_c_wis_strides_[NDimSpatial + I2] == arg.C_;
+bool is_w_packed = arg.image_g_n_c_wis_strides_[NDimSpatial + I2] == arg.C_;
-bool is_c_packed = arg.input_g_n_c_wis_strides_[I2] == 1;
+bool is_c_packed = arg.image_g_n_c_wis_strides_[I2] == 1;
 // check vector access with c not packed
 if(!is_c_packed && ScalarPerVector != 1)
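These packing checks gate vectorized memory access: a vector load along C is only legal when C is contiguous (stride 1), and stepping across W mid-vector is only legal when the W stride equals C. A small self-contained illustration with invented sizes, using the [G, N, C, H, W] stride order of image_g_n_c_wis_strides:

// Illustrative packing check for 2D NHWC (G = 1, N = 2, Hi = Wi = 16, C = 8).
// Values are invented for this example; only the stride layout is from the source.
#include <array>
#include <cassert>

int main()
{
    constexpr long C = 8, Hi = 16, Wi = 16, N = 2;
    // Strides in [G, N, C, H, W] order for a fully packed NHWC image:
    const std::array<long, 5> strides{N * Hi * Wi * C, Hi * Wi * C, 1, Wi * C, C};
    const bool is_w_packed = strides[4] == C; // stride over W equals C
    const bool is_c_packed = strides[2] == 1; // C is the fastest-varying dimension
    assert(is_w_packed && is_c_packed);       // vectorized loads along C are safe
    return 0;
}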
...
@@ -310,8 +284,8 @@ struct DeviceImageToColumnImpl
 if(dilation_x > 1 && arg.C_ % ScalarPerVector != 0)
     return false;
-return GridwiseImageToColumnKernel::CheckValidity(arg.in_grid_desc_m_k_,
-                                                  arg.out_grid_desc_m_k_);
+return GridwiseTensorRearrangeKernel::CheckValidity(arg.in_grid_desc_m_k_,
+                                                    arg.out_grid_desc_m_k_);
 }

 bool IsSupportedArgument(const BaseArgument* p_arg) override
...
@@ -320,14 +294,14 @@ struct DeviceImageToColumnImpl
 }

 static auto MakeArgument(const void* p_in, // input image
-                         void* p_out,      // output image
+                         void* p_out,      // gemm form
                          const ck::index_t N,
                          const ck::index_t C,
                          const std::array<index_t, NDimSpatial>& input_spatial_lengths,
                          const std::array<index_t, NDimSpatial>& filter_spatial_lengths,
                          const std::array<index_t, NDimSpatial>& output_spatial_lengths,
-                         const std::array<index_t, NDimSpatial + 3>& input_g_n_c_wis_strides,
+                         const std::array<index_t, NDimSpatial + 3>& image_g_n_c_wis_strides,
-                         const std::array<index_t, 2>& output_m_k_strides,
+                         const std::array<index_t, 2>& gemm_m_k_strides,
                          const std::array<index_t, NDimSpatial>& conv_filter_strides,
                          const std::array<index_t, NDimSpatial>& conv_filter_dilations,
                          const std::array<index_t, NDimSpatial>& input_left_pads,
...
@@ -340,8 +314,8 @@ struct DeviceImageToColumnImpl
 input_spatial_lengths,
 filter_spatial_lengths,
 output_spatial_lengths,
-input_g_n_c_wis_strides,
+image_g_n_c_wis_strides,
-output_m_k_strides,
+gemm_m_k_strides,
 conv_filter_strides,
 conv_filter_dilations,
 input_left_pads,
...
@@ -352,14 +326,14 @@ struct DeviceImageToColumnImpl
 std::unique_ptr<BaseArgument>
 MakeArgumentPointer(const void* p_in, // input image
-                    void* p_out,      // output image
+                    void* p_out,      // gemm form
                     const ck::index_t N,
                     const ck::index_t C,
                     const std::array<index_t, NDimSpatial>& input_spatial_lengths,
                     const std::array<index_t, NDimSpatial>& filter_spatial_lengths,
                     const std::array<index_t, NDimSpatial>& output_spatial_lengths,
-                    const std::array<index_t, NDimSpatial + 3>& input_g_n_c_wis_strides,
+                    const std::array<index_t, NDimSpatial + 3>& image_g_n_c_wis_strides,
-                    const std::array<index_t, 2>& output_m_k_strides,
+                    const std::array<index_t, 2>& gemm_m_k_strides,
                     const std::array<index_t, NDimSpatial>& conv_filter_strides,
                     const std::array<index_t, NDimSpatial>& conv_filter_dilations,
                     const std::array<index_t, NDimSpatial>& input_left_pads,
...
@@ -372,8 +346,8 @@ struct DeviceImageToColumnImpl
 input_spatial_lengths,
 filter_spatial_lengths,
 output_spatial_lengths,
-input_g_n_c_wis_strides,
+image_g_n_c_wis_strides,
-output_m_k_strides,
+gemm_m_k_strides,
 conv_filter_strides,
 conv_filter_dilations,
 input_left_pads,
...
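End to end, host code drives this operator through MakeArgument and an invoker. The sketch below is a hedged illustration: the tuning parameters (BlockSize, MPerBlock, KPerBlock, thread cluster, ScalarPerVector) and all concrete sizes are invented, not taken from this commit, and the call shape follows the examples under example/52_im2col_col2im:

// Hedged usage sketch for a 2D image-to-column instance (fp32, GNHWC).
// Template/tuning values are illustrative assumptions, not from the source.
#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_image_to_column_impl.hpp"

using DeviceOp = ck::tensor_operation::device::DeviceImageToColumnImpl<
    2,                                     // NDimSpatial
    ck::tensor_layout::convolution::GNHWC, // ImageLayout
    float,                                 // InputDataType
    float,                                 // OutputDataType
    256,                                   // BlockSize
    128,                                   // MPerBlock
    128,                                   // KPerBlock
    ck::Sequence<16, 16>,                  // ThreadClusterLengths
    1>;                                    // ScalarPerVector

void run_im2col(const float* p_image, float* p_gemm) // device pointers
{
    const ck::index_t N = 2, C = 8, Hi = 16, Wi = 16, Y = 3, X = 3, Ho = 14, Wo = 14;
    // Packed GNHWC strides in [G, N, C, H, W] order and packed gemm-form strides.
    const std::array<ck::index_t, 5> image_strides{N * Hi * Wi * C, Hi * Wi * C, 1, Wi * C, C};
    const std::array<ck::index_t, 2> gemm_strides{Y * X * C, 1};

    auto op      = DeviceOp{};
    auto arg     = DeviceOp::MakeArgument(p_image, p_gemm, N, C,
                                          {Hi, Wi}, {Y, X}, {Ho, Wo},
                                          image_strides, gemm_strides,
                                          {1, 1},  // conv_filter_strides
                                          {1, 1},  // conv_filter_dilations
                                          {0, 0},  // input_left_pads
                                          {0, 0}); // input_right_pads
    auto invoker = DeviceOp::MakeInvoker();
    if(op.IsSupportedArgument(arg))
        invoker.Run(arg, StreamConfig{nullptr, false}); // time_kernel = false
}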
include/ck/tensor_operation/gpu/grid/gridwise_gemm_multiple_abd_xdl_cshuffle.hpp
0 → 100644
View file @ b24d93a1