gaoqiong / composable_kernel_ROCM · Commits

Commit 6b9a4bd5, authored Apr 23, 2024 by Jun Liu
Merge branch 'amd-develop-staging-0423' into amd-master
Parents: 56de337f, c5f1cdf7
Changes: 364

Showing 20 changed files with 1073 additions and 349 deletions (+1073 −349)
library/src/tensor_operation_instance/gpu/quantization/CMakeLists.txt   +1    −0
profiler/README.md                                                      +13   −11
profiler/include/profiler/profile_contraction_impl.hpp                  +39   −41
profiler/include/profiler/profile_contraction_utils.hpp                 +25   −3
profiler/include/profiler/profile_grouped_conv_fwd_impl.hpp             +7    −3
profiler/include/profiler/profile_grouped_gemm_two_stage_impl.hpp       +366  −0
profiler/include/profiler/profile_permute_scale_impl.hpp                +20   −40
profiler/src/CMakeLists.txt                                             +92   −76
profiler/src/profile_contraction_bilinear.cpp                           +98   −56
profiler/src/profile_contraction_scale.cpp                              +93   −54
profiler/src/profile_grouped_conv_fwd.cpp                               +58   −28
profiler/src/profile_grouped_gemm_two_stage.cpp                         +157  −0
profiler/src/profile_permute_scale.cpp                                  +24   −5
script/profile_permute_scale.sh                                         +43   −0
test/CMakeLists.txt                                                     +25   −1
test/batched_gemm/CMakeLists.txt                                        +3    −8
test/batched_gemm/test_batched_gemm_xdl.cpp                             +0    −0
test/batched_gemm_gemm/CMakeLists.txt                                   +6    −13
test/batched_gemm_gemm/test_batched_gemm_gemm_fp16_xdl.cpp              +0    −0
test/batched_gemm_reduce/CMakeLists.txt                                 +3    −10
library/src/tensor_operation_instance/gpu/quantization/CMakeLists.txt — View file @ 6b9a4bd5

 # ONLY XDL_AND_DL_KERNELS
 set(CONV2D_PERLAYER_QUANT_SRC conv2d_fwd/device_conv2d_xdl_perlayer_quantization_int8_instance.cpp)
 set(CONV2D_PERCHANNEL_QUANT_SRC conv2d_fwd/device_conv2d_xdl_perchannel_quantization_int8_instance.cpp)
 set(CONV2D_BIAS_PERLAYER_QUANT_SRC conv2d_fwd/device_conv2d_xdl_bias_perlayer_quantization_int8_instance.cpp)
...
profiler/README.md — View file @ 6b9a4bd5

@@ -52,21 +52,23 @@ Best Perf: 1.42509 ms, 102.988 TFlops, 234.086 GB/s
 #arg1: tensor operation (contraction_bilinear=CONTRACTION+Bilinear)
 #arg2: data type (0: fp32; 1: f64; 2: f16; 3: bf16)
 #arg3: compute data type (0: fp32; 1: f64; 2: f16; 3: bf16)
-#arg4: matrix layout (0: A[m0, m1, k0, k1] * B[k0, k1, n0, n1] + D[m0, m1, n0, n1] = E[m0, m1, n0, n1];
+#arg4: Number of dimension for M, N and K (one for all)
+#arg5: matrix layout (0: A[m0, m1, k0, k1] * B[k0, k1, n0, n1] + D[m0, m1, n0, n1] = E[m0, m1, n0, n1];
 #                     1: A[m0, m1, k0, k1] * B[n0, n1, k0, k1] + D[m0, m1, n0, n1] = E[m0, m1, n0, n1];
 #                     2: A[k0, k1, m0, m1] * B[k0, k1, n0, n1] + D[m0, m1, n0, n1] = E[m0, m1, n0, n1];
 #                     3: A[k0, k1, m0, m1] * B[n0, n1, k0, k1] + D[m0, m1, n0, n1] = E[m0, m1, n0, n1])
-#arg5: verification (0: no; 1: yes)
-#arg6: initialization (0: no init; 1: integer value; 2: decimal value)
-#arg7: print tensor value (0: no; 1: yes)
-#arg8: time kernel (0: no, 1: yes)
-#arg9: alpha
-#arg10: beta
-#arg11 to 16: M0, M1, N0, N1, K0, K1
-#arg17 to 32: Strides for A, B, D and E (skip for default)
+#arg6: verification (0: no; 1: yes)
+#arg7: initialization (0: no init; 1: integer value; 2: decimal value)
+#arg8: print tensor value (0: no; 1: yes)
+#arg9: time kernel (0: no, 1: yes)
+#arg10: alpha
+#arg11: beta
+#arg12 to 17/29: M0, M1, N0, N1, K0, K1
+#arg18/30 to 33/77: Strides for A, B, D and E (skip for default)
-################ op                   datatype compute_datatype layout verify init log time alpha beta M0  M1  N0  N1  K0  K1
-./bin/ckProfiler contraction_bilinear 0        0                1      0      0    0   1    1.0   1.0  128 128 128 128 128 128
+################ op                   datatype compute_datatype num_dim layout verify init log time alpha beta M0  M1  N0  N1  K0  K1
+./bin/ckProfiler contraction_bilinear 0        0                2       1      0      0    0   1    1.0   1.0  128 128 128 128 128 128
 ```
Result (MI100)
...
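Reviewer note: the dual numbers in the updated help text (17/29 and 33/77) correspond to the two supported values of the new `num_dim` argument, NumDimMNK = 2 or 6. A minimal standalone sketch, not repo code, of how those argument ranges follow from NumDimMNK (dims take 3 × NumDimMNK values starting at arg12; strides take 4 tensors × 2 × NumDimMNK values):

```cpp
// Hypothetical helper for illustration only; the argument layout is read off
// the help text above, not from any repo API.
#include <cstdio>

int main()
{
    for(int num_dim_mnk : {2, 6})
    {
        const int dims_first   = 12;                              // arg12 is the first dim
        const int dims_last    = dims_first + 3 * num_dim_mnk - 1; // M, N and K dims
        const int strides_last = dims_last + 4 * 2 * num_dim_mnk;  // A, B, D, E strides
        std::printf("NumDimMNK=%d: dims arg%d..arg%d, strides arg%d..arg%d\n",
                    num_dim_mnk, dims_first, dims_last, dims_last + 1, strides_last);
    }
    return 0;
}
```

For NumDimMNK = 2 this prints dims arg12..arg17 and strides arg18..arg33; for NumDimMNK = 6, dims arg12..arg29 and strides arg30..arg77, matching the 17/29 and 33/77 pairs above.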
profiler/include/profiler/profile_contraction_impl.hpp — View file @ 6b9a4bd5

 // SPDX-License-Identifier: MIT
-// Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2023-2024, Advanced Micro Devices, Inc. All rights reserved.

 #pragma once
...
@@ -22,6 +22,7 @@
 #include "ck/library/utility/host_tensor_generator.hpp"
 #include "ck/library/utility/literals.hpp"
 #include "ck/library/reference_tensor_operation/cpu/reference_contraction.hpp"
+#include "ck/library/utility/numeric.hpp"
 #include "ck/host_utility/io.hpp"
...
@@ -34,7 +35,8 @@ using Scale = ck::tensor_operation::element_wise::Scale;
 using F32 = float;
 using F64 = double;

-template <typename ALayout,
+template <index_t NumDimMNK,
+          typename ALayout,
           typename BLayout,
           typename CDELayout,
           typename DataType,
...
@@ -104,18 +106,24 @@ int profile_contraction_impl(ck::index_t do_verification,
     e_device_buf.SetZero();
     d_device_buf.ToDevice(d_m_n.mData.data());

-    const std::vector<index_t> a_ms_ks_lengths = {M[0], M[1], K[0], K[1]};
-    const std::vector<index_t> b_ns_ks_lengths = {N[0], N[1], K[0], K[1]};
-    const std::vector<index_t> e_ms_ns_lengths = {M[0], M[1], N[0], N[1]};
-    const std::vector<index_t> d_m_n_lengths   = {M[0], M[1], N[0], N[1]};
+    auto merge_dims = [](const std::vector<ck::index_t>& dims01,
+                         const std::vector<ck::index_t>& dims23) {
+        std::vector<ck::index_t> dims_szt(dims01.begin(), dims01.end());
+        dims_szt.insert(dims_szt.end(), dims23.begin(), dims23.end());
+        return dims_szt;
+    };
+
+    const std::vector<index_t> a_ms_ks_lengths = merge_dims(M, K);
+    const std::vector<index_t> b_ns_ks_lengths = merge_dims(N, K);
+    const std::vector<index_t> e_ms_ns_lengths = merge_dims(M, N);
+    const std::vector<index_t> d_m_n_lengths   = merge_dims(M, N);

     const auto a_element_op = AElementOp{};
     const auto b_element_op = BElementOp{};

-    constexpr ck::index_t NumDim = 2;
-    using DeviceOp = ck::tensor_operation::device::DeviceContractionMultipleD<NumDim,
-                                                                              NumDim,
-                                                                              NumDim,
+    using DeviceOp = ck::tensor_operation::device::DeviceContractionMultipleD<NumDimMNK,
+                                                                              NumDimMNK,
+                                                                              NumDimMNK,
                                                                               DataType,
                                                                               DataType,
                                                                               DTupleDataType,
...
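Reviewer note: `merge_dims` just concatenates the per-dimension M/N/K vectors, so the length vectors now work for any NumDimMNK rather than only the former hard-coded rank-2 case. A standalone sketch of the same concatenation (assumption: plain `int` in place of `ck::index_t`):

```cpp
#include <cassert>
#include <vector>

// Concatenate two dimension vectors, e.g. M = {M0, M1} and K = {K0, K1}
// into a_ms_ks_lengths = {M0, M1, K0, K1}.
std::vector<int> merge_dims(const std::vector<int>& dims01, const std::vector<int>& dims23)
{
    std::vector<int> merged(dims01.begin(), dims01.end());
    merged.insert(merged.end(), dims23.begin(), dims23.end());
    return merged;
}

int main()
{
    assert((merge_dims({128, 128}, {64, 64}) == std::vector<int>{128, 128, 64, 64}));
    return 0;
}
```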
@@ -138,9 +146,9 @@ int profile_contraction_impl(ck::index_t do_verification,
     if(do_verification)
     {
         using ReferenceGemmInstance =
-            ck::tensor_operation::host::ReferenceContraction_M2_N2_K2<NumDim,
-                                                                      NumDim,
-                                                                      NumDim,
+            ck::tensor_operation::host::ReferenceContraction_M2_N2_K2<NumDimMNK,
+                                                                      NumDimMNK,
+                                                                      NumDimMNK,
                                                                       DataType,
                                                                       DataType,
                                                                       DataType,
...
@@ -159,33 +167,20 @@ int profile_contraction_impl(ck::index_t do_verification,
         ref_invoker.Run(ref_argument);

-        for(size_t m0 = 0; m0 < e_m_n_host_result.mDesc.GetLengths()[0]; ++m0)
-        {
-            for(size_t m1 = 0; m1 < e_m_n_host_result.mDesc.GetLengths()[1]; ++m1)
-            {
-                for(size_t n0 = 0; n0 < e_m_n_host_result.mDesc.GetLengths()[2]; ++n0)
-                {
-                    for(size_t n1 = 0; n1 < e_m_n_host_result.mDesc.GetLengths()[3]; ++n1)
-                    {
-                        if constexpr(is_same<CDElementOp, Bilinear>::value)
-                        {
-                            cde_element_op(e_m_n_host_result(m0, m1, n0, n1),
-                                           c_m_n_host_result(m0, m1, n0, n1),
-                                           d_m_n(m0, m1, n0, n1));
-                        }
-                        else if constexpr(is_same<CDElementOp, Scale>::value)
-                        {
-                            cde_element_op(e_m_n_host_result(m0, m1, n0, n1),
-                                           c_m_n_host_result(m0, m1, n0, n1));
-                        }
-                        else
-                        {
-                            static_assert("Unsupported CDElementOp in contraction profiler.");
-                        }
-                    }
-                }
-            }
-        }
+        e_m_n_host_result.ForEach([&](auto& self, auto idx) {
+            if constexpr(is_same<CDElementOp, Bilinear>::value)
+            {
+                cde_element_op(self(idx), c_m_n_host_result(idx), d_m_n(idx));
+            }
+            else if constexpr(is_same<CDElementOp, Scale>::value)
+            {
+                cde_element_op(self(idx), c_m_n_host_result(idx));
+            }
+            else
+            {
+                static_assert("Unsupported CDElementOp in contraction profiler.");
+            }
+        });
     }

     std::string best_op_name;
...
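Reviewer note: the refactor collapses four hand-written loops into `Tensor::ForEach`, which visits every multi-index of the result tensor and hands `(self, idx)` to the callback. That is also what lets verification work for rank 2 × NumDimMNK instead of exactly rank 4. A hedged standalone sketch of the index recursion such a visitor performs (the real host `Tensor` class differs in detail):

```cpp
#include <cstdio>
#include <functional>
#include <vector>

// Visit every multi-index of an N-dimensional index space.
void for_each_index(const std::vector<std::size_t>& lengths,
                    std::vector<std::size_t>& idx,
                    std::size_t dim,
                    const std::function<void(const std::vector<std::size_t>&)>& visit)
{
    if(dim == lengths.size())
    {
        visit(idx);
        return;
    }
    for(std::size_t i = 0; i < lengths[dim]; ++i)
    {
        idx[dim] = i;
        for_each_index(lengths, idx, dim + 1, visit);
    }
}

int main()
{
    std::vector<std::size_t> lengths{2, 2, 2, 2}; // e.g. {M0, M1, N0, N1}
    std::vector<std::size_t> idx(lengths.size(), 0);
    std::size_t count = 0;
    for_each_index(lengths, idx, 0, [&](const std::vector<std::size_t>&) { ++count; });
    std::printf("visited %zu indices\n", count); // 16, one per element
    return 0;
}
```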
@@ -242,9 +237,12 @@ int profile_contraction_impl(ck::index_t do_verification,
         auto invoker_ptr  = op_ptr->MakeInvokerPointer();

-        auto nelems_m = M[0] * M[1];
-        auto nelems_n = N[0] * N[1];
-        auto nelems_k = K[0] * K[1];
+        auto nelems_m = ck::accumulate_n<ck::index_t>(
+            a_ms_ks_lengths.begin(), NumDimMNK, 1, std::multiplies<>{});
+        auto nelems_n = ck::accumulate_n<ck::index_t>(
+            b_ns_ks_lengths.begin(), NumDimMNK, 1, std::multiplies<>{});
+        auto nelems_k = ck::accumulate_n<ck::index_t>(
+            a_ms_ks_lengths.begin() + NumDimMNK, NumDimMNK, 1, std::multiplies<>{});

         if(op_ptr->IsSupportedArgument(argument_ptr.get()))
         {
...
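Reviewer note: `ck::accumulate_n` comes from the newly included `ck/library/utility/numeric.hpp`. The call sites read like `std::accumulate` over the first n elements; the sketch below mirrors that assumed semantics to show how `nelems_m/n/k` generalize the old `M[0] * M[1]` products:

```cpp
#include <cassert>
#include <functional>
#include <numeric>
#include <vector>

// Assumed semantics of ck::accumulate_n: fold op over [first, first + n).
template <typename T, typename ForwardIt, typename BinaryOp>
T accumulate_n(ForwardIt first, std::size_t n, T init, BinaryOp op)
{
    return std::accumulate(first, std::next(first, n), init, op);
}

int main()
{
    // For NumDimMNK = 2, a_ms_ks_lengths = {M0, M1, K0, K1}.
    const std::vector<int> a_ms_ks_lengths{128, 128, 64, 64};
    assert(accumulate_n<int>(a_ms_ks_lengths.begin(), 2, 1, std::multiplies<>{}) == 128 * 128);
    assert(accumulate_n<int>(a_ms_ks_lengths.begin() + 2, 2, 1, std::multiplies<>{}) == 64 * 64);
    return 0;
}
```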
profiler/include/profiler/profile_contraction_utils.hpp — View file @ 6b9a4bd5

 // SPDX-License-Identifier: MIT
-// Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2023-2024, Advanced Micro Devices, Inc. All rights reserved.

 #pragma once
...
@@ -48,14 +48,36 @@ inline void collect_index_params(char* argv[],
 // Default strides for row-major: {Dim1 * Dim2 * Dim3, Dim2 * Dim3, Dim3, 1}
 // Default strides for column-major: {Dim1, 1, Dim0 * Dim1 * Dim3, Dim0 * Dim1}
 //                                    M1, 1, M0 * M1 * K1, M0 * M1
 //                                    K0, K1, M0, M1
 inline void
 assign_default_strides(Row, std::vector<ck::index_t>& strides, std::vector<ck::index_t> dims)
 {
-    strides = {dims[1] * dims[2] * dims[3], dims[2] * dims[3], dims[3], 1};
+    ck::index_t stride = 1;
+    for(ck::index_t s = strides.size() - 1; s >= 0; s--)
+    {
+        strides[s] = stride;
+        stride *= dims[s];
+    }
 }

 inline void
 assign_default_strides(Col, std::vector<ck::index_t>& strides, std::vector<ck::index_t> dims)
 {
-    strides = {dims[1], 1, dims[0] * dims[1] * dims[3], dims[0] * dims[1]};
+    // Assign second half of strides
+    ck::index_t stride = 1;
+    for(ck::index_t s = strides.size() / 2 - 1; s >= 0; s--)
+    {
+        strides[s] = stride;
+        stride *= dims[s];
+    }
+    // Assign first half of strides
+    for(ck::index_t s = strides.size() - 1;
+        s > static_cast<ck::index_t>(strides.size()) / 2 - 1;
+        s--)
+    {
+        strides[s] = stride;
+        stride *= dims[s];
+    }
 }
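Reviewer note: the loop form computes the same packed strides as the old rank-4 initializer lists, but for any rank; the column-major version does the same thing with the two halves of the stride array swapped in iteration order. A standalone check of the row-major equivalence (assumption: plain `int` in place of `ck::index_t`):

```cpp
#include <cassert>
#include <vector>

// Packed row-major strides: the innermost dimension has stride 1.
std::vector<int> row_major_strides(const std::vector<int>& dims)
{
    std::vector<int> strides(dims.size());
    int stride = 1;
    for(int s = static_cast<int>(dims.size()) - 1; s >= 0; s--)
    {
        strides[s] = stride;
        stride *= dims[s];
    }
    return strides;
}

int main()
{
    // Old hard-coded 4D formula: {D1 * D2 * D3, D2 * D3, D3, 1}.
    const std::vector<int> dims{2, 3, 4, 5};
    assert((row_major_strides(dims) == std::vector<int>{3 * 4 * 5, 4 * 5, 5, 1}));
    return 0;
}
```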
profiler/include/profiler/profile_grouped_conv_fwd_impl.hpp — View file @ 6b9a4bd5

 // SPDX-License-Identifier: MIT
-// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

 #pragma once
...
@@ -31,7 +31,9 @@ template <ck::index_t NDimSpatial,
           typename OutLayout,
           typename InDataType,
           typename WeiDataType,
-          typename OutDataType>
+          typename OutDataType,
+          typename AComputeType = InDataType,
+          typename BComputeType = AComputeType>
 bool profile_grouped_conv_fwd_impl(int do_verification,
                                    int init_method,
                                    bool do_log,
...
@@ -209,7 +211,9 @@ bool profile_grouped_conv_fwd_impl(int do_verification,
                                                                 OutDataType,
                                                                 InElementOp,
                                                                 WeiElementOp,
-                                                                OutElementOp>;
+                                                                OutElementOp,
+                                                                AComputeType,
+                                                                BComputeType>;

     // get device op instances
     const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
...
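Reviewer note: defaulting `AComputeType` to `InDataType` and `BComputeType` to `AComputeType` keeps every existing instantiation source-compatible while letting new callers pick a separate math precision per operand. A minimal sketch of the pattern (stub names, not the repo's API):

```cpp
// Stub types standing in for ck data types in this sketch.
struct fp32_stub {};
struct fp8_stub {};

template <typename InDataType,
          typename AComputeType = InDataType,  // compute type defaults to the input type
          typename BComputeType = AComputeType>
bool profile_stub()
{
    return true;
}

int main()
{
    bool old_style = profile_stub<fp32_stub>();                     // old call sites still compile
    bool new_style = profile_stub<fp32_stub, fp8_stub, fp8_stub>(); // explicit mixed precision
    return (old_style && new_style) ? 0 : 1;
}
```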
profiler/include/profiler/profile_grouped_gemm_two_stage_impl.hpp — 0 → 100644 (new file) — View file @ 6b9a4bd5

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <iomanip>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/device_grouped_gemm.hpp"
#include "ck/tensor_operation/gpu/device/device_grouped_gemm_splitk.hpp"
#include "ck/tensor_operation/gpu/device/device_grouped_gemm_multiple_d_splitk.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/tensor_operation_instance/gpu/grouped_gemm.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/convolution_parameter.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/utility/fill.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"

namespace ck {
namespace profiler {

template <typename ADataType,
          typename BDataType,
          typename CDataType,
          typename AccDataType,
          typename ALayout,
          typename BLayout,
          typename CLayout>
bool profile_grouped_gemm_two_stage_impl(int do_verification,
                                         int init_method,
                                         bool do_log,
                                         bool time_kernel,
                                         const std::vector<int>& Ms,
                                         const std::vector<int>& Ns,
                                         const std::vector<int>& Ks,
                                         const std::vector<int>& StrideAs,
                                         const std::vector<int>& StrideBs,
                                         const std::vector<int>& StrideCs,
                                         int kbatch   = 1,
                                         int n_warmup = 1,
                                         int n_iter   = 10)
{
    bool pass = true;

    auto f_host_tensor_descriptor =
        [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
            using namespace ck::literals;

            if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
            {
                return HostTensorDescriptor({row, col}, {stride, 1_uz});
            }
            else
            {
                return HostTensorDescriptor({row, col}, {1_uz, stride});
            }
        };

    std::size_t group_count = Ms.size();

    if(!(group_count == Ns.size() && group_count == Ks.size() && group_count == StrideAs.size() &&
         group_count == StrideBs.size() && group_count == StrideCs.size()))
    {
        throw std::runtime_error("wrong! inconsistent M/N/Ks, StrideA/B/Cs size\n");
    }

    std::vector<Tensor<ADataType>> a_m_k;
    std::vector<Tensor<BDataType>> b_k_n;
    std::vector<Tensor<CDataType>> c_m_n_host_results;
    std::vector<Tensor<CDataType>> c_m_n_device_results;

    for(std::size_t i = 0; i < group_count; i++)
    {
        a_m_k.push_back(
            Tensor<ADataType>(f_host_tensor_descriptor(Ms[i], Ks[i], StrideAs[i], ALayout{})));
        b_k_n.push_back(
            Tensor<BDataType>(f_host_tensor_descriptor(Ks[i], Ns[i], StrideBs[i], BLayout{})));
        c_m_n_device_results.push_back(
            Tensor<CDataType>(f_host_tensor_descriptor(Ms[i], Ns[i], StrideCs[i], CLayout{})));
        c_m_n_host_results.push_back(
            Tensor<CDataType>(f_host_tensor_descriptor(Ms[i], Ns[i], StrideCs[i], CLayout{})));

#if DEBUG_LOG
        std::cout << "group: " << i << " a_m_k[" << i << "]:" << a_m_k[i].mDesc << ", b_k_n[" << i
                  << "]:" << b_k_n[i].mDesc << ", c_m_n_device_results[" << i
                  << "]:" << c_m_n_device_results[i].mDesc << std::endl;
#endif // DEBUG_LOG

        std::size_t num_thread = 1;
        switch(init_method)
        {
        case 0: break;
        case 1:
            a_m_k[i].GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5}, num_thread);
            b_k_n[i].GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5}, num_thread);
            break;
        default:
            a_m_k[i].GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0}, num_thread);
            b_k_n[i].GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5}, num_thread);
        }
    }

    using AElementOp = ck::tensor_operation::element_wise::PassThrough;
    using BElementOp = ck::tensor_operation::element_wise::PassThrough;
    using CElementOp = ck::tensor_operation::element_wise::PassThrough;

    const auto a_element_op = AElementOp{};
    const auto b_element_op = BElementOp{};
    const auto c_element_op = CElementOp{};

    using DeviceMemPtr = std::unique_ptr<DeviceMem>;
    std::vector<DeviceMemPtr> a_device_buf, b_device_buf, c_device_buf;

    a_device_buf.reserve(group_count);
    b_device_buf.reserve(group_count);
    c_device_buf.reserve(group_count);

    std::vector<const void*> p_a, p_b;
    std::vector<void*> p_c;

    p_a.reserve(group_count);
    p_b.reserve(group_count);
    p_c.reserve(group_count);

    std::vector<ck::tensor_operation::device::GemmDesc> gemm_descs;
    gemm_descs.reserve(group_count);

    for(std::size_t i = 0; i < group_count; i++)
    {
        a_device_buf.emplace_back(
            std::make_unique<DeviceMem>(sizeof(ADataType) * a_m_k[i].mDesc.GetElementSpaceSize()));
        b_device_buf.emplace_back(
            std::make_unique<DeviceMem>(sizeof(BDataType) * b_k_n[i].mDesc.GetElementSpaceSize()));
        c_device_buf.emplace_back(std::make_unique<DeviceMem>(
            sizeof(CDataType) * c_m_n_device_results[i].mDesc.GetElementSpaceSize()));

        a_device_buf[i]->ToDevice(a_m_k[i].mData.data());
        b_device_buf[i]->ToDevice(b_k_n[i].mData.data());

        gemm_descs.push_back({Ms[i], Ns[i], Ks[i], StrideAs[i], StrideBs[i], StrideCs[i], {}});

        p_a.push_back(a_device_buf[i]->GetDeviceBuffer());
        p_b.push_back(b_device_buf[i]->GetDeviceBuffer());
        p_c.push_back(c_device_buf[i]->GetDeviceBuffer());
    }

    using DeviceOp = ck::tensor_operation::device::DeviceGroupedGemm<ALayout,
                                                                     BLayout,
                                                                     ck::Tuple<>,
                                                                     CLayout,
                                                                     ADataType,
                                                                     BDataType,
                                                                     ck::Tuple<>,
                                                                     CDataType,
                                                                     AElementOp,
                                                                     BElementOp,
                                                                     CElementOp>;

    const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
        DeviceOp>::GetInstances();

    if(op_ptrs.size() <= 0)
    {
        throw std::runtime_error("wrong! no device GEMM instance found");
    }

    std::string best_gemm_name;
    float best_ave_time   = 0;
    float best_tflops     = 0;
    float best_gb_per_sec = 0;
    float best_kbatch     = 0;

    auto p_ds = std::vector<std::array<const void*, 0>>{};

    if(do_verification)
    {
        for(std::size_t i = 0; i < gemm_descs.size(); i++)
        {
            using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
                                                                                    BDataType,
                                                                                    CDataType,
                                                                                    AccDataType,
                                                                                    AElementOp,
                                                                                    BElementOp,
                                                                                    CElementOp>;

            auto ref_gemm    = ReferenceGemmInstance{};
            auto ref_invoker = ref_gemm.MakeInvoker();

            auto ref_argument = ref_gemm.MakeArgument(
                a_m_k[i], b_k_n[i], c_m_n_host_results[i], a_element_op, b_element_op, c_element_op);

            ref_invoker.Run(ref_argument);
        }
    }

    // profile device GEMM instances
    for(auto& gemm_ptr : op_ptrs)
    {
        auto argument_ptr =
            gemm_ptr->MakeArgumentPointer(p_a,
                                          p_b,
                                          p_ds,
                                          p_c,
                                          gemm_descs,
                                          ck::tensor_operation::element_wise::PassThrough{},
                                          ck::tensor_operation::element_wise::PassThrough{},
                                          ck::tensor_operation::element_wise::PassThrough{});

        auto invoker_ptr = gemm_ptr->MakeInvokerPointer();

        DeviceMem gemm_desc_workspace(gemm_ptr->GetWorkSpaceSize(argument_ptr.get()));
        gemm_ptr->SetWorkSpacePointer(argument_ptr.get(), gemm_desc_workspace.GetDeviceBuffer());

        std::string gemm_name = gemm_ptr->GetTypeString();

        using DeviceOpSplitK =
            ck::tensor_operation::device::DeviceGroupedGemmMultipleDSplitK<ALayout,
                                                                           BLayout,
                                                                           ck::Tuple<>,
                                                                           CLayout,
                                                                           ADataType,
                                                                           BDataType,
                                                                           ck::Tuple<>,
                                                                           CDataType,
                                                                           AElementOp,
                                                                           BElementOp,
                                                                           CElementOp>;
        // skip non-splitk grouped_gemm
        if(dynamic_cast<DeviceOpSplitK*>(gemm_ptr.get()) == nullptr)
        {
            continue;
        }

        std::vector<int> kbatch_list = {1, 2, 4, 8, 12, 16, 20, 24, 32, 48, 64};

        if(kbatch > 0)
        {
            kbatch_list = {kbatch};
        }

        for(std::size_t j = 0; j < kbatch_list.size(); j++)
        {
            auto kbatch_curr = kbatch_list[j];

            dynamic_cast<DeviceOpSplitK*>(gemm_ptr.get())
                ->SetKBatchSize(argument_ptr.get(), kbatch_curr);

            DeviceMem gemm_arg_dev_mem(dynamic_cast<DeviceOpSplitK*>(gemm_ptr.get())
                                           ->GetDeviceKernelArgSize(argument_ptr.get()));

            dynamic_cast<DeviceOpSplitK*>(gemm_ptr.get())
                ->SetDeviceKernelArgs(argument_ptr.get(), gemm_arg_dev_mem.GetDeviceBuffer());

            if(gemm_ptr->IsSupportedArgument(argument_ptr.get()))
            {
                gemm_desc_workspace.SetZero();
                for(std::size_t i = 0; i < gemm_descs.size(); i++)
                    c_device_buf[i]->SetZero();

                invoker_ptr->Run(argument_ptr.get(),
                                 StreamConfig{nullptr, false, 0, n_warmup, n_iter});

                if(do_verification)
                {
                    bool instance_pass = true;
                    for(std::size_t i = 0; i < gemm_descs.size(); i++)
                    {
                        c_device_buf[i]->FromDevice(c_m_n_device_results[i].mData.data());

                        if(std::is_same_v<CDataType, ck::half_t> && kbatch_curr > 1)
                        {
                            instance_pass =
                                instance_pass && ck::utils::check_err(c_m_n_device_results[i],
                                                                      c_m_n_host_results[i],
                                                                      "Error: Incorrect results!",
                                                                      0.06);
                        }
                        else
                        {
                            instance_pass =
                                instance_pass && ck::utils::check_err(c_m_n_device_results[i],
                                                                      c_m_n_host_results[i]);
                        }

                        if(do_log)
                        {
                            LogRangeAsType<float>(std::cout << "a : ", a_m_k[i].mData, ",")
                                << std::endl;
                            LogRangeAsType<float>(std::cout << "b: ", b_k_n[i].mData, ",")
                                << std::endl;
                            LogRangeAsType<float>(
                                std::cout << "c_device: ", c_m_n_device_results[i].mData, ",")
                                << std::endl;
                            LogRangeAsType<float>(
                                std::cout << "c_host : ", c_m_n_host_results[i].mData, ",")
                                << std::endl;
                        }
                    }

                    std::cout << "Instance: " << gemm_name << " verification "
                              << (instance_pass ? "SUCCEED" : "FAILED") << std::endl;

                    pass = pass && instance_pass;
                }

                float ave_time = invoker_ptr->Run(
                    argument_ptr.get(), StreamConfig{nullptr, time_kernel, 0, n_warmup, n_iter});

                if(time_kernel)
                {
                    std::size_t flop = 0, num_btype = 0;
                    for(std::size_t i = 0; i < gemm_descs.size(); i++)
                    {
                        flop += std::size_t(2) * Ms[i] * Ns[i] * Ks[i];

                        num_btype += sizeof(ADataType) * Ms[i] * Ks[i] +
                                     sizeof(BDataType) * Ks[i] * Ns[i] +
                                     sizeof(CDataType) * Ms[i] * Ns[i];
                    }

                    float tflops     = static_cast<float>(flop) / 1.E9 / ave_time;
                    float gb_per_sec = num_btype / 1.E6 / ave_time;

                    std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops
                              << " TFlops, " << gb_per_sec << " GB/s, " << gemm_name << ", KBatch "
                              << kbatch_curr << std::endl;

                    if(tflops > best_tflops)
                    {
                        best_gemm_name  = gemm_name;
                        best_tflops     = tflops;
                        best_ave_time   = ave_time;
                        best_gb_per_sec = gb_per_sec;
                        best_kbatch     = kbatch_curr;
                    }
                }
            }
            else
            {
                std::cout << "Instance: " << gemm_name << ", does not support this GEMM problem"
                          << std::endl;
            }
        }
    }

    if(time_kernel)
    {
        std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, "
                  << best_gb_per_sec << " GB/s, " << best_gemm_name << ", KBatch = " << best_kbatch
                  << std::endl;
    }

    return pass;
}

} // namespace profiler
} // namespace ck
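Reviewer note: the Perf and Best Perf lines in this new profiler use the standard GEMM accounting, summed over all groups: 2·M·N·K flops and sizeof(A)·M·K + sizeof(B)·K·N + sizeof(C)·M·N bytes, each divided by the averaged kernel time. A standalone sketch of the same arithmetic (sizes and timing below are made up for illustration):

```cpp
#include <cstdio>
#include <vector>

int main()
{
    const std::vector<int> Ms{256, 512}, Ns{256, 256}, Ks{128, 128};
    const std::size_t elem_bytes = 2; // e.g. fp16 for A, B and C

    std::size_t flop = 0, num_btype = 0;
    for(std::size_t i = 0; i < Ms.size(); i++)
    {
        flop += std::size_t(2) * Ms[i] * Ns[i] * Ks[i];
        num_btype += elem_bytes * (std::size_t(Ms[i]) * Ks[i] + std::size_t(Ks[i]) * Ns[i] +
                                   std::size_t(Ms[i]) * Ns[i]);
    }

    const double ave_time_ms = 0.05; // hypothetical measured average
    // flop / 1e9 / ms equals flop / 1e12 / s (TFlops); bytes / 1e6 / ms equals GB/s.
    std::printf("%.3f TFlops, %.3f GB/s\n",
                flop / 1.e9 / ave_time_ms, num_btype / 1.e6 / ave_time_ms);
    return 0;
}
```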
profiler/include/profiler/profile_permute_scale_impl.hpp — View file @ 6b9a4bd5

@@ -8,12 +8,14 @@
 #include "ck/ck.hpp"
 #include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
-#include "ck/tensor_operation/gpu/device/device_elementwise_scale.hpp"
+#include "ck/tensor_operation/gpu/device/device_elementwise.hpp"
 #include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
-#include "ck/tensor_operation/gpu/device/impl/device_elementwise_scale_impl.hpp"
+#include "ck/tensor_operation/gpu/device/impl/device_elementwise_dynamic_vector_dims_impl.hpp"
 #include "ck/library/tensor_operation_instance/gpu/permute_scale.hpp"
+#include "ck/library/reference_tensor_operation/cpu/reference_elementwise.hpp"
 #include "ck/library/utility/check_err.hpp"
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
...
@@ -21,25 +23,6 @@
 #include "ck/library/utility/literals.hpp"

 namespace ck {

-template <typename HostTensorA,
-          typename HostTensorB,
-          typename AElementOp,
-          typename BElementOp,
-          typename ScaleElementOp>
-void reference_permute_scale(HostTensorB& b_tensor,
-                             const HostTensorA& a_tensor,
-                             AElementOp a_tensor_op,
-                             BElementOp b_tensor_op,
-                             ScaleElementOp scale_op)
-{
-    b_tensor.ForEach([&](auto& self, auto idx) {
-        auto tmp_val = a_tensor(idx);
-        b_tensor_op(tmp_val, tmp_val);
-        scale_op(tmp_val, tmp_val);
-        a_tensor_op(self(idx), tmp_val);
-    });
-}
-
 namespace profiler {

 template <typename ADataType, typename BDataType, index_t NumDim>
...
@@ -54,12 +37,11 @@ bool profile_permute_scale_impl(int do_verification,
     bool pass           = true;
     bool instance_found = false;

-    using ElementOp = ck::tensor_operation::element_wise::PassThrough;
-    using UnaryOp   = ck::tensor_operation::element_wise::UnarySquare;
-    using Scale     = ck::tensor_operation::element_wise::Scale;
+    using ElementOp = ck::tensor_operation::element_wise::Scale;

     float scale = 2.f;
-    Tensor<ADataType> a(lengths_vector, input_strides_vector);
+    std::array<Tensor<ADataType>, 1> as = {Tensor<ADataType>(lengths_vector, input_strides_vector)};
+    Tensor<ADataType>& a = as[0];
     Tensor<BDataType> b(lengths_vector, output_strides_vector);
     Tensor<BDataType> host_b(lengths_vector, output_strides_vector);
...
@@ -80,12 +62,8 @@ bool profile_permute_scale_impl(int do_verification,
     std::array<const void*, 1> input = {a_device_buf.GetDeviceBuffer()};
     std::array<void*, 1> output      = {b_device_buf.GetDeviceBuffer()};

-    using DeviceOp = ck::tensor_operation::device::DeviceElementwise<ck::Tuple<ADataType>,
-                                                                     ck::Tuple<BDataType>,
-                                                                     ElementOp,
-                                                                     UnaryOp,
-                                                                     Scale,
-                                                                     NumDim>;
+    using DeviceOp = ck::tensor_operation::device::DeviceElementwise<ck::Tuple<ADataType>,
+                                                                     ck::Tuple<BDataType>,
+                                                                     ElementOp,
+                                                                     NumDim>;

     // get device op instances
     const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
...
@@ -100,7 +78,14 @@ bool profile_permute_scale_impl(int do_verification,
     if(do_verification)
     {
-        reference_permute_scale(host_b, a, ElementOp{}, UnaryOp{}, Scale{scale});
+        using ReferenceElementwiseInstance =
+            ck::tensor_operation::host::ReferenceElementwise<1, ADataType, BDataType, ElementOp>;
+
+        auto ref_elementwise = ReferenceElementwiseInstance{};
+        auto ref_invoker     = ref_elementwise.MakeInvoker();
+
+        auto ref_argument = ref_elementwise.MakeArgument(as, host_b, ElementOp{scale});
+
+        ref_invoker.Run(ref_argument);
     }

     auto copy = [](const auto& x, auto& y) { std::copy(x.begin(), x.end(), y.begin()); };
...
@@ -113,14 +98,8 @@ bool profile_permute_scale_impl(int do_verification,
     for(auto& op_ptr : op_ptrs)
     {
-        auto argument_ptr = op_ptr->MakeArgumentPointer(lengths,
-                                                        {input_strides},
-                                                        {output_strides},
-                                                        input,
-                                                        output,
-                                                        ElementOp{},
-                                                        UnaryOp{},
-                                                        Scale{scale});
+        auto argument_ptr = op_ptr->MakeArgumentPointer(
+            lengths, {input_strides}, {output_strides}, input, output, ElementOp{scale});

         auto invoker_ptr = op_ptr->MakeInvokerPointer();
...
@@ -141,6 +120,7 @@ bool profile_permute_scale_impl(int do_verification,
             if(do_log)
             {
                 LogRangeAsType<float>(std::cout << "a : ", a.mData, ",") << std::endl;
+                LogRangeAsType<float>(std::cout << "host_b: ", host_b.mData, ",") << std::endl;
                 LogRangeAsType<float>(std::cout << "b: ", b.mData, ",") << std::endl;
             }
         }
...
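Reviewer note: the refactor replaces the ad-hoc `reference_permute_scale` (a PassThrough + UnarySquare + Scale chain) with the generic `ReferenceElementwise` driven by a single `Scale` element-op constructed from the scale factor. A sketch of the functor shape this assumes (simplified stand-in; the real `ck::tensor_operation::element_wise::Scale` may differ in detail):

```cpp
#include <cassert>

// Simplified stand-in for an element-wise Scale operator: y = scale * x.
struct Scale
{
    explicit Scale(float s) : scale_(s) {}

    template <typename Y, typename X>
    void operator()(Y& y, const X& x) const
    {
        y = static_cast<Y>(scale_ * x);
    }

    float scale_;
};

int main()
{
    const Scale op{2.f};
    float y = 0.f;
    op(y, 3.f); // applies y = 2 * 3
    assert(y == 6.f);
    return 0;
}
```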
profiler/src/CMakeLists.txt — View file @ 6b9a4bd5

@@ -2,19 +2,6 @@
 set(PROFILER_SOURCES
     profiler.cpp
     profile_gemm.cpp
-    profile_gemm_splitk.cpp
-    profile_gemm_bias_add_reduce.cpp
-    profile_gemm_add_multiply.cpp
-    profile_gemm_multiply_add.cpp
-    profile_gemm_reduce.cpp
-    profile_batched_gemm.cpp
-    profile_batched_gemm_reduce.cpp
-    profile_conv_fwd.cpp
-    profile_conv_fwd_bias_relu.cpp
-    profile_conv_fwd_bias_relu_add.cpp
-    profile_conv_bwd_data.cpp
-    profile_grouped_conv_fwd.cpp
-    profile_grouped_conv_bwd_weight.cpp
     profile_reduce.cpp
     profile_groupnorm_bwd_data.cpp
     profile_groupnorm_fwd.cpp
...
@@ -29,36 +16,58 @@ set(PROFILER_SOURCES
     profile_batchnorm_fwd.cpp
     profile_batchnorm_bwd.cpp
     profile_batchnorm_infer.cpp
-    profile_grouped_conv_bwd_data.cpp
     profile_conv_tensor_rearrange.cpp
     profile_transpose.cpp
     profile_permute_scale.cpp
 )

-if(DL_KERNELS)
-    list(APPEND PROFILER_SOURCES profile_batched_gemm_multi_d.cpp)
-endif()
+if(GPU_TARGETS MATCHES "gfx9")
+    if(DTYPES MATCHES "fp32" OR DTYPES MATCHES "fp64" OR NOT DEFINED DTYPES)
+        list(APPEND PROFILER_SOURCES profile_contraction_bilinear.cpp)
+        list(APPEND PROFILER_SOURCES profile_contraction_scale.cpp)
+    endif()
+    if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
+        list(APPEND PROFILER_SOURCES profile_gemm_reduce.cpp)
+        list(APPEND PROFILER_SOURCES profile_batched_gemm_gemm.cpp)
+        list(APPEND PROFILER_SOURCES profile_batched_gemm_add_relu_gemm_add.cpp)
+        list(APPEND PROFILER_SOURCES profile_gemm_add.cpp)
+        list(APPEND PROFILER_SOURCES profile_gemm_add_add_fastgelu.cpp)
+        list(APPEND PROFILER_SOURCES profile_gemm_add_fastgelu.cpp)
+        list(APPEND PROFILER_SOURCES profile_grouped_gemm.cpp)
+        list(APPEND PROFILER_SOURCES profile_gemm_streamk.cpp)
+        list(APPEND PROFILER_SOURCES profile_gemm_fastgelu.cpp)
+        list(APPEND PROFILER_SOURCES profile_gemm_add_relu.cpp)
+        list(APPEND PROFILER_SOURCES profile_gemm_add_silu.cpp)
+        list(APPEND PROFILER_SOURCES profile_gemm_add_relu_add_layernorm.cpp)
+        list(APPEND PROFILER_SOURCES profile_grouped_gemm_fixed_nk.cpp)
+        list(APPEND PROFILER_SOURCES profile_grouped_gemm_two_stage.cpp)
+        list(APPEND PROFILER_SOURCES profile_grouped_gemm_fastgelu.cpp)
+    endif()
+    list(APPEND PROFILER_SOURCES profile_gemm_multiply_add.cpp)
+    list(APPEND PROFILER_SOURCES profile_batched_gemm.cpp)
+    list(APPEND PROFILER_SOURCES profile_batched_gemm_reduce.cpp)
+    list(APPEND PROFILER_SOURCES profile_gemm_add_multiply.cpp)
+    list(APPEND PROFILER_SOURCES profile_gemm_bias_add_reduce.cpp)
+    list(APPEND PROFILER_SOURCES profile_gemm_splitk.cpp)
+    list(APPEND PROFILER_SOURCES profile_conv_fwd_bias_relu.cpp)
+    list(APPEND PROFILER_SOURCES profile_conv_fwd_bias_relu_add.cpp)
+    list(APPEND PROFILER_SOURCES profile_conv_bwd_data.cpp)
+    list(APPEND PROFILER_SOURCES profile_conv_fwd.cpp)
+endif()
-if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
-    list(APPEND PROFILER_SOURCES profile_batched_gemm_gemm.cpp)
-    list(APPEND PROFILER_SOURCES profile_gemm_fastgelu.cpp)
-    list(APPEND PROFILER_SOURCES profile_gemm_streamk.cpp)
-    list(APPEND PROFILER_SOURCES profile_gemm_bilinear.cpp)
-    list(APPEND PROFILER_SOURCES profile_gemm_add.cpp)
-    list(APPEND PROFILER_SOURCES profile_gemm_add_fastgelu.cpp)
-    list(APPEND PROFILER_SOURCES profile_gemm_add_relu.cpp)
-    list(APPEND PROFILER_SOURCES profile_gemm_add_silu.cpp)
-    list(APPEND PROFILER_SOURCES profile_gemm_add_add_fastgelu.cpp)
-    list(APPEND PROFILER_SOURCES profile_gemm_add_relu_add_layernorm.cpp)
-    list(APPEND PROFILER_SOURCES profile_batched_gemm_add_relu_gemm_add.cpp)
-    list(APPEND PROFILER_SOURCES profile_grouped_gemm.cpp)
-    list(APPEND PROFILER_SOURCES profile_grouped_gemm_fixed_nk.cpp)
-    list(APPEND PROFILER_SOURCES profile_grouped_gemm_fastgelu.cpp)
-endif()
+if(GPU_TARGETS MATCHES "gfx11" OR GPU_TARGETS MATCHES "gfx9")
+    if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
+        list(APPEND PROFILER_SOURCES profile_gemm_bilinear.cpp)
+    endif()
+    list(APPEND PROFILER_SOURCES profile_grouped_conv_fwd.cpp)
+    list(APPEND PROFILER_SOURCES profile_grouped_conv_bwd_data.cpp)
+    list(APPEND PROFILER_SOURCES profile_grouped_conv_bwd_weight.cpp)
+endif()
-if(DTYPES MATCHES "fp32" OR DTYPES MATCHES "fp64" OR NOT DEFINED DTYPES)
-    list(APPEND PROFILER_SOURCES profile_contraction_bilinear.cpp)
-    list(APPEND PROFILER_SOURCES profile_contraction_scale.cpp)
-endif()
+if(DL_KERNELS)
+    list(APPEND PROFILER_SOURCES profile_batched_gemm_multi_d.cpp)
+    list(APPEND PROFILER_SOURCES profile_grouped_conv_bwd_weight.cpp)
+endif()

 set(PROFILER_EXECUTABLE ckProfiler)
...
@@ -68,25 +77,6 @@ target_compile_options(${PROFILER_EXECUTABLE} PRIVATE -Wno-global-constructors)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE utility getopt::getopt)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_instance)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_splitk_instance)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_multiply_instance)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_multiply_add_instance)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_reduce_instance)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_bias_add_reduce_instance)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batched_gemm_instance)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batched_gemm_reduce_instance)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv2d_fwd_instance)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv1d_fwd_instance)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv2d_fwd_instance)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv3d_fwd_instance)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv1d_bwd_data_instance)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv2d_bwd_data_instance)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv3d_bwd_data_instance)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv1d_bwd_weight_instance)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv2d_bwd_weight_instance)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv3d_bwd_weight_instance)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv2d_fwd_bias_relu_instance)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv2d_fwd_bias_relu_add_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_normalization_fwd_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_normalization_bwd_data_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_normalization_bwd_gamma_beta_instance)
...
@@ -96,39 +86,65 @@ target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batchnorm_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_pool3d_fwd_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_avg_pool3d_bwd_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_max_pool_bwd_instance)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv2d_bwd_data_instance)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv3d_bwd_data_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_image_to_column_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_column_to_image_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_transpose_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_permute_scale_instance)

-if(DTYPES MATCHES "fp32" OR DTYPES MATCHES "fp64" OR NOT DEFINED DTYPES)
-    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_contraction_bilinear_instance)
-    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_contraction_scale_instance)
-endif()
+if(GPU_TARGETS MATCHES "gfx9")
+    if(DTYPES MATCHES "fp32" OR DTYPES MATCHES "fp64" OR NOT DEFINED DTYPES)
+        target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_contraction_bilinear_instance)
+        target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_contraction_scale_instance)
+    endif()
+    if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
+        target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_instance)
+        target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_add_fastgelu_instance)
+        target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_fastgelu_instance)
+        target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batched_gemm_gemm_instance)
+        target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batched_gemm_add_relu_gemm_add_instance)
+        target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_gemm_instance)
+        target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_streamk_instance)
+        target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_fastgelu_instance)
+        target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_relu_instance)
+        target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_silu_instance)
+        target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_relu_add_layernorm_instance)
+        target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_gemm_fixed_nk_instance)
+        target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_gemm_fastgelu_instance)
+    endif()
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batched_gemm_instance)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batched_gemm_reduce_instance)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_multiply_add_instance)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_splitk_instance)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_multiply_instance)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_reduce_instance)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_bias_add_reduce_instance)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv2d_fwd_instance)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv2d_fwd_bias_relu_instance)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv2d_fwd_bias_relu_add_instance)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv1d_fwd_instance)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv1d_bwd_data_instance)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv3d_bwd_data_instance)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv2d_bwd_data_instance)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv1d_bwd_weight_instance)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv2d_bwd_weight_instance)
+endif()
+if(GPU_TARGETS MATCHES "gfx9" OR GPU_TARGETS MATCHES "gfx11")
+    if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
+        target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_bilinear_instance)
+    endif()
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv3d_fwd_instance)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv2d_bwd_data_instance)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv3d_bwd_data_instance)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv2d_fwd_instance)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv3d_bwd_weight_instance)
+endif()
 if(DL_KERNELS)
     target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batched_gemm_multi_d_instance)
 endif()
-if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
-    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_instance)
-    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_fastgelu_instance)
-    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_relu_instance)
-    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_silu_instance)
-    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_relu_add_layernorm_instance)
-    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_bilinear_instance)
-    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_add_fastgelu_instance)
-    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_streamk_instance)
-    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_fastgelu_instance)
-    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batched_gemm_gemm_instance)
-    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batched_gemm_add_relu_gemm_add_instance)
-    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_gemm_instance)
-    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_gemm_fixed_nk_instance)
-    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_gemm_fastgelu_instance)
-    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv1d_bwd_weight_instance)
-    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv2d_bwd_weight_instance)
-    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv3d_bwd_weight_instance)
-endif()

 rocm_install(TARGETS ${PROFILER_EXECUTABLE} COMPONENT profiler)
profiler/src/profile_contraction_bilinear.cpp — View file @ 6b9a4bd5

 // SPDX-License-Identifier: MIT
-// Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2023-2024, Advanced Micro Devices, Inc. All rights reserved.

 #include <iostream>
 #include <numeric>
...
@@ -19,7 +19,8 @@ static void print_helper_msg()
     std::cout << "arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n"
               << "arg2: data type (0: fp32; 1: f64; 2: f16; 3: bf16)\n"
               << "arg3: compute data type (0: fp32; 1: f64; 2: f16; 3: bf16)\n"
-              << "arg4: matrix layout (0: A[m0, m1, k0, k1] * B[k0, k1, n0, n1] + "
+              << "arg4: Number of dimension for M, N and K (one for all)\n"
+              << "arg5: matrix layout (0: A[m0, m1, k0, k1] * B[k0, k1, n0, n1] + "
                  "D[m0, m1, n0, n1] = E[m0, m1, n0, n1];\n"
               << "                   1: A[m0, m1, k0, k1] * B[n0, n1, k0, k1] + "
                  "D[m0, m1, n0, n1] = E[m0, m1, n0, n1];\n"
...
@@ -27,23 +28,23 @@ static void print_helper_msg()
                  "D[m0, m1, n0, n1] = E[m0, m1, n0, n1];\n"
               << "                   3: A[k0, k1, m0, m1] * B[n0, n1, k0, k1] + "
                  "D[m0, m1, n0, n1] = E[m0, m1, n0, n1])\n"
-              << "arg5: verification (0: no; 1: yes)\n"
-              << "arg6: initialization (0: no init; 1: integer value; 2: decimal "
+              << "arg6: verification (0: no; 1: yes)\n"
+              << "arg7: initialization (0: no init; 1: integer value; 2: decimal "
                  "value)\n"
-              << "arg7: print tensor value (0: no; 1: yes)\n"
-              << "arg8: time kernel (0: no, 1: yes)\n"
-              << "arg9: alpha\n"
-              << "arg10: beta\n"
-              << "arg11 to 16: M0, M1, N0, N1, K0, K1\n"
-              << "arg17 to 32: Strides for A, B, D and E (skip for default)\n"
+              << "arg8: print tensor value (0: no; 1: yes)\n"
+              << "arg9: time kernel (0: no, 1: yes)\n"
+              << "arg10: alpha\n"
+              << "arg11: beta\n"
+              << "arg12 to 17/29: M0, M1, N0, N1, K0, K1\n"
+              << "arg18/30 to 33/77: Strides for A, B, D and E (skip for default)\n"
               << std::endl;
 }

 int profile_contraction_bilinear(int argc, char* argv[])
 {
-    const bool default_strides = argc == 17;
-    if(argc != 33 && argc != 17)
+    const bool default_strides = argc == 18 || argc == 30;
+    if(argc != 34 && argc != 78 && !default_strides)
     {
         print_helper_msg();
         exit(1);
...
@@ -51,32 +52,33 @@ int profile_contraction_bilinear(int argc, char* argv[])
     const auto data_type         = static_cast<ContractionDataType>(std::stoi(argv[2]));
     const auto compute_data_type = static_cast<ContractionComputeDataType>(std::stoi(argv[3]));
-    const auto layout             = static_cast<ContractionMatrixLayout>(std::stoi(argv[4]));
-    const bool do_verification    = std::stoi(argv[5]);
-    const ck::index_t init_method = std::stoi(argv[6]);
-    const bool do_log             = std::stoi(argv[7]);
-    const bool time_kernel        = std::stoi(argv[8]);
-    const float alpha             = std::stof(argv[9]);
-    const float beta              = std::stof(argv[10]);
+    const ck::index_t NumDimMNK   = std::stoi(argv[4]);
+    const auto layout             = static_cast<ContractionMatrixLayout>(std::stoi(argv[5]));
+    const bool do_verification    = std::stoi(argv[6]);
+    const ck::index_t init_method = std::stoi(argv[7]);
+    const bool do_log             = std::stoi(argv[8]);
+    const bool time_kernel        = std::stoi(argv[9]);
+    const float alpha             = std::stof(argv[10]);
+    const float beta              = std::stof(argv[11]);

     std::vector<ck::index_t> M;
     std::vector<ck::index_t> N;
     std::vector<ck::index_t> K;
-    const ck::index_t dims_arg_num = 11;
-    collect_index_params(argv, M, dims_arg_num, 2);
-    collect_index_params(argv, N, dims_arg_num + 2, 2);
-    collect_index_params(argv, K, dims_arg_num + 4, 2);
+    const ck::index_t dims_arg_num = 12;
+    collect_index_params(argv, M, dims_arg_num, NumDimMNK);
+    collect_index_params(argv, N, dims_arg_num + NumDimMNK, NumDimMNK);
+    collect_index_params(argv, K, dims_arg_num + NumDimMNK * 2, NumDimMNK);

-    std::vector<ck::index_t> StridesA;
-    std::vector<ck::index_t> StridesB;
-    std::vector<ck::index_t> StridesE;
-    std::vector<ck::index_t> StridesD;
+    std::vector<ck::index_t> StridesA(NumDimMNK * 2);
+    std::vector<ck::index_t> StridesB(NumDimMNK * 2);
+    std::vector<ck::index_t> StridesE(NumDimMNK * 2);
+    std::vector<ck::index_t> StridesD(NumDimMNK * 2);
     if(!default_strides)
     {
-        collect_index_params(argv, StridesA, dims_arg_num + 6, 4);
-        collect_index_params(argv, StridesB, dims_arg_num + 10, 4);
-        collect_index_params(argv, StridesE, dims_arg_num + 14, 4);
-        collect_index_params(argv, StridesD, dims_arg_num + 18, 4);
+        collect_index_params(argv, StridesA, dims_arg_num + NumDimMNK * 3, NumDimMNK * 2);
+        collect_index_params(argv, StridesB, dims_arg_num + NumDimMNK * 5, NumDimMNK * 2);
+        collect_index_params(argv, StridesE, dims_arg_num + NumDimMNK * 7, NumDimMNK * 2);
+        collect_index_params(argv, StridesD, dims_arg_num + NumDimMNK * 9, NumDimMNK * 2);
     }

     using F16 = ck::half_t;
...
@@ -95,31 +97,71 @@ int profile_contraction_bilinear(int argc, char* argv[])
         if(default_strides)
         {
-            assign_default_strides(a_layout, StridesA, {M[0], M[1], K[0], K[1]});
-            assign_default_strides(b_layout, StridesB, {N[0], N[1], K[0], K[1]});
-            assign_default_strides(cde_layout, StridesE, {M[0], M[1], N[0], N[1]});
-            assign_default_strides(cde_layout, StridesD, {M[0], M[1], N[0], N[1]});
+            auto merge_dims = [](const std::vector<ck::index_t>& dims01,
+                                 const std::vector<ck::index_t>& dims23) {
+                std::vector<ck::index_t> dims_szt(dims01.begin(), dims01.end());
+                dims_szt.insert(dims_szt.end(), dims23.begin(), dims23.end());
+                return dims_szt;
+            };
+            assign_default_strides(a_layout, StridesA, merge_dims(M, K));
+            assign_default_strides(b_layout, StridesB, merge_dims(N, K));
+            assign_default_strides(cde_layout, StridesE, merge_dims(M, N));
+            assign_default_strides(cde_layout, StridesD, merge_dims(M, N));
         }
-        bool pass = ck::profiler::profile_contraction_impl<ALayout,
-                                                           BLayout,
-                                                           CDELayout,
-                                                           DataType,
-                                                           ComputeDataType,
-                                                           ck::Tuple<DataType>,
-                                                           Bilinear>(
-            do_verification, init_method, do_log, time_kernel, Bilinear{alpha, beta},
-            M, N, K, StridesA, StridesB, StridesE, StridesD);
-        return pass;
+        if(NumDimMNK == 2)
+        {
+            bool pass = ck::profiler::profile_contraction_impl<2,
+                                                               ALayout,
+                                                               BLayout,
+                                                               CDELayout,
+                                                               DataType,
+                                                               ComputeDataType,
+                                                               ck::Tuple<DataType>,
+                                                               Bilinear>(
+                do_verification, init_method, do_log, time_kernel, Bilinear{alpha, beta},
+                M, N, K, StridesA, StridesB, StridesE, StridesD);
+            return pass;
+        }
+        else if(NumDimMNK == 6)
+        {
+            bool pass = ck::profiler::profile_contraction_impl<6,
+                                                               ALayout,
+                                                               BLayout,
+                                                               CDELayout,
+                                                               DataType,
+                                                               ComputeDataType,
+                                                               ck::Tuple<DataType>,
+                                                               Bilinear>(
+                do_verification, init_method, do_log, time_kernel, Bilinear{alpha, beta},
+                M, N, K, StridesA, StridesB, StridesE, StridesD);
+            return pass;
+        }
+        else
+        {
+            throw std::runtime_error("Not supported NumDimMNK");
+            return false;
+        }
     };

     auto run_profile_for_datatype = [&](auto type, auto compute_type) {
...
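Reviewer note: since NumDimMNK is now a template parameter of `profile_contraction_impl`, the runtime argument has to be mapped onto explicit instantiations; only 2 and 6 are compiled in, and anything else throws. A minimal sketch of this runtime-to-compile-time dispatch pattern (stub function, not the repo's):

```cpp
#include <stdexcept>

template <int NumDimMNK>
bool profile_stub()
{
    // A real implementation would use NumDimMNK as a compile-time tensor rank.
    return NumDimMNK > 0;
}

bool dispatch(int num_dim_mnk)
{
    if(num_dim_mnk == 2)
    {
        return profile_stub<2>();
    }
    else if(num_dim_mnk == 6)
    {
        return profile_stub<6>();
    }
    throw std::runtime_error("Not supported NumDimMNK");
}

int main() { return dispatch(2) ? 0 : 1; }
```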
profiler/src/profile_contraction_scale.cpp — View file @ 6b9a4bd5

 // SPDX-License-Identifier: MIT
-// Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2023-2024, Advanced Micro Devices, Inc. All rights reserved.

 #include <iostream>
 #include <numeric>
...
@@ -19,7 +19,8 @@ static void print_helper_msg()
     std::cout << "arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n"
               << "arg2: data type (0: fp32; 1: f64; 2: f16; 3: bf16)\n"
               << "arg3: compute data type (0: fp32; 1: f64; 2: f16; 3: bf16)\n"
-              << "arg4: matrix layout (0: A[m0, m1, k0, k1] * B[k0, k1, n0, n1] + "
+              << "arg4: Number of dimension for M, N and K (one for all)\n"
+              << "arg5: matrix layout (0: A[m0, m1, k0, k1] * B[k0, k1, n0, n1] + "
                  "D[m0, m1, n0, n1] = E[m0, m1, n0, n1];\n"
               << "                   1: A[m0, m1, k0, k1] * B[n0, n1, k0, k1] + "
                  "D[m0, m1, n0, n1] = E[m0, m1, n0, n1];\n"
...
@@ -27,22 +28,22 @@ static void print_helper_msg()
                  "D[m0, m1, n0, n1] = E[m0, m1, n0, n1];\n"
               << "                   3: A[k0, k1, m0, m1] * B[n0, n1, k0, k1] + "
                  "D[m0, m1, n0, n1] = E[m0, m1, n0, n1])\n"
-              << "arg5: verification (0: no; 1: yes)\n"
-              << "arg6: initialization (0: no init; 1: integer value; 2: decimal "
+              << "arg6: verification (0: no; 1: yes)\n"
+              << "arg7: initialization (0: no init; 1: integer value; 2: decimal "
                  "value)\n"
-              << "arg7: print tensor value (0: no; 1: yes)\n"
-              << "arg8: time kernel (0: no, 1: yes)\n"
-              << "arg9: alpha\n"
-              << "arg10 to 15: M0, M1, N0, N1, K0, K1\n"
-              << "arg16 to 31: Strides for A, B, D and E (skip for default)\n"
+              << "arg8: print tensor value (0: no; 1: yes)\n"
+              << "arg9: time kernel (0: no, 1: yes)\n"
+              << "arg10: alpha\n"
+              << "arg11 to 16/28: M0, M1, N0, N1, K0, K1\n"
+              << "arg17/29 to 32/63: Strides for A, B, E (skip for default)\n"
               << std::endl;
 }

 int profile_contraction_scale(int argc, char* argv[])
 {
-    const bool default_strides = argc == 16;
-    if(argc != 32 && argc != 16)
+    const bool default_strides = argc == 17 || argc == 29;
+    if(argc != 29 && argc != 65 && !default_strides)
     {
         print_helper_msg();
         exit(1);
...
@@ -50,31 +51,30 @@ int profile_contraction_scale(int argc, char* argv[])
     const auto data_type         = static_cast<ContractionDataType>(std::stoi(argv[2]));
     const auto compute_data_type = static_cast<ContractionComputeDataType>(std::stoi(argv[3]));
-    const auto layout             = static_cast<ContractionMatrixLayout>(std::stoi(argv[4]));
-    const bool do_verification    = std::stoi(argv[5]);
-    const ck::index_t init_method = std::stoi(argv[6]);
-    const bool do_log             = std::stoi(argv[7]);
-    const bool time_kernel        = std::stoi(argv[8]);
-    const float alpha             = std::stof(argv[9]);
+    const ck::index_t NumDimMNK   = std::stoi(argv[4]);
+    const auto layout             = static_cast<ContractionMatrixLayout>(std::stoi(argv[5]));
+    const bool do_verification    = std::stoi(argv[6]);
+    const ck::index_t init_method = std::stoi(argv[7]);
+    const bool do_log             = std::stoi(argv[8]);
+    const bool time_kernel        = std::stoi(argv[9]);
+    const float alpha             = std::stof(argv[10]);

     std::vector<ck::index_t> M;
     std::vector<ck::index_t> N;
     std::vector<ck::index_t> K;
-    const ck::index_t dims_arg_num = 10;
-    collect_index_params(argv, M, dims_arg_num, 2);
-    collect_index_params(argv, N, dims_arg_num + 2, 2);
-    collect_index_params(argv, K, dims_arg_num + 4, 2);
+    const ck::index_t dims_arg_num = 11;
+    collect_index_params(argv, M, dims_arg_num, NumDimMNK);
+    collect_index_params(argv, N, dims_arg_num + NumDimMNK, NumDimMNK);
+    collect_index_params(argv, K, dims_arg_num + NumDimMNK * 2, NumDimMNK);

-    std::vector<ck::index_t> StridesA;
-    std::vector<ck::index_t> StridesB;
-    std::vector<ck::index_t> StridesE;
-    std::vector<ck::index_t> StridesD;
+    std::vector<ck::index_t> StridesA(NumDimMNK * 2);
+    std::vector<ck::index_t> StridesB(NumDimMNK * 2);
+    std::vector<ck::index_t> StridesE(NumDimMNK * 2);
     if(!default_strides)
     {
-        collect_index_params(argv, StridesA, dims_arg_num + 6, 4);
-        collect_index_params(argv, StridesB, dims_arg_num + 10, 4);
-        collect_index_params(argv, StridesE, dims_arg_num + 14, 4);
-        collect_index_params(argv, StridesD, dims_arg_num + 18, 4);
+        collect_index_params(argv, StridesA, dims_arg_num + NumDimMNK * 3, NumDimMNK * 2);
+        collect_index_params(argv, StridesB, dims_arg_num + NumDimMNK * 5, NumDimMNK * 2);
+        collect_index_params(argv, StridesE, dims_arg_num + NumDimMNK * 7, NumDimMNK * 2);
     }

     using F16 = ck::half_t;
...
@@ -93,32 +93,71 @@ int profile_contraction_scale(int argc, char* argv[])
         if(default_strides)
         {
-            assign_default_strides(a_layout, StridesA, {M[0], M[1], K[0], K[1]});
-            assign_default_strides(b_layout, StridesB, {N[0], N[1], K[0], K[1]});
-            assign_default_strides(cde_layout, StridesE, {M[0], M[1], N[0], N[1]});
-            assign_default_strides(cde_layout, StridesD, {M[0], M[1], N[0], N[1]});
+            auto merge_dims = [](const std::vector<ck::index_t>& dims01,
+                                 const std::vector<ck::index_t>& dims23) {
+                std::vector<ck::index_t> dims_szt(dims01.begin(), dims01.end());
+                dims_szt.insert(dims_szt.end(), dims23.begin(), dims23.end());
+                return dims_szt;
+            };
+            assign_default_strides(a_layout, StridesA, merge_dims(M, K));
+            assign_default_strides(b_layout, StridesB, merge_dims(N, K));
+            assign_default_strides(cde_layout, StridesE, merge_dims(M, N));
        }
-        bool pass = ck::profiler::profile_contraction_impl<ALayout,
-                                                           BLayout,
-                                                           CDELayout,
-                                                           DataType,
-                                                           ComputeDataType,
-                                                           ck::Tuple<>,
-                                                           Scale>(
-            do_verification, init_method, do_log, time_kernel, Scale{alpha},
-            M, N, K, StridesA, StridesB, StridesE, StridesD);
-        return pass;
+        if(NumDimMNK == 2)
+        {
+            bool pass = ck::profiler::profile_contraction_impl<2,
+                                                               ALayout,
+                                                               BLayout,
+                                                               CDELayout,
+                                                               DataType,
+                                                               ComputeDataType,
+                                                               ck::Tuple<>,
+                                                               Scale>(
+                do_verification, init_method, do_log, time_kernel, Scale{alpha},
+                M, N, K, StridesA, StridesB, StridesE, StridesE);
+            return pass;
+        }
+        else if(NumDimMNK == 6)
+        {
+            bool pass = ck::profiler::profile_contraction_impl<6,
+                                                               ALayout,
+                                                               BLayout,
+                                                               CDELayout,
+                                                               DataType,
+                                                               ComputeDataType,
+                                                               ck::Tuple<>,
+                                                               Scale>(
+                do_verification, init_method, do_log, time_kernel, Scale{alpha},
+                M, N, K, StridesA, StridesB, StridesE, StridesE);
+            return pass;
+        }
+        else
+        {
+            throw std::runtime_error("Not supported NumDimMNK");
+            return false;
+        }
    };

     auto run_profile_for_datatype = [&](auto type, auto compute_type) {
...
profiler/src/profile_grouped_conv_fwd.cpp
View file @
6b9a4bd5
...
...
@@ -24,6 +24,9 @@ enum struct ConvDataType
BF16_BF16_BF16
,
// 2
INT8_INT8_INT8
,
// 3
F8_F8_F8
,
// 4
BF8_BF8_F8
,
// 5
F8_BF8_F8
,
// 6
BF8_F8_F8
,
// 7
};
#define OP_NAME "grouped_conv_fwd"
...
...
@@ -38,7 +41,10 @@ static void print_helper_msg()
<<
" 1: Input fp16, Weight fp16, Output fp16
\n
"
<<
" 2: Input bf16, Weight bf16, Output bf16
\n
"
<<
" 3: Input int8, Weight int8, Output int8
\n
"
<<
" 4: Input fp8, Weight fp8, Output fp8)
\n
"
<<
" 4: Input fp8, Weight fp8, Output fp8
\n
"
<<
" 5: Input bf8, Weight bf8, Output fp8
\n
"
<<
" 6: Input fp8, Weight bf8, Output fp8
\n
"
<<
" 7: Input bf8, Weight fp8, Output fp8)
\n
"
<<
"arg3: tensor layout (0: Input[G, N, Hi, Wi, C], Weight[G, K, Y, X, C], Output[G, N, Ho, Wo, K]
\n
"
<<
" 1: Input[N, Hi, Wi, G, C], Weight[G, K, Y, X, C], Output[N, Ho, Wo, G, K])
\n
"
<<
"arg4: verification (0: no, 1: yes)
\n
"
...
...
@@ -82,6 +88,7 @@ int profile_grouped_conv_fwd(int argc, char* argv[])
using
BF16
=
ck
::
bhalf_t
;
using
INT8
=
int8_t
;
using
F8
=
ck
::
f8_t
;
using
BF8
=
ck
::
bf8_t
;
//
using
GNWC
=
ck
::
tensor_layout
::
convolution
::
GNWC
;
...
...
@@ -115,7 +122,9 @@ int profile_grouped_conv_fwd(int argc, char* argv[])
auto
out_layout
,
auto
in_type
,
auto
wei_type
,
auto
out_type
)
{
auto
out_type
,
auto
a_compute_type
,
auto
b_compute_type
)
{
constexpr
ck
::
index_t
NDimSpatial
=
num_dim_spatial_tmp
.
value
;
using
InLayout
=
decltype
(
in_layout
);
...
...
@@ -126,13 +135,18 @@ int profile_grouped_conv_fwd(int argc, char* argv[])
        using WeiDataType = decltype(wei_type);
        using OutDataType = decltype(out_type);
+       using AComputeType = decltype(a_compute_type);
+       using BComputeType = decltype(b_compute_type);

        bool pass = ck::profiler::profile_grouped_conv_fwd_impl<NDimSpatial,
                                                                InLayout,
                                                                WeiLayout,
                                                                OutLayout,
                                                                InDataType,
                                                                WeiDataType,
-                                                               OutDataType>(
+                                                               OutDataType,
+                                                               AComputeType,
+                                                               BComputeType>(
            do_verification, init_method, do_log, time_kernel, params);

        return pass ? 0 : 1;
...
...
@@ -143,57 +157,59 @@ int profile_grouped_conv_fwd(int argc, char* argv[])
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
-           return profile(I1, GNWC{}, GKXC{}, GNWK{}, F32{}, F32{}, F32{});
+           return profile(I1, GNWC{}, GKXC{}, GNWK{}, F32{}, F32{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
-           return profile(I1, GNWC{}, GKXC{}, GNWK{}, F16{}, F16{}, F16{});
+           return profile(I1, GNWC{}, GKXC{}, GNWK{}, F16{}, F16{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
-           return profile(I1, GNWC{}, GKXC{}, GNWK{}, BF16{}, BF16{}, BF16{});
+           return profile(I1, GNWC{}, GKXC{}, GNWK{}, BF16{}, BF16{}, BF16{}, BF16{}, BF16{});
        }
        else if(data_type == ConvDataType::INT8_INT8_INT8)
        {
-           return profile(I1, GNWC{}, GKXC{}, GNWK{}, INT8{}, INT8{}, INT8{});
+           return profile(I1, GNWC{}, GKXC{}, GNWK{}, INT8{}, INT8{}, INT8{}, INT8{}, INT8{});
        }
    }
    else if(num_dim_spatial == 2 && layout == ConvLayout::GNHWC_GKYXC_GNHWK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
-           return profile(I2, GNHWC{}, GKYXC{}, GNHWK{}, F32{}, F32{}, F32{});
+           return profile(I2, GNHWC{}, GKYXC{}, GNHWK{}, F32{}, F32{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
-           return profile(I2, GNHWC{}, GKYXC{}, GNHWK{}, F16{}, F16{}, F16{});
+           return profile(I2, GNHWC{}, GKYXC{}, GNHWK{}, F16{}, F16{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
-           return profile(I2, GNHWC{}, GKYXC{}, GNHWK{}, BF16{}, BF16{}, BF16{});
+           return profile(I2, GNHWC{}, GKYXC{}, GNHWK{}, BF16{}, BF16{}, BF16{}, BF16{}, BF16{});
        }
        else if(data_type == ConvDataType::INT8_INT8_INT8)
        {
-           return profile(I2, GNHWC{}, GKYXC{}, GNHWK{}, INT8{}, INT8{}, INT8{});
+           return profile(I2, GNHWC{}, GKYXC{}, GNHWK{}, INT8{}, INT8{}, INT8{}, INT8{}, INT8{});
        }
    }
    else if(num_dim_spatial == 3 && layout == ConvLayout::GNHWC_GKYXC_GNHWK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
-           return profile(I3, GNDHWC{}, GKZYXC{}, GNDHWK{}, F32{}, F32{}, F32{});
+           return profile(I3, GNDHWC{}, GKZYXC{}, GNDHWK{}, F32{}, F32{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
-           return profile(I3, GNDHWC{}, GKZYXC{}, GNDHWK{}, F16{}, F16{}, F16{});
+           return profile(I3, GNDHWC{}, GKZYXC{}, GNDHWK{}, F16{}, F16{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
-           return profile(I3, GNDHWC{}, GKZYXC{}, GNDHWK{}, BF16{}, BF16{}, BF16{});
+           return profile(I3, GNDHWC{}, GKZYXC{}, GNDHWK{}, BF16{}, BF16{}, BF16{}, BF16{}, BF16{});
        }
        else if(data_type == ConvDataType::INT8_INT8_INT8)
        {
-           return profile(I3, GNDHWC{}, GKZYXC{}, GNDHWK{}, INT8{}, INT8{}, INT8{});
+           return profile(I3, GNDHWC{}, GKZYXC{}, GNDHWK{}, INT8{}, INT8{}, INT8{}, INT8{}, INT8{});
        }
    }
    // NHWGC_GKYXC_NHWGK
...
...
@@ -201,61 +217,75 @@ int profile_grouped_conv_fwd(int argc, char* argv[])
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
-           return profile(I1, NWGC{}, GKXC{}, NWGK{}, F32{}, F32{}, F32{});
+           return profile(I1, NWGC{}, GKXC{}, NWGK{}, F32{}, F32{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
-           return profile(I1, NWGC{}, GKXC{}, NWGK{}, F16{}, F16{}, F16{});
+           return profile(I1, NWGC{}, GKXC{}, NWGK{}, F16{}, F16{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
-           return profile(I1, NWGC{}, GKXC{}, NWGK{}, BF16{}, BF16{}, BF16{});
+           return profile(I1, NWGC{}, GKXC{}, NWGK{}, BF16{}, BF16{}, BF16{}, BF16{}, BF16{});
        }
        else if(data_type == ConvDataType::INT8_INT8_INT8)
        {
-           return profile(I1, NWGC{}, GKXC{}, NWGK{}, INT8{}, INT8{}, INT8{});
+           return profile(I1, NWGC{}, GKXC{}, NWGK{}, INT8{}, INT8{}, INT8{}, INT8{}, INT8{});
        }
    }
    else if(num_dim_spatial == 2 && layout == ConvLayout::NHWGC_GKYXC_NHWGK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
-           return profile(I2, NHWGC{}, GKYXC{}, NHWGK{}, F32{}, F32{}, F32{});
+           return profile(I2, NHWGC{}, GKYXC{}, NHWGK{}, F32{}, F32{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
-           return profile(I2, NHWGC{}, GKYXC{}, NHWGK{}, F16{}, F16{}, F16{});
+           return profile(I2, NHWGC{}, GKYXC{}, NHWGK{}, F16{}, F16{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
-           return profile(I2, NHWGC{}, GKYXC{}, NHWGK{}, BF16{}, BF16{}, BF16{});
+           return profile(I2, NHWGC{}, GKYXC{}, NHWGK{}, BF16{}, BF16{}, BF16{}, BF16{}, BF16{});
        }
        else if(data_type == ConvDataType::INT8_INT8_INT8)
        {
-           return profile(I2, NHWGC{}, GKYXC{}, NHWGK{}, INT8{}, INT8{}, INT8{});
+           return profile(I2, NHWGC{}, GKYXC{}, NHWGK{}, INT8{}, INT8{}, INT8{}, INT8{}, INT8{});
        }
    }
    else if(num_dim_spatial == 3 && layout == ConvLayout::NHWGC_GKYXC_NHWGK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
-           return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F32{}, F32{}, F32{});
+           return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F32{}, F32{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
-           return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F16{}, F16{}, F16{});
+           return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F16{}, F16{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
-           return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, BF16{}, BF16{}, BF16{});
+           return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, BF16{}, BF16{}, BF16{}, BF16{}, BF16{});
        }
        else if(data_type == ConvDataType::INT8_INT8_INT8)
        {
-           return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, INT8{}, INT8{}, INT8{});
+           return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, INT8{}, INT8{}, INT8{}, INT8{}, INT8{});
        }
        else if(data_type == ConvDataType::F8_F8_F8)
        {
-           return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F8{}, F8{}, F8{});
+           return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F8{}, F8{}, F8{}, F8{}, F8{});
        }
+       else if(data_type == ConvDataType::BF8_BF8_F8)
+       {
+           return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, BF8{}, BF8{}, F8{}, BF8{}, BF8{});
+       }
+       else if(data_type == ConvDataType::F8_BF8_F8)
+       {
+           return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F8{}, BF8{}, F8{}, F8{}, BF8{});
+       }
+       else if(data_type == ConvDataType::BF8_F8_F8)
+       {
+           return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, BF8{}, F8{}, F8{}, BF8{}, F8{});
+       }
    }
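The ladder above is the usual bridge from runtime flags to compile-time template arguments: every (rank, layout, data type) combination names exactly one instantiation of profile_grouped_conv_fwd_impl, now including separate A and B compute types for the mixed fp8/bf8 cases. A condensed, hypothetical sketch of the same dispatch pattern; the tag types and profile_impl are placeholders, not the library's API:

#include <stdexcept>

struct F32 {}; struct F16 {}; // placeholder tag types standing in for real element types
enum struct DataType { F32_F32_F32, F16_F16_F16 };

template <typename In, typename Wei, typename Out>
bool profile_impl() { return true; } // stands in for one templated kernel-family run

// Each branch converts one runtime enum value into one compile-time instantiation.
bool dispatch(DataType dt)
{
    if(dt == DataType::F32_F32_F32) { return profile_impl<F32, F32, F32>(); }
    if(dt == DataType::F16_F16_F16) { return profile_impl<F16, F16, F16>(); }
    throw std::runtime_error("unsupported data type");
}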
...
...
profiler/src/profile_grouped_gemm_two_stage.cpp
0 → 100644
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>
#include <sstream> // for std::istringstream
#include <string>
#include <vector>

#include "profiler/profile_grouped_gemm_two_stage_impl.hpp"
#include "profiler_operation_registry.hpp"

enum struct GemmMatrixLayout
{
    MK_KN_MN, // 0
};

enum struct GemmDataType
{
    F16_F16_F16,   // 0
    BF16_INT8_BF16 // 1
};

#define OP_NAME "grouped_gemm_two_stage"
#define OP_DESC "Grouped GEMM TwoStage"

namespace {

std::vector<int> argToIntArray(char* input)
{
    std::vector<int> out;

    std::istringstream in(input);

    std::string item;

    while(std::getline(in, item, ','))
    {
        out.push_back(std::stoi(item));
    }

    return out;
}

int profile_grouped_gemm_two_stage(int argc, char* argv[])
{
    if(argc < 14)
    {
        std::cout
            << "arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n"
            << "arg2: data type (0: fp16; 1: bf16@int8)\n"
            << "arg3: matrix layout (0: A[m, k] * B[k, n] = C[m, n])\n"
            << "arg4: verification (0: no; 1: yes)\n"
            << "arg5: initialization (0: no init; 1: integer value; 2: decimal value)\n"
            << "arg6: print tensor value (0: no; 1: yes)\n"
            << "arg7: time kernel (0: no; 1: yes)\n"
            << "arg8 to 13: Ms, Ns, Ks, StrideAs, StrideBs, StrideCs (e.g., 256,256 128,128 64,64 "
               "64,64 64,64 128,128)\n"
            << "arg14: kbatch value (default 1)\n"
            << "optional:\n"
            << "arg15: number of warm-up cycles (default 1)\n"
            << "arg16: number of iterations (default 10)\n"
            << std::endl;

        exit(1);
    }

    const auto data_type       = static_cast<GemmDataType>(std::stoi(argv[2]));
    const auto layout          = static_cast<GemmMatrixLayout>(std::stoi(argv[3]));
    const bool do_verification = std::stoi(argv[4]);
    const int init_method      = std::stoi(argv[5]);
    const bool do_log          = std::stoi(argv[6]);
    const bool time_kernel     = std::stoi(argv[7]);

    const auto Ms = argToIntArray(argv[8]);
    const auto Ns = argToIntArray(argv[9]);
    const auto Ks = argToIntArray(argv[10]);

    auto StrideAs = argToIntArray(argv[11]);
    auto StrideBs = argToIntArray(argv[12]);
    auto StrideCs = argToIntArray(argv[13]);

    // kbatch is arg14; read it whenever it is present, including together with
    // the optional warm-up/iteration arguments.
    const int kbatch = argc >= 15 ? std::stoi(argv[14]) : 1;

    // Packed row-major defaults, substituted for any stride given as -1.
    const int DefaultStrideA = Ks[0];
    const int DefaultStrideB = Ns[0];
    const int DefaultStrideC = Ns[0];

    for(size_t i = 0; i < Ms.size(); ++i)
    {
        StrideAs[i] = StrideAs[i] == -1 ? DefaultStrideA : StrideAs[i];
        StrideBs[i] = StrideBs[i] == -1 ? DefaultStrideB : StrideBs[i];
        StrideCs[i] = StrideCs[i] == -1 ? DefaultStrideC : StrideCs[i];
    }

    int n_warmup = 1;
    int n_iter   = 10;
    if(argc == 17)
    {
        // warm-up cycles and iteration count are args 15 and 16
        n_warmup = std::stoi(argv[15]);
        n_iter   = std::stoi(argv[16]);
    }

    if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::MK_KN_MN)
    {
        ck::profiler::profile_grouped_gemm_two_stage_impl<ck::half_t,
                                                          ck::half_t,
                                                          ck::half_t,
                                                          float,
                                                          ck::tensor_layout::gemm::RowMajor,
                                                          ck::tensor_layout::gemm::RowMajor,
                                                          ck::tensor_layout::gemm::RowMajor>(
            do_verification,
            init_method,
            do_log,
            time_kernel,
            Ms, Ns, Ks,
            StrideAs, StrideBs, StrideCs,
            kbatch, n_warmup, n_iter);
    }
    else if(data_type == GemmDataType::BF16_INT8_BF16 && layout == GemmMatrixLayout::MK_KN_MN)
    {
        ck::profiler::profile_grouped_gemm_two_stage_impl<ck::bhalf_t,
                                                          int8_t,
                                                          ck::bhalf_t,
                                                          float,
                                                          ck::tensor_layout::gemm::RowMajor,
                                                          ck::tensor_layout::gemm::RowMajor,
                                                          ck::tensor_layout::gemm::RowMajor>(
            do_verification,
            init_method,
            do_log,
            time_kernel,
            Ms, Ns, Ks,
            StrideAs, StrideBs, StrideCs,
            kbatch, n_warmup, n_iter);
    }
    else
    {
        throw std::runtime_error("wrong! this GEMM data_type & layout is not implemented");
    }

    return 0;
}

} // anonymous namespace

REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_grouped_gemm_two_stage);
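As a usage note for the new entry point: group sizes arrive as comma-separated lists (one element per GEMM in the group), and a stride of -1 selects the packed default derived from the first group's K or N. A small self-contained sketch of that convention; main and the sample values are illustrative only:

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Same parsing idea as argToIntArray above: "256,512" -> {256, 512}.
std::vector<int> arg_to_int_array(const std::string& input)
{
    std::vector<int> out;
    std::istringstream in(input);
    std::string item;
    while(std::getline(in, item, ',')) { out.push_back(std::stoi(item)); }
    return out;
}

int main()
{
    const auto Ks = arg_to_int_array("64,64");
    auto StrideAs = arg_to_int_array("-1,128"); // -1 means "use the packed default"
    for(std::size_t i = 0; i < StrideAs.size(); ++i)
    {
        StrideAs[i] = StrideAs[i] == -1 ? Ks[0] : StrideAs[i];
    }
    std::cout << StrideAs[0] << ", " << StrideAs[1] << '\n'; // prints: 64, 128
}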
profiler/src/profile_permute_scale.cpp
...
...
@@ -37,6 +37,20 @@ static void print_helper_msg()
// clang-format on
}
+void init_strides(const std::vector<ck::index_t>& lengths,
+                  const std::vector<ck::index_t>& dims_order,
+                  std::vector<ck::index_t>& strides)
+{
+    ck::index_t stride = 1;
+    for(ck::index_t d = lengths.size() - 1; d >= 0; d--)
+    {
+        ck::index_t dim = dims_order[d];
+        strides[dim]    = stride;
+        stride *= lengths[dim];
+    }
+}

} // namespace
int profile_permute_scale(int argc, char* argv[])
...
...
@@ -58,16 +72,21 @@ int profile_permute_scale(int argc, char* argv[])
    const int num_dims = dims_argc / 3;

    std::vector<ck::index_t> lengths(num_dims);
-   std::vector<ck::index_t> input_strides(num_dims);
-   std::vector<ck::index_t> output_strides(num_dims);
+   std::vector<ck::index_t> input_dims_order(num_dims);
+   std::vector<ck::index_t> output_dims_order(num_dims);

    for(int i = 0; i < num_dims; i++)
    {
-       lengths[i]        = std::stoi(argv[control_argc + i]);
-       input_strides[i]  = std::stoi(argv[control_argc + num_dims + i]);
-       output_strides[i] = std::stoi(argv[control_argc + 2 * num_dims + i]);
+       lengths[i]           = std::stoi(argv[control_argc + i]);
+       input_dims_order[i]  = std::stoi(argv[control_argc + num_dims + i]);
+       output_dims_order[i] = std::stoi(argv[control_argc + 2 * num_dims + i]);
    }

+   std::vector<ck::index_t> input_strides(num_dims);
+   std::vector<ck::index_t> output_strides(num_dims);
+   init_strides(lengths, input_dims_order, input_strides);
+   init_strides(lengths, output_dims_order, output_strides);

    using F32 = float;
    using F16 = ck::half_t;
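A quick standalone check of the init_strides logic added above (plain int in place of ck::index_t; the sample lengths and order come from the 3D case in the accompanying script): dims_order lists dimensions from slowest- to fastest-varying, so the dimension named last receives stride 1.

#include <cassert>
#include <vector>

void init_strides(const std::vector<int>& lengths,
                  const std::vector<int>& dims_order,
                  std::vector<int>& strides)
{
    int stride = 1;
    for(int d = static_cast<int>(lengths.size()) - 1; d >= 0; d--)
    {
        int dim      = dims_order[d];
        strides[dim] = stride; // the dimension listed last varies fastest
        stride *= lengths[dim];
    }
}

int main()
{
    std::vector<int> lengths{8, 1024, 8192}, strides(3);
    init_strides(lengths, {2, 1, 0}, strides); // dim 0 is innermost here
    assert((strides == std::vector<int>{1, 8, 8192}));
}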
...
...
script/profile_permute_scale.sh
0 → 100755
#!/bin/bash

## GPU visibility
export HIP_VISIBLE_DEVICES=0

DRIVER="../build/bin/ckProfiler"
echo $DRIVER

OP=$1
DATATYPE=$2
VERIFY=$3
INIT=$4
LOG=$5
TIME=$6

# 1D
######## op  datatype  verify  init  log  time  dims      in_strides_order  out_strides_order
$DRIVER $OP $DATATYPE $VERIFY $INIT $LOG $TIME 67108864 0 0

# 2D
######## op  datatype  verify  init  log  time  dims      in_strides_order  out_strides_order
$DRIVER $OP $DATATYPE $VERIFY $INIT $LOG $TIME 8192 8192 0 1 1 0
$DRIVER $OP $DATATYPE $VERIFY $INIT $LOG $TIME 8192 8192 1 0 0 1

# 3D
######## op  datatype  verify  init  log  time  dims      in_strides_order  out_strides_order
$DRIVER $OP $DATATYPE $VERIFY $INIT $LOG $TIME 8 1024 8192 0 1 2 2 1 0
$DRIVER $OP $DATATYPE $VERIFY $INIT $LOG $TIME 8 1024 8192 2 1 0 0 1 2

# 4D
######## op  datatype  verify  init  log  time  dims      in_strides_order  out_strides_order
$DRIVER $OP $DATATYPE $VERIFY $INIT $LOG $TIME 8 2 512 8192 0 1 2 3 3 2 1 0
$DRIVER $OP $DATATYPE $VERIFY $INIT $LOG $TIME 8 2 512 8192 3 2 1 0 0 1 2 3

# 5D
######## op  datatype  verify  init  log  time  dims      in_strides_order  out_strides_order
$DRIVER $OP $DATATYPE $VERIFY $INIT $LOG $TIME 8 2 2 256 8192 0 1 2 3 4 4 3 2 1 0
$DRIVER $OP $DATATYPE $VERIFY $INIT $LOG $TIME 8 2 2 256 8192 4 3 2 1 0 0 1 2 3 4

# 6D
######## op  datatype  verify  init  log  time  dims      in_strides_order  out_strides_order
$DRIVER $OP $DATATYPE $VERIFY $INIT $LOG $TIME 8 2 2 2 128 8192 0 1 2 3 4 5 5 4 3 2 1 0
$DRIVER $OP $DATATYPE $VERIFY $INIT $LOG $TIME 8 2 2 2 128 8192 5 4 3 2 1 0 0 1 2 3 4 5
test/CMakeLists.txt
...
...
@@ -46,7 +46,18 @@ function(add_test_executable TEST_NAME)
            list(REMOVE_ITEM ARGN "${source}")
        endif()
    endforeach()
+   foreach(source IN LISTS ARGN)
+       if(NOT GPU_TARGETS MATCHES "gfx9" AND source MATCHES "xdl")
+           message("removing xdl test ${source}")
+           list(REMOVE_ITEM ARGN "${source}")
+       endif()
+   endforeach()
+   foreach(source IN LISTS ARGN)
+       if(NOT GPU_TARGETS MATCHES "gfx11" AND source MATCHES "wmma")
+           message("removing wmma test ${source}")
+           list(REMOVE_ITEM ARGN "${source}")
+       endif()
+   endforeach()
    #only continue if there are some source files left on the list
    if(ARGN)
        add_executable(${TEST_NAME} ${ARGN})
...
...
@@ -100,6 +111,18 @@ function(add_gtest_executable TEST_NAME)
            list(REMOVE_ITEM ARGN "${source}")
        endif()
    endforeach()
+   foreach(source IN LISTS ARGN)
+       if(NOT GPU_TARGETS MATCHES "gfx9" AND source MATCHES "xdl")
+           message("removing xdl test ${source}")
+           list(REMOVE_ITEM ARGN "${source}")
+       endif()
+   endforeach()
+   foreach(source IN LISTS ARGN)
+       if(NOT GPU_TARGETS MATCHES "gfx11" AND source MATCHES "wmma")
+           message("removing wmma test ${source}")
+           list(REMOVE_ITEM ARGN "${source}")
+       endif()
+   endforeach()
    #only continue if there are some source files left on the list
    if(ARGN)
        add_executable(${TEST_NAME} ${ARGN})
...
...
@@ -117,6 +140,7 @@ function(add_gtest_executable TEST_NAME)
    set(result ${result} PARENT_SCOPE)
endfunction()

+add_compile_options(-Wno-c++20-extensions)
add_subdirectory(magic_number_division)
add_subdirectory(space_filling_curve)
add_subdirectory(conv_util)
...
...
test/batched_gemm/CMakeLists.txt
-list(APPEND gpu_list gfx908 gfx90a gfx940 gfx941 gfx942)
-set(target 0)
-foreach(gpu IN LISTS GPU_TARGETS)
-    if(gpu IN_LIST gpu_list AND target EQUAL 0)
-        add_gtest_executable(test_batched_gemm test_batched_gemm.cpp)
+add_gtest_executable(test_batched_gemm test_batched_gemm_xdl.cpp)
if(result EQUAL 0)
    target_link_libraries(test_batched_gemm PRIVATE utility device_batched_gemm_instance)
-        set(target 1)
-    endif()
-endforeach()
\ No newline at end of file
+endif()
test/batched_gemm/test_batched_gemm.cpp → test/batched_gemm/test_batched_gemm_xdl.cpp
File moved
test/batched_gemm_gemm/CMakeLists.txt
View file @
6b9a4bd5
-list(APPEND gpu_list gfx908 gfx90a gfx940 gfx941 gfx942)
-set(target 0)
-foreach(gpu IN LISTS GPU_TARGETS)
-    if(gpu IN_LIST gpu_list AND target EQUAL 0)
-        add_custom_target(test_batched_gemm_gemm)
-        add_gtest_executable(test_batched_gemm_gemm_fp16 test_batched_gemm_gemm_fp16.cpp)
-        if(result EQUAL 0)
-            target_link_libraries(test_batched_gemm_gemm_fp16 PRIVATE utility device_batched_gemm_gemm_instance)
-            add_dependencies(test_batched_gemm_gemm test_batched_gemm_gemm_fp16)
-            set(target 1)
-        endif()
-    endif()
-endforeach()
\ No newline at end of file
+add_gtest_executable(test_batched_gemm_gemm_fp16 test_batched_gemm_gemm_fp16_xdl.cpp)
+if(result EQUAL 0)
+    add_custom_target(test_batched_gemm_gemm)
+    target_link_libraries(test_batched_gemm_gemm_fp16 PRIVATE utility device_batched_gemm_gemm_instance)
+    add_dependencies(test_batched_gemm_gemm test_batched_gemm_gemm_fp16)
+endif()
test/batched_gemm_gemm/test_batched_gemm_gemm_fp16.cpp → test/batched_gemm_gemm/test_batched_gemm_gemm_fp16_xdl.cpp
File moved
test/batched_gemm_reduce/CMakeLists.txt
-list(APPEND gpu_list gfx908 gfx90a gfx940 gfx941 gfx942)
-set(target 0)
-foreach(gpu IN LISTS GPU_TARGETS)
-    if(gpu IN_LIST gpu_list AND target EQUAL 0)
-        add_test_executable(test_batched_gemm_reduce_fp16 batched_gemm_reduce_fp16.cpp)
-        if(result EQUAL 0)
-            target_link_libraries(test_batched_gemm_reduce_fp16 PRIVATE utility device_batched_gemm_reduce_instance)
-            set(target 1)
-        endif()
+add_test_executable(test_batched_gemm_reduce_fp16 batched_gemm_reduce_fp16_xdl.cpp)
+if(result EQUAL 0)
+    target_link_libraries(test_batched_gemm_reduce_fp16 PRIVATE utility device_batched_gemm_reduce_instance)
+endif()
-endforeach()