gaoqiong / composable_kernel · Commits

Commit 644df335, authored Jan 30, 2023 by rocking
Merge branch 'develop' into gemm_layernorm_instance
Parents: d99640ab, 7494c1c6
Changes: 254
Showing 14 changed files with 1243 additions and 88 deletions (+1243, -88)
profiler/include/profiler/profile_batchnorm_infer_impl.hpp   +335  -0
profiler/include/profiler/profile_gemm_add_multiply_impl.hpp +242  -0
profiler/include/profiler/profile_reduce_impl.hpp             +73  -76
profiler/include/profiler/profile_softmax_impl.hpp             +4  -4
profiler/src/CMakeLists.txt                                    +3  -0
profiler/src/profile_batchnorm_infer.cpp                     +202  -0
profiler/src/profile_gemm_add_multiply.cpp                   +158  -0
profiler/src/profile_softmax.cpp                               +8  -8
test/batchnorm/CMakeLists.txt                                  +2  -0
test/batchnorm/batchnorm_infer_rank_4.cpp                     +89  -0
test/gemm/CMakeLists.txt                                       +1  -0
test/gemm/gemm_standalone_xdl_fp16.cpp                         +5  -0
test/gemm/instance/gemm_wavelet_f16_tn_instance.cpp           +96  -0
test/gemm/instance/gemm_wavelet_f16_tn_instance.hpp           +25  -0
profiler/include/profiler/profile_batchnorm_infer_impl.hpp (new file, mode 100644)
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#pragma once
#include <iomanip>
#include <stdexcept>
#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/tensor_operation_instance/gpu/batchnorm_infer.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_batchnorm_infer.hpp"
namespace ck {
namespace profiler {

template <typename XDataType,
          typename YDataType,
          typename AccDataType,
          typename ScaleDataType,
          typename BiasDataType,
          typename MeanVarDataType,
          index_t Rank,
          index_t NumBatchNormReduceDim>
bool profile_batchnorm_infer_impl(int do_verification,
                                  int init_method,
                                  bool do_dumpout,
                                  bool time_kernel,
                                  const std::vector<size_t> inOutLengths,
                                  const std::vector<int> reduceDims,
                                  double epsilon)
{
    if(inOutLengths.size() != Rank || reduceDims.size() != NumBatchNormReduceDim)
    {
        throw std::runtime_error("Invalid tensor lengths or number of reduce dimensions!");
    };

    std::vector<size_t> scaleBiasMeanVarLengths;
    std::vector<int> invariantDims;

    // used for calculating the effective transferred bytes by each operation
    size_t total_length;
    size_t invariant_length = 1;

    total_length =
        std::accumulate(inOutLengths.begin(), inOutLengths.end(), 1, std::multiplies<size_t>{});

    if(std::any_of(reduceDims.begin(), reduceDims.end(), [](int d) { return d < 0 || d >= Rank; }))
        throw std::runtime_error("Invalid reduce dimensions!");

    for(int dim = 0; dim < Rank; dim++)
    {
        if(std::none_of(reduceDims.begin(), reduceDims.end(), [&](int d) { return dim == d; }))
        {
            invariantDims.push_back(dim);
            scaleBiasMeanVarLengths.push_back(inOutLengths[dim]);
            invariant_length *= inOutLengths[dim];
        };
    }

    // input data of the batchnorm infer algorithm
    Tensor<XDataType> x(inOutLengths);
    Tensor<ScaleDataType> scale(scaleBiasMeanVarLengths);
    Tensor<BiasDataType> bias(scaleBiasMeanVarLengths);
    Tensor<MeanVarDataType> estimatedMean(scaleBiasMeanVarLengths);
    Tensor<MeanVarDataType> estimatedVariance(scaleBiasMeanVarLengths);

    // output data of the batchnorm infer algorithm
    Tensor<YDataType> y_ref(inOutLengths);
    Tensor<YDataType> y(inOutLengths);

    auto inOutStrides            = x.mDesc.GetStrides();
    auto scaleBiasMeanVarStrides = scale.mDesc.GetStrides();

    std::size_t num_thread = std::thread::hardware_concurrency();

    const float x_mean       = 0.0f;
    const float x_stddev     = 1.0f;
    const float noise_stddev = 0.04f;

    // input data in normal distribution
    x.GenerateTensorValue(GeneratorTensor_4<XDataType>{x_mean, x_stddev}, num_thread);

    // initialize the estimatedMean to be values with tiny variation to the mean of the x
    // values
    estimatedMean.GenerateTensorValue(GeneratorTensor_4<MeanVarDataType>{x_mean, noise_stddev},
                                      num_thread);

    // initialize the estimatedVariance to be values with tiny variation to the variance of
    // the x values
    estimatedVariance.GenerateTensorValue(
        GeneratorTensor_4<MeanVarDataType>{x_stddev * x_stddev, noise_stddev}, num_thread);

    if(do_verification)
    {
        switch(init_method)
        {
        case 0:
            scale.GenerateTensorValue(GeneratorTensor_0<ScaleDataType>{}, num_thread);
            bias.GenerateTensorValue(GeneratorTensor_0<BiasDataType>{}, num_thread);
            break;
        case 1:
            scale.GenerateTensorValue(GeneratorTensor_1<ScaleDataType>{1}, num_thread);
            bias.GenerateTensorValue(GeneratorTensor_1<BiasDataType>{0}, num_thread);
            break;
        case 2:
            scale.GenerateTensorValue(GeneratorTensor_2<ScaleDataType>{-5, 5}, num_thread);
            bias.GenerateTensorValue(GeneratorTensor_2<BiasDataType>{-5, 5}, num_thread);
            break;
        default:
            scale.GenerateTensorValue(GeneratorTensor_3<ScaleDataType>{-1.0f, 1.0f}, num_thread);
            bias.GenerateTensorValue(GeneratorTensor_3<BiasDataType>{-1.0f, 1.0f}, num_thread);
        }
    };

    // these buffers are usually provided by the user application
    DeviceMem x_dev(sizeof(XDataType) * x.mDesc.GetElementSpaceSize());
    DeviceMem y_dev(sizeof(XDataType) * y.mDesc.GetElementSpaceSize());
    DeviceMem scale_dev(sizeof(ScaleDataType) * scale.mDesc.GetElementSpaceSize());
    DeviceMem bias_dev(sizeof(BiasDataType) * bias.mDesc.GetElementSpaceSize());

    // estimatedMean_dev
    DeviceMem estimatedMean_dev(sizeof(MeanVarDataType) *
                                estimatedMean.mDesc.GetElementSpaceSize());
    // estimatedVariance_dev
    DeviceMem estimatedVariance_dev(sizeof(MeanVarDataType) *
                                    estimatedVariance.mDesc.GetElementSpaceSize());

    x_dev.ToDevice(x.mData.data());
    scale_dev.ToDevice(scale.mData.data());
    bias_dev.ToDevice(bias.mData.data());
    estimatedMean_dev.ToDevice(estimatedMean.mData.data());
    estimatedVariance_dev.ToDevice(estimatedVariance.mData.data());

    std::array<index_t, Rank> arrInOutLengths;
    std::array<index_t, Rank> arrInOutStrides;
    std::array<index_t, Rank - NumBatchNormReduceDim> arrScaleBiasMeanVarLengths;
    std::array<index_t, Rank - NumBatchNormReduceDim> arrScaleBiasMeanVarStrides;
    std::array<int, NumBatchNormReduceDim> arrReduceDims;

    std::copy(inOutLengths.begin(), inOutLengths.end(), arrInOutLengths.begin());
    std::copy(inOutStrides.begin(), inOutStrides.end(), arrInOutStrides.begin());
    std::copy(scaleBiasMeanVarLengths.begin(),
              scaleBiasMeanVarLengths.end(),
              arrScaleBiasMeanVarLengths.begin());
    std::copy(scaleBiasMeanVarStrides.begin(),
              scaleBiasMeanVarStrides.end(),
              arrScaleBiasMeanVarStrides.begin());
    std::copy(reduceDims.begin(), reduceDims.end(), arrReduceDims.begin());

    std::array<index_t, Rank> aligned_scaleBiasMeanVarStrides{0};

    int i = 0;
    for(auto dim : invariantDims)
    {
        assert(inOutLengths[dim] == scaleBiasMeanVarLengths[i]);

        aligned_scaleBiasMeanVarStrides[dim] = scaleBiasMeanVarStrides[i];
        i++;
    };

    using Normalize = ck::tensor_operation::element_wise::NormalizeInInfer;

    // add device batchnorm-infer instances
    using DeviceOp = ck::tensor_operation::device::DeviceElementwise<
        ck::Tuple<XDataType, MeanVarDataType, MeanVarDataType, ScaleDataType, BiasDataType>,
        ck::Tuple<YDataType>,
        Normalize,
        Rank>;

    // get device op instances
    const auto instance_ptrs =
        ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
            DeviceOp>::GetInstances();

    std::cout << "found " << instance_ptrs.size() << " instances" << std::endl;

    std::string best_instance_name;
    float best_avg_time   = std::numeric_limits<float>::max();
    float best_gb_per_sec = 0;

    if(do_verification)
    {
        using PassThroughOp = ck::tensor_operation::element_wise::PassThrough;

        using ReferenceBatchNormInferInstance =
            ck::tensor_operation::host::ReferenceBatchNormInfer<XDataType,
                                                                YDataType,
                                                                AccDataType,
                                                                ScaleDataType,
                                                                BiasDataType,
                                                                MeanVarDataType,
                                                                PassThroughOp,
                                                                Rank,
                                                                NumBatchNormReduceDim>;

        auto batchNormInfer_ref = ReferenceBatchNormInferInstance{};

        auto argument_ptr_ref =
            batchNormInfer_ref.MakeArgumentPointer(arrInOutLengths,
                                                   arrInOutStrides,
                                                   arrInOutStrides,
                                                   arrReduceDims,
                                                   arrScaleBiasMeanVarLengths,
                                                   arrScaleBiasMeanVarStrides,
                                                   arrScaleBiasMeanVarStrides,
                                                   arrScaleBiasMeanVarStrides,
                                                   x.mData.data(),
                                                   scale.mData.data(),
                                                   bias.mData.data(),
                                                   epsilon,
                                                   PassThroughOp{},
                                                   estimatedMean.mData.data(),
                                                   estimatedVariance.mData.data(),
                                                   y_ref.mData.data());

        if(!batchNormInfer_ref.IsSupportedArgument(argument_ptr_ref.get()))
        {
            std::cout << "The runtime parameters not supported by the reference instance, exiting!"
                      << std::endl;

            return (false);
        };

        auto invoker_ptr_ref = batchNormInfer_ref.MakeInvokerPointer();

        (void)invoker_ptr_ref->Run(argument_ptr_ref.get());
    }

    int num_kernel = 0;
    bool pass      = true;

    for(auto& inst_ptr : instance_ptrs)
    {
        auto argument_ptr =
            inst_ptr->MakeArgumentPointer(arrInOutLengths,
                                          {arrInOutStrides,
                                           aligned_scaleBiasMeanVarStrides,
                                           aligned_scaleBiasMeanVarStrides,
                                           aligned_scaleBiasMeanVarStrides,
                                           aligned_scaleBiasMeanVarStrides},
                                          {arrInOutStrides},
                                          {x_dev.GetDeviceBuffer(),
                                           estimatedMean_dev.GetDeviceBuffer(),
                                           estimatedVariance_dev.GetDeviceBuffer(),
                                           scale_dev.GetDeviceBuffer(),
                                           bias_dev.GetDeviceBuffer()},
                                          {y_dev.GetDeviceBuffer()},
                                          Normalize{epsilon});

        if(inst_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            num_kernel++;
        }
        else
        {
            if(time_kernel)
            {
                std::cout << inst_ptr->GetTypeString()
                          << " skipped due to unsupported argument: " << std::endl;
            }

            continue;
        };

        auto invoker_ptr = inst_ptr->MakeInvokerPointer();

        float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

        size_t num_bytes = 0;

        // inputing of x, scale, bias, outputing of y
        num_bytes += total_length * (sizeof(XDataType) + sizeof(YDataType)) +
                     invariant_length *
                         (sizeof(ScaleDataType) + sizeof(BiasDataType) + sizeof(MeanVarDataType));

        float gb_per_sec = num_bytes / 1.E6 / avg_time;

        if(time_kernel)
            std::cout << "Perf: " << avg_time << " ms, " << gb_per_sec << " GB/s, "
                      << inst_ptr->GetTypeString() << std::endl;

        if(avg_time < best_avg_time)
        {
            best_instance_name = inst_ptr->GetTypeString();
            best_avg_time      = avg_time;
            best_gb_per_sec    = gb_per_sec;
        }

        if(do_verification)
        {
            using ck::utils::check_err;

            bool single_pass;

            y_dev.FromDevice(y.mData.data());

            if constexpr(ck::is_same_v<YDataType, ck::bhalf_t>)
                single_pass = check_err(y.mData, y_ref.mData, "y results", 1e-2, 1e-2);
            else
                single_pass = check_err(y.mData, y_ref.mData, "y results", 4e-3, 4e-3);

            pass = pass && single_pass;
        };

        if(do_dumpout)
        {
            using ck::host_common::dumpBufferToFile;
            // clang-format off
            dumpBufferToFile("dump_x.bin", x.mData.data(), x.mDesc.GetElementSize());
            dumpBufferToFile("dump_y.bin", y.mData.data(), y.mDesc.GetElementSize());
            dumpBufferToFile("dump_y_ref.bin", y_ref.mData.data(), y_ref.mDesc.GetElementSize());
            // clang-format on
        };
    }

    if(time_kernel)
    {
        std::cout << "best perf = " << best_avg_time << " ms, " << best_gb_per_sec << " GB/s, "
                  << best_instance_name << std::endl;
    }

    if(num_kernel == 0)
    {
        std::cout << "Error: No kernel is applicable" << std::endl;
        return false;
    }

    return pass;
}

} // namespace profiler
} // namespace ck
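Note on the elementwise op used above: the profiler binds five input tensors (x, estimatedMean, estimatedVariance, scale, bias) and one output tensor (y) to a DeviceElementwise instance parameterized by NormalizeInInfer. The following is a minimal sketch, not CK library code, assuming the standard batch-norm inference formula; the struct name and plain-float signature are illustrative only.

// Sketch (assumption): per-element transform expected of a batchnorm-infer op,
// y = scale * (x - mean) / sqrt(variance + epsilon) + bias.
#include <cmath>

struct NormalizeInInferSketch
{
    double epsilon_;

    // Argument order mirrors the buffers the profiler passes: y, then x, mean,
    // variance, scale, bias.
    void operator()(float& y, float x, float mean, float variance, float scale, float bias) const
    {
        y = scale * (x - mean) / std::sqrt(variance + epsilon_) + bias;
    }
};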
profiler/include/profiler/profile_gemm_add_multiply_impl.hpp (new file, mode 100644)
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#pragma once
#include <iomanip>
#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/device_gemm_multiple_d.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/library/tensor_operation_instance/gpu/gemm_add_multiply.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
namespace ck {
namespace profiler {

template <typename ADataType,
          typename BDataType,
          typename AccDataType,
          typename D0DataType,
          typename D1DataType,
          typename EDataType,
          typename ALayout,
          typename BLayout,
          typename D0Layout,
          typename D1Layout,
          typename ELayout>
bool profile_gemm_add_multiply_impl(int do_verification,
                                    int init_method,
                                    bool /*do_log*/,
                                    bool time_kernel,
                                    int M,
                                    int N,
                                    int K,
                                    int StrideA,
                                    int StrideB,
                                    int StrideD0,
                                    int StrideD1,
                                    int StrideE)
{
    auto f_host_tensor_descriptor =
        [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
            using namespace ck::literals;

            if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
            {
                return HostTensorDescriptor({row, col}, {stride, 1_uz});
            }
            else
            {
                return HostTensorDescriptor({row, col}, {1_uz, stride});
            }
        };

    Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
    Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
    Tensor<D0DataType> d0_m_n(f_host_tensor_descriptor(M, N, StrideD0, D0Layout{}));
    Tensor<D1DataType> d1_m_n(f_host_tensor_descriptor(M, N, StrideD1, D1Layout{}));
    Tensor<EDataType> e_m_n_device_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
    Tensor<EDataType> e_m_n_host_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));

    std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
    std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
    std::cout << "d0_m_n: " << d0_m_n.mDesc << std::endl;
    std::cout << "d1_m_n: " << d1_m_n.mDesc << std::endl;
    std::cout << "e_m_n: " << e_m_n_device_result.mDesc << std::endl;

    switch(init_method)
    {
    case 0: break;
    case 1:
        a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
        b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5});
        d0_m_n.GenerateTensorValue(GeneratorTensor_2<D0DataType>{-5, 5});
        d1_m_n.GenerateTensorValue(GeneratorTensor_2<D1DataType>{-1, 1});
        break;
    default:
        a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
        b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
        d0_m_n.GenerateTensorValue(GeneratorTensor_3<D0DataType>{0.0, 1.0});
        d1_m_n.GenerateTensorValue(GeneratorTensor_3<D1DataType>{0.0, 1.0});
    }

    using PassThrough = ck::tensor_operation::element_wise::PassThrough;
    using AddMultiply = ck::tensor_operation::element_wise::AddMultiply;

    using AElementOp   = PassThrough;
    using BElementOp   = PassThrough;
    using CDEElementOp = AddMultiply;

    const auto a_element_op   = AElementOp{};
    const auto b_element_op   = BElementOp{};
    const auto cde_element_op = CDEElementOp{};

    using DeviceOp =
        ck::tensor_operation::device::DeviceGemmMultipleD<ALayout,
                                                          BLayout,
                                                          ck::Tuple<D0Layout, D1Layout>,
                                                          ELayout,
                                                          ADataType,
                                                          BDataType,
                                                          ck::Tuple<D0DataType, D1DataType>,
                                                          EDataType,
                                                          PassThrough,
                                                          PassThrough,
                                                          CDEElementOp>;

    // get device op instances
    const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
        DeviceOp>::GetInstances();

    std::cout << "found " << op_ptrs.size() << " instances" << std::endl;

    // run reference
    if(do_verification)
    {
        Tensor<AccDataType> c_m_n({M, N});

        using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
                                                                                BDataType,
                                                                                AccDataType,
                                                                                AccDataType,
                                                                                AElementOp,
                                                                                BElementOp,
                                                                                PassThrough>;

        auto ref_gemm    = ReferenceGemmInstance{};
        auto ref_invoker = ref_gemm.MakeInvoker();

        auto ref_argument =
            ref_gemm.MakeArgument(a_m_k, b_k_n, c_m_n, a_element_op, b_element_op, PassThrough{});

        ref_invoker.Run(ref_argument);

        for(int m = 0; m < M; ++m)
        {
            for(int n = 0; n < N; ++n)
            {
                cde_element_op(e_m_n_host_result(m, n), c_m_n(m, n), d0_m_n(m, n), d1_m_n(m, n));
            }
        }
    }

    DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
    DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
    DeviceMem d0_m_n_device_buf(sizeof(D0DataType) * d0_m_n.mDesc.GetElementSpaceSize());
    DeviceMem d1_m_n_device_buf(sizeof(D1DataType) * d1_m_n.mDesc.GetElementSpaceSize());
    DeviceMem e_device_buf(sizeof(EDataType) * e_m_n_device_result.mDesc.GetElementSpaceSize());

    a_device_buf.ToDevice(a_m_k.mData.data());
    b_device_buf.ToDevice(b_k_n.mData.data());
    d0_m_n_device_buf.ToDevice(d0_m_n.mData.data());
    d1_m_n_device_buf.ToDevice(d1_m_n.mData.data());

    std::string best_op_name;
    float best_ave_time   = 0;
    float best_tflops     = 0;
    float best_gb_per_sec = 0;

    bool pass = true;

    // profile device operation instances
    for(auto& op_ptr : op_ptrs)
    {
        auto argument_ptr = op_ptr->MakeArgumentPointer(
            a_device_buf.GetDeviceBuffer(),
            b_device_buf.GetDeviceBuffer(),
            std::array<const void*, 2>{d0_m_n_device_buf.GetDeviceBuffer(),
                                       d1_m_n_device_buf.GetDeviceBuffer()},
            e_device_buf.GetDeviceBuffer(),
            M,
            N,
            K,
            StrideA,
            StrideB,
            std::array<ck::index_t, 2>{StrideD0, StrideD1},
            StrideE,
            a_element_op,
            b_element_op,
            cde_element_op);

        auto invoker_ptr = op_ptr->MakeInvokerPointer();

        std::string op_name = op_ptr->GetTypeString();

        if(op_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            // re-init E to zero before profiling a kernel
            e_device_buf.SetZero();

            float ave_time =
                invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

            std::size_t flop = std::size_t(2) * M * N * K;

            std::size_t num_btype =
                sizeof(ADataType) * M * K + sizeof(BDataType) * K * N + sizeof(EDataType) * M * N;

            float tflops = static_cast<float>(flop) / 1.E9 / ave_time;

            float gb_per_sec = num_btype / 1.E6 / ave_time;

            std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops << " TFlops, "
                      << gb_per_sec << " GB/s, " << op_name << std::endl;

            if(tflops > best_tflops)
            {
                best_op_name    = op_name;
                best_tflops     = tflops;
                best_ave_time   = ave_time;
                best_gb_per_sec = gb_per_sec;
            }

            if(do_verification)
            {
                e_device_buf.FromDevice(e_m_n_device_result.mData.data());

                pass = pass && ck::utils::check_err(e_m_n_device_result, e_m_n_host_result);
            }
        }
        else
        {
            std::cout << op_name << " does not support this problem" << std::endl;
        }
    }

    std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, "
              << best_gb_per_sec << " GB/s, " << best_op_name << std::endl;

    return pass;
}

} // namespace profiler
} // namespace ck
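The reference loop above applies cde_element_op to the GEMM accumulator and the two D tensors. A minimal sketch of that epilogue follows; it is not the CK functor itself, only an assumed equivalent based on the operation description E[m, n] = (A[m, k] * B[k, n] + D0[m, n]) x D1[m, n] given in the profiler help text.

// Sketch (assumption): AddMultiply-style epilogue, e = (c + d0) * d1.
struct AddMultiplySketch
{
    template <typename E, typename C, typename D0, typename D1>
    void operator()(E& e, const C& c, const D0& d0, const D1& d1) const
    {
        e = static_cast<E>((c + d0) * d1);
    }
};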
profiler/include/profiler/profile_reduce_impl.hpp
@@ -6,11 +6,11 @@
 #include "ck/utility/reduction_enums.hpp"
 #include "ck/tensor_operation/gpu/device/device_reduce.hpp"
-#include "ck/library/tensor_operation_instance/gpu/reduce/device_reduce_instance.hpp"
+#include "ck/library/tensor_operation_instance/gpu/reduce/reduce.hpp"
 #include "ck/library/utility/algorithm.hpp"
 #include "ck/library/utility/check_err.hpp"
 #include "ck/library/utility/device_memory.hpp"
-#include "ck/library/utility/host_reduction.hpp"
+#include "ck/library/reference_tensor_operation/cpu/reference_reduce.hpp"
 #include "ck/library/utility/host_common_util.hpp"
 #include "ck/library/utility/host_tensor_generator.hpp"
@@ -158,11 +158,6 @@ bool profile_reduce_impl_impl(bool do_verification,
     constexpr bool OutputIndex = (op_support_indices && UseIndex);
-    constexpr bool out_support_atomic_add = std::is_same<OutDataType, float>::value;
-    constexpr bool op_support_atomic_add =
-        !op_support_indices && ReduceOpId != ReduceTensorOp::NORM2;
-    constexpr bool use_atomic_add = (out_support_atomic_add && op_support_atomic_add);
-
     // 1) If InDataType is half_t, must use half_t as AccDataType for indexable reduction operations
     // 2) If InDataType is half_t, must use float as AccDataType for non-indexable reduction
     // operations
@@ -200,7 +195,8 @@ bool profile_reduce_impl_impl(bool do_verification,
     constexpr bool invalid_reduce = (invalid_reduce_1 || invalid_reduce_2 || invalid_reduce_3 ||
                                      invalid_reduce_4 || invalid_reduce_5 || invalid_reduce_6);
-    bool pass = true;
+    int num_kernel = 0;
+    bool pass      = true;
     if constexpr(!invalid_reduce)
     {
@@ -286,75 +282,25 @@ bool profile_reduce_impl_impl(bool do_verification,
             reduce_unary_operator<ReduceOpId, true, true>::GetElementwiseOperator(
                 static_cast<int32_t>(reduce_total_length));
-        using DeviceReduceInstPtr =
-            DeviceReducePtr<Rank, NumReduceDim, InElementwiseOperation, AccElementwiseOperation>;
-
-        std::vector<DeviceReduceInstPtr> reduce_ptrs;
-
-        add_device_reduce_instance_threadwise<InDataType,
-                                              AccDataType,
-                                              OutDataType,
-                                              Rank,
-                                              NumReduceDim,
-                                              ReduceOperation,
-                                              InElementwiseOperation,
-                                              AccElementwiseOperation,
-                                              PropagateNan,
-                                              UseIndex>(reduce_ptrs);
-
-        add_device_reduce_instance_blockwise<InDataType,
-                                             AccDataType,
-                                             OutDataType,
-                                             Rank,
-                                             NumReduceDim,
-                                             ReduceOperation,
-                                             InElementwiseOperation,
-                                             AccElementwiseOperation,
-                                             PropagateNan,
-                                             UseIndex>(reduce_ptrs);
-
-        if constexpr(use_atomic_add)
-        {
-            add_device_reduce_instance_multiblock_atomic_add<InDataType,
-                                                             AccDataType,
-                                                             OutDataType,
-                                                             Rank,
-                                                             NumReduceDim,
-                                                             ReduceOperation,
-                                                             InElementwiseOperation,
-                                                             AccElementwiseOperation,
-                                                             PropagateNan,
-                                                             UseIndex>(reduce_ptrs);
-        }
+        using ReduceOp = ck::tensor_operation::device::DeviceReduce<InDataType,
+                                                                    AccDataType,
+                                                                    OutDataType,
+                                                                    Rank,
+                                                                    NumReduceDim,
+                                                                    ReduceOperation,
+                                                                    InElementwiseOperation,
+                                                                    AccElementwiseOperation,
+                                                                    PropagateNan,
+                                                                    OutputIndex>;
+
+        const auto reduce_ptrs = ck::tensor_operation::device::instance::
+            DeviceOperationInstanceFactory<ReduceOp>::GetInstances();
         if(reduce_ptrs.empty())
         {
             throw std::runtime_error("Wrong! No device REDUCE instance found");
         };
-        if(do_verification)
-        {
-            ReductionHost<InDataType,
-                          AccDataType,
-                          OutDataType,
-                          ReduceOperation,
-                          InElementwiseOperation,
-                          AccElementwiseOperation,
-                          Rank,
-                          NumReduceDim,
-                          PropagateNan,
-                          OutputIndex>
-                hostReduce(in.mDesc, out_ref.mDesc, invariantDims, reduceDims);
-
-            hostReduce.Run(alpha,
-                           in.mData.data(),
-                           beta,
-                           out_ref.mData.data(),
-                           out_indices_ref.mData.data(),
-                           in_elementwise_op,
-                           acc_elementwise_op);
-        };
         std::array<index_t, Rank> arrInLengths;
         std::array<index_t, Rank> arrInStrides;
         std::array<index_t, NumOutDim> arrOutLengths;
@@ -365,6 +311,49 @@ bool profile_reduce_impl_impl(bool do_verification,
         ck::ranges::copy(outLengths, arrOutLengths.begin());
         ck::ranges::copy(outStrides, arrOutStrides.begin());
+        if(do_verification)
+        {
+            using ReferenceReduceInstance =
+                ck::tensor_operation::host::ReferenceReduce<InDataType,
+                                                            AccDataType,
+                                                            OutDataType,
+                                                            Rank,
+                                                            NumReduceDim,
+                                                            ReduceOperation,
+                                                            InElementwiseOperation,
+                                                            AccElementwiseOperation,
+                                                            PropagateNan,
+                                                            OutputIndex>;
+
+            auto reduce_ref = ReferenceReduceInstance{};
+
+            auto argument_ptr_ref = reduce_ref.MakeArgumentPointer(arrInLengths,
+                                                                   arrInStrides,
+                                                                   arrOutLengths,
+                                                                   arrOutStrides,
+                                                                   reduceDims,
+                                                                   static_cast<double>(alpha),
+                                                                   static_cast<double>(beta),
+                                                                   in.mData.data(),
+                                                                   nullptr,
+                                                                   out_ref.mData.data(),
+                                                                   out_indices_ref.mData.data(),
+                                                                   in_elementwise_op,
+                                                                   acc_elementwise_op);
+
+            if(!reduce_ref.IsSupportedArgument(argument_ptr_ref.get()))
+            {
+                std::cout << "The runtime parameters not supported by the reduce reference, exiting!"
+                          << std::endl;
+
+                return (false);
+            };
+
+            auto invoker_ptr_ref = reduce_ref.MakeInvokerPointer();
+
+            (void)invoker_ptr_ref->Run(argument_ptr_ref.get());
+        };
+
         for(auto& reduce_ptr : reduce_ptrs)
         {
             auto argument_ptr = reduce_ptr->MakeArgumentPointer(arrInLengths,
@@ -372,8 +361,8 @@ bool profile_reduce_impl_impl(bool do_verification,
                                                                 arrOutLengths,
                                                                 arrOutStrides,
                                                                 reduceDims,
-                                                                alpha,
-                                                                beta,
+                                                                static_cast<double>(alpha),
+                                                                static_cast<double>(beta),
                                                                 in_dev.GetDeviceBuffer(),
                                                                 nullptr,
                                                                 out_dev.GetDeviceBuffer(),
@@ -383,6 +372,8 @@ bool profile_reduce_impl_impl(bool do_verification,
             if(!reduce_ptr->IsSupportedArgument(argument_ptr.get()))
                 continue;
+            else
+                num_kernel++;
             std::string reduce_name = reduce_ptr->GetTypeString();
@@ -446,14 +437,20 @@ bool profile_reduce_impl_impl(bool do_verification,
         };
         };
-        if(time_kernel)
+        if(time_kernel && num_kernel > 0)
             std::cout << "Best Perf: " << best_avg_time << " ms, " << best_gb_per_sec << " GB/s"
                       << std::endl;
     }
     else
     {
-        std::cout << "The requested reduction operation is not supported, please check !!!"
-                  << std::endl;
+        throw std::runtime_error(
+            "The requested reduction operation is not supported, please check!");
     };
+
+    if(num_kernel == 0)
+    {
+        std::cout << "Error: No kernel is applicable" << std::endl;
+        return false;
+    };
     return pass;
profiler/include/profiler/profile_softmax_impl.hpp
@@ -48,8 +48,8 @@ bool profile_softmax_impl(int do_verification,
                           std::vector<index_t> in_length,
                           std::vector<index_t> in_strides,
                           std::vector<index_t> reduce_dims,
-                          AccDataType alpha,
-                          AccDataType beta)
+                          double alpha,
+                          double beta)
 {
     if(Rank != in_length.size())
     {
@@ -122,8 +122,8 @@ bool profile_softmax_impl(int do_verification,
         auto argument_ptr = inst_ptr->MakeArgumentPointer(in_tensor_lengths,
                                                           in_tensor_strides,
                                                           reduce_dims,
-                                                          &alpha,
-                                                          &beta,
+                                                          alpha,
+                                                          beta,
                                                           in_dev.GetDeviceBuffer(),
                                                           out_dev.GetDeviceBuffer(),
                                                           PassThrough{},
profiler/src/CMakeLists.txt
@@ -6,6 +6,7 @@ set(PROFILER_SOURCES
     profile_gemm_bilinear.cpp
     profile_gemm_bias_add_reduce.cpp
     profile_gemm_add_add_fastgelu.cpp
+    profile_gemm_add_multiply.cpp
     profile_gemm_add_fastgelu.cpp
     profile_gemm_add_relu_add_layernorm.cpp
     profile_gemm_fastgelu.cpp
@@ -27,6 +28,7 @@ set(PROFILER_SOURCES
     profile_softmax.cpp
    profile_batchnorm_fwd.cpp
    profile_batchnorm_bwd.cpp
+   profile_batchnorm_infer.cpp
 )

 set(PROFILER_EXECUTABLE ckProfiler)
@@ -39,6 +41,7 @@ target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_splitk_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_bilinear_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_add_fastgelu_instance)
+target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_multiply_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_fastgelu_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_fastgelu_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_relu_add_layernorm_instance)
profiler/src/profile_batchnorm_infer.cpp (new file, mode 100644)
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#include <iostream>
#include <vector>
#include <getopt.h>
#include "ck/library/utility/host_common_util.hpp"
#include "profiler/profile_batchnorm_infer_impl.hpp"
#include "profiler_operation_registry.hpp"
using ck::index_t;
using namespace std;

static const struct option long_options[] = {{"inOutLengths", required_argument, nullptr, 'D'},
                                             {"reduceDims", required_argument, nullptr, 'R'},
                                             {"dumpout", required_argument, nullptr, 'o'},
                                             {"verify", required_argument, nullptr, 'v'},
                                             {"help", no_argument, nullptr, '?'},
                                             {nullptr, 0, nullptr, 0}};

class BatchnormInferArgParser
{
    private:
    int option_index = 0;

    public:
    std::vector<size_t> inLengths;
    std::vector<int> reduceDims;

    bool do_verification = false;
    bool do_dumpout      = false;

    bool updateMovingAverage;
    bool saveMeanAndInvVariance;

    int data_type    = 0;
    int init_method  = 2;
    bool time_kernel = false;

    BatchnormInferArgParser()  = default;
    ~BatchnormInferArgParser() = default;

    void show_usage(const char* cmd)
    {
        // clang-format off
        std::cout << "Usage of " << cmd << std::endl;
        std::cout << "--inOutLengths or -D, comma separated list of input tensor dimension lengths, must have 4 integers for nhwc" << std::endl;
        std::cout << "--reduceDims or -R, comma separated list of dimensions to reduce on" << std::endl;
        std::cout << "--verify or -v, 1/0 to indicate whether to verify the result by comparing with the host-based batch-normalization" << std::endl;
        std::cout << "Arg1: data type (0: fp16, 1: fp32, 5: bp16, 6: fp64)" << std::endl;
        std::cout << "Arg2: init method used for bnScale and bnBias (0=no init, 1=single integer value, 2=scope integer value, 3=decimal value)" << std::endl;
        std::cout << "Arg3: time kernel (0=no, 1=yes)" << std::endl;
        // clang-format on
    };

    int operator()(int argc, char* argv[])
    {
        using ck::host_common::getTypeValuesFromString;

        int ch;

        optind++; // to skip the module name

        while(1)
        {
            ch = getopt_long(argc, argv, "D:R:v:o:", long_options, &option_index);
            if(ch == -1)
                break;
            switch(ch)
            {
            case 'D':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");

                inLengths = getTypeValuesFromString<size_t>(optarg);
                break;
            case 'R':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");

                reduceDims = getTypeValuesFromString<int>(optarg);
                break;
            case 'v':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");

                do_verification = static_cast<bool>(std::atoi(optarg));
                break;
            case 'o':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");

                do_dumpout = static_cast<bool>(std::atoi(optarg));
                break;
            case '?':
                if(std::string(long_options[option_index].name) == "help")
                {
                    show_usage(argv[0]);
                    return -1;
                };
                break;
            default:
                show_usage(argv[0]);
                std::cerr << "Invalid cmd-line options!" << std::endl;
                return -1;
            };
        };

        if(optind + 3 > argc)
            throw std::runtime_error("Invalid cmd-line arguments, more argumetns are needed!");

        data_type   = std::atoi(argv[optind++]);
        init_method = std::atoi(argv[optind++]);
        time_kernel = static_cast<bool>(std::atoi(argv[optind++]));

        if(data_type != 0 && data_type != 1 && data_type != 5 && data_type != 6)
            return -1;

        return 0;
    };
}; // end of class AppArgs

static const double epsilon = std::numeric_limits<float>::epsilon();

int profile_batchnorm_infer(int argc, char* argv[])
{
    using ck::profiler::profile_batchnorm_infer_impl;

    BatchnormInferArgParser arg_parser;

    if(arg_parser(argc, argv) != 0)
        return -1;

    using F16  = ck::half_t;
    using F32  = float;
    using BF16 = ck::bhalf_t;
    using F64  = double;

    if(arg_parser.data_type == 0)
    {
        if(arg_parser.inLengths.size() == 4 && arg_parser.reduceDims.size() == 3)
        {
            profile_batchnorm_infer_impl<F16, F16, F32, F16, F16, F32, 4, 3>(
                arg_parser.do_verification,
                arg_parser.init_method,
                arg_parser.do_dumpout,
                arg_parser.time_kernel,
                arg_parser.inLengths,
                arg_parser.reduceDims,
                epsilon);
        };
    }
    else if(arg_parser.data_type == 1)
    {
        if(arg_parser.inLengths.size() == 4 && arg_parser.reduceDims.size() == 3)
        {
            profile_batchnorm_infer_impl<F32, F32, F32, F32, F32, F32, 4, 3>(
                arg_parser.do_verification,
                arg_parser.init_method,
                arg_parser.do_dumpout,
                arg_parser.time_kernel,
                arg_parser.inLengths,
                arg_parser.reduceDims,
                epsilon);
        };
    }
    else if(arg_parser.data_type == 5)
    {
        if(arg_parser.inLengths.size() == 4 && arg_parser.reduceDims.size() == 3)
        {
            profile_batchnorm_infer_impl<BF16, BF16, F32, BF16, BF16, F32, 4, 3>(
                arg_parser.do_verification,
                arg_parser.init_method,
                arg_parser.do_dumpout,
                arg_parser.time_kernel,
                arg_parser.inLengths,
                arg_parser.reduceDims,
                epsilon);
        };
    }
    else if(arg_parser.data_type == 6)
    {
        if(arg_parser.inLengths.size() == 4 && arg_parser.reduceDims.size() == 3)
        {
            profile_batchnorm_infer_impl<F64, F64, F64, F64, F64, F64, 4, 3>(
                arg_parser.do_verification,
                arg_parser.init_method,
                arg_parser.do_dumpout,
                arg_parser.time_kernel,
                arg_parser.inLengths,
                arg_parser.reduceDims,
                epsilon);
        };
    }

    return 0;
}

REGISTER_PROFILER_OPERATION("bnorm_infer", "Batchnorm inference", profile_batchnorm_infer);
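Based on the option and positional-argument parsing above, a typical invocation of the registered operation would pass the tensor lengths and reduce dimensions as options followed by the three positional arguments, for example (illustrative values only): ckProfiler bnorm_infer -D 128,16,6,512 -R 0,1,2 -v 1 1 2 1, which would request fp32 data (data type 1), init method 2, and kernel timing over an NHWC-style problem.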
profiler/src/profile_gemm_add_multiply.cpp (new file, mode 100644)
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>
#include "profiler/profile_gemm_add_multiply_impl.hpp"
#include "profiler_operation_registry.hpp"
#define OP_NAME "gemm_add_multiply"
#define OP_DESC "GEMM+Add+MULTIPLY"
int profile_gemm_add_multiply(int argc, char* argv[])
{
    enum struct MatrixLayout
    {
        MK_KN_MN_MN_MN, // 0
        MK_NK_MN_MN_MN, // 1
        KM_KN_MN_MN_MN, // 2
        KM_NK_MN_MN_MN, // 3
    };

    enum struct MatrixDataType
    {
        F32_F32_F32_F32_F32,      // 0
        F16_F16_F16_F16_F16,      // 1
        BF16_BF16_BF16_BF16_BF16, // 2
        INT8_INT8_INT8_INT8_INT8, // 3
    };

    if(argc != 16)
    {
        // clang-format off
        printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
        printf("arg2: data type (0: fp32; 1: fp16; 2: bf16; 3: int8)\n");
        printf("arg3: matrix layout (0: E[m, n] = AddMultiply((A[m, k] * B[k, n] + D0[m, n]) x D1[m, n]);\n");
        printf("                     1: E[m, n] = AddMultiply((A[m, k] * B[k, n] + D0[m, n]) x D1[m, n]);\n");
        printf("                     2: E[m, n] = AddMultiply((A[m, k] * B[k, n] + D0[m, n]) x D1[m, n]);\n");
        printf("                     3: E[m, n] = AddMultiply((A[m, k] * B[k, n] + D0[m, n]) x D1[m, n]))\n");
        printf("arg4: verification (0: no; 1: yes)\n");
        printf("arg5: initialization (0: no init; 1: integer value; 2: decimal value)\n");
        printf("arg6: print tensor value (0: no; 1: yes)\n");
        printf("arg7: time kernel (0=no, 1=yes)\n");
        printf("arg8 to 15: M, N, K, StrideA, StrideB, StrideD0, StrideD1, StrideE\n");
        // clang-format on
        exit(1);
    }

    const auto data_type       = static_cast<MatrixDataType>(std::stoi(argv[2]));
    const auto layout          = static_cast<MatrixLayout>(std::stoi(argv[3]));
    const bool do_verification = std::stoi(argv[4]);
    const int init_method      = std::stoi(argv[5]);
    const bool do_log          = std::stoi(argv[6]);
    const bool time_kernel     = std::stoi(argv[7]);

    const int M        = std::stoi(argv[8]);
    const int N        = std::stoi(argv[9]);
    const int K        = std::stoi(argv[10]);
    const int StrideA  = std::stoi(argv[11]);
    const int StrideB  = std::stoi(argv[12]);
    const int StrideD0 = std::stoi(argv[13]);
    const int StrideD1 = std::stoi(argv[14]);
    const int StrideE  = std::stoi(argv[15]);

    using F16 = ck::half_t;
    using F32 = float;

    using Row = ck::tensor_layout::gemm::RowMajor;
    using Col = ck::tensor_layout::gemm::ColumnMajor;

    auto profile = [&](auto a_type,
                       auto b_type,
                       auto acc_type,
                       auto d0_type,
                       auto d1_type,
                       auto e_type,
                       auto a_layout,
                       auto b_layout,
                       auto d0_layout,
                       auto d1_layout,
                       auto e_layout) {
        using ADataType   = decltype(a_type);
        using BDataType   = decltype(b_type);
        using AccDataType = decltype(acc_type);
        using D0DataType  = decltype(d0_type);
        using D1DataType  = decltype(d1_type);
        using EDataType   = decltype(e_type);

        using ALayout  = decltype(a_layout);
        using BLayout  = decltype(b_layout);
        using D0Layout = decltype(d0_layout);
        using D1Layout = decltype(d1_layout);
        using ELayout  = decltype(e_layout);

        const int DefaultStrideA  = ck::is_same_v<ALayout, Row> ? K : M;
        const int DefaultStrideB  = ck::is_same_v<BLayout, Row> ? N : K;
        const int DefaultStrideD0 = ck::is_same_v<D0Layout, Row> ? N : M;
        const int DefaultStrideD1 = ck::is_same_v<D1Layout, Row> ? N : M;
        const int DefaultStrideE  = ck::is_same_v<ELayout, Row> ? N : M;

        bool pass = ck::profiler::profile_gemm_add_multiply_impl<ADataType,
                                                                 BDataType,
                                                                 AccDataType,
                                                                 D0DataType,
                                                                 D1DataType,
                                                                 EDataType,
                                                                 ALayout,
                                                                 BLayout,
                                                                 D0Layout,
                                                                 D1Layout,
                                                                 ELayout>(
            do_verification,
            init_method,
            do_log,
            time_kernel,
            M,
            N,
            K,
            (StrideA < 0) ? DefaultStrideA : StrideA,
            (StrideB < 0) ? DefaultStrideB : StrideB,
            (StrideD0 < 0) ? DefaultStrideD0 : StrideD0,
            (StrideD1 < 0) ? DefaultStrideD1 : StrideD1,
            (StrideE < 0) ? DefaultStrideE : StrideE);

        return pass ? 0 : 1;
    };

    if(data_type == MatrixDataType::F16_F16_F16_F16_F16 && layout == MatrixLayout::MK_KN_MN_MN_MN)
    {
        return profile(F16{}, F16{}, F32{}, F16{}, F16{}, F16{}, Row{}, Row{}, Row{}, Row{}, Row{});
    }
    else if(data_type == MatrixDataType::F16_F16_F16_F16_F16 &&
            layout == MatrixLayout::MK_NK_MN_MN_MN)
    {
        return profile(F16{}, F16{}, F32{}, F16{}, F16{}, F16{}, Row{}, Col{}, Row{}, Row{}, Row{});
    }
    else if(data_type == MatrixDataType::F16_F16_F16_F16_F16 &&
            layout == MatrixLayout::KM_KN_MN_MN_MN)
    {
        return profile(F16{}, F16{}, F32{}, F16{}, F16{}, F16{}, Col{}, Row{}, Row{}, Row{}, Row{});
    }
    else if(data_type == MatrixDataType::F16_F16_F16_F16_F16 &&
            layout == MatrixLayout::KM_NK_MN_MN_MN)
    {
        return profile(F16{}, F16{}, F32{}, F16{}, F16{}, F16{}, Col{}, Col{}, Row{}, Row{}, Row{});
    }
    else
    {
        std::cout << "this data_type & layout is not implemented" << std::endl;

        return 1;
    }
}

REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_gemm_add_multiply);
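Given the fixed argc == 16 check above, an illustrative command line (values are examples only) is: ckProfiler gemm_add_multiply 1 0 1 1 0 1 1024 1024 1024 -1 -1 -1 -1 -1, i.e. fp16 data, the MK_KN row-major layout, verification and kernel timing enabled, and negative strides falling back to the packed defaults computed in the profile lambda.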
profiler/src/profile_softmax.cpp
@@ -99,8 +99,8 @@ int profile_softmax(int argc, char* argv[])
                                                           length,
                                                           stride,
                                                           reduce,
-                                                          float(alpha),
-                                                          float(beta));
+                                                          double(alpha),
+                                                          double(beta));
         }
         else if(data_type == SoftmaxDataType::F32_F32)
         {
@@ -111,8 +111,8 @@ int profile_softmax(int argc, char* argv[])
                                                           length,
                                                           stride,
                                                           reduce,
-                                                          float(alpha),
-                                                          float(beta));
+                                                          double(alpha),
+                                                          double(beta));
         }
         else
         {
@@ -131,8 +131,8 @@ int profile_softmax(int argc, char* argv[])
                                                           length,
                                                           stride,
                                                           reduce,
-                                                          float(alpha),
-                                                          float(beta));
+                                                          double(alpha),
+                                                          double(beta));
         }
         else if(data_type == SoftmaxDataType::F32_F32)
         {
@@ -143,8 +143,8 @@ int profile_softmax(int argc, char* argv[])
                                                           length,
                                                           stride,
                                                           reduce,
-                                                          float(alpha),
-                                                          float(beta));
+                                                          double(alpha),
+                                                          double(beta));
         }
         else
         {
test/batchnorm/CMakeLists.txt
 add_gtest_executable(test_batchnorm_fwd_rank_4 batchnorm_fwd_rank_4.cpp)
 add_gtest_executable(test_batchnorm_bwd_rank_4 batchnorm_bwd_rank_4.cpp)
+add_gtest_executable(test_batchnorm_infer_rank_4 batchnorm_infer_rank_4.cpp)

 target_link_libraries(test_batchnorm_fwd_rank_4 PRIVATE utility device_batchnorm_instance)
 target_link_libraries(test_batchnorm_bwd_rank_4 PRIVATE utility device_batchnorm_instance)
+target_link_libraries(test_batchnorm_infer_rank_4 PRIVATE utility device_batchnorm_instance)
test/batchnorm/batchnorm_infer_rank_4.cpp (new file, mode 100644)
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#include <cstdlib>
#include <iostream>
#include <initializer_list>
#include <vector>
#include <tuple>
#include <gtest/gtest.h>
#include "profiler/profile_batchnorm_infer_impl.hpp"
using F16  = ck::half_t;
using F32  = float;
using BF16 = ck::bhalf_t;
using F64  = double;

template <typename Tuple>
class TestBatchNormInferRank4 : public ::testing::Test
{
    private:
    const double epsilon = std::numeric_limits<float>::epsilon();

    protected:
    using XDataType       = std::tuple_element_t<0, Tuple>;
    using YDataType       = std::tuple_element_t<1, Tuple>;
    using AccDataType     = std::tuple_element_t<2, Tuple>;
    using ScaleDataType   = std::tuple_element_t<3, Tuple>;
    using BiasDataType    = std::tuple_element_t<4, Tuple>;
    using MeanVarDataType = std::tuple_element_t<5, Tuple>;

    std::vector<std::vector<size_t>> list_of_lengths = {
        {128, 16, 3, 1024}, {128, 16, 6, 512}, {4, 4, 4, 4}, {32, 32, 32, 32}};

    std::vector<int> reduceDims;

    template <int NumReduceDim>
    void Run()
    {
        for(auto& inOutLengths : list_of_lengths)
        {
            bool pass = true;

            EXPECT_FALSE(reduceDims.size() != NumReduceDim);

            pass = pass && ck::profiler::profile_batchnorm_infer_impl<XDataType,
                                                                      YDataType,
                                                                      AccDataType,
                                                                      ScaleDataType,
                                                                      BiasDataType,
                                                                      MeanVarDataType,
                                                                      4,
                                                                      NumReduceDim>(
                               true, 3, false, false, inOutLengths, reduceDims, epsilon);

            pass = pass && ck::profiler::profile_batchnorm_infer_impl<XDataType,
                                                                      YDataType,
                                                                      AccDataType,
                                                                      ScaleDataType,
                                                                      BiasDataType,
                                                                      MeanVarDataType,
                                                                      4,
                                                                      NumReduceDim>(
                               true, 3, false, false, inOutLengths, reduceDims, epsilon);

            EXPECT_TRUE(pass);
        }
    }
};

using KernelTypes = ::testing::Types<std::tuple<F16, F16, F32, F16, F16, F32>,
                                     std::tuple<F32, F32, F32, F32, F32, F32>,
                                     std::tuple<BF16, BF16, F32, BF16, BF16, F32>,
                                     std::tuple<F64, F64, F64, F64, F64, F64>>;

TYPED_TEST_SUITE(TestBatchNormInferRank4, KernelTypes);

// nhwc
TYPED_TEST(TestBatchNormInferRank4, nhwc)
{
    this->reduceDims = {0, 1, 2};
    this->template Run<3>();
}

// nchw
TYPED_TEST(TestBatchNormInferRank4, nchw)
{
    this->reduceDims = {0, 2, 3};
    this->template Run<3>();
}
test/gemm/CMakeLists.txt
@@ -18,6 +18,7 @@ add_library(gemm_standalone_xdl_fp16_instances STATIC
     instance/gemm_f16_nn_instance.cpp
     instance/gemm_f16_nt_instance.cpp
     instance/gemm_f16_tn_instance.cpp
+    instance/gemm_wavelet_f16_tn_instance.cpp
     instance/gemm_f16_tt_instance.cpp
 )
 add_test_executable(test_gemm_standalone_xdl_fp16 gemm_standalone_xdl_fp16.cpp)
test/gemm/gemm_standalone_xdl_fp16.cpp
@@ -10,6 +10,7 @@
 #include "gemm_f16_nt_instance.hpp"
 #include "gemm_f16_tn_instance.hpp"
 #include "gemm_f16_tt_instance.hpp"
+#include "gemm_wavelet_f16_tn_instance.hpp"

 using Row = ck::tensor_layout::gemm::RowMajor;
 using Col = ck::tensor_layout::gemm::ColumnMajor;
@@ -74,6 +75,10 @@ int main(int argc, char* argv[])
         {GemmParams{2048, 1664, 4096}, LayoutConfig{true, false, true}, add_gemm_f16_tn_256x128},
         {GemmParams{1024, 1664, 4096}, LayoutConfig{true, false, true}, add_gemm_f16_tn_128x128},
         {GemmParams{1024, 832, 4096}, LayoutConfig{true, false, true}, add_gemm_f16_tn_128x64},
+        {GemmParams{2048, 3328, 4096}, LayoutConfig{true, false, true}, add_gemm_wavelet_f16_tn_256x256},
+        {GemmParams{2048, 1664, 4096}, LayoutConfig{true, false, true}, add_gemm_wavelet_f16_tn_256x128},
+        {GemmParams{1024, 1664, 4096}, LayoutConfig{true, false, true}, add_gemm_wavelet_f16_tn_128x128},
+        {GemmParams{1024, 832, 4096}, LayoutConfig{true, false, true}, add_gemm_wavelet_f16_tn_128x64},
         {GemmParams{2048, 3328, 4096}, LayoutConfig{true, true, true}, add_gemm_f16_tt_256x256},
         {GemmParams{2048, 1664, 4096}, LayoutConfig{true, true, true}, add_gemm_f16_tt_256x128},
         {GemmParams{1024, 1664, 4096}, LayoutConfig{true, true, true}, add_gemm_f16_tt_128x128},
test/gemm/instance/gemm_wavelet_f16_tn_instance.cpp (new file, mode 100644)
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#include <cstdlib>
#include "ck/ck.hpp"
#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp"
#include "ck/tensor_operation/gpu/device/device_gemm_xdl_waveletmodel_cshuffle.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "gemm_wavelet_f16_tn_instance.hpp"
namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

using F16 = ck::half_t;
using F32 = float;

using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;

static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;

using gemm_f16_tn_256x256 = std::tuple<
    // clang-format off
//##################### | ALayout| BLayout| CLayout| AData| BData| AccData| CShuffle| CData| A| B| C| GEMM| NumGemmK| ABBlockTransfer| BlockGemm| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer|
//##################### | | | | Type| Type| Type| DataType| Type| Elementwise| Elementwise| Elementwise| Specialization| Prefetch| ThreadGroupSize| ThreadGroupSize| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector|
//##################### | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl|
//##################### | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
        DeviceGemm_Xdl_WaveletModel_CShuffle<Row, Col, Row, F16, F16, F32, F16, F16, PassThrough, PassThrough, PassThrough, GemmDefault, 1, 256, 256, 256, 256, 32, 8, 8, 32, 32, 4, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>
    // clang-format on
    >;

using gemm_f16_tn_256x128 = std::tuple<
    // clang-format off
//##################### | ALayout| BLayout| CLayout| AData| BData| AccData| CShuffle| CData| A| B| C| GEMM| NumGemmK| ABBlockTransfer| BlockGemm| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer|
//##################### | | | | Type| Type| Type| DataType| Type| Elementwise| Elementwise| Elementwise| Specialization| Prefetch| ThreadGroupSize| ThreadGroupSize| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector|
//##################### | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl|
//##################### | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
        DeviceGemm_Xdl_WaveletModel_CShuffle<Row, Col, Row, F16, F16, F32, F16, F16, PassThrough, PassThrough, PassThrough, GemmDefault, 1, 256, 256, 256, 128, 32, 8, 8, 32, 32, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>
    // clang-format on
    >;

using gemm_f16_tn_128x128 = std::tuple<
    // clang-format off
//##################### | ALayout| BLayout| CLayout| AData| BData| AccData| CShuffle| CData| A| B| C| GEMM| NumGemmK| ABBlockTransfer| BlockGemm| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer|
//##################### | | | | Type| Type| Type| DataType| Type| Elementwise| Elementwise| Elementwise| Specialization| Prefetch| ThreadGroupSize| ThreadGroupSize| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector|
//##################### | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl|
//##################### | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
        DeviceGemm_Xdl_WaveletModel_CShuffle<Row, Col, Row, F16, F16, F32, F16, F16, PassThrough, PassThrough, PassThrough, GemmDefault, 1, 256, 256, 128, 128, 32, 8, 8, 32, 32, 2, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>
    // clang-format on
    >;

using gemm_f16_tn_128x64 = std::tuple<
    // clang-format off
//##################### | ALayout| BLayout| CLayout| AData| BData| AccData| CShuffle| CData| A| B| C| GEMM| NumGemmK| ABBlockTransfer| BlockGemm| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer|
//##################### | | | | Type| Type| Type| DataType| Type| Elementwise| Elementwise| Elementwise| Specialization| Prefetch| ThreadGroupSize| ThreadGroupSize| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector|
//##################### | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl|
//##################### | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
        DeviceGemm_Xdl_WaveletModel_CShuffle<Row, Col, Row, F16, F16, F32, F16, F16, PassThrough, PassThrough, PassThrough, GemmDefault, 1, 256, 256, 128, 64, 32, 8, 8, 32, 32, 2, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>
    // clang-format on
    >;

void add_gemm_wavelet_f16_tn_256x256(std::vector<std::unique_ptr<BaseOperator>>& instances)
{
    add_device_operation_instances(instances, gemm_f16_tn_256x256{});
}

void add_gemm_wavelet_f16_tn_256x128(std::vector<std::unique_ptr<BaseOperator>>& instances)
{
    add_device_operation_instances(instances, gemm_f16_tn_256x128{});
}

void add_gemm_wavelet_f16_tn_128x128(std::vector<std::unique_ptr<BaseOperator>>& instances)
{
    add_device_operation_instances(instances, gemm_f16_tn_128x128{});
}

void add_gemm_wavelet_f16_tn_128x64(std::vector<std::unique_ptr<BaseOperator>>& instances)
{
    add_device_operation_instances(instances, gemm_f16_tn_128x64{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
test/gemm/instance/gemm_wavelet_f16_tn_instance.hpp (new file, mode 100644)
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#include <memory>
#include <vector>
#include "include/ck/tensor_operation/gpu/device/device_base.hpp"
namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

void add_gemm_wavelet_f16_tn_256x256(std::vector<std::unique_ptr<BaseOperator>>& instances);
void add_gemm_wavelet_f16_tn_256x128(std::vector<std::unique_ptr<BaseOperator>>& instances);
void add_gemm_wavelet_f16_tn_128x128(std::vector<std::unique_ptr<BaseOperator>>& instances);
void add_gemm_wavelet_f16_tn_128x64(std::vector<std::unique_ptr<BaseOperator>>& instances);

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck