gaoqiong / composable_kernel_ROCM · Commits · d0f355a3

Commit d0f355a3, authored Dec 19, 2023 by Jun Liu
Merge branch 'develop' into amd-develop
Parents: 55a89c74, b305a29e

Showing 20 of the 81 changed files, with 948 additions and 18 deletions (+948 / -18).
Changed files shown on this page:

  library/src/tensor_operation_instance/gpu/permute_scale/CMakeLists.txt (+2, -0)
  library/src/tensor_operation_instance/gpu/permute_scale/device_permute_scale_instances.cpp (+56, -0)
  library/src/tensor_operation_instance/gpu/transpose/device_transpose_instances_3d.cpp (+0, -8)
  profiler/include/profiler/profile_groupnorm_bwd_data_impl.hpp (+250, -0)
  profiler/include/profiler/profile_layernorm_bwd_data_impl.hpp (+255, -0)
  profiler/src/CMakeLists.txt (+3, -0)
  profiler/src/profile_groupnorm_bwd_data.cpp (+104, -0)
  profiler/src/profile_groupnorm_fwd.cpp (+1, -1)
  profiler/src/profile_layernorm_bwd_data.cpp (+112, -0)
  profiler/src/profile_layernorm_fwd.cpp (+2, -2)
  test/CMakeLists.txt (+2, -0)
  test/normalization_bwd_data/CMakeLists.txt (+13, -0)
  test/normalization_bwd_data/test_groupnorm_bwd_data_fp32.cpp (+51, -0)
  test/normalization_bwd_data/test_layernorm2d_bwd_data_fp32.cpp (+48, -0)
  test/normalization_fwd/test_groupnorm_fwd_fp16.cpp (+2, -2)
  test/normalization_fwd/test_groupnorm_fwd_fp32.cpp (+1, -1)
  test/normalization_fwd/test_layernorm2d_fwd_fp16.cpp (+2, -2)
  test/normalization_fwd/test_layernorm4d_fwd_fp16.cpp (+2, -2)
  test/permute_scale/CMakeLists.txt (+6, -0)
  test/permute_scale/test_permute_scale.cpp (+36, -0)
library/src/tensor_operation_instance/gpu/permute_scale/CMakeLists.txt (new file, mode 100644)

add_instance_library(device_permute_scale_instance
    device_permute_scale_instances.cpp
)
library/src/tensor_operation_instance/gpu/permute_scale/device_permute_scale_instances.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_elementwise_scale_impl.hpp"
#include "ck/utility/data_type.hpp"

#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

using F16 = ck::half_t;
using F32 = float;

using Pass    = ck::tensor_operation::element_wise::PassThrough;
using UnaryOp = ck::tensor_operation::element_wise::UnarySquare;
using Scale   = ck::tensor_operation::element_wise::Scale;

// clang-format off
using device_permute_scale_f16_instances = std::tuple<
    DeviceElementwiseImpl<ck::Tuple<F16>, ck::Tuple<F16>, Pass, UnaryOp, Scale, 4, 1, ck::Sequence<1>, ck::Sequence<1>>,
    DeviceElementwiseImpl<ck::Tuple<F16>, ck::Tuple<F16>, Pass, UnaryOp, Scale, 4, 8, ck::Sequence<1>, ck::Sequence<1>>,
    DeviceElementwiseImpl<ck::Tuple<F16>, ck::Tuple<F16>, Pass, UnaryOp, Scale, 4, 4, ck::Sequence<1>, ck::Sequence<1>>,
    DeviceElementwiseImpl<ck::Tuple<F16>, ck::Tuple<F16>, Pass, UnaryOp, Scale, 4, 2, ck::Sequence<1>, ck::Sequence<1>>
>;

using device_permute_scale_f32_instances = std::tuple<
    DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F32>, Pass, UnaryOp, Scale, 4, 1, ck::Sequence<1>, ck::Sequence<1>>,
    DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F32>, Pass, UnaryOp, Scale, 4, 8, ck::Sequence<1>, ck::Sequence<1>>,
    DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F32>, Pass, UnaryOp, Scale, 4, 4, ck::Sequence<1>, ck::Sequence<1>>,
    DeviceElementwiseImpl<ck::Tuple<F32>, ck::Tuple<F32>, Pass, UnaryOp, Scale, 4, 2, ck::Sequence<1>, ck::Sequence<1>>
>;
// clang-format on

void add_device_permute_scale_f16_instances(
    std::vector<std::unique_ptr<
        DeviceElementwise<ck::Tuple<F16>, ck::Tuple<F16>, Pass, UnaryOp, Scale, 4>>>& instances)
{
    add_device_operation_instances(instances, device_permute_scale_f16_instances{});
}

void add_device_permute_scale_f32_instances(
    std::vector<std::unique_ptr<
        DeviceElementwise<ck::Tuple<F32>, ck::Tuple<F32>, Pass, UnaryOp, Scale, 4>>>& instances)
{
    add_device_operation_instances(instances, device_permute_scale_f32_instances{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
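For orientation (not part of the commit): a minimal sketch of how client code might consume the fp32 factory function declared above. It assumes the snippet is appended to the same translation unit (so the headers above are already included) and linked against the device_permute_scale_instance library added in the new CMakeLists.txt; the helper name list_permute_scale_f32_instances is hypothetical.

#include <iostream>
#include <memory>
#include <vector>

// Hypothetical helper: count the fp32 permute+scale instances registered above.
void list_permute_scale_f32_instances()
{
    namespace inst = ck::tensor_operation::device::instance;
    using ck::tensor_operation::device::DeviceElementwise;

    // Each entry is one tuning configuration of DeviceElementwiseImpl; a caller such as
    // the profiler would time them and keep the fastest for a given problem size.
    std::vector<std::unique_ptr<DeviceElementwise<ck::Tuple<float>,
                                                  ck::Tuple<float>,
                                                  ck::tensor_operation::element_wise::PassThrough,
                                                  ck::tensor_operation::element_wise::UnarySquare,
                                                  ck::tensor_operation::element_wise::Scale,
                                                  4>>>
        instances;

    inst::add_device_permute_scale_f32_instances(instances);

    std::cout << "found " << instances.size() << " permute+scale fp32 instances" << std::endl;
}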
library/src/tensor_operation_instance/gpu/transpose/device_transpose_instances_3d.cpp (modified)

@@ -19,22 +19,14 @@ void add_device_transpose_f16_instances(
     std::vector<std::unique_ptr<DeviceElementwise<ck::Tuple<F16>, ck::Tuple<F16>, PassThrough, 5>>>&
         instances)
 {
-#ifdef CK_ENABLE_FP16
     add_device_operation_instances(instances, device_transpose_f16_instances{});
-#else
-    ignore = instances;
-#endif
 }
 
 void add_device_transpose_f32_instances(
     std::vector<std::unique_ptr<DeviceElementwise<ck::Tuple<F32>, ck::Tuple<F32>, PassThrough, 5>>>&
         instances)
 {
-#ifdef CK_ENABLE_FP32
     add_device_operation_instances(instances, device_transpose_f32_instances{});
-#else
-    ignore = instances;
-#endif
 }
 } // namespace instance
profiler/include/profiler/profile_groupnorm_bwd_data_impl.hpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <iomanip>

#include "ck/ck.hpp"
#include "ck/library/tensor_operation_instance/gpu/groupnorm_bwd_data.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_groupnorm_bwd.hpp"

namespace ck {
namespace profiler {

template <typename DYDataType,
          typename XDataType,
          typename GammaDataType,
          typename MeanInvStdDataType,
          typename ComputeDataType,
          typename DXDataType>
bool profile_groupnorm_bwd_data_impl(int do_verification,
                                     int init_method,
                                     bool do_log,
                                     bool time_kernel,
                                     std::vector<index_t> length)
{
    // we don't need DGamma and DBeta here, just for reference class
    using DGammaDataType = DXDataType;
    using DBetaDataType  = DXDataType;

    if(length.size() != 5)
        return false;

    index_t N = length[0];
    index_t G = length[3];
    index_t C = length[4];

    std::vector<index_t> reduce_dim  = {1, 2, 4};
    std::vector<index_t> gammaLength = {G, C};

    Tensor<DYDataType> dy(length);
    Tensor<XDataType> x(length);
    Tensor<GammaDataType> gamma({G, C});
    Tensor<MeanInvStdDataType> mean({N, G});
    Tensor<MeanInvStdDataType> inv_std({N, G});
    Tensor<DXDataType> dx(length);

    Tensor<DXDataType> host_dx(length);
    Tensor<DGammaDataType> host_dgamma({G, C});
    Tensor<DBetaDataType> host_dbeta({G, C});

    std::vector<index_t> strideDy =
        std::vector<ck::index_t>{dy.mDesc.GetStrides().begin(), dy.mDesc.GetStrides().end()};
    std::vector<index_t> strideX          = strideDy;
    std::vector<index_t> strideDx         = strideDy;
    std::vector<index_t> strideGamma      = {0, 0, 0, C, 1};
    std::vector<index_t> strideMeanInvStd = {G, 0, 0, 1, 0};

    switch(init_method)
    {
    case 0:
        dy.GenerateTensorValue(GeneratorTensor_1<DYDataType>{});
        x.GenerateTensorValue(GeneratorTensor_1<XDataType>{});
        gamma.GenerateTensorValue(GeneratorTensor_1<GammaDataType>{});
        mean.GenerateTensorValue(GeneratorTensor_1<MeanInvStdDataType>{});
        inv_std.GenerateTensorValue(GeneratorTensor_1<MeanInvStdDataType>{});
        dx.GenerateTensorValue(GeneratorTensor_1<DXDataType>{});
        break;
    case 1:
        dy.GenerateTensorValue(GeneratorTensor_2<DYDataType>{-5, 5});
        x.GenerateTensorValue(GeneratorTensor_2<XDataType>{-5, 5});
        gamma.GenerateTensorValue(GeneratorTensor_2<GammaDataType>{-5, 5});
        mean.GenerateTensorValue(GeneratorTensor_2<MeanInvStdDataType>{-5, 5});
        inv_std.GenerateTensorValue(GeneratorTensor_2<MeanInvStdDataType>{-5, 5});
        dx.GenerateTensorValue(GeneratorTensor_2<DXDataType>{-5, 5});
        break;
    default:
        dy.GenerateTensorValue(GeneratorTensor_3<DYDataType>{0, 1});
        x.GenerateTensorValue(GeneratorTensor_3<XDataType>{0, 1});
        gamma.GenerateTensorValue(GeneratorTensor_3<GammaDataType>{-0.5, 0.5});
        mean.GenerateTensorValue(GeneratorTensor_3<MeanInvStdDataType>{-0.5, 0.5});
        inv_std.GenerateTensorValue(GeneratorTensor_3<MeanInvStdDataType>{-0.5, 0.5});
        dx.GenerateTensorValue(GeneratorTensor_3<DXDataType>{-0.5, 0.5});
    }

    DeviceMem dy_dev(sizeof(DYDataType) * dy.mDesc.GetElementSpaceSize());
    DeviceMem x_dev(sizeof(XDataType) * x.mDesc.GetElementSpaceSize());
    DeviceMem gamma_dev(sizeof(GammaDataType) * gamma.mDesc.GetElementSpaceSize());
    DeviceMem mean_dev(sizeof(MeanInvStdDataType) * mean.mDesc.GetElementSpaceSize());
    DeviceMem inv_std_dev(sizeof(MeanInvStdDataType) * inv_std.mDesc.GetElementSpaceSize());
    DeviceMem dx_dev(sizeof(DXDataType) * dx.mDesc.GetElementSpaceSize());

    dy_dev.ToDevice(dy.mData.data());
    x_dev.ToDevice(x.mData.data());
    gamma_dev.ToDevice(gamma.mData.data());
    mean_dev.ToDevice(mean.mData.data());
    inv_std_dev.ToDevice(inv_std.mData.data());

    // add device normalization instances
    using DeviceOp = ck::tensor_operation::device::DeviceNormalizationBwdData<DYDataType,
                                                                              XDataType,
                                                                              GammaDataType,
                                                                              MeanInvStdDataType,
                                                                              DXDataType,
                                                                              5,
                                                                              3>;

    // get device op instances
    const auto instance_ptrs = ck::tensor_operation::device::instance::
        DeviceOperationInstanceFactory<DeviceOp>::GetInstances();

    std::cout << "found " << instance_ptrs.size() << " instances" << std::endl;

    std::string best_instance_name;
    float best_avg_time   = std::numeric_limits<float>::max();
    float best_gb_per_sec = 0;

    if(do_verification)
    {
        using ReferenceInstance = ck::tensor_operation::host::ReferenceGroupnormBwd<DYDataType,
                                                                                    XDataType,
                                                                                    GammaDataType,
                                                                                    MeanInvStdDataType,
                                                                                    DGammaDataType,
                                                                                    DBetaDataType,
                                                                                    DXDataType,
                                                                                    ComputeDataType>;

        ReferenceInstance ref;
        auto ref_argument =
            ref.MakeArgument(dy, x, gamma, mean, inv_std, host_dgamma, host_dbeta, host_dx, length);
        auto ref_invoker = ref.MakeInvoker();
        ref_invoker.Run(ref_argument);
    }

    int num_kernel = 0;

    for(auto& inst_ptr : instance_ptrs)
    {
        auto argument_ptr = inst_ptr->MakeArgumentPointer(length,
                                                          strideDy,
                                                          strideX,
                                                          strideGamma,
                                                          strideMeanInvStd,
                                                          strideMeanInvStd,
                                                          strideDx,
                                                          reduce_dim,
                                                          dy_dev.GetDeviceBuffer(),
                                                          x_dev.GetDeviceBuffer(),
                                                          gamma_dev.GetDeviceBuffer(),
                                                          mean_dev.GetDeviceBuffer(),
                                                          inv_std_dev.GetDeviceBuffer(),
                                                          dx_dev.GetDeviceBuffer());

        if(inst_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            ++num_kernel;
        }
        else
        {
            if(time_kernel)
            {
                std::cout << inst_ptr->GetTypeString()
                          << " skipped due to unsupported argument: ";
                LogRange(std::cout << "input lengths = ", length, ", ") << std::endl;
            }

            continue;
        }

        size_t workspace_sz = inst_ptr->GetWorkSpaceSize(argument_ptr.get());
        DeviceMem workspace_dev(workspace_sz);
        inst_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace_dev.GetDeviceBuffer());

        auto invoker_ptr = inst_ptr->MakeInvokerPointer();

        float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

        std::size_t num_bytes = dy.mDesc.GetElementSize() * sizeof(DYDataType) +
                                x.mDesc.GetElementSize() * sizeof(XDataType) +
                                gamma.mDesc.GetElementSize() * sizeof(GammaDataType) +
                                mean.mDesc.GetElementSize() * sizeof(MeanInvStdDataType) +
                                inv_std.mDesc.GetElementSize() * sizeof(MeanInvStdDataType) +
                                dx.mDesc.GetElementSize() * sizeof(DXDataType);

        float gb_per_sec = num_bytes / 1.E6 / avg_time;

        if(time_kernel)
            std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << gb_per_sec
                      << " GB/s, " << inst_ptr->GetTypeString() << std::endl;

        if(avg_time < best_avg_time)
        {
            best_instance_name = inst_ptr->GetTypeString();
            best_avg_time      = avg_time;
            best_gb_per_sec    = gb_per_sec;
        }

        if(do_verification)
        {
            dx_dev.FromDevice(dx.mData.data());

            bool pass = ck::utils::check_err(
                dx.mData, host_dx.mData, "Error: Incorrect results", 1e-3, 1e-3);

            if(do_log)
            {
                LogRangeAsType<float>(std::cout << "dy : ", dy.mData, ",") << std::endl;
                LogRangeAsType<float>(std::cout << "host_dx : ", host_dx.mData, ",") << std::endl;
                LogRangeAsType<float>(std::cout << "dx : ", dx.mData, ",") << std::endl;
            }

            if(!pass)
            {
                std::cout << inst_ptr->GetTypeString() << " failed verification: ";
                LogRange(std::cout << "lengths = [", length, ", ") << "]." << std::endl;
                return false;
            }
            else
            {
                if(time_kernel)
                    std::cout << "pass" << std::endl;
            }
        }
    }

    if(time_kernel)
    {
        LogRange(std::cout << "length = ", length, ",") << ", ";
        LogRange(std::cout << "reduce dims ", reduce_dim, ",") << std::endl;
        std::cout << "best perf = " << best_avg_time << " ms, " << best_gb_per_sec << " GB/s,"
                  << best_instance_name << std::endl;
    }

    if(num_kernel == 0)
    {
        std::cout << "Error: No kernel is applicable" << std::endl;
        return false;
    }

    return true;
}

} // namespace profiler
} // namespace ck
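For orientation (not part of the commit): a minimal sketch of calling the routine above directly, mirroring what the fp32 groupnorm test added later in this commit does. Lengths are [N, H, W, G, C]; verification is on, initialization is the decimal generator (2), logging and timing are off. The wrapper name run_groupnorm_bwd_data_smoke_test is hypothetical.

#include <vector>

#include "profiler/profile_groupnorm_bwd_data_impl.hpp"

bool run_groupnorm_bwd_data_smoke_test()
{
    using F32 = float;
    std::vector<ck::index_t> length{1, 16, 16, 32, 40}; // [N, H, W, G, C]

    // Template parameters: DYDataType, XDataType, GammaDataType, MeanInvStdDataType,
    //                      ComputeDataType, DXDataType
    return ck::profiler::profile_groupnorm_bwd_data_impl<F32, F32, F32, F32, F32, F32>(
        /*do_verification=*/true,
        /*init_method=*/2,
        /*do_log=*/false,
        /*time_kernel=*/false,
        length);
}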
profiler/include/profiler/profile_layernorm_bwd_data_impl.hpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <iomanip>

#include "ck/ck.hpp"
#include "ck/library/tensor_operation_instance/gpu/layernorm_bwd_data.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_layernorm_bwd.hpp"

namespace ck {
namespace profiler {

template <typename DYDataType,
          typename XDataType,
          typename GammaDataType,
          typename MeanInvStdDataType,
          typename ComputeDataType,
          typename DXDataType,
          index_t Rank>
bool profile_layernorm_bwd_data_impl(int do_verification,
                                     int init_method,
                                     bool do_log,
                                     bool time_kernel,
                                     std::vector<index_t> length)
{
    // we don't need DGamma and DBeta here, just for reference class
    using DGammaDataType = DXDataType;
    using DBetaDataType  = DXDataType;

    if(length.size() != Rank || Rank < 2)
        return false;

    // Assume normalize dimension except for batch (first) dimension
    std::vector<index_t> reduce_length{length.begin() + 1, length.end()};
    std::vector<index_t> reduce_dim;
    for(int i = 1; i < Rank; ++i)
        reduce_dim.push_back(i);

    Tensor<DYDataType> dy(length);
    Tensor<XDataType> x(length);
    Tensor<GammaDataType> gamma(reduce_length);
    Tensor<MeanInvStdDataType> mean({length[0]});
    Tensor<MeanInvStdDataType> inv_std({length[0]});
    Tensor<DXDataType> dx(length);

    Tensor<DXDataType> host_dx(length);
    Tensor<DGammaDataType> host_dgamma(reduce_length);
    Tensor<DBetaDataType> host_dbeta(reduce_length);

    std::vector<index_t> strideDy =
        std::vector<ck::index_t>{dy.mDesc.GetStrides().begin(), dy.mDesc.GetStrides().end()};
    std::vector<index_t> strideX     = strideDy;
    std::vector<index_t> strideDx    = strideDy;
    std::vector<index_t> strideGamma = strideDy;
    strideGamma[0]                   = 0;

    std::vector<index_t> strideMeanInvStd{Rank, 0};
    strideMeanInvStd[0] = 1;

    switch(init_method)
    {
    case 0:
        dy.GenerateTensorValue(GeneratorTensor_1<DYDataType>{});
        x.GenerateTensorValue(GeneratorTensor_1<XDataType>{});
        gamma.GenerateTensorValue(GeneratorTensor_1<GammaDataType>{});
        mean.GenerateTensorValue(GeneratorTensor_1<MeanInvStdDataType>{});
        inv_std.GenerateTensorValue(GeneratorTensor_1<MeanInvStdDataType>{});
        dx.GenerateTensorValue(GeneratorTensor_1<DXDataType>{});
        break;
    case 1:
        dy.GenerateTensorValue(GeneratorTensor_2<DYDataType>{-5, 5});
        x.GenerateTensorValue(GeneratorTensor_2<XDataType>{-5, 5});
        gamma.GenerateTensorValue(GeneratorTensor_2<GammaDataType>{-5, 5});
        mean.GenerateTensorValue(GeneratorTensor_2<MeanInvStdDataType>{-5, 5});
        inv_std.GenerateTensorValue(GeneratorTensor_2<MeanInvStdDataType>{-5, 5});
        dx.GenerateTensorValue(GeneratorTensor_2<DXDataType>{-5, 5});
        break;
    default:
        dy.GenerateTensorValue(GeneratorTensor_3<DYDataType>{0, 1});
        x.GenerateTensorValue(GeneratorTensor_3<XDataType>{0, 1});
        gamma.GenerateTensorValue(GeneratorTensor_3<GammaDataType>{-0.5, 0.5});
        mean.GenerateTensorValue(GeneratorTensor_3<MeanInvStdDataType>{-0.5, 0.5});
        inv_std.GenerateTensorValue(GeneratorTensor_3<MeanInvStdDataType>{-0.5, 0.5});
        dx.GenerateTensorValue(GeneratorTensor_3<DXDataType>{-0.5, 0.5});
    }

    DeviceMem dy_dev(sizeof(DYDataType) * dy.mDesc.GetElementSpaceSize());
    DeviceMem x_dev(sizeof(XDataType) * x.mDesc.GetElementSpaceSize());
    DeviceMem gamma_dev(sizeof(GammaDataType) * gamma.mDesc.GetElementSpaceSize());
    DeviceMem mean_dev(sizeof(MeanInvStdDataType) * mean.mDesc.GetElementSpaceSize());
    DeviceMem inv_std_dev(sizeof(MeanInvStdDataType) * inv_std.mDesc.GetElementSpaceSize());
    DeviceMem dx_dev(sizeof(DXDataType) * dx.mDesc.GetElementSpaceSize());

    dy_dev.ToDevice(dy.mData.data());
    x_dev.ToDevice(x.mData.data());
    gamma_dev.ToDevice(gamma.mData.data());
    mean_dev.ToDevice(mean.mData.data());
    inv_std_dev.ToDevice(inv_std.mData.data());

    constexpr int NumReduceDim = Rank - 1;

    // add device normalization instances
    using DeviceOp = ck::tensor_operation::device::DeviceNormalizationBwdData<DYDataType,
                                                                              XDataType,
                                                                              GammaDataType,
                                                                              MeanInvStdDataType,
                                                                              DXDataType,
                                                                              Rank,
                                                                              NumReduceDim>;

    // get device op instances
    const auto instance_ptrs = ck::tensor_operation::device::instance::
        DeviceOperationInstanceFactory<DeviceOp>::GetInstances();

    std::cout << "found " << instance_ptrs.size() << " instances" << std::endl;

    std::string best_instance_name;
    float best_avg_time   = std::numeric_limits<float>::max();
    float best_gb_per_sec = 0;

    if(do_verification)
    {
        using ReferenceInstance = ck::tensor_operation::host::ReferenceLayernormBwd<DYDataType,
                                                                                    XDataType,
                                                                                    GammaDataType,
                                                                                    MeanInvStdDataType,
                                                                                    DGammaDataType,
                                                                                    DBetaDataType,
                                                                                    DXDataType,
                                                                                    ComputeDataType>;

        ReferenceInstance ref;
        auto ref_argument =
            ref.MakeArgument(dy, x, gamma, mean, inv_std, host_dgamma, host_dbeta, host_dx, length);
        auto ref_invoker = ref.MakeInvoker();
        ref_invoker.Run(ref_argument);
    }

    int num_kernel = 0;

    for(auto& inst_ptr : instance_ptrs)
    {
        auto argument_ptr = inst_ptr->MakeArgumentPointer(length,
                                                          strideDy,
                                                          strideX,
                                                          strideGamma,
                                                          strideMeanInvStd,
                                                          strideMeanInvStd,
                                                          strideDx,
                                                          reduce_dim,
                                                          dy_dev.GetDeviceBuffer(),
                                                          x_dev.GetDeviceBuffer(),
                                                          gamma_dev.GetDeviceBuffer(),
                                                          mean_dev.GetDeviceBuffer(),
                                                          inv_std_dev.GetDeviceBuffer(),
                                                          dx_dev.GetDeviceBuffer());

        if(inst_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            ++num_kernel;
        }
        else
        {
            if(time_kernel)
            {
                std::cout << inst_ptr->GetTypeString()
                          << " skipped due to unsupported argument: ";
                LogRange(std::cout << "input lengths = ", length, ", ") << std::endl;
            }

            continue;
        }

        size_t workspace_sz = inst_ptr->GetWorkSpaceSize(argument_ptr.get());
        DeviceMem workspace_dev(workspace_sz);
        inst_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace_dev.GetDeviceBuffer());

        auto invoker_ptr = inst_ptr->MakeInvokerPointer();

        float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

        std::size_t num_bytes = dy.mDesc.GetElementSize() * sizeof(DYDataType) +
                                x.mDesc.GetElementSize() * sizeof(XDataType) +
                                gamma.mDesc.GetElementSize() * sizeof(GammaDataType) +
                                mean.mDesc.GetElementSize() * sizeof(MeanInvStdDataType) +
                                inv_std.mDesc.GetElementSize() * sizeof(MeanInvStdDataType) +
                                dx.mDesc.GetElementSize() * sizeof(DXDataType);

        float gb_per_sec = num_bytes / 1.E6 / avg_time;

        if(time_kernel)
            std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << gb_per_sec
                      << " GB/s, " << inst_ptr->GetTypeString() << std::endl;

        if(avg_time < best_avg_time)
        {
            best_instance_name = inst_ptr->GetTypeString();
            best_avg_time      = avg_time;
            best_gb_per_sec    = gb_per_sec;
        }

        if(do_verification)
        {
            dx_dev.FromDevice(dx.mData.data());

            bool pass = ck::utils::check_err(
                dx.mData, host_dx.mData, "Error: Incorrect results", 1e-3, 1e-3);

            if(do_log)
            {
                LogRangeAsType<float>(std::cout << "dy : ", dy.mData, ",") << std::endl;
                LogRangeAsType<float>(std::cout << "host_dx : ", host_dx.mData, ",") << std::endl;
                LogRangeAsType<float>(std::cout << "dx : ", dx.mData, ",") << std::endl;
            }

            if(!pass)
            {
                std::cout << inst_ptr->GetTypeString() << " failed verification: ";
                LogRange(std::cout << "lengths = [", length, ", ") << "]." << std::endl;
                return false;
            }
            else
            {
                if(time_kernel)
                    std::cout << "pass" << std::endl;
            }
        }
    }

    if(time_kernel)
    {
        LogRange(std::cout << "length = ", length, ",") << ", ";
        LogRange(std::cout << "reduce dims ", reduce_dim, ",") << std::endl;
        std::cout << "best perf = " << best_avg_time << " ms, " << best_gb_per_sec << " GB/s,"
                  << best_instance_name << std::endl;
    }

    if(num_kernel == 0)
    {
        std::cout << "Error: No kernel is applicable" << std::endl;
        return false;
    }

    return true;
}

} // namespace profiler
} // namespace ck
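For orientation (not part of the commit): a minimal sketch of calling the layernorm routine above for a rank-2 problem, mirroring the fp32 layernorm2d test added later in this commit. Lengths are [N, D], with D reduced; the wrapper name run_layernorm2d_bwd_data_smoke_test is hypothetical.

#include <vector>

#include "profiler/profile_layernorm_bwd_data_impl.hpp"

bool run_layernorm2d_bwd_data_smoke_test()
{
    using F32 = float;
    std::vector<ck::index_t> length{4, 256}; // [N, D]

    // Template parameters: DYDataType, XDataType, GammaDataType, MeanInvStdDataType,
    //                      ComputeDataType, DXDataType, Rank
    return ck::profiler::profile_layernorm_bwd_data_impl<F32, F32, F32, F32, F32, F32, 2>(
        /*do_verification=*/true,
        /*init_method=*/2,
        /*do_log=*/false,
        /*time_kernel=*/false,
        length);
}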
profiler/src/CMakeLists.txt (modified)

@@ -16,7 +16,9 @@ set(PROFILER_SOURCES
     profile_grouped_conv_fwd.cpp
     profile_grouped_conv_bwd_weight.cpp
     profile_reduce.cpp
+    profile_groupnorm_bwd_data.cpp
     profile_groupnorm_fwd.cpp
+    profile_layernorm_bwd_data.cpp
     profile_layernorm_fwd.cpp
     profile_max_pool3d_fwd.cpp
     profile_avg_pool3d_bwd.cpp
@@ -78,6 +80,7 @@ target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv3d_bwd_w
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv2d_fwd_bias_relu_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv2d_fwd_bias_relu_add_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_normalization_fwd_instance)
+target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_normalization_bwd_data_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_softmax_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_reduce_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batchnorm_instance)
profiler/src/profile_groupnorm_bwd_data.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <vector>
#include <unordered_map>

#include "profiler/data_type_enum.hpp"
#include "profiler/profile_groupnorm_bwd_data_impl.hpp"
#include "profiler_operation_registry.hpp"

using ck::index_t;

struct groupnormBwdDataArgParser
{
    std::unordered_map<std::string, std::vector<int>> long_opts = {{"length", {}}};

    bool parse_opt(int argc, char* argv[], const std::string& key, int i)
    {
        if(std::string("--") + key == argv[i])
        {
            int pos = i;
            while(++i < argc && argv[i][0] != '-') {}
            int end = i;
            for(int j = pos + 1; j < end; j++)
            {
                long_opts[key].push_back(std::stoi(argv[j]));
            }
            return true;
        }
        return false;
    }

    void operator()(int argc, char* argv[])
    {
        for(auto& kv : long_opts)
        {
            for(int i = 1; i < argc; i++)
            {
                if(parse_opt(argc, argv, kv.first, i))
                    break;
            }
        }
    }
};

void print_help_groupnorm_bwd_data()
{
    // eg: ckProfiler groupnorm_bwd_data 1 0 2 0 1 --length 1 16 16 32 40
    std::cout << "arg1: data type (0: fp16; 1: fp32)\n"
              << "arg2: verification (0: no; 1: yes)\n"
              << "arg3: initialization (0: no init; 1: integer value; 2: decimal value)\n"
              << "arg4: print tensor value (0: no; 1: yes)\n"
              << "arg5: time kernel (0=no, 1=yes)\n"
              << "--length: tensor extents (e.g, --length 1 16 16 32 40)\n"
              << std::endl;
}

int profile_groupnorm_bwd_data(int argc, char* argv[])
{
    if(argc <= 2)
    {
        print_help_groupnorm_bwd_data();
        return 0;
    }

    groupnormBwdDataArgParser arg_parser;

    // short unnamed options
    const ck::DataTypeEnum data_type = static_cast<ck::DataTypeEnum>(std::stoi(argv[2]));
    const bool do_verification       = std::stoi(argv[3]);
    const int init_method            = std::stoi(argv[4]);
    const bool do_log                = std::stoi(argv[5]);
    const bool time_kernel           = std::stoi(argv[6]);

    // parse the long options
    arg_parser(argc, argv);
    const std::vector<index_t> length = arg_parser.long_opts["length"];

    using F32 = float;

    if(length.size() == 5)
    {
        if(data_type == ck::DataTypeEnum::Float)
        {
            ck::profiler::profile_groupnorm_bwd_data_impl<F32, F32, F32, F32, F32, F32>(
                do_verification, init_method, do_log, time_kernel, length);
        }
        else
        {
            throw std::runtime_error("not implemented yet");
        }
    }
    else
    {
        throw std::runtime_error("length should be 5");
    }

    return 0;
}

REGISTER_PROFILER_OPERATION("groupnorm_bwd_data", "Group Normalization", profile_groupnorm_bwd_data);
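For illustration only (not part of the commit, and not something that would be added to the profiler build): a minimal sketch of how the "--length" values are collected by groupnormBwdDataArgParser defined above, assuming it is visible in the same translation unit. The helper name demo_parse_length and the hard-coded argument vector are hypothetical.

#include <iostream>

// Hypothetical helper: feed the parser the same argument shape shown in
// print_help_groupnorm_bwd_data():
//   ckProfiler groupnorm_bwd_data 1 0 2 0 1 --length 1 16 16 32 40
void demo_parse_length()
{
    const char* raw[] = {"ckProfiler", "groupnorm_bwd_data", "1", "0", "2",
                         "0", "1", "--length", "1", "16", "16", "32", "40"};
    int argc    = sizeof(raw) / sizeof(raw[0]);
    char** argv = const_cast<char**>(raw);

    groupnormBwdDataArgParser parser;
    parser(argc, argv); // collects every numeric token after "--length" until the next "-"-prefixed arg

    for(int v : parser.long_opts["length"]) // prints: 1 16 16 32 40
        std::cout << v << ' ';
    std::cout << std::endl;
}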
profiler/src/profile_groupnorm_fwd.cpp (modified)

@@ -98,7 +98,7 @@ int profile_groupnorm(int argc, char* argv[])
     }
     else if(data_type == ck::DataTypeEnum::Half)
     {
-        ck::profiler::profile_groupnorm_impl<F16, F16, F16, F32, F16, F32, false>(
+        ck::profiler::profile_groupnorm_impl<F16, F16, F16, F32, F16, F16, false>(
             do_verification, init_method, do_log, time_kernel, length);
     }
     else
profiler/src/profile_layernorm_bwd_data.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <vector>
#include <unordered_map>

#include "profiler/data_type_enum.hpp"
#include "profiler/profile_layernorm_bwd_data_impl.hpp"
#include "profiler_operation_registry.hpp"

using ck::index_t;

struct layernormBwdDataArgParser
{
    std::unordered_map<std::string, std::vector<int>> long_opts = {{"length", {}}};

    bool parse_opt(int argc, char* argv[], const std::string& key, int i)
    {
        if(std::string("--") + key == argv[i])
        {
            int pos = i;
            while(++i < argc && argv[i][0] != '-') {}
            int end = i;
            for(int j = pos + 1; j < end; j++)
            {
                long_opts[key].push_back(std::stoi(argv[j]));
            }
            return true;
        }
        return false;
    }

    void operator()(int argc, char* argv[])
    {
        for(auto& kv : long_opts)
        {
            for(int i = 1; i < argc; i++)
            {
                if(parse_opt(argc, argv, kv.first, i))
                    break;
            }
        }
    }
};

void print_help_layernorm_bwd_data()
{
    // eg: ckProfiler layernorm_bwd_data 0 0 2 0 1 --length 1502 4096
    std::cout << "arg1: data type (0: fp16; 1: fp32)\n"
              << "arg2: verification (0: no; 1: yes)\n"
              << "arg3: initialization (0: no init; 1: integer value; 2: decimal value)\n"
              << "arg4: print tensor value (0: no; 1: yes)\n"
              << "arg5: time kernel (0=no, 1=yes)\n"
              << "--length: tensor extents (e.g, --length 1024 1024)\n"
              << std::endl;
}

int profile_layernorm_bwd_data(int argc, char* argv[])
{
    if(argc <= 2)
    {
        print_help_layernorm_bwd_data();
        return 0;
    }

    layernormBwdDataArgParser arg_parser;

    // short unnamed options
    const ck::DataTypeEnum data_type = static_cast<ck::DataTypeEnum>(std::stoi(argv[2]));
    const bool do_verification       = std::stoi(argv[3]);
    const int init_method            = std::stoi(argv[4]);
    const bool do_log                = std::stoi(argv[5]);
    const bool time_kernel           = std::stoi(argv[6]);

    // parse the long options
    arg_parser(argc, argv);
    const std::vector<index_t> length = arg_parser.long_opts["length"];

    using F16 = ck::half_t;
    using F32 = float;

    if(length.size() == 2)
    {
        constexpr int rank = 2;
        if(data_type == ck::DataTypeEnum::Half)
        {
            ck::profiler::profile_layernorm_bwd_data_impl<F16, F16, F16, F16, F32, F16, rank>(
                do_verification, init_method, do_log, time_kernel, length);
        }
        else if(data_type == ck::DataTypeEnum::Float)
        {
            ck::profiler::profile_layernorm_bwd_data_impl<F32, F32, F32, F32, F32, F32, rank>(
                do_verification, init_method, do_log, time_kernel, length);
        }
        else
        {
            throw std::runtime_error("not implemented yet");
        }
    }
    else
    {
        throw std::runtime_error("not implemented yet");
    }

    return 0;
}

REGISTER_PROFILER_OPERATION("layernorm_bwd_data", "Layer Normalization", profile_layernorm_bwd_data);
profiler/src/profile_layernorm_fwd.cpp (modified)

@@ -104,7 +104,7 @@ int profile_layernorm(int argc, char* argv[])
     if(data_type == ck::DataTypeEnum::Half)
     {
-        ck::profiler::profile_layernorm_impl<F16, F16, F16, F32, F16, F32, false, rank>(
+        ck::profiler::profile_layernorm_impl<F16, F16, F16, F32, F16, F16, false, rank>(
             do_verification, init_method, do_log, time_kernel, length);
     }
     else if(data_type == ck::DataTypeEnum::Float)
@@ -125,4 +125,4 @@ int profile_layernorm(int argc, char* argv[])
     return 0;
 }
 
-REGISTER_PROFILER_OPERATION("layernorm", "Layer Normalization", profile_layernorm);
+REGISTER_PROFILER_OPERATION("layernorm_fwd", "Layer Normalization", profile_layernorm);
test/CMakeLists.txt (modified)

@@ -140,6 +140,7 @@ add_subdirectory(grouped_convnd_bwd_weight)
 add_subdirectory(block_to_ctile_map)
 add_subdirectory(softmax)
 add_subdirectory(normalization_fwd)
+add_subdirectory(normalization_bwd_data)
 add_subdirectory(data_type)
 add_subdirectory(elementwise_normalization)
 add_subdirectory(batchnorm)
@@ -149,6 +150,7 @@ add_subdirectory(batched_gemm_multi_d)
 add_subdirectory(grouped_convnd_bwd_data)
 add_subdirectory(conv_tensor_rearrange)
 add_subdirectory(transpose)
+add_subdirectory(permute_scale)
 add_subdirectory(wrapper)
 if(GPU_TARGETS MATCHES "gfx11")
     add_subdirectory(wmma_op)
test/normalization_bwd_data/CMakeLists.txt (new file, mode 100644)

add_custom_target(test_normalization_bwd_data)

add_gtest_executable(test_layernorm2d_bwd_data_fp32 test_layernorm2d_bwd_data_fp32.cpp)
if(result EQUAL 0)
    target_link_libraries(test_layernorm2d_bwd_data_fp32 PRIVATE utility device_normalization_bwd_data_instance)
    add_dependencies(test_normalization_bwd_data test_layernorm2d_bwd_data_fp32)
endif()

add_gtest_executable(test_groupnorm_bwd_data_fp32 test_groupnorm_bwd_data_fp32.cpp)
if(result EQUAL 0)
    target_link_libraries(test_groupnorm_bwd_data_fp32 PRIVATE utility device_normalization_bwd_data_instance)
    add_dependencies(test_normalization_bwd_data test_groupnorm_bwd_data_fp32)
endif()
test/normalization_bwd_data/test_groupnorm_bwd_data_fp32.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include "gtest/gtest.h"
#include "profiler/profile_groupnorm_bwd_data_impl.hpp"

using F16 = ck::half_t;
using F32 = float;
using ck::index_t;

template <typename Tuple>
class TestgroupnormBwdData : public ::testing::Test
{
    protected:
    using DYDataType         = std::tuple_element_t<0, Tuple>;
    using XDataType          = std::tuple_element_t<1, Tuple>;
    using GammaDataType      = std::tuple_element_t<2, Tuple>;
    using MeanInvStdDataType = std::tuple_element_t<3, Tuple>;
    using ComputeDataType    = std::tuple_element_t<4, Tuple>;
    using DXDataType         = std::tuple_element_t<5, Tuple>;

    void Run()
    {
        // Bwd data: [N, H, W, G, C], reduce H, W, C
        std::vector<std::vector<ck::index_t>> lengths = {{1, 1, 1, 1, 1},
                                                         {1, 2, 3, 4, 5},
                                                         {256, 9, 9, 9, 9},
                                                         {1, 64, 64, 32, 10},
                                                         {1, 32, 32, 32, 20},
                                                         {1, 16, 16, 32, 40}};

        for(auto length : lengths)
        {
            bool success =
                ck::profiler::profile_groupnorm_bwd_data_impl<DYDataType,
                                                              XDataType,
                                                              GammaDataType,
                                                              MeanInvStdDataType,
                                                              ComputeDataType,
                                                              DXDataType>(true, 2, false, false, length);
            EXPECT_TRUE(success);
        }
    }
};

using KernelTypes = ::testing::Types<
    // DYDataType, XDataType, GammaDataType, MeanInvStdDataType, ComputeDataType, DXDataType
    std::tuple<F32, F32, F32, F32, F32, F32>>;

TYPED_TEST_SUITE(TestgroupnormBwdData, KernelTypes);
TYPED_TEST(TestgroupnormBwdData, Test_FP32) { this->Run(); }
test/normalization_bwd_data/test_layernorm2d_bwd_data_fp32.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include "gtest/gtest.h"
#include "profiler/profile_layernorm_bwd_data_impl.hpp"

using F16 = ck::half_t;
using F32 = float;
using ck::index_t;

template <typename Tuple>
class TestLayernorm2dBwdData : public ::testing::Test
{
    protected:
    using DYDataType         = std::tuple_element_t<0, Tuple>;
    using XDataType          = std::tuple_element_t<1, Tuple>;
    using GammaDataType      = std::tuple_element_t<2, Tuple>;
    using MeanInvStdDataType = std::tuple_element_t<3, Tuple>;
    using ComputeDataType    = std::tuple_element_t<4, Tuple>;
    using DXDataType         = std::tuple_element_t<5, Tuple>;

    void Run()
    {
        // Bwd data: [N, D], reduce D
        std::vector<std::vector<ck::index_t>> lengths = {
            {4, 256}, {8, 511}, {9, 1032}, {4, 2048}, {1, 8192}, {4000, 2000}};

        for(auto length : lengths)
        {
            bool success =
                ck::profiler::profile_layernorm_bwd_data_impl<DYDataType,
                                                              XDataType,
                                                              GammaDataType,
                                                              MeanInvStdDataType,
                                                              ComputeDataType,
                                                              DXDataType,
                                                              2>(true, 2, false, false, length);
            EXPECT_TRUE(success);
        }
    }
};

using KernelTypes = ::testing::Types<
    // DYDataType, XDataType, GammaDataType, MeanInvStdDataType, ComputeDataType, DXDataType
    std::tuple<F32, F32, F32, F32, F32, F32>>;

TYPED_TEST_SUITE(TestLayernorm2dBwdData, KernelTypes);
TYPED_TEST(TestLayernorm2dBwdData, Test_FP32) { this->Run(); }
test/normalization_fwd/test_groupnorm_fwd_fp16.cpp (modified)

@@ -47,8 +47,8 @@ class TestGroupnorm : public ::testing::Test
 };
 
 using KernelTypes = ::testing::Types<
-    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType>
-    std::tuple<F16, F16, F16, F32, F16, F32>>;
+    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType, SaveMeanInvStdDataType>
+    std::tuple<F16, F16, F16, F32, F16, F16>>;
 
 TYPED_TEST_SUITE(TestGroupnorm, KernelTypes);
 TYPED_TEST(TestGroupnorm, Test_FP16) { this->Run(); }
test/normalization_fwd/test_groupnorm_fwd_fp32.cpp (modified)

@@ -45,7 +45,7 @@ class TestGroupnorm : public ::testing::Test
 };
 
 using KernelTypes = ::testing::Types<
-    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType>
+    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType, SaveMeanInvStdDataType>
     std::tuple<F32, F32, F32, F32, F32, F32>>;
 
 TYPED_TEST_SUITE(TestGroupnorm, KernelTypes);
test/normalization_fwd/test_layernorm2d_fwd_fp16.cpp (modified)

@@ -41,8 +41,8 @@ class TestLayernorm2d : public ::testing::Test
 };
 
 using KernelTypes = ::testing::Types<
-    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType>
-    std::tuple<F16, F16, F16, F32, F16, F32>>;
+    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType, SaveMeanInvStdDataType>
+    std::tuple<F16, F16, F16, F32, F16, F16>>;
 
 TYPED_TEST_SUITE(TestLayernorm2d, KernelTypes);
 TYPED_TEST(TestLayernorm2d, Test_FP16) { this->Run(); }
test/normalization_fwd/test_layernorm4d_fwd_fp16.cpp (modified)

@@ -41,8 +41,8 @@ class TestLayernorm4d : public ::testing::Test
 };
 
 using KernelTypes = ::testing::Types<
-    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType>
-    std::tuple<F16, F16, F16, F32, F16, F32>>;
+    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType, SaveMeanInvStdDataType>
+    std::tuple<F16, F16, F16, F32, F16, F16>>;
 
 TYPED_TEST_SUITE(TestLayernorm4d, KernelTypes);
 TYPED_TEST(TestLayernorm4d, Test_FP16) { this->Run(); }
test/permute_scale/CMakeLists.txt (new file, mode 100644)

add_custom_target(test_permute)

add_gtest_executable(test_permute_scale test_permute_scale.cpp)
if(result EQUAL 0)
    target_link_libraries(test_permute_scale PRIVATE utility device_permute_scale_instance)
    add_dependencies(test_permute test_permute_scale)
endif()
test/permute_scale/test_permute_scale.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include "gtest/gtest.h"
#include "test_permute_scale_impl.hpp"

using F16 = ck::half_t;
using F32 = float;
using ck::index_t;

template <typename Tuple>
class TestPermute : public ::testing::Test
{
    protected:
    using ADataType = std::tuple_element_t<0, Tuple>;
    using BDataType = std::tuple_element_t<1, Tuple>;

    void Run()
    {
        std::vector<std::vector<ck::index_t>> lengths = {
            {4, 2, 1, 8}, {1, 1, 1, 1}, {16, 8, 32, 64}, {32, 64, 128, 128}};

        for(auto length : lengths)
        {
            bool success =
                ck::test_permute_scale_impl<ADataType, BDataType, 4>(true, 2, false, false, length);
            EXPECT_TRUE(success);
        }
    }
};

using KernelTypes = ::testing::Types<std::tuple<F16, F16>, std::tuple<F32, F32>>;

TYPED_TEST_SUITE(TestPermute, KernelTypes);
TYPED_TEST(TestPermute, Test_FP16) { this->Run(); }
TYPED_TEST(TestPermute, Test_FP32) { this->Run(); }