gaoqiong / composable_kernel_ROCM / Commits

Commit 6252d207, authored Sep 13, 2024 by carlushuang

Merge remote-tracking branch 'origin/develop' into ck_tile/fav3_fwd_sept

Parents: eed60199, e07f1108
Changes: 51

Showing 20 changed files with 938 additions and 3 deletions (+938 / -3)
library/src/tensor_operation_instance/gpu/avg_pool2d_bwd/device_avg_pool2d_bwd_nhwc_bf16_instance.cpp   +21  -0
library/src/tensor_operation_instance/gpu/avg_pool2d_bwd/device_avg_pool2d_bwd_nhwc_f16_instance.cpp    +21  -0
library/src/tensor_operation_instance/gpu/avg_pool2d_bwd/device_avg_pool2d_bwd_nhwc_f32_instance.cpp    +21  -0
library/src/tensor_operation_instance/gpu/avg_pool2d_bwd/device_avg_pool2d_bwd_nhwc_f8_instance.cpp     +20  -0
library/src/tensor_operation_instance/gpu/avg_pool2d_bwd/device_avg_pool2d_bwd_nhwc_instance_common.hpp +38  -0
library/src/tensor_operation_instance/gpu/avg_pool2d_bwd/device_avg_pool2d_bwd_nhwc_int8_instance.cpp   +20  -0
library/src/tensor_operation_instance/gpu/max_pool_bwd/CMakeLists.txt                                   +2   -1
library/src/tensor_operation_instance/gpu/max_pool_bwd/device_max_pool_bwd_int8_instance.cpp            +20  -0
library/src/tensor_operation_instance/gpu/max_pool_bwd/max_pool_bwd_instance_common.hpp                 +3   -1
library/src/tensor_operation_instance/gpu/pool2d_fwd/CMakeLists.txt                                     +8   -0
library/src/tensor_operation_instance/gpu/pool2d_fwd/device_avg_pool2d_fwd_nhwc_bf16_instance.cpp       +25  -0
library/src/tensor_operation_instance/gpu/pool2d_fwd/device_avg_pool2d_fwd_nhwc_f16_instance.cpp        +24  -0
library/src/tensor_operation_instance/gpu/pool2d_fwd/device_avg_pool2d_fwd_nhwc_f32_instance.cpp        +24  -0
library/src/tensor_operation_instance/gpu/pool2d_fwd/device_max_pool2d_fwd_nhwc_bf16_instance.cpp       +34  -0
library/src/tensor_operation_instance/gpu/pool2d_fwd/device_max_pool2d_fwd_nhwc_f16_instance.cpp        +32  -0
library/src/tensor_operation_instance/gpu/pool2d_fwd/device_max_pool2d_fwd_nhwc_f32_instance.cpp        +32  -0
library/src/tensor_operation_instance/gpu/pool2d_fwd/pool2d_fwd_instance_common.hpp                     +41  -0
profiler/include/profiler/data_type_enum.hpp                                                            +2   -1
profiler/include/profiler/profile_avg_pool2d_bwd_impl.hpp                                               +255 -0
profiler/include/profiler/profile_max_pool2d_bwd_impl.hpp                                               +295 -0
library/src/tensor_operation_instance/gpu/avg_pool2d_bwd/device_avg_pool2d_bwd_nhwc_bf16_instance.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include "device_avg_pool2d_bwd_nhwc_instance_common.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

void add_device_avgpool_2D_bwd_nhwc_bf16_instances(
    std::vector<std::unique_ptr<DeviceAvgPoolBwd<2, BF16, BF16, NHWC, NHWC>>>& instances)
{
    add_device_operation_instances(instances,
                                   device_avgpool_2D_bwd_nhwc_instances<BF16, BF16, F32>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
library/src/tensor_operation_instance/gpu/avg_pool2d_bwd/device_avg_pool2d_bwd_nhwc_f16_instance.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include "device_avg_pool2d_bwd_nhwc_instance_common.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

void add_device_avgpool_2D_bwd_nhwc_f16_instances(
    std::vector<std::unique_ptr<DeviceAvgPoolBwd<2, F16, F16, NHWC, NHWC>>>& instances)
{
    add_device_operation_instances(instances,
                                   device_avgpool_2D_bwd_nhwc_instances<F16, F16, F32>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
library/src/tensor_operation_instance/gpu/avg_pool2d_bwd/device_avg_pool2d_bwd_nhwc_f32_instance.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include "device_avg_pool2d_bwd_nhwc_instance_common.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

void add_device_avgpool_2D_bwd_nhwc_f32_instances(
    std::vector<std::unique_ptr<DeviceAvgPoolBwd<2, F32, F32, NHWC, NHWC>>>& instances)
{
    add_device_operation_instances(instances,
                                   device_avgpool_2D_bwd_nhwc_instances<F32, F32, F32>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
library/src/tensor_operation_instance/gpu/avg_pool2d_bwd/device_avg_pool2d_bwd_nhwc_f8_instance.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include "device_avg_pool2d_bwd_nhwc_instance_common.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

void add_device_avgpool_2D_bwd_nhwc_f8_instances(
    std::vector<std::unique_ptr<DeviceAvgPoolBwd<2, F8, F8, NHWC, NHWC>>>& instances)
{
    add_device_operation_instances(instances,
                                   device_avgpool_2D_bwd_nhwc_instances<F8, F8, F32>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
library/src/tensor_operation_instance/gpu/avg_pool2d_bwd/device_avg_pool2d_bwd_nhwc_instance_common.hpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_avgpool2d_bwd_nhwc_nhwc.hpp"
#include "ck/utility/data_type.hpp"
#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

using F16  = ck::half_t;
using BF16 = ck::bhalf_t;
using F8   = ck::f8_t;
using I8   = int8_t;
using I32  = int32_t;
using F32  = float;

using NHWC = ck::tensor_layout::convolution::NHWC;

template <typename OutType, typename InType, typename ComputeType>
using device_avgpool_2D_bwd_nhwc_instances = std::tuple<
    // clang-format off
    DeviceAvgPool2dBwd_NHWC_NHWC<OutType, InType, ComputeType, 256, 256, 1, 1, 1, 1>,
    DeviceAvgPool2dBwd_NHWC_NHWC<OutType, InType, ComputeType, 256, 256, 1, 2, 2, 2>,
    DeviceAvgPool2dBwd_NHWC_NHWC<OutType, InType, ComputeType, 256, 256, 1, 4, 4, 4>,
    DeviceAvgPool2dBwd_NHWC_NHWC<OutType, InType, ComputeType, 256, 256, 1, 8, 8, 8>,
    DeviceAvgPool2dBwd_NHWC_NHWC<OutType, InType, ComputeType, 256,  32, 8, 8, 8, 8>
    // clang-format on
    >;

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
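Note (not part of the diff): these tuned instances are meant to be reached through the instance factory, which is how the profiler code later in this diff retrieves them. A minimal sketch of that lookup, assuming the avg_pool2d_bwd factory header included by the profiler exposes the f16 variant, looks like this:

// Sketch only: retrieve the backward avg-pool instances registered above via the factory
// (mirrors the pattern used in profile_avg_pool2d_bwd_impl.hpp further down in this diff).
#include "ck/library/tensor_operation_instance/gpu/avg_pool2d_bwd.hpp"

using DeviceOp = ck::tensor_operation::device::DeviceAvgPoolBwd<
    2, ck::half_t, ck::half_t,
    ck::tensor_layout::convolution::NHWC,
    ck::tensor_layout::convolution::NHWC>;

// Each returned pointer corresponds to one DeviceAvgPool2dBwd_NHWC_NHWC configuration above.
const auto instance_ptrs = ck::tensor_operation::device::instance::
    DeviceOperationInstanceFactory<DeviceOp>::GetInstances();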
library/src/tensor_operation_instance/gpu/avg_pool2d_bwd/device_avg_pool2d_bwd_nhwc_int8_instance.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include "device_avg_pool2d_bwd_nhwc_instance_common.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

void add_device_avgpool_2D_bwd_nhwc_int8_instances(
    std::vector<std::unique_ptr<DeviceAvgPoolBwd<2, I8, I8, NHWC, NHWC>>>& instances)
{
    add_device_operation_instances(instances,
                                   device_avgpool_2D_bwd_nhwc_instances<I8, I8, I32>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
library/src/tensor_operation_instance/gpu/max_pool_bwd/CMakeLists.txt

 set(DEVICE_MAXPOOL_BWD_INSTANCES)
 list(APPEND DEVICE_MAXPOOL_BWD_INSTANCES device_max_pool_bwd_f16_instance.cpp
             device_max_pool_bwd_bf16_instance.cpp
-            device_max_pool_bwd_f32_instance.cpp)
+            device_max_pool_bwd_f32_instance.cpp
+            device_max_pool_bwd_int8_instance.cpp)
 add_instance_library(device_max_pool_bwd_instance ${DEVICE_MAXPOOL_BWD_INSTANCES})
library/src/tensor_operation_instance/gpu/max_pool_bwd/device_max_pool_bwd_int8_instance.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include "max_pool_bwd_instance_common.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

void add_device_maxpool_bwd_int8_instances(
    std::vector<std::unique_ptr<DeviceMaxPoolBwd<I8, I32, I8>>>& instances)
{
    add_device_operation_instances(instances, device_maxpool_bwd_instances<I8, I32, I8>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
library/src/tensor_operation_instance/gpu/max_pool_bwd/max_pool_bwd_instance_common.hpp

 // SPDX-License-Identifier: MIT
-// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

 #pragma once
 ...
 @@ -17,6 +17,8 @@ namespace instance {
 using I32  = int32_t;
 using F16  = ck::half_t;
 using BF16 = ck::bhalf_t;
+using I8   = int8_t;
+using F8   = ck::f8_t;
 using F32  = float;

 template <typename DOutDataType, typename IndexDataType, typename DInDataType>
 ...
library/src/tensor_operation_instance/gpu/pool2d_fwd/CMakeLists.txt (new file, mode 100644)

set(DEVICE_POOL2D_FWD_INSTANCES)
list(APPEND DEVICE_POOL2D_FWD_INSTANCES device_avg_pool2d_fwd_nhwc_f16_instance.cpp
            device_max_pool2d_fwd_nhwc_f16_instance.cpp
            device_avg_pool2d_fwd_nhwc_f32_instance.cpp
            device_max_pool2d_fwd_nhwc_f32_instance.cpp
            device_avg_pool2d_fwd_nhwc_bf16_instance.cpp
            device_max_pool2d_fwd_nhwc_bf16_instance.cpp)
add_instance_library(device_pool2d_fwd_instance ${DEVICE_POOL2D_FWD_INSTANCES})
library/src/tensor_operation_instance/gpu/pool2d_fwd/device_avg_pool2d_fwd_nhwc_bf16_instance.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include "pool2d_fwd_instance_common.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

static constexpr auto ReduceOpId = ck::ReduceTensorOp::AVG;

void add_device_pool2d_fwd_nhwc_bf16_instances(
    std::vector<std::unique_ptr<
        DevicePoolFwd<4, 2, BF16, BF16, I32, NHWC, NHWC, ReduceOpId, false>>>& instances)
{
    add_device_operation_instances(
        instances, device_pool2d_fwd_nhwc_instances<BF16, BF16, I32, F32, ReduceOpId, false>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
library/src/tensor_operation_instance/gpu/pool2d_fwd/device_avg_pool2d_fwd_nhwc_f16_instance.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include "pool2d_fwd_instance_common.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

static constexpr auto ReduceOpId = ck::ReduceTensorOp::AVG;

void add_device_pool2d_fwd_nhwc_f16_instances(
    std::vector<std::unique_ptr<
        DevicePoolFwd<4, 2, F16, F16, I32, NHWC, NHWC, ReduceOpId, false>>>& instances)
{
    add_device_operation_instances(
        instances, device_pool2d_fwd_nhwc_instances<F16, F16, I32, F32, ReduceOpId, false>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
library/src/tensor_operation_instance/gpu/pool2d_fwd/device_avg_pool2d_fwd_nhwc_f32_instance.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include "pool2d_fwd_instance_common.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

static constexpr auto ReduceOpId = ck::ReduceTensorOp::AVG;

void add_device_pool2d_fwd_nhwc_f32_instances(
    std::vector<std::unique_ptr<
        DevicePoolFwd<4, 2, F32, F32, I32, NHWC, NHWC, ReduceOpId, false>>>& instances)
{
    add_device_operation_instances(
        instances, device_pool2d_fwd_nhwc_instances<F32, F32, I32, F32, ReduceOpId, false>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
library/src/tensor_operation_instance/gpu/pool2d_fwd/device_max_pool2d_fwd_nhwc_bf16_instance.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include "pool2d_fwd_instance_common.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

static constexpr auto ReduceOpId = ck::ReduceTensorOp::MAX;

void add_device_pool2d_fwd_nhwc_bf16_instances(
    std::vector<std::unique_ptr<
        DevicePoolFwd<4, 2, BF16, BF16, I32, NHWC, NHWC, ReduceOpId, false>>>& instances)
{
    add_device_operation_instances(
        instances, device_pool2d_fwd_nhwc_instances<BF16, BF16, I32, F32, ReduceOpId, false>{});
}

void add_device_pool2d_fwd_nhwc_index_bf16_instances(
    std::vector<std::unique_ptr<
        DevicePoolFwd<4, 2, BF16, BF16, I32, NHWC, NHWC, ReduceOpId, true>>>& instances)
{
    add_device_operation_instances(
        instances, device_pool2d_fwd_nhwc_instances<BF16, BF16, I32, F32, ReduceOpId, true>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
library/src/tensor_operation_instance/gpu/pool2d_fwd/device_max_pool2d_fwd_nhwc_f16_instance.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include "pool2d_fwd_instance_common.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

static constexpr auto ReduceOpId = ck::ReduceTensorOp::MAX;

void add_device_pool2d_fwd_nhwc_f16_instances(
    std::vector<std::unique_ptr<
        DevicePoolFwd<4, 2, F16, F16, I32, NHWC, NHWC, ReduceOpId, false>>>& instances)
{
    add_device_operation_instances(
        instances, device_pool2d_fwd_nhwc_instances<F16, F16, I32, F32, ReduceOpId, false>{});
}

void add_device_pool2d_fwd_nhwc_index_f16_instances(
    std::vector<std::unique_ptr<
        DevicePoolFwd<4, 2, F16, F16, I32, NHWC, NHWC, ReduceOpId, true>>>& instances)
{
    add_device_operation_instances(
        instances, device_pool2d_fwd_nhwc_instances<F16, F16, I32, F32, ReduceOpId, true>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
library/src/tensor_operation_instance/gpu/pool2d_fwd/device_max_pool2d_fwd_nhwc_f32_instance.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include "pool2d_fwd_instance_common.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

static constexpr auto ReduceOpId = ck::ReduceTensorOp::MAX;

void add_device_pool2d_fwd_nhwc_f32_instances(
    std::vector<std::unique_ptr<
        DevicePoolFwd<4, 2, F32, F32, I32, NHWC, NHWC, ReduceOpId, false>>>& instances)
{
    add_device_operation_instances(
        instances, device_pool2d_fwd_nhwc_instances<F32, F32, I32, F32, ReduceOpId, false>{});
}

void add_device_pool2d_fwd_nhwc_index_f32_instances(
    std::vector<std::unique_ptr<
        DevicePoolFwd<4, 2, F32, F32, I32, NHWC, NHWC, ReduceOpId, true>>>& instances)
{
    add_device_operation_instances(
        instances, device_pool2d_fwd_nhwc_instances<F32, F32, I32, F32, ReduceOpId, true>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
library/src/tensor_operation_instance/gpu/pool2d_fwd/pool2d_fwd_instance_common.hpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_pool2d_fwd_nhwc_nhwc.hpp"
#include "ck/utility/data_type.hpp"
#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

using I32  = int32_t;
using F16  = ck::half_t;
using BF16 = ck::bhalf_t;
using F32  = float;

using NHWC = ck::tensor_layout::convolution::NHWC;

template <typename InDataType,
          typename OutDataType,
          typename IndexDataType,
          typename ComputeDataType,
          ReduceTensorOp ReduceOpId,
          bool OutputIndex>
using device_pool2d_fwd_nhwc_instances =
    // clang-format off
    std::tuple<
        DevicePool2dFwd_NHWC_NHWC<InDataType, OutDataType, IndexDataType, ComputeDataType, ReduceOpId, OutputIndex, 256, 256, 1, 1, 1, 1>,
        DevicePool2dFwd_NHWC_NHWC<InDataType, OutDataType, IndexDataType, ComputeDataType, ReduceOpId, OutputIndex, 256, 256, 1, 2, 1, 2>,
        DevicePool2dFwd_NHWC_NHWC<InDataType, OutDataType, IndexDataType, ComputeDataType, ReduceOpId, OutputIndex, 256, 256, 1, 4, 1, 4>
    // clang-format on
    >;

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
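Note (not part of the diff): a factory header for the 2D forward-pool instances is not shown in this diff, so the rough usage sketch below fills a vector directly through one of the add_* functions defined in the files above; the visibility of that function's declaration to the caller is an assumption.

// Sketch only: collect the f16 max-pool forward instances declared in
// device_max_pool2d_fwd_nhwc_f16_instance.cpp (assumes the declaration is visible here).
std::vector<std::unique_ptr<ck::tensor_operation::device::DevicePoolFwd<
    4, 2, ck::half_t, ck::half_t, int32_t,
    ck::tensor_layout::convolution::NHWC, ck::tensor_layout::convolution::NHWC,
    ck::ReduceTensorOp::MAX, false>>> instances;
ck::tensor_operation::device::instance::add_device_pool2d_fwd_nhwc_f16_instances(instances);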
profiler/include/profiler/data_type_enum.hpp

 // SPDX-License-Identifier: MIT
-// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

 #pragma once
 ...
 @@ -14,6 +14,7 @@ enum struct DataTypeEnum
     Int8x4   = 4,
     BFloat16 = 5,
     Double   = 6,
+    Float8   = 7,
     Unknown  = 100,
 };
 ...
profiler/include/profiler/profile_avg_pool2d_bwd_impl.hpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <iomanip>

#include "ck/ck.hpp"
#include "ck/library/tensor_operation_instance/gpu/avg_pool2d_bwd.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_avgpool_bwd.hpp"

namespace ck {
namespace profiler {

template <typename TensorLayout>
std::vector<ck::index_t> f_tensor_strides_nchw(
    ck::index_t N, ck::index_t C, ck::index_t H, ck::index_t W, TensorLayout layout)
{
    using namespace ck::literals;
    (void)N;
    if constexpr(ck::is_same<decltype(layout), ck::tensor_layout::convolution::NHWC>::value)
        return {C * H * W, 1_uz, W * C, C};
    else
        throw std::runtime_error("not supported yet");
}

template <typename DOutDataType,
          typename DInDataType,
          typename DOutLayout,
          typename DInLayout>
bool profile_avg_pool2d_bwd_impl(int do_verification,
                                 int init_method,
                                 bool do_log,
                                 bool time_kernel,
                                 std::vector<index_t> in_length,
                                 std::vector<index_t> window_spatial_lengths,
                                 std::vector<index_t> window_strides,
                                 std::vector<index_t> window_dilations,
                                 std::vector<index_t> input_left_pads,
                                 std::vector<index_t> input_right_pads)
{
    constexpr index_t InOutRank  = 4;
    constexpr index_t WindowRank = 2;

    if(in_length.size() != InOutRank || window_spatial_lengths.size() != WindowRank ||
       window_strides.size() != WindowRank || window_dilations.size() != WindowRank ||
       input_left_pads.size() != WindowRank || input_right_pads.size() != WindowRank)
    {
        std::cout << "Parameter is incorrect" << std::endl;
        return false;
    }

    std::vector<index_t> out_length(InOutRank);

    const int N = in_length[0];
    const int C = in_length[1];

    out_length[0] = N;
    out_length[1] = C;

    // Calculate Ho, Wo
    for(unsigned i = 2; i < InOutRank; ++i)
    {
        const int idx         = i - 2;
        auto pad1             = input_left_pads[idx];
        auto pad2             = input_right_pads[idx];
        auto windows_size     = window_spatial_lengths[idx];
        auto windows_stride   = window_strides[idx];
        auto windows_dilation = window_dilations[idx];
        auto eff              = (windows_size - 1) * windows_dilation + 1;
        out_length[i]         = (in_length[i] + pad1 + pad2 - eff) / windows_stride + 1;
    }

    const int Hi = in_length[2];
    const int Wi = in_length[3];
    const int Ho = out_length[2];
    const int Wo = out_length[3];

    auto f_host_tensor_descriptor =
        [](std::size_t N_, std::size_t C_, std::size_t H, std::size_t W) {
            using namespace ck::literals;
            return HostTensorDescriptor({N_, C_, H, W}, {C_ * H * W, 1_uz, W * C_, C_});
        };

    Tensor<DOutDataType> out_n_c_ho_wo_host(f_host_tensor_descriptor(N, C, Ho, Wo));
    Tensor<DInDataType> in_n_c_hi_wi_device(f_host_tensor_descriptor(N, C, Hi, Wi));
    Tensor<DInDataType> in_n_c_hi_wi_host(f_host_tensor_descriptor(N, C, Hi, Wi));

    switch(init_method)
    {
    case 0: {
        out_n_c_ho_wo_host.GenerateTensorValue(GeneratorTensor_1<DOutDataType>{});
        break;
    }
    case 1: {
        out_n_c_ho_wo_host.GenerateTensorValue(GeneratorTensor_2<DOutDataType>{-5, 5});
        break;
    }
    default: {
        out_n_c_ho_wo_host.GenerateTensorValue(GeneratorTensor_3<DOutDataType>{-0.5, 0.5});
    }
    }

    DeviceMem dout_device_buf(sizeof(DOutDataType) *
                              out_n_c_ho_wo_host.mDesc.GetElementSpaceSize());
    DeviceMem din_device_buf(sizeof(DInDataType) *
                             in_n_c_hi_wi_device.mDesc.GetElementSpaceSize());

    dout_device_buf.ToDevice(out_n_c_ho_wo_host.mData.data());

    using DeviceOp = ck::tensor_operation::device::
        DeviceAvgPoolBwd<2, DOutDataType, DInDataType, DOutLayout, DInLayout>;

    // get device op instances
    const auto instance_ptrs =
        ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
            DeviceOp>::GetInstances();

    std::cout << "found " << instance_ptrs.size() << " instances" << std::endl;

    std::string best_instance_name;
    float best_avg_time   = std::numeric_limits<float>::max();
    float best_gb_per_sec = 0;

    if(do_verification)
    {
        using ReferencePoolingBwdInstance =
            ck::tensor_operation::host::ReferenceAvgPoolBwd<2, DInDataType, DOutDataType>;

        ReferencePoolingBwdInstance ref_pooling_bwd;
        auto ref_pooling_bwd_argument = ref_pooling_bwd.MakeArgument(in_n_c_hi_wi_host,
                                                                     out_n_c_ho_wo_host,
                                                                     window_spatial_lengths,
                                                                     window_strides,
                                                                     window_dilations,
                                                                     input_left_pads,
                                                                     input_right_pads);

        auto ref_invoker = ref_pooling_bwd.MakeInvoker();
        ref_invoker.Run(ref_pooling_bwd_argument);
    }

    int num_kernel      = 0;
    bool pass           = true;
    bool instance_found = false;

    for(auto& inst_ptr : instance_ptrs)
    {
        auto argument_ptr = inst_ptr->MakeArgumentPointer(
            static_cast<DOutDataType*>(dout_device_buf.GetDeviceBuffer()),
            static_cast<DInDataType*>(din_device_buf.GetDeviceBuffer()),
            {N, C, Ho, Wo},
            {N, C, Hi, Wi},
            f_tensor_strides_nchw(N, C, Ho, Wo, DOutLayout{}),
            f_tensor_strides_nchw(N, C, Hi, Wi, DInLayout{}),
            window_spatial_lengths,
            window_strides,
            window_dilations,
            input_left_pads,
            input_right_pads);

        if(inst_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            ++num_kernel;
            instance_found = true;
        }
        else
        {
            if(time_kernel)
            {
                std::cout << inst_ptr->GetTypeString()
                          << " skipped due to unsupported argument: ";
                LogRange(std::cout << "doutput lengths = ", out_length, ", ") << std::endl;
            }
            continue;
        }

        din_device_buf.SetZero();

        auto invoker_ptr = inst_ptr->MakeInvokerPointer();
        float avg_time =
            invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

        std::size_t num_bytes =
            out_n_c_ho_wo_host.mDesc.GetElementSize() * sizeof(DOutDataType) +
            in_n_c_hi_wi_device.mDesc.GetElementSize() * sizeof(DInDataType);

        float gb_per_sec = num_bytes / 1.E6 / avg_time;

        if(time_kernel)
        {
            std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << gb_per_sec
                      << " GB/s, " << inst_ptr->GetTypeString() << std::endl;
        }

        if(avg_time < best_avg_time)
        {
            best_instance_name = inst_ptr->GetTypeString();
            best_avg_time      = avg_time;
            best_gb_per_sec    = gb_per_sec;
        }

        if(do_verification)
        {
            din_device_buf.FromDevice(in_n_c_hi_wi_device.mData.data());

            bool local_pass = ck::utils::check_err(in_n_c_hi_wi_device.mData,
                                                   in_n_c_hi_wi_host.mData,
                                                   "Error: Incorrect results",
                                                   1e-3,
                                                   1e-3);

            if(do_log)
            {
                LogRangeAsType<float>(
                    std::cout << "in_n_c_hi_wi_device: ", in_n_c_hi_wi_device.mData, ",")
                    << std::endl;
                LogRangeAsType<float>(
                    std::cout << "in_n_c_hi_wi_host: ", in_n_c_hi_wi_host.mData, ",")
                    << std::endl;
            }

            if(!local_pass)
            {
                std::cout << inst_ptr->GetTypeString() << " failed verification: ";
                LogRange(std::cout << "doutput lengths = [", out_length, ", ")
                    << "]." << std::endl;
                pass &= local_pass;
            }
            else
            {
                if(time_kernel)
                {
                    std::cout << "pass" << std::endl;
                }
            }
        }
    }

    if(time_kernel)
    {
        LogRange(std::cout << "length = ", out_length, ",") << std::endl;
        std::cout << "best perf = " << best_avg_time << " ms, " << best_gb_per_sec
                  << " GB/s, " << best_instance_name << std::endl;
    }

    if(num_kernel == 0)
    {
        std::cout << "Error: No kernel is applicable" << std::endl;
        return false;
    }

    return pass && instance_found;
}

} // namespace profiler
} // namespace ck
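Note (not part of the diff): a call to this profiler helper might look like the sketch below; the NHWC layout tags and the concrete problem sizes are illustrative assumptions, chosen only to match the NCHW in_length / 2D window conventions checked inside the function.

// Sketch: profile avg-pool2d backward for an f16 NHWC problem (values are illustrative).
using NHWC = ck::tensor_layout::convolution::NHWC;
bool ok = ck::profiler::profile_avg_pool2d_bwd_impl<ck::half_t, ck::half_t, NHWC, NHWC>(
    /*do_verification=*/1,
    /*init_method=*/1,
    /*do_log=*/false,
    /*time_kernel=*/true,
    /*in_length (N, C, Hi, Wi)=*/{1, 64, 56, 56},
    /*window_spatial_lengths=*/{2, 2},
    /*window_strides=*/{2, 2},
    /*window_dilations=*/{1, 1},
    /*input_left_pads=*/{0, 0},
    /*input_right_pads=*/{0, 0});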
profiler/include/profiler/profile_max_pool2d_bwd_impl.hpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <iomanip>

#include "ck/ck.hpp"
#include "ck/library/tensor_operation_instance/gpu/pool3d_fwd.hpp"
#include "ck/library/tensor_operation_instance/gpu/max_pool_bwd.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_pool_fwd.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_maxpool_bwd.hpp"

namespace ck {
namespace profiler {

template <typename InDataType,
          typename OutDataType,
          typename IndexDataType,
          typename DOutDataType,
          typename DInDataType,
          bool PropagateNan>
bool profile_max_pool2d_bwd_impl(int do_verification,
                                 int init_method,
                                 bool do_log,
                                 bool time_kernel,
                                 std::vector<index_t> in_length,
                                 std::vector<index_t> window_spatial_lengths,
                                 std::vector<index_t> window_strides,
                                 std::vector<index_t> window_dilations,
                                 std::vector<index_t> input_left_pads,
                                 std::vector<index_t> input_right_pads)
{
    // AtomicAdd only support f32 for now. ComputeDataType must be float32
    using ComputeDataType = float;

    constexpr index_t InOutRank  = 4;
    constexpr index_t WindowRank = 2;

    using PassThrough = ck::tensor_operation::element_wise::PassThrough;

    if(in_length.size() != InOutRank || window_spatial_lengths.size() != WindowRank ||
       window_strides.size() != WindowRank || window_dilations.size() != WindowRank ||
       input_left_pads.size() != WindowRank || input_right_pads.size() != WindowRank)
    {
        std::cout << "Parameter is incorrect" << std::endl;
        return false;
    }

    std::vector<index_t> out_length(InOutRank);

    int N = in_length[0];
    int C = in_length[1];

    out_length[0] = N;
    out_length[1] = C;

    // Calculate Ho, Wo
    for(unsigned i = 2; i < InOutRank; ++i)
    {
        const int idx         = i - 2;
        auto pad1             = input_left_pads[idx];
        auto pad2             = input_right_pads[idx];
        auto windows_size     = window_spatial_lengths[idx];
        auto windows_stride   = window_strides[idx];
        auto windows_dilation = window_dilations[idx];
        auto eff              = (windows_size - 1) * windows_dilation + 1;
        out_length[i]         = (in_length[i] + pad1 + pad2 - eff) / windows_stride + 1;
    }

    int Hi = in_length[2];
    int Wi = in_length[3];
    int Ho = out_length[2];
    int Wo = out_length[3];

    auto f_host_tensor_descriptor =
        [](std::size_t N_, std::size_t C_, std::size_t H, std::size_t W) {
            using namespace ck::literals;
            return HostTensorDescriptor({N_, C_, H, W}, {C_ * H * W, 1_uz, W * C_, C_});
        };

    Tensor<InDataType> in_n_c_hi_wi(f_host_tensor_descriptor(N, C, Hi, Wi));
    Tensor<OutDataType> out_n_c_ho_wo(f_host_tensor_descriptor(N, C, Ho, Wo));
    Tensor<IndexDataType> out_indices_n_c_ho_wo(f_host_tensor_descriptor(N, C, Ho, Wo));
    Tensor<DOutDataType> dout_n_c_ho_wo(f_host_tensor_descriptor(N, C, Ho, Wo));
    Tensor<DInDataType> din_n_c_hi_wi_host(f_host_tensor_descriptor(N, C, Hi, Wi));
    Tensor<DInDataType> din_n_c_hi_wi_device(f_host_tensor_descriptor(N, C, Hi, Wi));

    switch(init_method)
    {
    case 0: {
        in_n_c_hi_wi.GenerateTensorValue(GeneratorTensor_1<InDataType>{});
        dout_n_c_ho_wo.GenerateTensorValue(GeneratorTensor_1<DOutDataType>{});
        break;
    }
    case 1: {
        in_n_c_hi_wi.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5});
        dout_n_c_ho_wo.GenerateTensorValue(GeneratorTensor_2<DOutDataType>{-5, 5});
        break;
    }
    default: {
        in_n_c_hi_wi.GenerateTensorValue(GeneratorTensor_3<InDataType>{-0.5, 0.5});
        dout_n_c_ho_wo.GenerateTensorValue(GeneratorTensor_3<DOutDataType>{-0.5, 0.5});
    }
    }

    DeviceMem indices_device_buf(sizeof(IndexDataType) *
                                 out_indices_n_c_ho_wo.mDesc.GetElementSpaceSize());
    DeviceMem dout_device_buf(sizeof(DOutDataType) *
                              dout_n_c_ho_wo.mDesc.GetElementSpaceSize());
    DeviceMem din_device_buf(sizeof(DInDataType) *
                             din_n_c_hi_wi_device.mDesc.GetElementSpaceSize());

    // Generate index data from forwarding
    {
        using ReferencePoolingFwdInstance =
            ck::tensor_operation::host::ReferencePoolingFwd<InOutRank,
                                                            WindowRank,
                                                            InDataType,
                                                            OutDataType,
                                                            ComputeDataType,
                                                            IndexDataType,
                                                            ck::ReduceTensorOp::MAX,
                                                            false,
                                                            true>;

        ReferencePoolingFwdInstance ref_pooling_fwd;
        auto ref_pooling_fwd_argument = ref_pooling_fwd.MakeArgument(in_n_c_hi_wi,
                                                                     out_n_c_ho_wo,
                                                                     out_indices_n_c_ho_wo,
                                                                     window_spatial_lengths,
                                                                     window_strides,
                                                                     window_dilations,
                                                                     input_left_pads,
                                                                     input_right_pads);

        auto ref_pooling_fwd_invoker = ref_pooling_fwd.MakeInvoker();
        ref_pooling_fwd_invoker.Run(ref_pooling_fwd_argument);
    }

    indices_device_buf.ToDevice(out_indices_n_c_ho_wo.mData.data());
    dout_device_buf.ToDevice(dout_n_c_ho_wo.mData.data());

    using DeviceOp =
        ck::tensor_operation::device::DeviceMaxPoolBwd<DOutDataType, IndexDataType, DInDataType>;

    // get device op instances
    const auto instance_ptrs =
        ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
            DeviceOp>::GetInstances();

    std::cout << "found " << instance_ptrs.size() << " instances" << std::endl;

    std::string best_instance_name;
    float best_avg_time   = std::numeric_limits<float>::max();
    float best_gb_per_sec = 0;

    if(do_verification)
    {
        using ReferencePoolingBwdInstance =
            ck::tensor_operation::host::ReferenceMaxPoolBwd<DOutDataType,
                                                            IndexDataType,
                                                            ComputeDataType,
                                                            DInDataType,
                                                            PassThrough>;

        ReferencePoolingBwdInstance ref_pooling_bwd;
        auto ref_pooling_bwd_argument = ref_pooling_bwd.MakeArgument(
            dout_n_c_ho_wo, out_indices_n_c_ho_wo, din_n_c_hi_wi_host, PassThrough{});

        auto ref_invoker = ref_pooling_bwd.MakeInvoker();
        ref_invoker.Run(ref_pooling_bwd_argument);
    }

    int num_kernel      = 0;
    bool pass           = true;
    bool instance_found = false;

    for(auto& inst_ptr : instance_ptrs)
    {
        auto argument_ptr = inst_ptr->MakeArgumentPointer(
            static_cast<DOutDataType*>(dout_device_buf.GetDeviceBuffer()),
            static_cast<IndexDataType*>(indices_device_buf.GetDeviceBuffer()),
            static_cast<DInDataType*>(din_device_buf.GetDeviceBuffer()),
            dout_n_c_ho_wo.mDesc.GetElementSpaceSize(),
            din_n_c_hi_wi_device.mDesc.GetElementSpaceSize(),
            window_spatial_lengths,
            window_strides,
            window_dilations);

        if(inst_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            ++num_kernel;
            instance_found = true;
        }
        else
        {
            if(time_kernel)
            {
                std::cout << inst_ptr->GetTypeString()
                          << " skipped due to unsupported argument: ";
                LogRange(std::cout << "doutput lengths = ", out_length, ", ") << std::endl;
            }
            continue;
        }

        size_t workspace_sz = inst_ptr->GetWorkSpaceSize(argument_ptr.get());

        DeviceMem workspace_device_buf(workspace_sz);

        inst_ptr->SetWorkSpacePointer(argument_ptr.get(),
                                      workspace_device_buf.GetDeviceBuffer());

        auto invoker_ptr = inst_ptr->MakeInvokerPointer();
        float avg_time =
            invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

        std::size_t num_bytes =
            dout_n_c_ho_wo.mDesc.GetElementSize() * sizeof(DOutDataType) +
            out_indices_n_c_ho_wo.mDesc.GetElementSize() * sizeof(IndexDataType) +
            din_n_c_hi_wi_device.mDesc.GetElementSize() * sizeof(DInDataType);

        float gb_per_sec = num_bytes / 1.E6 / avg_time;

        if(time_kernel)
            std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << gb_per_sec
                      << " GB/s, " << inst_ptr->GetTypeString() << std::endl;

        if(avg_time < best_avg_time)
        {
            best_instance_name = inst_ptr->GetTypeString();
            best_avg_time      = avg_time;
            best_gb_per_sec    = gb_per_sec;
        }

        if(do_verification)
        {
            din_device_buf.FromDevice(din_n_c_hi_wi_device.mData.data());

            bool local_pass = ck::utils::check_err(din_n_c_hi_wi_device.mData,
                                                   din_n_c_hi_wi_host.mData,
                                                   "Error: Incorrect results",
                                                   1e-3,
                                                   1e-3);

            if(do_log)
            {
                LogRangeAsType<float>(
                    std::cout << "out_indices_n_c_ho_wo: ", out_indices_n_c_ho_wo.mData, ",")
                    << std::endl;
                LogRangeAsType<float>(
                    std::cout << "din_n_c_hi_wi_device: ", din_n_c_hi_wi_device.mData, ",")
                    << std::endl;
                LogRangeAsType<float>(
                    std::cout << "din_n_c_hi_wi_host: ", din_n_c_hi_wi_host.mData, ",")
                    << std::endl;
            }

            if(!local_pass)
            {
                std::cout << inst_ptr->GetTypeString() << " failed verification: ";
                LogRange(std::cout << "doutput lengths = [", out_length, ", ")
                    << "]." << std::endl;
                pass &= local_pass;
            }
            else
            {
                if(time_kernel)
                {
                    std::cout << "pass" << std::endl;
                }
            }
        }
    }

    if(time_kernel)
    {
        LogRange(std::cout << "length = ", out_length, ",") << std::endl;
        std::cout << "best perf = " << best_avg_time << " ms, " << best_gb_per_sec
                  << " GB/s, " << best_instance_name << std::endl;
    }

    if(num_kernel == 0)
    {
        std::cout << "Error: No kernel is applicable" << std::endl;
        return false;
    }

    return pass && instance_found;
}

} // namespace profiler
} // namespace ck
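Note (not part of the diff): a corresponding call to the max-pool backward profiler helper might look like the sketch below; the concrete data types and problem sizes are illustrative assumptions.

// Sketch: profile max-pool2d backward for an f16 problem with int32 indices
// (template arguments follow the order InDataType, OutDataType, IndexDataType,
//  DOutDataType, DInDataType, PropagateNan; values are illustrative).
bool ok = ck::profiler::profile_max_pool2d_bwd_impl<ck::half_t,
                                                    ck::half_t,
                                                    int32_t,
                                                    ck::half_t,
                                                    ck::half_t,
                                                    false>(
    /*do_verification=*/1,
    /*init_method=*/1,
    /*do_log=*/false,
    /*time_kernel=*/true,
    /*in_length (N, C, Hi, Wi)=*/{1, 64, 56, 56},
    /*window_spatial_lengths=*/{2, 2},
    /*window_strides=*/{2, 2},
    /*window_dilations=*/{1, 1},
    /*input_left_pads=*/{0, 0},
    /*input_right_pads=*/{0, 0});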