Unverified commit 448c0f56, authored by Mateusz Ozga and committed by GitHub

Pool2d max/avg kernel in the BWD version (#1494)

* Add pool2d instance BWD AVG

* Add pool2d instance BWD MAX

* Fix: avg review

* Fix review: part2

* Fix - enable test when type is compiled

* Fix review part3
parent e8d2887c
@@ -355,12 +355,39 @@ struct UnaryDivide
__host__ __device__ void operator()(T& y, const T& x) const
{
static_assert(is_same<T, float>::value || is_same<T, double>::value ||
is_same<T, int32_t>::value || is_same<T, int8_t>::value,
"Data type is not supported by this operation!");
y = x / type_convert<T>(divider_);
};
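// The specializations below route half_t, bhalf_t and f8_t through float: the division
// is computed at full float precision and only the result is narrowed back.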
template <>
__host__ __device__ void operator()<half_t>(half_t& y, const half_t& x) const
{
float x_ = type_convert<float>(x);
float divider_f_ = type_convert<float>(divider_);
y = type_convert<half_t>(x_ / divider_f_);
};
template <>
__host__ __device__ void operator()<bhalf_t>(bhalf_t& y, const bhalf_t& x) const
{
float x_ = type_convert<float>(x);
float divider_f_ = type_convert<float>(divider_);
y = type_convert<bhalf_t>(x_ / divider_f_);
};
template <>
__host__ __device__ void operator()<f8_t>(f8_t& y, const f8_t& x) const
{
float x_ = type_convert<float>(x);
float divider_f_ = type_convert<float>(divider_);
y = type_convert<f8_t>(x_ / divider_f_);
};
int32_t divider_ = 1;
};
...
@@ -52,12 +52,28 @@ struct Add
__host__ __device__ inline constexpr void operator()(T& a, T b) const
{
static_assert(is_same<T, float>::value || is_same<T, double>::value ||
is_same<T, int32_t>::value || is_same<T, int8_t>::value,
"The data type is not supported by the Add accumulator!");
a = a + b;
}
__host__ __device__ inline constexpr void operator()(f8_t& a, f8_t b) const
{
float a_ = type_convert<float>(a);
float b_ = type_convert<float>(b);
a = type_convert<f8_t>(a_ + b_);
}
__host__ __device__ inline constexpr void operator()(half_t& a, half_t b) const
{
float a_ = type_convert<float>(a);
float b_ = type_convert<float>(b);
a = type_convert<half_t>(a_ + b_);
}
__host__ __device__ inline constexpr void operator()(bhalf_t& a, bhalf_t b) const
{
float a_ = type_convert<float>(a);
@@ -112,12 +128,28 @@ struct Mul
__host__ __device__ inline constexpr void operator()(T& a, T b) const
{
static_assert(is_same<T, float>::value || is_same<T, double>::value ||
is_same<T, int32_t>::value || is_same<T, int8_t>::value,
"The data type is not supported by the Mul accumulator!");
a = a * b;
}
__host__ __device__ inline constexpr void operator()(f8_t& a, f8_t b) const
{
float a_ = type_convert<float>(a);
float b_ = type_convert<float>(b);
a = type_convert<f8_t>(a_ * b_);
}
__host__ __device__ inline constexpr void operator()(half_t& a, half_t b) const
{
float a_ = type_convert<float>(a);
float b_ = type_convert<float>(b);
a = type_convert<half_t>(a_ * b_);
}
__host__ __device__ inline constexpr void operator()(bhalf_t& a, bhalf_t b) const
{
float a_ = type_convert<float>(a);
@@ -137,6 +169,16 @@ struct Max
float val = NumericLimits<float>::Lowest();
return type_convert<bhalf_t>(val);
}
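// As with bhalf_t above, identity values for the narrow types are computed in float and
// narrowed via type_convert rather than relying on NumericLimits of the narrow type.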
else if constexpr(is_same_v<T, f8_t>)
{
float val = NumericLimits<float>::Lowest();
return type_convert<f8_t>(val);
}
else if constexpr(is_same_v<T, half_t>)
{
float val = NumericLimits<float>::Lowest();
return type_convert<half_t>(val);
}
else
{
return NumericLimits<T>::Lowest();
@@ -154,8 +196,7 @@ struct Max
__host__ __device__ inline constexpr void operator()(T& a, T b) const
{
static_assert(is_same<T, float>::value || is_same<T, double>::value ||
is_same<T, int32_t>::value || is_same<T, int8_t>::value,
"The data type is not supported by the Max accumulator!");
if(a < b)
@@ -171,12 +212,29 @@ struct Max
a = b;
}
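// The half_t/f8_t overloads below compare through float instead of comparing the narrow
// types directly; the same pattern repeats for the bool& changed variants and in Min.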
__host__ __device__ inline constexpr void operator()(half_t& a, half_t b) const
{
float a_ = type_convert<float>(a);
float b_ = type_convert<float>(b);
if(a_ < b_)
a = b;
}
__host__ __device__ inline constexpr void operator()(f8_t& a, f8_t b) const
{
float a_ = type_convert<float>(a);
float b_ = type_convert<float>(b);
if(a_ < b_)
a = b;
}
template <typename T>
__host__ __device__ inline constexpr void operator()(T& a, T b, bool& changed) const
{
static_assert(is_same<T, float>::value || is_same<T, double>::value ||
is_same<T, int32_t>::value || is_same<T, int8_t>::value,
"The data type is not supported by the Max accumulator!");
if(a < b)
@@ -197,6 +255,30 @@ struct Max
changed = true;
}
}
__host__ __device__ inline constexpr void operator()(half_t& a, half_t b, bool& changed) const
{
float a_ = type_convert<float>(a);
float b_ = type_convert<float>(b);
if(a_ < b_)
{
a = b;
changed = true;
}
}
__host__ __device__ inline constexpr void operator()(f8_t& a, f8_t b, bool& changed) const
{
float a_ = type_convert<float>(a);
float b_ = type_convert<float>(b);
if(a_ < b_)
{
a = b;
changed = true;
}
}
};
struct Min
@@ -209,6 +291,16 @@ struct Min
float val = NumericLimits<float>::Max();
return type_convert<bhalf_t>(val);
}
else if constexpr(is_same_v<T, half_t>)
{
float val = NumericLimits<float>::Max();
return type_convert<half_t>(val);
}
else if constexpr(is_same_v<T, f8_t>)
{
float val = NumericLimits<float>::Max();
return type_convert<f8_t>(val);
}
else
{
return NumericLimits<T>::Max();
@@ -227,8 +319,7 @@ struct Min
__host__ __device__ inline constexpr void operator()(T& a, T b) const
{
static_assert(is_same<T, float>::value || is_same<T, double>::value ||
is_same<T, int32_t>::value || is_same<T, int8_t>::value,
"The data type is not supported by the Min accumulator!");
if(a > b)
@@ -244,6 +335,24 @@ struct Min
a = b;
}
__host__ __device__ inline constexpr void operator()(half_t& a, half_t b) const
{
float a_ = type_convert<float>(a);
float b_ = type_convert<float>(b);
if(a_ > b_)
a = b;
}
__host__ __device__ inline constexpr void operator()(f8_t& a, f8_t b) const
{
float a_ = type_convert<float>(a);
float b_ = type_convert<float>(b);
if(a_ > b_)
a = b;
}
template <typename T>
__host__ __device__ inline constexpr void operator()(T& a, T b, bool& changed) const
{
@@ -270,6 +379,30 @@ struct Min
changed = true;
}
}
__host__ __device__ inline constexpr void operator()(half_t& a, half_t b, bool& changed) const
{
float a_ = type_convert<float>(a);
float b_ = type_convert<float>(b);
if(a_ > b_)
{
a = b;
changed = true;
}
}
__host__ __device__ inline constexpr void operator()(f8_t& a, f8_t b, bool& changed) const
{
float a_ = type_convert<float>(a);
float b_ = type_convert<float>(b);
if(a_ > b_)
{
a = b;
changed = true;
}
}
};
struct AMax
@@ -299,6 +432,15 @@ struct AMax
a = b;
}
__host__ __device__ inline constexpr void operator()(f8_t& a, f8_t b) const
{
float a_ = type_convert<float>(a);
float b_ = type_convert<float>(b);
if(a_ < b_)
a = b;
}
template <typename T>
__host__ __device__ inline constexpr void operator()(T& a, T b, bool& changed) const
{
@@ -313,6 +455,18 @@ struct AMax
changed = true;
}
}
__host__ __device__ inline constexpr void operator()(f8_t& a, f8_t b, bool& changed) const
{
float a_ = type_convert<float>(a);
float b_ = type_convert<float>(b);
if(a_ < b_)
{
a = b;
changed = true;
}
}
};
template <typename T>
@@ -352,7 +506,8 @@ struct InMemoryDataOperationSupportedOnDataType<InMemoryDataOperationEnum::Set,
static constexpr bool value =
is_same<DataType, float>::value || is_same<DataType, double>::value ||
is_same<DataType, half_t>::value || is_same<DataType, bhalf_t>::value ||
is_same<DataType, int8_t>::value || is_same<DataType, int32_t>::value ||
is_same<DataType, f8_t>::value;
};
template <typename DataType>
@@ -361,7 +516,7 @@ struct InMemoryDataOperationSupportedOnDataType<InMemoryDataOperationEnum::Add,
static constexpr bool value =
is_same<DataType, float>::value || is_same<DataType, double>::value ||
is_same<DataType, half_t>::value || is_same<DataType, int8_t>::value ||
is_same<DataType, int32_t>::value || is_same<DataType, f8_t>::value;
};
} // namespace reduce
...
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
#pragma once
#include "ck/tensor_operation/gpu/device/device_avgpool_bwd.hpp"
#include "ck/library/tensor_operation_instance/device_operation_instance_factory.hpp"
namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {
#ifdef CK_ENABLE_BF16
void add_device_avgpool_2D_bwd_nhwc_bf16_instances(
std::vector<std::unique_ptr<DeviceAvgPoolBwd<2, BF16, BF16, NHWC, NHWC>>>&);
#endif
#ifdef CK_ENABLE_FP16
void add_device_avgpool_2D_bwd_nhwc_f16_instances(
std::vector<std::unique_ptr<DeviceAvgPoolBwd<2, F16, F16, NHWC, NHWC>>>&);
#endif
#ifdef CK_ENABLE_FP8
void add_device_avgpool_2D_bwd_nhwc_f8_instances(
std::vector<std::unique_ptr<DeviceAvgPoolBwd<2, F8, F8, NHWC, NHWC>>>&);
#endif
#ifdef CK_ENABLE_FP32
void add_device_avgpool_2D_bwd_nhwc_f32_instances(
std::vector<std::unique_ptr<DeviceAvgPoolBwd<2, F32, F32, NHWC, NHWC>>>&);
#endif
#ifdef CK_ENABLE_INT8
void add_device_avgpool_2D_bwd_nhwc_int8_instances(
std::vector<std::unique_ptr<DeviceAvgPoolBwd<2, I8, I8, NHWC, NHWC>>>&);
#endif
template <typename DOutDataType, typename DInDataType, typename InLayout, typename OutLayout>
struct DeviceOperationInstanceFactory<
ck::tensor_operation::device::
DeviceAvgPoolBwd<2, DOutDataType, DInDataType, InLayout, OutLayout>>
{
using DeviceOp = DeviceAvgPoolBwd<2, DOutDataType, DInDataType, InLayout, OutLayout>;
static auto GetInstances()
{
std::vector<std::unique_ptr<DeviceOp>> op_ptrs;
if constexpr(is_same_v<InLayout, NHWC> && is_same_v<OutLayout, NHWC>)
{
#ifdef CK_ENABLE_FP16
if constexpr(is_same_v<DOutDataType, F16> && is_same_v<DInDataType, F16>)
add_device_avgpool_2D_bwd_nhwc_f16_instances(op_ptrs);
#endif
#ifdef CK_ENABLE_BF16
else if constexpr(is_same_v<DOutDataType, BF16> && is_same_v<DInDataType, BF16>)
add_device_avgpool_2D_bwd_nhwc_bf16_instances(op_ptrs);
#endif
#ifdef CK_ENABLE_FP32
else if constexpr(is_same_v<DOutDataType, F32> && is_same_v<DInDataType, F32>)
add_device_avgpool_2D_bwd_nhwc_f32_instances(op_ptrs);
#endif
#ifdef CK_ENABLE_FP8
else if constexpr(is_same_v<DOutDataType, F8> && is_same_v<DInDataType, F8>)
add_device_avgpool_2D_bwd_nhwc_f8_instances(op_ptrs);
#endif
#ifdef CK_ENABLE_INT8
else if constexpr(is_same_v<DOutDataType, I8> && is_same_v<DInDataType, I8>)
add_device_avgpool_2D_bwd_nhwc_int8_instances(op_ptrs);
#endif
}
return op_ptrs;
}
};
} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
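For orientation, a minimal consumer sketch of this factory (a hypothetical F16 path; assumes CK_ENABLE_FP16 is defined, the headers above are included, and <iostream> is available):

// Enumerate every registered 2D avg-pool BWD instance for F16/NHWC; callers then build
// arguments and pick a supported instance, as the profiler below does.
using NHWC = ck::tensor_layout::convolution::NHWC;
using AvgPoolBwdF16 =
    ck::tensor_operation::device::DeviceAvgPoolBwd<2, ck::half_t, ck::half_t, NHWC, NHWC>;
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
    AvgPoolBwdF16>::GetInstances();
for(const auto& op : op_ptrs)
    std::cout << op->GetTypeString() << std::endl;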
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.
#pragma once
@@ -23,6 +23,11 @@ void add_device_maxpool_bwd_bf16_instances(
void add_device_maxpool_bwd_f32_instances(
std::vector<std::unique_ptr<DeviceMaxPoolBwd<F32, I32, F32>>>&);
#endif
#ifdef CK_ENABLE_INT8
void add_device_maxpool_bwd_int8_instances(
std::vector<std::unique_ptr<DeviceMaxPoolBwd<I8, I32, I8>>>&);
#endif
template <typename DOutDataType, typename IndexDataType, typename DInDataType>
struct DeviceOperationInstanceFactory<
ck::tensor_operation::device::DeviceMaxPoolBwd<DOutDataType, IndexDataType, DInDataType>>
@@ -32,6 +37,7 @@ struct DeviceOperationInstanceFactory<
static auto GetInstances()
{
std::vector<std::unique_ptr<DeviceOp>> op_ptrs;
#ifdef CK_ENABLE_FP16
if constexpr(is_same_v<DOutDataType, F16> && is_same_v<DInDataType, F16> &&
is_same_v<IndexDataType, I32>)
@@ -47,6 +53,11 @@ struct DeviceOperationInstanceFactory<
is_same_v<IndexDataType, I32>)
add_device_maxpool_bwd_f32_instances(op_ptrs);
#endif
#ifdef CK_ENABLE_INT8
else if constexpr(is_same_v<DOutDataType, I8> && is_same_v<DInDataType, I8> &&
is_same_v<IndexDataType, I32>)
add_device_maxpool_bwd_int8_instances(op_ptrs);
#endif
return op_ptrs;
}
...
set(DEVICE_AVGPOOL_2D_BWD_INSTANCES)
list(APPEND DEVICE_AVGPOOL_2D_BWD_INSTANCES device_avg_pool2d_bwd_nhwc_bf16_instance.cpp
device_avg_pool2d_bwd_nhwc_f16_instance.cpp
device_avg_pool2d_bwd_nhwc_f32_instance.cpp
device_avg_pool2d_bwd_nhwc_f8_instance.cpp
device_avg_pool2d_bwd_nhwc_int8_instance.cpp)
add_instance_library(device_avg_pool2d_bwd_instance ${DEVICE_AVGPOOL_2D_BWD_INSTANCES})
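# add_instance_library is CK's CMake helper for building instance sources into a
# device-instance library target; the resulting device_avg_pool2d_bwd_instance target is
# linked into ckProfiler further below.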
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
#include "device_avg_pool2d_bwd_nhwc_instance_common.hpp"
namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {
void add_device_avgpool_2D_bwd_nhwc_bf16_instances(
std::vector<std::unique_ptr<DeviceAvgPoolBwd<2, BF16, BF16, NHWC, NHWC>>>& instances)
{
add_device_operation_instances(instances,
device_avgpool_2D_bwd_nhwc_instances<BF16, BF16, F32>{});
}
} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
#include "device_avg_pool2d_bwd_nhwc_instance_common.hpp"
namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {
void add_device_avgpool_2D_bwd_nhwc_f16_instances(
std::vector<std::unique_ptr<DeviceAvgPoolBwd<2, F16, F16, NHWC, NHWC>>>& instances)
{
add_device_operation_instances(instances,
device_avgpool_2D_bwd_nhwc_instances<F16, F16, F32>{});
}
} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
#include "device_avg_pool2d_bwd_nhwc_instance_common.hpp"
namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {
void add_device_avgpool_2D_bwd_nhwc_f32_instances(
std::vector<std::unique_ptr<DeviceAvgPoolBwd<2, F32, F32, NHWC, NHWC>>>& instances)
{
add_device_operation_instances(instances,
device_avgpool_2D_bwd_nhwc_instances<F32, F32, F32>{});
}
} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
#include "device_avg_pool2d_bwd_nhwc_instance_common.hpp"
namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {
void add_device_avgpool_2D_bwd_nhwc_f8_instances(
std::vector<std::unique_ptr<DeviceAvgPoolBwd<2, F8, F8, NHWC, NHWC>>>& instances)
{
add_device_operation_instances(instances, device_avgpool_2D_bwd_nhwc_instances<F8, F8, F32>{});
}
} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
#pragma once
#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_avgpool2d_bwd_nhwc_nhwc.hpp"
#include "ck/utility/data_type.hpp"
#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp"
namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {
using F16 = ck::half_t;
using BF16 = ck::bhalf_t;
using F8 = ck::f8_t;
using I8 = int8_t;
using I32 = int32_t;
using F32 = float;
using NHWC = ck::tensor_layout::convolution::NHWC;
template <typename OutType, typename InType, typename ComputeType>
using device_avgpool_2D_bwd_nhwc_instances = std::tuple<
// clang-format off
DeviceAvgPool2dBwd_NHWC_NHWC<OutType, InType, ComputeType, 256, 256, 1, 1, 1, 1>,
DeviceAvgPool2dBwd_NHWC_NHWC<OutType, InType, ComputeType, 256, 256, 1, 2, 2, 2>,
DeviceAvgPool2dBwd_NHWC_NHWC<OutType, InType, ComputeType, 256, 256, 1, 4, 4, 4>,
DeviceAvgPool2dBwd_NHWC_NHWC<OutType, InType, ComputeType, 256, 256, 1, 8, 8, 8>,
DeviceAvgPool2dBwd_NHWC_NHWC<OutType, InType, ComputeType, 256, 32, 8, 8, 8, 8>
// clang-format on
>;
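// The integer template arguments are DeviceAvgPool2dBwd_NHWC_NHWC's tuning parameters
// (block size plus per-thread slice/vector sizes); see
// ck/tensor_operation/gpu/device/impl/device_avgpool2d_bwd_nhwc_nhwc.hpp for their exact order.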
} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
#include "device_avg_pool2d_bwd_nhwc_instance_common.hpp"
namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {
void add_device_avgpool_2D_bwd_nhwc_int8_instances(
std::vector<std::unique_ptr<DeviceAvgPoolBwd<2, I8, I8, NHWC, NHWC>>>& instances)
{
add_device_operation_instances(instances, device_avgpool_2D_bwd_nhwc_instances<I8, I8, I32>{});
}
} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
set(DEVICE_MAXPOOL_BWD_INSTANCES)
list(APPEND DEVICE_MAXPOOL_BWD_INSTANCES device_max_pool_bwd_f16_instance.cpp
device_max_pool_bwd_bf16_instance.cpp
device_max_pool_bwd_f32_instance.cpp
device_max_pool_bwd_int8_instance.cpp)
add_instance_library(device_max_pool_bwd_instance ${DEVICE_MAXPOOL_BWD_INSTANCES})
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
#include "max_pool_bwd_instance_common.hpp"
namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {
void add_device_maxpool_bwd_int8_instances(
std::vector<std::unique_ptr<DeviceMaxPoolBwd<I8, I32, I8>>>& instances)
{
add_device_operation_instances(instances, device_maxpool_bwd_instances<I8, I32, I8>{});
}
} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.
#pragma once
@@ -17,6 +17,8 @@ namespace instance {
using I32 = int32_t;
using F16 = ck::half_t;
using BF16 = ck::bhalf_t;
using I8 = int8_t;
using F8 = ck::f8_t;
using F32 = float;
template <typename DOutDataType, typename IndexDataType, typename DInDataType>
...
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.
#pragma once
@@ -14,6 +14,7 @@ enum struct DataTypeEnum
Int8x4 = 4,
BFloat16 = 5,
Double = 6,
Float8 = 7,
Unknown = 100,
};
...
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
#pragma once
#include <iomanip>
#include "ck/ck.hpp"
#include "ck/library/tensor_operation_instance/gpu/avg_pool2d_bwd.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_avgpool_bwd.hpp"
namespace ck {
namespace profiler {
template <typename TensorLayout>
std::vector<ck::index_t> f_tensor_strides_nchw(
ck::index_t N, ck::index_t C, ck::index_t H, ck::index_t W, TensorLayout layout)
{
using namespace ck::literals;
(void)N;
if constexpr(ck::is_same<decltype(layout), ck::tensor_layout::convolution::NHWC>::value)
return {C * H * W, 1_uz, W * C, C};
else
throw std::runtime_error("not supported yet");
};
template <typename DOutDataType, typename DInDataType, typename DOutLayout, typename DInLayout>
bool profile_avg_pool2d_bwd_impl(int do_verification,
int init_method,
bool do_log,
bool time_kernel,
std::vector<index_t> in_length,
std::vector<index_t> window_spatial_lengths,
std::vector<index_t> window_strides,
std::vector<index_t> window_dilations,
std::vector<index_t> input_left_pads,
std::vector<index_t> input_right_pads)
{
constexpr index_t InOutRank = 4;
constexpr index_t WindowRank = 2;
if(in_length.size() != InOutRank || window_spatial_lengths.size() != WindowRank ||
window_strides.size() != WindowRank || window_dilations.size() != WindowRank ||
input_left_pads.size() != WindowRank || input_right_pads.size() != WindowRank)
{
std::cout << "Parameter is incorrect" << std::endl;
return false;
}
std::vector<index_t> out_length(InOutRank);
const int N = in_length[0];
const int C = in_length[1];
out_length[0] = N;
out_length[1] = C;
// Calculate Ho, Wo
for(unsigned i = 2; i < InOutRank; ++i)
{
const int idx = i - 2;
auto pad1 = input_left_pads[idx];
auto pad2 = input_right_pads[idx];
auto windows_size = window_spatial_lengths[idx];
auto windows_stride = window_strides[idx];
auto windows_dilation = window_dilations[idx];
auto eff = (windows_size - 1) * windows_dilation + 1;
out_length[i] = (in_length[i] + pad1 + pad2 - eff) / windows_stride + 1;
}
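// Worked example: Hi = 30, window 2, stride 2, dilation 1, pads 1/1 gives
// eff = (2 - 1) * 1 + 1 = 2 and Ho = (30 + 1 + 1 - 2) / 2 + 1 = 16.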
const int Hi = in_length[2];
const int Wi = in_length[3];
const int Ho = out_length[2];
const int Wo = out_length[3];
auto f_host_tensor_descriptor =
[](std::size_t N_, std::size_t C_, std::size_t H, std::size_t W) {
using namespace ck::literals;
return HostTensorDescriptor({N_, C_, H, W}, {C_ * H * W, 1_uz, W * C_, C_});
};
Tensor<DOutDataType> out_n_c_ho_wo_host(f_host_tensor_descriptor(N, C, Ho, Wo));
Tensor<DInDataType> in_n_c_hi_wi_device(f_host_tensor_descriptor(N, C, Hi, Wi));
Tensor<DInDataType> in_n_c_hi_wi_host(f_host_tensor_descriptor(N, C, Hi, Wi));
switch(init_method)
{
case 0: {
out_n_c_ho_wo_host.GenerateTensorValue(GeneratorTensor_1<DOutDataType>{});
break;
}
case 1: {
out_n_c_ho_wo_host.GenerateTensorValue(GeneratorTensor_2<DOutDataType>{-5, 5});
break;
}
default: {
out_n_c_ho_wo_host.GenerateTensorValue(GeneratorTensor_3<DOutDataType>{-0.5, 0.5});
}
}
DeviceMem dout_device_buf(sizeof(DOutDataType) *
out_n_c_ho_wo_host.mDesc.GetElementSpaceSize());
DeviceMem din_device_buf(sizeof(DInDataType) * in_n_c_hi_wi_device.mDesc.GetElementSpaceSize());
dout_device_buf.ToDevice(out_n_c_ho_wo_host.mData.data());
using DeviceOp = ck::tensor_operation::device::
DeviceAvgPoolBwd<2, DOutDataType, DInDataType, DOutLayout, DInLayout>;
// get device op instances
const auto instance_ptrs =
ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
DeviceOp>::GetInstances();
std::cout << "found " << instance_ptrs.size() << " instances" << std::endl;
std::string best_instance_name;
float best_avg_time = std::numeric_limits<float>::max();
float best_gb_per_sec = 0;
if(do_verification)
{
using ReferencePoolingBwdInstance =
ck::tensor_operation::host::ReferenceAvgPoolBwd<2, DInDataType, DOutDataType>;
ReferencePoolingBwdInstance ref_pooling_bwd;
auto ref_pooling_bwd_argument = ref_pooling_bwd.MakeArgument(in_n_c_hi_wi_host,
out_n_c_ho_wo_host,
window_spatial_lengths,
window_strides,
window_dilations,
input_left_pads,
input_right_pads);
auto ref_invoker = ref_pooling_bwd.MakeInvoker();
ref_invoker.Run(ref_pooling_bwd_argument);
}
int num_kernel = 0;
bool pass = true;
bool instance_found = false;
for(auto& inst_ptr : instance_ptrs)
{
auto argument_ptr = inst_ptr->MakeArgumentPointer(
static_cast<DOutDataType*>(dout_device_buf.GetDeviceBuffer()),
static_cast<DInDataType*>(din_device_buf.GetDeviceBuffer()),
{N, C, Ho, Wo},
{N, C, Hi, Wi},
f_tensor_strides_nchw(N, C, Ho, Wo, DOutLayout{}),
f_tensor_strides_nchw(N, C, Hi, Wi, DInLayout{}),
window_spatial_lengths,
window_strides,
window_dilations,
input_left_pads,
input_right_pads);
if(inst_ptr->IsSupportedArgument(argument_ptr.get()))
{
++num_kernel;
instance_found = true;
}
else
{
if(time_kernel)
{
std::cout << inst_ptr->GetTypeString() << " skipped due to unsupported argument: ";
LogRange(std::cout << "doutput lengths = ", out_length, ", ") << std::endl;
}
continue;
}
din_device_buf.SetZero();
auto invoker_ptr = inst_ptr->MakeInvokerPointer();
float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
std::size_t num_bytes = out_n_c_ho_wo_host.mDesc.GetElementSize() * sizeof(DOutDataType) +
in_n_c_hi_wi_device.mDesc.GetElementSize() * sizeof(DInDataType);
float gb_per_sec = num_bytes / 1.E6 / avg_time;
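// num_bytes / 1e6 yields megabytes; dividing by avg_time in milliseconds gives GB/s.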
if(time_kernel)
{
std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << gb_per_sec << " GB/s, "
<< inst_ptr->GetTypeString() << std::endl;
}
if(avg_time < best_avg_time)
{
best_instance_name = inst_ptr->GetTypeString();
best_avg_time = avg_time;
best_gb_per_sec = gb_per_sec;
}
if(do_verification)
{
din_device_buf.FromDevice(in_n_c_hi_wi_device.mData.data());
bool local_pass = ck::utils::check_err(in_n_c_hi_wi_device.mData,
in_n_c_hi_wi_host.mData,
"Error: Incorrect results",
1e-3,
1e-3);
if(do_log)
{
LogRangeAsType<float>(
std::cout << "in_n_c_hi_wi_device: ", in_n_c_hi_wi_device.mData, ",")
<< std::endl;
LogRangeAsType<float>(
std::cout << "in_n_c_hi_wi_host: ", in_n_c_hi_wi_host.mData, ",")
<< std::endl;
}
if(!local_pass)
{
std::cout << inst_ptr->GetTypeString() << " failed verification: ";
LogRange(std::cout << "doutput lengths = [", out_length, ", ") << "]." << std::endl;
pass &= local_pass;
}
else
{
if(time_kernel)
{
std::cout << "pass" << std::endl;
}
}
}
}
if(time_kernel)
{
LogRange(std::cout << "length = ", out_length, ",") << std::endl;
std::cout << "best perf = " << best_avg_time << " ms, " << best_gb_per_sec << " GB/s, "
<< best_instance_name << std::endl;
}
if(num_kernel == 0)
{
std::cout << "Error: No kernel is applicable" << std::endl;
return false;
}
return pass && instance_found;
}
} // namespace profiler
} // namespace ck
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
#pragma once
#include <iomanip>
#include "ck/ck.hpp"
#include "ck/library/tensor_operation_instance/gpu/pool3d_fwd.hpp"
#include "ck/library/tensor_operation_instance/gpu/max_pool_bwd.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_pool_fwd.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_maxpool_bwd.hpp"
namespace ck {
namespace profiler {
template <typename InDataType,
typename OutDataType,
typename IndexDataType,
typename DOutDataType,
typename DInDataType,
bool PropagateNan>
bool profile_max_pool2d_bwd_impl(int do_verification,
int init_method,
bool do_log,
bool time_kernel,
std::vector<index_t> in_length,
std::vector<index_t> window_spatial_lengths,
std::vector<index_t> window_strides,
std::vector<index_t> window_dilations,
std::vector<index_t> input_left_pads,
std::vector<index_t> input_right_pads)
{
// AtomicAdd only supports f32 for now, so ComputeDataType must be float.
using ComputeDataType = float;
constexpr index_t InOutRank = 4;
constexpr index_t WindowRank = 2;
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
if(in_length.size() != InOutRank || window_spatial_lengths.size() != WindowRank ||
window_strides.size() != WindowRank || window_dilations.size() != WindowRank ||
input_left_pads.size() != WindowRank || input_right_pads.size() != WindowRank)
{
std::cout << "Parameter is incorrect" << std::endl;
return false;
}
std::vector<index_t> out_length(InOutRank);
int N = in_length[0];
int C = in_length[1];
out_length[0] = N;
out_length[1] = C;
// Calculate Ho, Wo
for(unsigned i = 2; i < InOutRank; ++i)
{
const int idx = i - 2;
auto pad1 = input_left_pads[idx];
auto pad2 = input_right_pads[idx];
auto windows_size = window_spatial_lengths[idx];
auto windows_stride = window_strides[idx];
auto windows_dilation = window_dilations[idx];
auto eff = (windows_size - 1) * windows_dilation + 1;
out_length[i] = (in_length[i] + pad1 + pad2 - eff) / windows_stride + 1;
}
int Hi = in_length[2];
int Wi = in_length[3];
int Ho = out_length[2];
int Wo = out_length[3];
auto f_host_tensor_descriptor =
[](std::size_t N_, std::size_t C_, std::size_t H, std::size_t W) {
using namespace ck::literals;
return HostTensorDescriptor({N_, C_, H, W}, {C_ * H * W, 1_uz, W * C_, C_});
};
Tensor<InDataType> in_n_c_hi_wi(f_host_tensor_descriptor(N, C, Hi, Wi));
Tensor<OutDataType> out_n_c_ho_wo(f_host_tensor_descriptor(N, C, Ho, Wo));
Tensor<IndexDataType> out_indices_n_c_ho_wo(f_host_tensor_descriptor(N, C, Ho, Wo));
Tensor<DOutDataType> dout_n_c_ho_wo(f_host_tensor_descriptor(N, C, Ho, Wo));
Tensor<DInDataType> din_n_c_hi_wi_host(f_host_tensor_descriptor(N, C, Hi, Wi));
Tensor<DInDataType> din_n_c_hi_wi_device(f_host_tensor_descriptor(N, C, Hi, Wi));
switch(init_method)
{
case 0: {
in_n_c_hi_wi.GenerateTensorValue(GeneratorTensor_1<InDataType>{});
dout_n_c_ho_wo.GenerateTensorValue(GeneratorTensor_1<DOutDataType>{});
break;
}
case 1: {
in_n_c_hi_wi.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5});
dout_n_c_ho_wo.GenerateTensorValue(GeneratorTensor_2<DOutDataType>{-5, 5});
break;
}
default: {
in_n_c_hi_wi.GenerateTensorValue(GeneratorTensor_3<InDataType>{-0.5, 0.5});
dout_n_c_ho_wo.GenerateTensorValue(GeneratorTensor_3<DOutDataType>{-0.5, 0.5});
}
}
DeviceMem indices_device_buf(sizeof(IndexDataType) *
out_indices_n_c_ho_wo.mDesc.GetElementSpaceSize());
DeviceMem dout_device_buf(sizeof(DOutDataType) * dout_n_c_ho_wo.mDesc.GetElementSpaceSize());
DeviceMem din_device_buf(sizeof(DInDataType) *
din_n_c_hi_wi_device.mDesc.GetElementSpaceSize());
// Generate the index data by running the reference max-pool forward pass
{
using ReferencePoolingFwdInstance =
ck::tensor_operation::host::ReferencePoolingFwd<InOutRank,
WindowRank,
InDataType,
OutDataType,
ComputeDataType,
IndexDataType,
ck::ReduceTensorOp::MAX,
false,
true>;
ReferencePoolingFwdInstance ref_pooling_fwd;
auto ref_pooling_fwd_argument = ref_pooling_fwd.MakeArgument(in_n_c_hi_wi,
out_n_c_ho_wo,
out_indices_n_c_ho_wo,
window_spatial_lengths,
window_strides,
window_dilations,
input_left_pads,
input_right_pads);
auto ref_pooling_fwd_invoker = ref_pooling_fwd.MakeInvoker();
ref_pooling_fwd_invoker.Run(ref_pooling_fwd_argument);
}
indices_device_buf.ToDevice(out_indices_n_c_ho_wo.mData.data());
dout_device_buf.ToDevice(dout_n_c_ho_wo.mData.data());
using DeviceOp =
ck::tensor_operation::device::DeviceMaxPoolBwd<DOutDataType, IndexDataType, DInDataType>;
// get device op instances
const auto instance_ptrs =
ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
DeviceOp>::GetInstances();
std::cout << "found " << instance_ptrs.size() << " instances" << std::endl;
std::string best_instance_name;
float best_avg_time = std::numeric_limits<float>::max();
float best_gb_per_sec = 0;
if(do_verification)
{
using ReferencePoolingBwdInstance =
ck::tensor_operation::host::ReferenceMaxPoolBwd<DOutDataType,
IndexDataType,
ComputeDataType,
DInDataType,
PassThrough>;
ReferencePoolingBwdInstance ref_pooling_bwd;
auto ref_pooling_bwd_argument = ref_pooling_bwd.MakeArgument(
dout_n_c_ho_wo, out_indices_n_c_ho_wo, din_n_c_hi_wi_host, PassThrough{});
auto ref_invoker = ref_pooling_bwd.MakeInvoker();
ref_invoker.Run(ref_pooling_bwd_argument);
}
int num_kernel = 0;
bool pass = true;
bool instance_found = false;
for(auto& inst_ptr : instance_ptrs)
{
auto argument_ptr = inst_ptr->MakeArgumentPointer(
static_cast<DOutDataType*>(dout_device_buf.GetDeviceBuffer()),
static_cast<IndexDataType*>(indices_device_buf.GetDeviceBuffer()),
static_cast<DInDataType*>(din_device_buf.GetDeviceBuffer()),
dout_n_c_ho_wo.mDesc.GetElementSpaceSize(),
din_n_c_hi_wi_device.mDesc.GetElementSpaceSize(),
window_spatial_lengths,
window_strides,
window_dilations);
if(inst_ptr->IsSupportedArgument(argument_ptr.get()))
{
++num_kernel;
instance_found = true;
}
else
{
if(time_kernel)
{
std::cout << inst_ptr->GetTypeString() << " skipped due to unsupported argument: ";
LogRange(std::cout << "doutput lengths = ", out_length, ", ") << std::endl;
}
continue;
}
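// Some max-pool BWD instances need scratch memory; query the per-argument workspace size
// and attach a device buffer before invoking the kernel.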
size_t workspace_sz = inst_ptr->GetWorkSpaceSize(argument_ptr.get());
DeviceMem workspace_device_buf(workspace_sz);
inst_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace_device_buf.GetDeviceBuffer());
auto invoker_ptr = inst_ptr->MakeInvokerPointer();
float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
std::size_t num_bytes =
dout_n_c_ho_wo.mDesc.GetElementSize() * sizeof(DOutDataType) +
out_indices_n_c_ho_wo.mDesc.GetElementSize() * sizeof(IndexDataType) +
din_n_c_hi_wi_device.mDesc.GetElementSize() * sizeof(DInDataType);
float gb_per_sec = num_bytes / 1.E6 / avg_time;
if(time_kernel)
std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << gb_per_sec << " GB/s, "
<< inst_ptr->GetTypeString() << std::endl;
if(avg_time < best_avg_time)
{
best_instance_name = inst_ptr->GetTypeString();
best_avg_time = avg_time;
best_gb_per_sec = gb_per_sec;
}
if(do_verification)
{
din_device_buf.FromDevice(din_n_c_hi_wi_device.mData.data());
bool local_pass = ck::utils::check_err(din_n_c_hi_wi_device.mData,
din_n_c_hi_wi_host.mData,
"Error: Incorrect results",
1e-3,
1e-3);
if(do_log)
{
LogRangeAsType<float>(
std::cout << "out_indices_n_c_ho_wo: ", out_indices_n_c_ho_wo.mData, ",")
<< std::endl;
LogRangeAsType<float>(
std::cout << "din_n_c_hi_wi_device: ", din_n_c_hi_wi_device.mData, ",")
<< std::endl;
LogRangeAsType<float>(
std::cout << "din_n_c_hi_wi_host: ", din_n_c_hi_wi_host.mData, ",")
<< std::endl;
}
if(!local_pass)
{
std::cout << inst_ptr->GetTypeString() << " failed verification: ";
LogRange(std::cout << "doutput lengths = [", out_length, ", ") << "]." << std::endl;
pass &= local_pass;
}
else
{
if(time_kernel)
{
std::cout << "pass" << std::endl;
}
}
}
}
if(time_kernel)
{
LogRange(std::cout << "length = ", out_length, ",") << std::endl;
std::cout << "best perf = " << best_avg_time << " ms, " << best_gb_per_sec << " GB/s, "
<< best_instance_name << std::endl;
}
if(num_kernel == 0)
{
std::cout << "Error: No kernel is applicable" << std::endl;
return false;
}
return pass && instance_found;
}
} // namespace profiler
} // namespace ck
@@ -13,6 +13,8 @@ set(PROFILER_SOURCES
profile_max_pool3d_fwd.cpp
profile_avg_pool3d_bwd.cpp
profile_max_pool3d_bwd.cpp
profile_avg_pool2d_bwd.cpp
profile_max_pool2d_bwd.cpp
profile_softmax.cpp
profile_batchnorm_fwd.cpp
profile_batchnorm_bwd.cpp
@@ -101,6 +103,7 @@ target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_reduce_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batchnorm_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_pool2d_fwd_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_pool3d_fwd_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_avg_pool2d_bwd_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_avg_pool3d_bwd_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_max_pool_bwd_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_image_to_column_instance)
...
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
#include <iostream>
#include <vector>
#include <unordered_map>
#include "profiler/data_type_enum.hpp"
#include "profiler/profile_avg_pool2d_bwd_impl.hpp"
#include "profiler_operation_registry.hpp"
using ck::index_t;
struct maxPoolbwdArgParser
{
std::unordered_map<std::string, std::vector<int>> long_opts = {{"length", {}},
{"wsize", {}},
{"wstride", {}},
{"wdilation", {}},
{"pad1", {}},
{"pad2", {}}};
bool parse_opt(int argc, char* argv[], const std::string& key, int i)
{
if(std::string("--") + key == argv[i])
{
int pos = i;
while(++i < argc && argv[i][0] != '-') {}
int end = i;
for(int j = pos + 1; j < end; j++)
{
long_opts[key].push_back(std::stoi(argv[j]));
}
return true;
}
return false;
}
void operator()(int argc, char* argv[])
{
for(auto& kv : long_opts)
{
for(int i = 1; i < argc; i++)
{
if(parse_opt(argc, argv, kv.first, i))
break;
}
}
}
};
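// Example: "--length 2 32 30 30" yields long_opts["length"] == {2, 32, 30, 30};
// parsing stops at the next token that starts with '-'.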
void print_help_avg_pool2d_bwd()
{
std::cout << "arg1: data type (0: fp16; 1: fp32; 3: int8; 5: bf16, 7: Float8)\n"
<< "arg2: verification (0: no; 1: yes)\n"
<< "arg3: initialization (0: no init; 1: integer value; 2: decimal value)\n"
<< "arg4: print tensor value (0: no; 1: yes)\n"
<< "arg5: time kernel (0=no, 1=yes)\n"
<< "--length: input tensor length for NCHW(e.g, --length 2 32 30 30) \n"
<< "--wsize: window size for YX (e.g, --wsize 2 2) \n"
<< "--wstride: window stride for HW (e.g, --wstride 2 2) \n"
<< "--wdilation: window dilation for HW (e.g, --wdilation 1 1) \n"
<< "--pad1: left side of padding in HW (e.g, --pad1 1 1) \n"
<< "--pad2: right side of padding in HW (e.g, --pad2 1 1) \n"
<< "eg: ckProfiler avg_pool2d_bwd 0 1 2 0 --length 2 32 30 30 --wsize 2 2 "
"--wstride 2 2 --wdilation 1 1 --pad1 1 1 --pad2 1 1"
<< std::endl;
}
int profile_avg_pool2d_bwd(int argc, char* argv[])
{
ck::DataTypeEnum data_type = ck::DataTypeEnum::Float8;
bool do_verification = true;
int init_method = 2;
bool do_log = false;
bool time_kernel = true;
std::vector<index_t> in_length = {2, 32, 30, 30};
std::vector<index_t> wsize = {2, 2};
std::vector<index_t> wstride = {2, 2};
std::vector<index_t> wdilation = {1, 1};
std::vector<index_t> pad1 = {1, 1};
std::vector<index_t> pad2 = {1, 1};
if(argc != 2 && argc != 33)
{
print_help_avg_pool2d_bwd();
return 0;
}
else if(argc == 33)
{
data_type = static_cast<ck::DataTypeEnum>(std::stoi(argv[2]));
do_verification = std::stoi(argv[3]);
init_method = std::stoi(argv[4]);
do_log = std::stoi(argv[5]);
time_kernel = std::stoi(argv[6]);
maxPoolbwdArgParser arg_parser;
arg_parser(argc, argv);
in_length = arg_parser.long_opts["length"];
wsize = arg_parser.long_opts["wsize"];
wstride = arg_parser.long_opts["wstride"];
wdilation = arg_parser.long_opts["wdilation"];
pad1 = arg_parser.long_opts["pad1"];
pad2 = arg_parser.long_opts["pad2"];
}
using F16 = ck::half_t;
using BF16 = ck::bhalf_t;
using F8 = ck::f8_t;
using F32 = float;
using I8 = int8_t;
using NHWC = ck::tensor_layout::convolution::NHWC;
if(data_type == ck::DataTypeEnum::Half)
{
ck::profiler::profile_avg_pool2d_bwd_impl<F16, F16, NHWC, NHWC>(do_verification,
init_method,
do_log,
time_kernel,
in_length,
wsize,
wstride,
wdilation,
pad1,
pad2);
}
else if(data_type == ck::DataTypeEnum::BFloat16)
{
ck::profiler::profile_avg_pool2d_bwd_impl<BF16, BF16, NHWC, NHWC>(do_verification,
init_method,
do_log,
time_kernel,
in_length,
wsize,
wstride,
wdilation,
pad1,
pad2);
}
else if(data_type == ck::DataTypeEnum::Float)
{
ck::profiler::profile_avg_pool2d_bwd_impl<F32, F32, NHWC, NHWC>(do_verification,
init_method,
do_log,
time_kernel,
in_length,
wsize,
wstride,
wdilation,
pad1,
pad2);
}
else if(data_type == ck::DataTypeEnum::Float8)
{
ck::profiler::profile_avg_pool2d_bwd_impl<F8, F8, NHWC, NHWC>(do_verification,
init_method,
do_log,
time_kernel,
in_length,
wsize,
wstride,
wdilation,
pad1,
pad2);
}
else if(data_type == ck::DataTypeEnum::Int8)
{
ck::profiler::profile_avg_pool2d_bwd_impl<I8, I8, NHWC, NHWC>(do_verification,
init_method,
do_log,
time_kernel,
in_length,
wsize,
wstride,
wdilation,
pad1,
pad2);
}
else
{
throw std::runtime_error("not implemented yet");
}
return 0;
}
REGISTER_PROFILER_OPERATION("avg_pool2d_bwd", "avg_pool2d bwd", profile_avg_pool2d_bwd);