Commit 03cd2692 authored by Bartlomiej Wroblewski's avatar Bartlomiej Wroblewski

Merge remote-tracking branch 'origin/develop' into bwroblew/warp_wise_dpp8

parents bf445c31 f5ec04f0
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>
#include "profiler/profile_gemm_multiply_add_impl.hpp"
#include "profiler_operation_registry.hpp"
#define OP_NAME "gemm_multiply_add"
#define OP_DESC "GEMM+MULTIPLY+ADD"
int profile_gemm_multiply_add(int argc, char* argv[])
{
enum struct MatrixLayout
{
MK_KN_MN_MN_MN, // 0
MK_NK_MN_MN_MN, // 1
};
enum struct MatrixDataType
{
F16_F16_F16_F16_F16, // 0
F16_F8_F32_F32_F16, // 1
};
if(argc != 16)
{
// clang-format off
printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
printf("arg2: data type (0: fp16; 1: fp16Afp8B)\n");
printf("arg3: matrix layout (0: E[m, n] = Multiply_Add((A[m, k] * B[k, n]) x D1[m, n] + D0[m, n]);\n");
printf(" 1: E[m, n] = Multiply_Add((A[m, k] * B[n, k]) x D1[m, n] + D0[m, n]);\n");
printf("arg4: verification (0: no; 1: yes)\n");
printf("arg5: initialization (0: no init; 1: integer value; 2: decimal value)\n");
printf("arg6: print tensor value (0: no; 1: yes)\n");
printf("arg7: time kernel (0=no, 1=yes)\n");
printf("arg8 to 15: M, N, K, StrideA, StrideB, StrideD0, StrideD1, StrideE\n");
// clang-format on
exit(1);
}
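// Illustrative invocation (sizes and flag values here are arbitrary, not from
// the source; a negative stride selects the packed default computed below):
//   ckProfiler gemm_multiply_add 0 0 1 1 0 1 1024 1024 1024 -1 -1 -1 -1 -1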
const auto data_type = static_cast<MatrixDataType>(std::stoi(argv[2]));
const auto layout = static_cast<MatrixLayout>(std::stoi(argv[3]));
const bool do_verification = std::stoi(argv[4]);
const int init_method = std::stoi(argv[5]);
const bool do_log = std::stoi(argv[6]);
const bool time_kernel = std::stoi(argv[7]);
const int M = std::stoi(argv[8]);
const int N = std::stoi(argv[9]);
const int K = std::stoi(argv[10]);
const int StrideA = std::stoi(argv[11]);
const int StrideB = std::stoi(argv[12]);
const int StrideD0 = std::stoi(argv[13]);
const int StrideD1 = std::stoi(argv[14]);
const int StrideE = std::stoi(argv[15]);
using F8 = ck::f8_t;
using F16 = ck::half_t;
using F32 = float;
using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;
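// Dispatch helper: the generic lambda receives default-constructed "tag"
// values and recovers their compile-time types via decltype, so a single
// body serves every data-type/layout combination selected below.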
auto profile = [&](auto a_type,
auto b_type,
auto acc_type,
auto d0_type,
auto d1_type,
auto e_type,
auto a_layout,
auto b_layout,
auto d0_layout,
auto d1_layout,
auto e_layout) {
using ADataType = decltype(a_type);
using BDataType = decltype(b_type);
using AccDataType = decltype(acc_type);
using D0DataType = decltype(d0_type);
using D1DataType = decltype(d1_type);
using EDataType = decltype(e_type);
using ALayout = decltype(a_layout);
using BLayout = decltype(b_layout);
using D0Layout = decltype(d0_layout);
using D1Layout = decltype(d1_layout);
using ELayout = decltype(e_layout);
const int DefaultStrideA = ck::is_same_v<ALayout, Row> ? K : M;
const int DefaultStrideB = ck::is_same_v<BLayout, Row> ? N : K;
const int DefaultStrideD0 = ck::is_same_v<D0Layout, Row> ? N : M;
const int DefaultStrideD1 = ck::is_same_v<D1Layout, Row> ? N : M;
const int DefaultStrideE = ck::is_same_v<ELayout, Row> ? N : M;
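// The defaults above describe densely packed matrices: the leading-dimension
// stride is the column count for row-major storage and the row count for
// column-major storage. They apply whenever the caller passes a negative stride.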
bool pass = ck::profiler::profile_gemm_multiply_add_impl<ADataType,
BDataType,
AccDataType,
D0DataType,
D1DataType,
EDataType,
ALayout,
BLayout,
D0Layout,
D1Layout,
ELayout>(
do_verification,
init_method,
do_log,
time_kernel,
M,
N,
K,
(StrideA < 0) ? DefaultStrideA : StrideA,
(StrideB < 0) ? DefaultStrideB : StrideB,
(StrideD0 < 0) ? DefaultStrideD0 : StrideD0,
(StrideD1 < 0) ? DefaultStrideD1 : StrideD1,
(StrideE < 0) ? DefaultStrideE : StrideE);
return pass ? 0 : 1;
};
if(data_type == MatrixDataType::F16_F16_F16_F16_F16 && layout == MatrixLayout::MK_KN_MN_MN_MN)
{
return profile(F16{}, F16{}, F32{}, F16{}, F16{}, F16{}, Row{}, Row{}, Row{}, Row{}, Row{});
}
else if(data_type == MatrixDataType::F16_F16_F16_F16_F16 &&
layout == MatrixLayout::MK_NK_MN_MN_MN)
{
return profile(F16{}, F16{}, F32{}, F16{}, F16{}, F16{}, Row{}, Col{}, Row{}, Row{}, Row{});
}
else if(data_type == MatrixDataType::F16_F8_F32_F32_F16 &&
layout == MatrixLayout::MK_KN_MN_MN_MN)
{
return profile(F16{}, F8{}, F32{}, F32{}, F32{}, F16{}, Row{}, Row{}, Row{}, Row{}, Row{});
}
else if(data_type == MatrixDataType::F16_F8_F32_F32_F16 &&
layout == MatrixLayout::MK_NK_MN_MN_MN)
{
return profile(F16{}, F8{}, F32{}, F32{}, F32{}, F16{}, Row{}, Col{}, Row{}, Row{}, Row{});
}
else
{
std::cout << "this data_type & layout is not implemented" << std::endl;
return 1;
}
}
REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_gemm_multiply_add);
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
#include <iostream>
#include <vector>
#include <unordered_map>
#include "profiler/data_type_enum.hpp"
#include "profiler/profile_max_pool3d_bwd_impl.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "profiler_operation_registry.hpp"
using ck::index_t;
struct maxPoolbwdArgParser
{
std::unordered_map<std::string, std::vector<int>> long_opts = {{"length", {}},
{"wsize", {}},
{"wstride", {}},
{"wdilation", {}},
{"pad1", {}},
{"pad2", {}}};
bool parse_opt(int argc, char* argv[], const std::string& key, int i)
{
if(std::string("--") + key == argv[i])
{
int pos = i;
while(++i < argc && argv[i][0] != '-') {}
int end = i;
for(int j = pos + 1; j < end; j++)
{
long_opts[key].push_back(std::stoi(argv[j]));
}
return true;
}
return false;
}
void operator()(int argc, char* argv[])
{
for(auto& kv : long_opts)
{
for(int i = 1; i < argc; i++)
{
if(parse_opt(argc, argv, kv.first, i))
break;
}
}
}
};
void print_help_max_pool3d_bwd()
{
std::cout << "arg1: data type (0: fp16; 1: fp32; 5: bf16)\n"
<< "arg2: verification (0: no; 1: yes)\n"
<< "arg3: initialization (0: no init; 1: integer value; 2: decimal value)\n"
<< "arg4: print tensor value (0: no; 1: yes)\n"
<< "arg5: time kernel (0=no, 1=yes)\n"
<< "--length: input tensor length for NCDHW(e.g, --length 2 32 30 30 30) \n"
<< "--wsize: window size for ZYX (e.g, --wsize 2 2 2) \n"
<< "--wstride: window stride for DHW (e.g, --wstride 2 2 2) \n"
<< "--wdilation: window dilation for DHW (e.g, --wdilation 1 1 1) \n"
<< "--pad1: left side of padding in DHW (e.g, --pad1 1 1 1) \n"
<< "--pad2: right side of padding in DHW (e.g, --pad2 1 1 1) \n"
<< "eg: ckProfiler max_pool3d_bwd 0 1 2 0 1 --length 2 32 30 30 30 --wsize 2 2 2 "
"--wstride 2 2 2 --wdilation 1 1 1 --pad1 1 1 1 --pad2 1 1 1"
<< std::endl;
}
int profile_max_pool3d_bwd(int argc, char* argv[])
{
ck::DataTypeEnum data_type = ck::DataTypeEnum::Half;
bool do_verification = true;
int init_method = 0;
bool do_log = false;
bool time_kernel = true;
std::vector<index_t> in_length = {2, 32, 30, 30, 30};
std::vector<index_t> wsize = {2, 2, 2};
std::vector<index_t> wstride = {2, 2, 2};
std::vector<index_t> wdilation = {1, 1, 1};
std::vector<index_t> pad1 = {1, 1, 1};
std::vector<index_t> pad2 = {1, 1, 1};
if(argc != 2 && argc != 33)
{
print_help_max_pool3d_bwd();
return 0;
}
else if(argc == 33)
{
data_type = static_cast<ck::DataTypeEnum>(std::stoi(argv[2]));
do_verification = std::stoi(argv[3]);
init_method = std::stoi(argv[4]);
do_log = std::stoi(argv[5]);
time_kernel = std::stoi(argv[6]);
// parse the long options
maxPoolbwdArgParser arg_parser;
arg_parser(argc, argv);
in_length = arg_parser.long_opts["length"];
wsize = arg_parser.long_opts["wsize"];
wstride = arg_parser.long_opts["wstride"];
wdilation = arg_parser.long_opts["wdilation"];
pad1 = arg_parser.long_opts["pad1"];
pad2 = arg_parser.long_opts["pad2"];
}
#ifdef CK_ENABLE_FP16
using F16 = ck::half_t;
#endif
#ifdef CK_ENABLE_BF16
using BF16 = ck::bhalf_t;
#endif
#ifdef CK_ENABLE_FP32
using F32 = float;
#endif
using I32 = int32_t;
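// The no-op "if(false);" below lets every data-type branch be an "else if"
// guarded by its own #ifdef, so any subset of branches can be compiled out
// without leaving a dangling "else".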
if(false)
;
#ifdef CK_ENABLE_FP16
else if(data_type == ck::DataTypeEnum::Half)
{
ck::profiler::profile_max_pool3d_bwd_impl<F16, F16, I32, F16, F16, false>(do_verification,
init_method,
do_log,
time_kernel,
in_length,
wsize,
wstride,
wdilation,
pad1,
pad2);
}
#endif
#ifdef CK_ENABLE_BF16
else if(data_type == ck::DataTypeEnum::BFloat16)
{
ck::profiler::profile_max_pool3d_bwd_impl<BF16, BF16, I32, BF16, BF16, false>(
do_verification,
init_method,
do_log,
time_kernel,
in_length,
wsize,
wstride,
wdilation,
pad1,
pad2);
}
#endif
#ifdef CK_ENABLE_FP32
else if(data_type == ck::DataTypeEnum::Float)
{
ck::profiler::profile_max_pool3d_bwd_impl<F32, F32, I32, F32, F32, false>(do_verification,
init_method,
do_log,
time_kernel,
in_length,
wsize,
wstride,
wdilation,
pad1,
pad2);
}
#endif
else
{
throw std::runtime_error("not implemented yet");
}
return 0;
}
REGISTER_PROFILER_OPERATION("max_pool3d_bwd", "max_pool3d bwd", profile_max_pool3d_bwd);
@@ -51,7 +51,7 @@ struct maxPoolFwdArgParser
void print_help_max_pool3d_fwd()
{
std::cout << "arg1: data type (0: fp16; 1: fp32)\n"
std::cout << "arg1: data type (0: fp16; 1: fp32; 5: bf16)\n"
<< "arg2: verification (0: no; 1: yes)\n"
<< "arg3: initialization (0: no init; 1: integer value; 2: decimal value)\n"
<< "arg4: print tensor value (0: no; 1: yes)\n"
@@ -109,8 +109,15 @@ int profile_max_pool3d_fwd(int argc, char* argv[])
pad2 = arg_parser.long_opts["pad2"];
}
-    using F16 = ck::half_t;
-    using F32 = float;
+#ifdef CK_ENABLE_FP16
+    using F16 = ck::half_t;
+#endif
+#ifdef CK_ENABLE_BF16
+    using BF16 = ck::bhalf_t;
+#endif
+#ifdef CK_ENABLE_FP32
+    using F32 = float;
+#endif
using I32 = int32_t;
using NDHWC = ck::tensor_layout::convolution::NDHWC;
@@ -120,7 +127,10 @@ int profile_max_pool3d_fwd(int argc, char* argv[])
constexpr auto ReduceOpId = ck::ReduceTensorOp::AVG;
#endif
-    if(data_type == ck::DataTypeEnum::Half)
+    if(false)
+        ;
+#ifdef CK_ENABLE_FP16
+    else if(data_type == ck::DataTypeEnum::Half)
{
if(return_index)
ck::profiler::
@@ -149,6 +159,51 @@ int profile_max_pool3d_fwd(int argc, char* argv[])
pad1,
pad2);
}
#endif
#ifdef CK_ENABLE_BF16
else if(data_type == ck::DataTypeEnum::BFloat16)
{
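// The trailing template flag mirrors return_index; when true the kernel
// presumably also materializes the index tensor consumed by the backward
// pass (an assumption based on this branch). The data-type wiring is
// otherwise identical to the F16 branch above.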
if(return_index)
ck::profiler::profile_pool3d_fwd_impl<BF16,
BF16,
BF16,
I32,
NDHWC,
NDHWC,
ReduceOpId,
false,
true>(do_verification,
init_method,
do_log,
time_kernel,
in_length,
wsize,
wstride,
wdilation,
pad1,
pad2);
else
ck::profiler::profile_pool3d_fwd_impl<BF16,
BF16,
BF16,
I32,
NDHWC,
NDHWC,
ReduceOpId,
false,
false>(do_verification,
init_method,
do_log,
time_kernel,
in_length,
wsize,
wstride,
wdilation,
pad1,
pad2);
}
#endif
#ifdef CK_ENABLE_FP32
else if(data_type == ck::DataTypeEnum::Float)
{
if(return_index)
@@ -178,6 +233,7 @@ int profile_max_pool3d_fwd(int argc, char* argv[])
pad1,
pad2);
}
#endif
else
{
throw std::runtime_error("not implemented yet");
......
@@ -16,4 +16,3 @@ cmake
-D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
-D USE_BITINT_EXTENSION_INT4=OFF \
${MY_PROJECT_SOURCE}
@@ -57,7 +57,7 @@ add_subdirectory(data_type)
add_subdirectory(elementwise_normalization)
add_subdirectory(batchnorm)
add_subdirectory(contraction)
-add_subdirectory(pool_fwd)
+add_subdirectory(pool)
add_subdirectory(batched_gemm_multi_d)
add_subdirectory(grouped_convnd_bwd_data)
if(GPU_TARGETS MATCHES "gfx11")
......
-add_custom_target(test_pool_fwd)
+add_custom_target(test_pool)
add_gtest_executable(test_avg_pool3d_bwd test_avg_pool3d_bwd.cpp)
add_gtest_executable(test_max_pool3d_bwd test_max_pool3d_bwd.cpp)
add_gtest_executable(test_avg_pool3d_fwd test_avg_pool3d_fwd.cpp)
add_gtest_executable(test_max_pool3d_fwd test_max_pool3d_fwd.cpp)
target_link_libraries(test_avg_pool3d_bwd PRIVATE utility device_avg_pool3d_bwd_instance)
target_link_libraries(test_max_pool3d_bwd PRIVATE utility device_max_pool_bwd_instance)
target_link_libraries(test_avg_pool3d_fwd PRIVATE utility device_pool3d_fwd_instance)
target_link_libraries(test_max_pool3d_fwd PRIVATE utility device_pool3d_fwd_instance)
-add_dependencies(test_pool_fwd test_avg_pool3d_fwd)
-add_dependencies(test_pool_fwd test_max_pool3d_fwd)
+add_dependencies(test_pool test_avg_pool3d_bwd)
+add_dependencies(test_pool test_max_pool3d_bwd)
+add_dependencies(test_pool test_avg_pool3d_fwd)
+add_dependencies(test_pool test_max_pool3d_fwd)
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
#include "gtest/gtest.h"
#include "profiler/profile_avg_pool3d_bwd_impl.hpp"
#include "test_pool_fwd_common.hpp"
template <typename Tuple>
class TestAvgPool3dBwd : public ::testing::Test
{
protected:
using DOutDataType = std::tuple_element_t<0, Tuple>;
using DInDataType = std::tuple_element_t<1, Tuple>;
using ComputeDataType = std::tuple_element_t<2, Tuple>;
using DOutLayout = std::tuple_element_t<3, Tuple>;
using DInLayout = std::tuple_element_t<4, Tuple>;
std::vector<PoolingParam> params;
void Run()
{
for(auto param : params)
{
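// Positional arguments: do_verification=true, init_method=2 (decimal
// initialization), do_log=false, time_kernel=false, then the pooling
// geometry carried by PoolingParam.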
bool success =
ck::profiler::profile_avg_pool3d_bwd_impl<DOutDataType,
DInDataType,
ComputeDataType,
DOutLayout,
DInLayout>(true,
2,
false,
false,
param.length_,
param.window_spatial_lengths_,
param.window_strides_,
param.window_dilations_,
param.input_left_pads_,
param.input_right_pads_);
EXPECT_TRUE(success);
}
}
};
#if defined(CK_ENABLE_FP16) && defined(CK_ENABLE_BF16) && defined(CK_ENABLE_FP32)
using KernelTypes = ::testing::Types<std::tuple<F16, F16, F32, NDHWC, NDHWC>,
std::tuple<BF16, BF16, F32, NDHWC, NDHWC>,
std::tuple<F32, F32, F32, NDHWC, NDHWC>>;
#elif defined(CK_ENABLE_FP16) && defined(CK_ENABLE_FP32)
using KernelTypes = ::testing::Types<std::tuple<F16, F16, F32, NDHWC, NDHWC>,
std::tuple<F32, F32, F32, NDHWC, NDHWC>>;
#elif defined(CK_ENABLE_BF16) && defined(CK_ENABLE_FP32)
using KernelTypes = ::testing::Types<std::tuple<BF16, BF16, F32, NDHWC, NDHWC>,
std::tuple<F32, F32, F32, NDHWC, NDHWC>>;
#elif defined(CK_ENABLE_FP16) && defined(CK_ENABLE_BF16)
using KernelTypes = ::testing::Types<std::tuple<F16, F16, F32, NDHWC, NDHWC>,
std::tuple<BF16, BF16, F32, NDHWC, NDHWC>>;
#elif defined(CK_ENABLE_FP16)
using KernelTypes = ::testing::Types<std::tuple<F16, F16, F32, NDHWC, NDHWC>>;
#elif defined(CK_ENABLE_BF16)
using KernelTypes = ::testing::Types<std::tuple<BF16, BF16, F32, NDHWC, NDHWC>>;
#elif defined(CK_ENABLE_FP32)
using KernelTypes = ::testing::Types<std::tuple<F32, F32, F32, NDHWC, NDHWC>>;
#endif
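// The #elif cascade above narrows the typed-test list to whichever data
// types were enabled at configure time via CK_ENABLE_FP16/BF16/FP32.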
TYPED_TEST_SUITE(TestAvgPool3dBwd, KernelTypes);
TYPED_TEST(TestAvgPool3dBwd, Test_Pool)
{
// length, window_length, window_stride, window_dilation, left_pad, right_pad
this->params = {{{1, 1, 1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}},
{{2, 16, 64, 64, 64}, {4, 4, 4}, {4, 4, 4}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}},
{{2, 32, 30, 30, 30}, {2, 2, 2}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}}};
this->Run();
}
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
#include "gtest/gtest.h"
#include "profiler/profile_max_pool3d_bwd_impl.hpp"
#include "test_pool_fwd_common.hpp"
template <typename Tuple>
class TestMaxPool3dBwd : public ::testing::Test
{
protected:
using DOutDataType = std::tuple_element_t<0, Tuple>;
using DInDataType = std::tuple_element_t<1, Tuple>;
using IndexDataType = std::tuple_element_t<2, Tuple>;
using InDataType = DInDataType;
using OutDataType = DOutDataType;
std::vector<PoolingParam> params;
void Run()
{
for(auto param : params)
{
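// Same call shape as the avg-pool test: verification on, decimal init (2),
// logging and kernel timing off, then the geometry from PoolingParam.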
bool success =
ck::profiler::profile_max_pool3d_bwd_impl<InDataType,
OutDataType,
IndexDataType,
DOutDataType,
DInDataType,
false>(true,
2,
false,
false,
param.length_,
param.window_spatial_lengths_,
param.window_strides_,
param.window_dilations_,
param.input_left_pads_,
param.input_right_pads_);
EXPECT_TRUE(success);
}
}
};
#if defined(CK_ENABLE_FP16) && defined(CK_ENABLE_BF16) && defined(CK_ENABLE_FP32)
using KernelTypes = ::testing::Types<std::tuple<F16, F16, I32, NDHWC, NDHWC>,
std::tuple<BF16, BF16, I32, NDHWC, NDHWC>,
std::tuple<F32, F32, I32, NDHWC, NDHWC>>;
#elif defined(CK_ENABLE_FP16) && defined(CK_ENABLE_FP32)
using KernelTypes = ::testing::Types<std::tuple<F16, F16, I32, NDHWC, NDHWC>,
std::tuple<F32, F32, I32, NDHWC, NDHWC>>;
#elif defined(CK_ENABLE_BF16) && defined(CK_ENABLE_FP32)
using KernelTypes = ::testing::Types<std::tuple<BF16, BF16, I32, NDHWC, NDHWC>,
std::tuple<F32, F32, I32, NDHWC, NDHWC>>;
#elif defined(CK_ENABLE_FP16) && defined(CK_ENABLE_BF16)
using KernelTypes = ::testing::Types<std::tuple<F16, F16, I32, NDHWC, NDHWC>,
std::tuple<BF16, BF16, I32, NDHWC, NDHWC>>;
#elif defined(CK_ENABLE_FP16)
using KernelTypes = ::testing::Types<std::tuple<F16, F16, I32, NDHWC, NDHWC>>;
#elif defined(CK_ENABLE_BF16)
using KernelTypes = ::testing::Types<std::tuple<BF16, BF16, I32, NDHWC, NDHWC>>;
#elif defined(CK_ENABLE_FP32)
using KernelTypes = ::testing::Types<std::tuple<F32, F32, I32, NDHWC, NDHWC>>;
#endif
TYPED_TEST_SUITE(TestMaxPool3dBwd, KernelTypes);
TYPED_TEST(TestMaxPool3dBwd, Test_Pool)
{
// length, window_length, window_stride, window_dilation, left_pad, right_pad
this->params = {{{1, 1, 1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}},
{{2, 16, 64, 64, 64}, {4, 4, 4}, {4, 4, 4}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}},
{{2, 32, 30, 30, 30}, {2, 2, 2}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}}};
// this->params = {{{2, 32, 30, 30, 30}, {2, 2, 2}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1,
// 1}}};
this->Run();
}
@@ -4,10 +4,12 @@
#include "gtest/gtest.h"
#include "ck/ck.hpp"
-using F16 = ck::half_t;
-using F32 = float;
-using I32 = int32_t;
+using F16 = ck::half_t;
+using BF16 = ck::bhalf_t;
+using F32 = float;
+using I32 = int32_t;
using ck::index_t;
using NDHWC = ck::tensor_layout::convolution::NDHWC;
struct PoolingParam
{
......