Unverified commit 408534d4, authored by Rostyslav Geyyer, committed by GitHub

Merge branch 'develop' into lwpck-1815

parents a8efb3f0 da214a5a
 // SPDX-License-Identifier: MIT
-// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2023-2024, Advanced Micro Devices, Inc. All rights reserved.

 #pragma once
@@ -25,12 +25,13 @@ class TestGemmUniversal : public testing::Test
     using F32 = float;

     protected:
-    using ALayout   = std::tuple_element_t<0, Tuple>;
-    using BLayout   = std::tuple_element_t<1, Tuple>;
-    using CLayout   = Row;
-    using ADataType = std::tuple_element_t<2, Tuple>;
-    using BDataType = std::tuple_element_t<3, Tuple>;
-    using CDataType = std::tuple_element_t<4, Tuple>;
+    using ALayout         = std::tuple_element_t<0, Tuple>;
+    using BLayout         = std::tuple_element_t<1, Tuple>;
+    using CLayout         = Row;
+    using ADataType       = std::tuple_element_t<2, Tuple>;
+    using BDataType       = std::tuple_element_t<3, Tuple>;
+    using ComputeDataType = std::tuple_element_t<4, Tuple>;
+    using CDataType       = std::tuple_element_t<5, Tuple>;

     public:
     static constexpr bool verify_ = true;
@@ -66,6 +67,7 @@ class TestGemmUniversal : public testing::Test
     {
         bool pass = ck::profiler::profile_gemm_universal_impl<ADataType,
                                                               BDataType,
+                                                              ComputeDataType,
                                                               F32,
                                                               CDataType,
                                                               ALayout,
......
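Reviewer note: the hunks above thread a new `ComputeDataType` through `TestGemmUniversal`, decoupling the type the kernel computes in from the A/B storage types (e.g. f8 operands computed in f16, accumulated in F32). A minimal host-side sketch of the idea, using plain built-in types as stand-ins for CK's `f8_t`/`half_t`, not CK's device implementation:

```cpp
#include <cstddef>

// Reference GEMM with separate storage, compute, and accumulation types.
// ComputeDataType sets the precision of the multiply; AccDataType (F32 in
// the test above) sets the precision of the running sum.
template <typename ADataType,
          typename BDataType,
          typename ComputeDataType,
          typename AccDataType,
          typename CDataType>
void naive_gemm_row_major(const ADataType* a,
                          const BDataType* b,
                          CDataType* c,
                          std::size_t M,
                          std::size_t N,
                          std::size_t K)
{
    for(std::size_t m = 0; m < M; ++m)
        for(std::size_t n = 0; n < N; ++n)
        {
            AccDataType acc{0};
            for(std::size_t k = 0; k < K; ++k)
            {
                // Convert both operands to ComputeDataType before multiplying,
                // mirroring a kernel that stores f8 but computes in f16.
                acc += AccDataType(ComputeDataType(a[m * K + k]) *
                                   ComputeDataType(b[k * N + n]));
            }
            c[m * N + n] = CDataType(acc);
        }
}
```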
 // SPDX-License-Identifier: MIT
-// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2023-2024, Advanced Micro Devices, Inc. All rights reserved.

 #include <tuple>
@@ -41,16 +41,24 @@ class TestGemmUniversal_MK_NK
 };

 // clang-format off
-using KernelTypes = ::testing::Types<
-    //         ADataType, BDataType, CDataType
-    std::tuple<      F16,       F16,       F16>,
-    std::tuple<      F16,        F8,       F16>,
-    std::tuple<       F8,       F16,       F16>,
-    std::tuple<     BF16,      BF16,      BF16>
+using KernelTypes_MK_KN = ::testing::Types<
+    //         ADataType, BDataType, ComputeDataType, CDataType
+    std::tuple<      F16,       F16,             F16,       F16>,
+    std::tuple<      F16,        F8,             F16,       F16>,
+    std::tuple<       F8,       F16,             F16,       F16>,
+    std::tuple<     BF16,      BF16,            BF16,      BF16>
     >;
+using KernelTypes_MK_NK = ::testing::Types<
+    //         ADataType, BDataType, ComputeDataType, CDataType
+    std::tuple<      F16,       F16,             F16,       F16>,
+    std::tuple<      F16,        F8,             F16,       F16>,
+    std::tuple<       F8,       F16,             F16,       F16>,
+    std::tuple<     BF16,      BF16,            BF16,      BF16>,
+    std::tuple<       F8,        F8,              F8,      BF16>
+    >;
 // clang-format on

-TYPED_TEST_SUITE(TestGemmUniversal_MK_KN, KernelTypes);
-TYPED_TEST_SUITE(TestGemmUniversal_MK_NK, KernelTypes);
+TYPED_TEST_SUITE(TestGemmUniversal_MK_KN, KernelTypes_MK_KN);
+TYPED_TEST_SUITE(TestGemmUniversal_MK_NK, KernelTypes_MK_NK);

 #include "test_gemm_universal_ut_cases.inc"
@@ -17,6 +17,7 @@ class TestGroupedConvndFwd : public ::testing::Test
     using InLayout  = std::tuple_element_t<1, Tuple>;
     using WeiLayout = std::tuple_element_t<2, Tuple>;
     using OutLayout = std::tuple_element_t<3, Tuple>;
+    using IndexType = std::tuple_element_t<4, Tuple>;

     std::vector<ck::utils::conv::ConvParam> conv_params;
@@ -33,7 +34,10 @@ class TestGroupedConvndFwd : public ::testing::Test
                                                              OutLayout,
                                                              DataType,
                                                              DataType,
-                                                             DataType>(
+                                                             DataType,
+                                                             DataType,
+                                                             DataType,
+                                                             IndexType>(
             true,  // do_verification
             1,     // init_method: integer value
             false, // do_log
@@ -46,30 +50,31 @@ class TestGroupedConvndFwd : public ::testing::Test
 using namespace ck::tensor_layout::convolution;

-using KernelTypes1d = ::testing::Types<std::tuple<float, GNWC, GKXC, GNWK>,
-                                       std::tuple<ck::half_t, GNWC, GKXC, GNWK>,
-                                       std::tuple<ck::bhalf_t, GNWC, GKXC, GNWK>,
-                                       std::tuple<int8_t, GNWC, GKXC, GNWK>>;
-using KernelTypes2d = ::testing::Types<std::tuple<float, GNHWC, GKYXC, GNHWK>,
-                                       std::tuple<ck::half_t, GNHWC, GKYXC, GNHWK>,
-                                       std::tuple<ck::bhalf_t, GNHWC, GKYXC, GNHWK>,
-                                       std::tuple<int8_t, GNHWC, GKYXC, GNHWK>,
-                                       std::tuple<float, NHWGC, GKYXC, NHWGK>,
-                                       std::tuple<ck::half_t, NHWGC, GKYXC, NHWGK>,
-                                       std::tuple<ck::bhalf_t, NHWGC, GKYXC, NHWGK>,
-                                       std::tuple<int8_t, NHWGC, GKYXC, NHWGK>>;
-using KernelTypes3d = ::testing::Types<std::tuple<float, GNDHWC, GKZYXC, GNDHWK>,
-                                       std::tuple<ck::half_t, GNDHWC, GKZYXC, GNDHWK>,
-                                       std::tuple<ck::bhalf_t, GNDHWC, GKZYXC, GNDHWK>,
-                                       std::tuple<int8_t, GNDHWC, GKZYXC, GNDHWK>,
-                                       std::tuple<float, NDHWGC, GKZYXC, NDHWGK>,
-                                       std::tuple<ck::half_t, NDHWGC, GKZYXC, NDHWGK>,
-                                       std::tuple<ck::bhalf_t, NDHWGC, GKZYXC, NDHWGK>,
-                                       std::tuple<int8_t, NDHWGC, GKZYXC, NDHWGK>>;
-using KernelTypes2dLargeCases = ::testing::Types<std::tuple<float, NHWGC, GKYXC, NHWGK>>;
+using KernelTypes1d = ::testing::Types<std::tuple<float, GNWC, GKXC, GNWK, ck::index_t>,
+                                       std::tuple<ck::half_t, GNWC, GKXC, GNWK, ck::index_t>,
+                                       std::tuple<ck::bhalf_t, GNWC, GKXC, GNWK, ck::index_t>,
+                                       std::tuple<int8_t, GNWC, GKXC, GNWK, ck::index_t>>;
+using KernelTypes2d = ::testing::Types<std::tuple<float, GNHWC, GKYXC, GNHWK, ck::index_t>,
+                                       std::tuple<ck::half_t, GNHWC, GKYXC, GNHWK, ck::index_t>,
+                                       std::tuple<ck::bhalf_t, GNHWC, GKYXC, GNHWK, ck::index_t>,
+                                       std::tuple<int8_t, GNHWC, GKYXC, GNHWK, ck::index_t>,
+                                       std::tuple<float, NHWGC, GKYXC, NHWGK, ck::index_t>,
+                                       std::tuple<ck::half_t, NHWGC, GKYXC, NHWGK, ck::index_t>,
+                                       std::tuple<ck::bhalf_t, NHWGC, GKYXC, NHWGK, ck::index_t>,
+                                       std::tuple<int8_t, NHWGC, GKYXC, NHWGK, ck::index_t>>;
+using KernelTypes3d = ::testing::Types<std::tuple<float, GNDHWC, GKZYXC, GNDHWK, ck::index_t>,
+                                       std::tuple<ck::half_t, GNDHWC, GKZYXC, GNDHWK, ck::index_t>,
+                                       std::tuple<ck::bhalf_t, GNDHWC, GKZYXC, GNDHWK, ck::index_t>,
+                                       std::tuple<int8_t, GNDHWC, GKZYXC, GNDHWK, ck::index_t>,
+                                       std::tuple<float, NDHWGC, GKZYXC, NDHWGK, ck::index_t>,
+                                       std::tuple<ck::half_t, NDHWGC, GKZYXC, NDHWGK, ck::index_t>,
+                                       std::tuple<ck::bhalf_t, NDHWGC, GKZYXC, NDHWGK, ck::index_t>,
+                                       std::tuple<int8_t, NDHWGC, GKZYXC, NDHWGK, ck::index_t>>;
+using KernelTypes2dLargeCases =
+    ::testing::Types<std::tuple<float, NHWGC, GKYXC, NHWGK, ck::long_index_t>>;

 template <typename Tuple>
 class TestGroupedConvndFwd1d : public TestGroupedConvndFwd<Tuple>
@@ -153,5 +158,8 @@ TYPED_TEST(TestGroupedConvndFwd2dLargeCases, Test2DLargeCases)
     // With supported NumGroupsToMerge > 1
     this->conv_params.push_back(
         {2, 32, 64, 1, 1, {2, 2}, {672, 672}, {672, 672}, {1, 1}, {0, 0}, {0, 0}});
+    // When image is larger than 2GB
+    this->conv_params.push_back(
+        {2, 1, 1, 256, 256, {3, 3}, {4096, 2048}, {1024, 1024}, {3, 3}, {1, 1}, {1, 1}});
     this->template Run<2>();
 }
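Reviewer note: the new `IndexType` tuple slot exists because the added large case no longer fits a 32-bit signed index: assuming ConvParam's `(ndim, G, N, K, C, filter, input, strides, dilations, pads, pads)` ordering, the input tensor holds `1 * 1 * 256 * 4096 * 2048 = 2^31` elements. That case therefore uses `ck::long_index_t` while the ordinary cases keep `ck::index_t`. A quick check of the arithmetic:

```cpp
#include <cstdint>
#include <iostream>

int main()
{
    // Shapes from the new conv_params entry above (G, N, C, Hi, Wi).
    const std::int64_t G = 1, N = 1, C = 256, Hi = 4096, Wi = 2048;

    const std::int64_t input_elems = G * N * C * Hi * Wi; // 2147483648 == 2^31
    const bool fits_int32 = input_elems <= INT32_MAX;     // false: 2^31 > 2^31 - 1

    std::cout << "input elements:  " << input_elems << '\n'
              << "fits 32-bit idx: " << (fits_int32 ? "yes" : "no") << '\n'
              << "bytes as fp32:   " << input_elems * 4 << '\n'; // 8 GiB
    return 0;
}
```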
@@ -13,6 +13,7 @@
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "test/smfmac_op/smfmac_op_util.hpp"
#include "ck/host_utility/device_prop.hpp"
using BF16 = ck::bhalf_t;
using F16 = ck::half_t;
@@ -38,40 +39,43 @@ class TestSmfmac : public ::testing::Test
     void Run()
     {
-        bool pass = true;
-        constexpr auto matmul_default = ck::smfmac_op_util::matmul<Src1Type,
-                                                                   Src1VecSize,
-                                                                   Src2Type,
-                                                                   Src2VecSize,
-                                                                   GPUAccType,
-                                                                   AccVecSize,
-                                                                   DstType,
-                                                                   M,
-                                                                   N,
-                                                                   K>;
-
-        constexpr auto smfmac_kernel_container = std::make_tuple(matmul_default);
-
-        ck::static_for<0, std::tuple_size_v<decltype(smfmac_kernel_container)>, 1>{}([&](auto i) {
-            pass &= ck::smfmac_op_util::TestSmfmac<
-                std::tuple_element_t<i.value, decltype(smfmac_kernel_container)>,
-                Src1Type,
-                Src2Type,
-                DstType,
-                GPUAccType,
-                CPUAccType,
-                decltype(Row{}),
-                decltype(Row{}),
-                decltype(Row{}),
-                PassThrough,
-                PassThrough,
-                PassThrough,
-                AccVecSize,
-                M,
-                N,
-                K>{}(std::get<ck::Number<i>{}>(smfmac_kernel_container));
-        });
+        bool pass = true;
+        if(ck::get_device_name() == "gfx942")
+        {
+            constexpr auto matmul_default = ck::smfmac_op_util::matmul<Src1Type,
+                                                                       Src1VecSize,
+                                                                       Src2Type,
+                                                                       Src2VecSize,
+                                                                       GPUAccType,
+                                                                       AccVecSize,
+                                                                       DstType,
+                                                                       M,
+                                                                       N,
+                                                                       K>;
+
+            constexpr auto smfmac_kernel_container = std::make_tuple(matmul_default);
+
+            ck::static_for<0, std::tuple_size_v<decltype(smfmac_kernel_container)>, 1>{}(
+                [&](auto i) {
+                    pass &= ck::smfmac_op_util::TestSmfmac<
+                        std::tuple_element_t<i.value, decltype(smfmac_kernel_container)>,
+                        Src1Type,
+                        Src2Type,
+                        DstType,
+                        GPUAccType,
+                        CPUAccType,
+                        decltype(Row{}),
+                        decltype(Row{}),
+                        decltype(Row{}),
+                        PassThrough,
+                        PassThrough,
+                        PassThrough,
+                        AccVecSize,
+                        M,
+                        N,
+                        K>{}(std::get<ck::Number<i>{}>(smfmac_kernel_container));
+                });
+        }
+
         EXPECT_TRUE(pass);
     }
 };
......
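Reviewer note: the smfmac test body is now guarded by `ck::get_device_name() == "gfx942"` (hence the new `device_prop.hpp` include), since sparse MFMA instructions only exist on that target; on other devices the loop body is skipped and the test passes vacuously. A minimal sketch of the same gating pattern, with a stand-in for the CK helper:

```cpp
#include <gtest/gtest.h>
#include <string>

// Stand-in for ck::get_device_name() from "ck/host_utility/device_prop.hpp";
// the real helper queries the runtime for the current GPU's gfx architecture.
std::string get_device_name() { return "gfx942"; }

TEST(SmfmacGatingSketch, RunsOnlyOnGfx942)
{
    bool pass = true;
    if(get_device_name() == "gfx942")
    {
        // ... build and launch smfmac kernels here, folding results into pass ...
    }
    // Passes trivially on unsupported devices; GTEST_SKIP() would instead
    // report the case as skipped rather than passed.
    EXPECT_TRUE(pass);
}
```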