Commit cc6a534f authored by aska-0096

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/composable_kernel into navi3x_md_bgemm_conv_gemmsoftmaxgemm
parents 27dc055b cb3fac4d
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#include <memory>
#include <vector>
#include "include/ck/tensor_operation/gpu/device/device_base.hpp"
namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {
void add_gemm_wavelet_f16_tn_256x256(std::vector<std::unique_ptr<BaseOperator>>& instances);
void add_gemm_wavelet_f16_tn_256x128(std::vector<std::unique_ptr<BaseOperator>>& instances);
void add_gemm_wavelet_f16_tn_128x128(std::vector<std::unique_ptr<BaseOperator>>& instances);
void add_gemm_wavelet_f16_tn_128x64(std::vector<std::unique_ptr<BaseOperator>>& instances);
} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
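
For context: instance factories like these are normally consumed by a client that aggregates every instance into one vector and then iterates over it. Below is a minimal sketch of that pattern, assuming only the four declarations above; the collecting helper itself is hypothetical and not part of this commit.

#include <memory>
#include <vector>

#include "include/ck/tensor_operation/gpu/device/device_base.hpp"

// Hypothetical helper illustrating the usual registration pattern: each
// factory call appends its tuning instances to a caller-owned vector.
std::vector<std::unique_ptr<ck::tensor_operation::device::BaseOperator>>
collect_gemm_wavelet_f16_tn_instances()
{
    namespace inst = ck::tensor_operation::device::instance;

    std::vector<std::unique_ptr<ck::tensor_operation::device::BaseOperator>> instances;
    inst::add_gemm_wavelet_f16_tn_256x256(instances);
    inst::add_gemm_wavelet_f16_tn_256x128(instances);
    inst::add_gemm_wavelet_f16_tn_128x128(instances);
    inst::add_gemm_wavelet_f16_tn_128x64(instances);
    return instances;
}

Each tile shape (256x256 down to 128x64) is a separate tuning instance; a caller would typically probe each instance for whether it supports the problem at hand before dispatching.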
add_custom_target(test_gemm_layernorm)
add_gtest_executable(test_gemm_add_relu_add_layernorm_fp16 test_gemm_add_relu_add_layernorm_fp16.cpp)
target_link_libraries(test_gemm_add_relu_add_layernorm_fp16 PRIVATE utility device_gemm_add_relu_add_layernorm_instance)
add_dependencies(test_gemm_layernorm test_gemm_add_relu_add_layernorm_fp16)
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#include "gtest/gtest.h"
#include "profiler/profile_gemm_add_relu_add_layernorm_impl.hpp"
using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;
using F16 = ck::half_t;
using F32 = float;
using ck::index_t;
template <typename Tuple>
class TestGemmAddReluAddLayernorm : public ::testing::Test
{
    protected:
    using ADataType        = std::tuple_element_t<0, Tuple>;
    using BDataType        = std::tuple_element_t<1, Tuple>;
    using AccDataType      = std::tuple_element_t<2, Tuple>;
    using D0DataType       = std::tuple_element_t<3, Tuple>;
    using D1DataType       = std::tuple_element_t<4, Tuple>;
    using EMeanVarDataType = std::tuple_element_t<5, Tuple>;
    using GammaDataType    = std::tuple_element_t<6, Tuple>;
    using BetaDataType     = std::tuple_element_t<7, Tuple>;
    using HDataType        = std::tuple_element_t<8, Tuple>;
    using ALayout          = std::tuple_element_t<9, Tuple>;
    using BLayout          = std::tuple_element_t<10, Tuple>;
    using D0Layout         = std::tuple_element_t<11, Tuple>;
    using D1Layout         = std::tuple_element_t<12, Tuple>;
    using HLayout          = std::tuple_element_t<13, Tuple>;

    void Run()
    {
        // Problem sizes {M, N, K}; the {1, 1, 1} entry exercises the degenerate case.
        std::vector<std::vector<ck::index_t>> lengths = {
            {1024, 1024, 1024}, {2048, 640, 640}, {1, 1, 1}};

        for(auto length : lengths)
        {
            int M = length[0];
            int N = length[1];
            int K = length[2];

            // Leading dimension follows the layout: row-major strides by the row
            // length, column-major by the column length.
            int StrideA  = ck::is_same_v<ALayout, Row> ? K : M;
            int StrideB  = ck::is_same_v<BLayout, Row> ? N : K;
            int StrideD0 = 0; // stride 0 broadcasts D0 across rows
            int StrideD1 = ck::is_same_v<D1Layout, Row> ? N : M;
            int StrideH  = ck::is_same_v<HLayout, Row> ? N : M;

            bool success =
                ck::profiler::profile_gemm_add_relu_add_layernorm_impl<ADataType,
                                                                       BDataType,
                                                                       AccDataType,
                                                                       D0DataType,
                                                                       D1DataType,
                                                                       EMeanVarDataType,
                                                                       GammaDataType,
                                                                       BetaDataType,
                                                                       HDataType,
                                                                       ALayout,
                                                                       BLayout,
                                                                       D0Layout,
                                                                       D1Layout,
                                                                       HLayout>(
                    true, 1, false, false, M, N, K, StrideA, StrideB, StrideD0, StrideD1, StrideH);
            EXPECT_TRUE(success);
        }
    }
};

using KernelTypes = ::testing::Types<
    std::tuple<F16, F16, F32, F16, F16, F16, F16, F16, F16, Row, Row, Row, Row, Row>,
    std::tuple<F16, F16, F32, F16, F16, F16, F16, F16, F16, Row, Col, Row, Row, Row>,
    std::tuple<F16, F16, F32, F16, F16, F16, F16, F16, F16, Col, Row, Row, Row, Row>,
    std::tuple<F16, F16, F32, F16, F16, F16, F16, F16, F16, Col, Col, Row, Row, Row>>;
TYPED_TEST_SUITE(TestGemmAddReluAddLayernorm, KernelTypes);
TYPED_TEST(TestGemmAddReluAddLayernorm, Test_FP16) { this->Run(); }
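
For readers unfamiliar with the fused kernel, the name suggests H = LayerNorm(ReLU(A * B + D0) + D1), with D0 broadcast across rows via StrideD0 = 0. The following scalar reference is a sketch under that assumption; it is illustrative only, not the profiler's actual verification code, and the eps default is a placeholder.

#include <cmath>
#include <vector>

// Hypothetical scalar reference for gemm + add + relu + add + layernorm,
// all tensors row-major: A is MxK, B is KxN, D1 and H are MxN,
// D0/gamma/beta are length-N vectors.
void reference_gemm_add_relu_add_layernorm(const std::vector<float>& A,
                                           const std::vector<float>& B,
                                           const std::vector<float>& D0,
                                           const std::vector<float>& D1,
                                           const std::vector<float>& gamma,
                                           const std::vector<float>& beta,
                                           std::vector<float>& H,
                                           int M, int N, int K, float eps = 1e-5f)
{
    std::vector<float> E(N); // one row of the pre-normalization result
    for(int m = 0; m < M; ++m)
    {
        for(int n = 0; n < N; ++n)
        {
            float acc = 0.f;
            for(int k = 0; k < K; ++k)
                acc += A[m * K + k] * B[k * N + n];
            float e = acc + D0[n];       // first add: broadcast bias
            e       = e > 0.f ? e : 0.f; // ReLU
            E[n]    = e + D1[m * N + n]; // second add: residual
        }
        // row-wise layernorm into H
        float mean = 0.f, var = 0.f;
        for(int n = 0; n < N; ++n)
            mean += E[n];
        mean /= N;
        for(int n = 0; n < N; ++n)
            var += (E[n] - mean) * (E[n] - mean);
        var /= N;
        const float inv_std = 1.f / std::sqrt(var + eps);
        for(int n = 0; n < N; ++n)
            H[m * N + n] = gamma[n] * (E[n] - mean) * inv_std + beta[n];
    }
}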
@@ -2,6 +2,7 @@
 // Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
 #include <iostream>
+#include <random>
 #include "profiler/profile_grouped_gemm_impl.hpp"
@@ -18,7 +19,10 @@ using Col = ck::tensor_layout::gemm::ColumnMajor;
 template <typename ALayout, typename BLayout, typename CLayout>
 bool TestGroupedGemm()
 {
-    int group_count = rand() % 10 + 1;
+    std::mt19937 gen(19391);
+    std::uniform_int_distribution<> distrib(1, 10);
+    int group_count = distrib(gen);

     // GEMM shape
     std::vector<ck::tensor_operation::device::GemmDesc> gemm_descs;
@@ -29,9 +33,9 @@ bool TestGroupedGemm()
     for(int i = 0; i < group_count; i++)
     {
-        Ms.push_back(256 + 256 * (rand() % 10));
-        Ns.push_back(256 + 256 * (rand() % 10));
-        Ks.push_back(128 + 128 * (rand() % 10));
+        Ms.push_back(256 + 256 * distrib(gen));
+        Ns.push_back(256 + 256 * distrib(gen));
+        Ks.push_back(128 + 128 * distrib(gen));

         StrideAs.push_back(std::is_same<Row, ALayout>::value ? Ks[i] : Ms[i]);
         StrideBs.push_back(std::is_same<Row, BLayout>::value ? Ns[i] : Ks[i]);
...
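
The change above replaces unseeded rand() with a std::mt19937 fixed at seed 19391, so every run draws the same group count and the same GEMM shapes, making test failures reproducible. A standalone sketch of the resulting shape generation (the main wrapper is for illustration only):

#include <cstdio>
#include <random>

int main()
{
    std::mt19937 gen(19391); // fixed seed: identical shapes on every run
    std::uniform_int_distribution<> distrib(1, 10);

    const int group_count = distrib(gen); // 1..10 groups
    for(int i = 0; i < group_count; i++)
    {
        const int M = 256 + 256 * distrib(gen); // multiple of 256 in [512, 2816]
        const int N = 256 + 256 * distrib(gen);
        const int K = 128 + 128 * distrib(gen); // multiple of 128 in [256, 1408]
        std::printf("group %d: M=%d N=%d K=%d\n", i, M, N, K);
    }
    return 0;
}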
-add_custom_target(test_layernorm)
+add_custom_target(test_normalization)
 add_gtest_executable(test_layernorm2d_fp32 test_layernorm2d_fp32.cpp)
 add_gtest_executable(test_layernorm2d_fp16 test_layernorm2d_fp16.cpp)
 add_gtest_executable(test_groupnorm_fp16 test_groupnorm_fp16.cpp)
 add_gtest_executable(test_groupnorm_fp32 test_groupnorm_fp32.cpp)
 target_link_libraries(test_layernorm2d_fp32 PRIVATE utility device_normalization_instance)
 target_link_libraries(test_layernorm2d_fp16 PRIVATE utility device_normalization_instance)
 target_link_libraries(test_groupnorm_fp16 PRIVATE utility device_normalization_instance)
 target_link_libraries(test_groupnorm_fp32 PRIVATE utility device_normalization_instance)
-add_dependencies(test_layernorm test_layernorm2d_fp32)
-add_dependencies(test_layernorm test_layernorm2d_fp16)
-add_dependencies(test_layernorm test_groupnorm_fp16)
-add_dependencies(test_layernorm test_groupnorm_fp32)
+add_dependencies(test_normalization test_layernorm2d_fp32)
+add_dependencies(test_normalization test_layernorm2d_fp16)
+add_dependencies(test_normalization test_groupnorm_fp16)
+add_dependencies(test_normalization test_groupnorm_fp32)
@@ -12,11 +12,11 @@ template <typename Tuple>
 class TestGroupnorm : public ::testing::Test
 {
     protected:
     using XDataType = std::tuple_element_t<0, Tuple>;
     using GammaDataType = std::tuple_element_t<1, Tuple>;
     using BetaDataType = std::tuple_element_t<2, Tuple>;
-    using AccDataType = std::tuple_element_t<3, Tuple>;
+    using ComputeDataType = std::tuple_element_t<3, Tuple>;
     using YDataType = std::tuple_element_t<4, Tuple>;

     void Run()
     {
@@ -36,7 +36,7 @@ class TestGroupnorm : public ::testing::Test
         ck::profiler::profile_groupnorm_impl<XDataType,
                                              GammaDataType,
                                              BetaDataType,
-                                             AccDataType,
+                                             ComputeDataType,
                                              YDataType>(true, 2, false, false, length);
         EXPECT_TRUE(success);
     }
@@ -44,7 +44,7 @@ class TestGroupnorm : public ::testing::Test
 };

 using KernelTypes = ::testing::Types<
-    // XDataType, GammaDataType, BetaDataType, AccDataType, YDataType>
+    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType>
     std::tuple<F16, F16, F16, F32, F16>>;

 TYPED_TEST_SUITE(TestGroupnorm, KernelTypes);
...
@@ -12,11 +12,11 @@ template <typename Tuple>
 class TestGroupnorm : public ::testing::Test
 {
     protected:
     using XDataType = std::tuple_element_t<0, Tuple>;
     using GammaDataType = std::tuple_element_t<1, Tuple>;
     using BetaDataType = std::tuple_element_t<2, Tuple>;
-    using AccDataType = std::tuple_element_t<3, Tuple>;
+    using ComputeDataType = std::tuple_element_t<3, Tuple>;
     using YDataType = std::tuple_element_t<4, Tuple>;

     void Run()
     {
@@ -34,7 +34,7 @@ class TestGroupnorm : public ::testing::Test
         ck::profiler::profile_groupnorm_impl<XDataType,
                                              GammaDataType,
                                              BetaDataType,
-                                             AccDataType,
+                                             ComputeDataType,
                                              YDataType>(true, 2, false, false, length);
         EXPECT_TRUE(success);
     }
@@ -42,7 +42,7 @@ class TestGroupnorm : public ::testing::Test
 };

 using KernelTypes = ::testing::Types<
-    // XDataType, GammaDataType, BetaDataType, AccDataType, YDataType>
+    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType>
     std::tuple<F32, F32, F32, F32, F32>>;

 TYPED_TEST_SUITE(TestGroupnorm, KernelTypes);
...
@@ -12,11 +12,11 @@ template <typename Tuple>
 class TestLayernorm2d : public ::testing::Test
 {
     protected:
     using XDataType = std::tuple_element_t<0, Tuple>;
     using GammaDataType = std::tuple_element_t<1, Tuple>;
     using BetaDataType = std::tuple_element_t<2, Tuple>;
-    using AccDataType = std::tuple_element_t<3, Tuple>;
+    using ComputeDataType = std::tuple_element_t<3, Tuple>;
     using YDataType = std::tuple_element_t<4, Tuple>;

     void Run()
     {
@@ -29,7 +29,7 @@ class TestLayernorm2d : public ::testing::Test
         bool success = ck::profiler::profile_layernorm_impl<XDataType,
                                                             GammaDataType,
                                                             BetaDataType,
-                                                            AccDataType,
+                                                            ComputeDataType,
                                                             YDataType,
                                                             2>(true, 2, false, false, length);
         EXPECT_TRUE(success);
@@ -38,7 +38,7 @@ class TestLayernorm2d : public ::testing::Test
 };

 using KernelTypes = ::testing::Types<
-    // XDataType, GammaDataType, BetaDataType, AccDataType, YDataType>
+    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType>
     std::tuple<F16, F16, F16, F32, F16>>;

 TYPED_TEST_SUITE(TestLayernorm2d, KernelTypes);
...
@@ -12,11 +12,11 @@ template <typename Tuple>
 class TestLayernorm2d : public ::testing::Test
 {
     protected:
     using XDataType = std::tuple_element_t<0, Tuple>;
     using GammaDataType = std::tuple_element_t<1, Tuple>;
     using BetaDataType = std::tuple_element_t<2, Tuple>;
-    using AccDataType = std::tuple_element_t<3, Tuple>;
+    using ComputeDataType = std::tuple_element_t<3, Tuple>;
     using YDataType = std::tuple_element_t<4, Tuple>;

     void Run()
     {
@@ -29,7 +29,7 @@ class TestLayernorm2d : public ::testing::Test
         bool success = ck::profiler::profile_layernorm_impl<XDataType,
                                                             GammaDataType,
                                                             BetaDataType,
-                                                            AccDataType,
+                                                            ComputeDataType,
                                                             YDataType,
                                                             2>(true, 2, false, false, length);
         EXPECT_TRUE(success);
@@ -38,7 +38,7 @@ class TestLayernorm2d : public ::testing::Test
 };

 using KernelTypes = ::testing::Types<
-    // XDataType, GammaDataType, BetaDataType, AccDataType, YDataType>
+    // XDataType, GammaDataType, BetaDataType, ComputeDataType, YDataType>
     std::tuple<F32, F32, F32, F32, F32>>;

 TYPED_TEST_SUITE(TestLayernorm2d, KernelTypes);
...