"...git@developer.sourcefind.cn:OpenDAS/mmdetection3d.git" did not exist on "2eebdc2d7a61b10d06bcd87a49cf81f6e75c7ff8"
Unverified commit 4e6a5575 authored by Qianfeng, committed by GitHub
Browse files

BatchNorm forward instance/external api/profiler/tests/client example (#511)



* Update to device_batchnorm_forward base class to include all template parameters for problem description

* Add batchnorm forward instances and external api

* Add batchnorm forward profiler module which uses the external api

* Add some comments in batchnorm_forward example to explain the dimensions in lengths[]

* Replace the reference_batchnorm_forward_nhwc_c by generic reference_batchnorm_forward

* Improvement to the batchnorm infer base API

* Add batchnorm forward client example which shows using the batchnorm forward external API

* Add test for batchnorm forward

* Tuning the batchnorm profiler initialized values and error threshold

* Add support for bhalf_t in instances/external api/tests

* Add support for int8_t in instances/external api/tests

* Add support for double in instances/external api/tests

* Let ScaleDataType and BiasDataType be same as XDataType and YDataType when creating instances

* Checking before running best instance in batchnorm_fwd_nhwc client example

* Add checking for YElementwiseOp in batchnorm_forward external API

* Add more types in batchnorm forward profiler

* Add more test lengths
Co-authored-by: rocking5566 <ChunYu.Lai@amd.com>
parent 43a889b7
...@@ -24,6 +24,7 @@ int profile_softmax(int, char*[]); ...@@ -24,6 +24,7 @@ int profile_softmax(int, char*[]);
int profile_layernorm(int, char*[]); int profile_layernorm(int, char*[]);
int profile_groupnorm(int, char*[]); int profile_groupnorm(int, char*[]);
int profile_reduce(int, char*[]); int profile_reduce(int, char*[]);
int profile_batchnorm_forward(int, char*[]);
static void print_helper_message() static void print_helper_message()
{ {
...@@ -46,7 +47,8 @@ static void print_helper_message() ...@@ -46,7 +47,8 @@ static void print_helper_message()
" grouped_conv_fwd: Grouped Convolution Forward\n" " grouped_conv_fwd: Grouped Convolution Forward\n"
" grouped_conv_bwd_weight: Grouped Convolution Backward Weight\n" " grouped_conv_bwd_weight: Grouped Convolution Backward Weight\n"
" softmax: Softmax\n" " softmax: Softmax\n"
" reduce: Reduce\n"); " reduce: Reduce\n"
" bnorm_fwd: Batchnorm forward\n");
// clang-format on // clang-format on
} }
...@@ -142,6 +144,10 @@ int main(int argc, char* argv[]) ...@@ -142,6 +144,10 @@ int main(int argc, char* argv[])
{ {
return profile_groupnorm(argc, argv); return profile_groupnorm(argc, argv);
} }
else if(strcmp(argv[1], "bnorm_fwd") == 0)
{
return profile_batchnorm_forward(argc, argv);
}
else else
{ {
print_helper_message(); print_helper_message();
......
...@@ -53,3 +53,4 @@ add_subdirectory(softmax) ...@@ -53,3 +53,4 @@ add_subdirectory(softmax)
add_subdirectory(normalization) add_subdirectory(normalization)
add_subdirectory(data_type) add_subdirectory(data_type)
add_subdirectory(elementwise_normalization) add_subdirectory(elementwise_normalization)
add_subdirectory(batchnorm_fwd)
add_gtest_executable(test_batchnorm_fwd_rank_4 batchnorm_fwd_rank_4.cpp)
target_link_libraries(test_batchnorm_fwd_rank_4 PRIVATE utility device_batchnorm_instance)
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#include <cstdlib>
#include <iostream>
#include <initializer_list>
#include <vector>
#include <tuple>
#include <gtest/gtest.h>
#include "profiler/include/profile_batchnorm_forward_impl.hpp"
using F16 = ck::half_t;
using F32 = float;
using BF16 = ck::bhalf_t;
using I8 = int8_t;
using F64 = double;
template <typename Tuple>
class TestBatchNormFwdRank4 : public ::testing::Test
{
private:
const double epsilon = std::numeric_limits<float>::epsilon();
const double averageFactor = 0.1;
protected:
using XDataType = std::tuple_element_t<0, Tuple>;
using YDataType = std::tuple_element_t<1, Tuple>;
using AccDataType = std::tuple_element_t<2, Tuple>;
using ScaleDataType = std::tuple_element_t<3, Tuple>;
using BiasDataType = std::tuple_element_t<4, Tuple>;
using MeanVarDataType = std::tuple_element_t<5, Tuple>;
std::vector<std::vector<size_t>> list_of_lengths = {
{128, 16, 3, 1024}, {128, 16, 6, 512}, {1, 1, 1, 1}, {4, 4, 4, 4}, {32, 32, 32, 32}};
std::vector<int> reduceDims;
template <int NumReduceDim>
void Run()
{
for(auto& inOutLengths : list_of_lengths)
{
bool pass = true;
EXPECT_FALSE(reduceDims.size() != NumReduceDim);
pass =
pass && ck::profiler::profile_batchnorm_forward_impl<XDataType,
YDataType,
AccDataType,
ScaleDataType,
BiasDataType,
MeanVarDataType,
4,
NumReduceDim>(true,
3,
false,
false,
inOutLengths,
reduceDims,
true,
true,
epsilon,
averageFactor);
pass =
pass && ck::profiler::profile_batchnorm_forward_impl<XDataType,
YDataType,
AccDataType,
ScaleDataType,
BiasDataType,
MeanVarDataType,
4,
NumReduceDim>(true,
3,
false,
false,
inOutLengths,
reduceDims,
false,
false,
epsilon,
averageFactor);
EXPECT_TRUE(pass);
}
}
};
using KernelTypes = ::testing::Types<std::tuple<F16, F16, F32, F16, F16, F32>,
std::tuple<F32, F32, F32, F32, F32, F32>,
std::tuple<BF16, BF16, F32, BF16, BF16, F32>,
std::tuple<I8, I8, F32, I8, I8, F32>,
std::tuple<F64, F64, F64, F64, F64, F64>>;
TYPED_TEST_SUITE(TestBatchNormFwdRank4, KernelTypes);
// nhwc
TYPED_TEST(TestBatchNormFwdRank4, nhwc)
{
this->reduceDims = {0, 1, 2};
this->template Run<3>();
}
// nchw
TYPED_TEST(TestBatchNormFwdRank4, nchw)
{
this->reduceDims = {0, 2, 3};
this->template Run<3>();
}
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment