"script/profile_resnet50.sh" did not exist on "a2edd7d802b46737e886f0f42a4ee61af03243b7"
Unverified Commit abf4bdb9 authored by Adam Osewski, committed by GitHub

Common forward convolution utility refactor. (#141)



* Convolution ND

* Code unification across dimensions for generating tensor descriptors.
* Example
* Instances

* Move convnd f32 instance file to comply with repo structure.

* Conv 1D tensor layouts.

* Formatting and use ReferenceConv

* Reference ConvFwd supporting 1D and 2D convolution.

* Debug printing TensorLayout name.

* Conv fwd 1D instance f32

* Refactor conv ND example.

Needed to support various conv dimensions.

* Rename conv nd example directory to prevent conflicts.

* Refactor some common utility to single file.

Plus some tests.

* Refactor GetHostTensorDescriptor + UT.

* Add 1D test case.

* Test reference convolution 1d/2d

* Remove some leftovers.

* Fix convolution example error for 1D

* Refactor test check errors utility function.

* Test Conv2D Fwd XDL

* More UT for 1D case.

* Parameterize input & weight initializers.

* Rename example to prevent conflicts.

* Split convnd instance into separate files for 1d/2d

* Address review comments.

* Fix data type for flops/gbytes calculations.

* Assign example number 11.

* 3D cases for convolution utility functions.

* 3D reference convolution.

* Add support for 3D convolution.

* Check for inputs bigger than 2GB.

* Formatting

* Support for bf16/f16/f32/i8 - conv instances + UT.

* Use check_err from test_util.hpp.

* Split convnd test into separate files for each dim.

* Fix data generation and use proper instances.

* Formatting

* Skip tensor initialization if not necessary.

* Fix CMakefiles.

* Remove redundant conv2d_fwd test.

* Lower problem size for conv3D UT.

* 3D case for convnd example.

* Remove leftovers after merge.

* Add Conv Specialization string to GetTypeString

* Skip instance causing numerical errors.

* Small fixes.

* Remove redundant includes.

* Fix namespace name error.

* Script for automatic testing and logging convolution fwd UTs

* Comment out numactl cmd.

* Refine weights initialization and relax rtol for fp16

* Move test_util.hpp to check_err.hpp

* Refine weights initialization and relax rtol for fp16

* Refactor common part of test conv utils.

* Move utility function to single common place.

* Add additional common functions to utility.

* Refactor convnd_fwd_xdl examples.

* Remove redundant files.
* Unify structure.

* Add constructor to ConvParams.

* And add input parameters validation.

* Modify conv examples to use single utility file.

* Remove check_error from host_tensor.hpp

* Get rid of check_indices function.

* Remove bf16_to_f32 function overload for scalars.

* Fix namespace.

* Add half_float::half for check_err.

* Fix conv params size in UT.

* Fix weights initialization for int8.

* Fix weights initialization for int8.

* Add type_convert when store output in ref conv 1D.

* Get back old conv2d_fwd_xdl operation.

* Silence conv debug print.

* format

* clean

* clean

* Fix merge.

* Fix namespace for check_err

* Formatting.

* Fix merge artifacts.

* Remove deleted header.

* Fix some includes and use ck::utils::check_err.

* Remove unused check_indices restored by previous merge.

* Fix namespaces after merge.

* Fix compilation error.

* Small fixes.

* Use common functions.
* Fix filename
* Fix namespaces.

* Fix merge artifact - restore a function removed by accident.

* Fix ConvForwardSpecialization.

* Adhere to coding style rules.

* Fix merge artifacts.
Co-authored-by: Adam Osewski <aosewski@amd.com>
Co-authored-by: Chao Liu <chao.liu2@amd.com>
parent 6717168c
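
For orientation before the diff: every per-dimension, per-datatype test below follows one pattern with the refactored utilities, build a ConvParams, materialize host tensors for the chosen layouts, run the reference convolution, run the device op, and compare with ck::utils::check_err. A condensed sketch of that flow, assembled from the 2D hunks below (headers and default layouts taken from the diff, not guaranteed verbatim):

#include "conv_util.hpp"   // ck::utils::conv::{ConvParams, get_host_tensors, ...}
#include "check_err.hpp"   // ck::utils::check_err
#include "host_tensor.hpp" // Tensor<T>

bool smoke_test_conv2d_nhwc()
{
    ck::utils::conv::ConvParams params;
    params.num_dim_spatial       = 2;
    params.N                     = 2;
    params.K                     = 16;
    params.C                     = 4;
    params.input_spatial_lengths = std::vector<ck::index_t>{16, 16};
    params.conv_filter_strides   = std::vector<ck::index_t>{1, 1};

    // Tuple of (input, weights, host_output, device_output); layouts default
    // to NHWC/KYXC/NHWK as in the 2D tests below.
    auto host_tensors            = ck::utils::conv::get_host_tensors(params);
    const Tensor<float>& input   = std::get<0>(host_tensors);
    const Tensor<float>& weights = std::get<1>(host_tensors);
    Tensor<float>& host_output   = std::get<2>(host_tensors);
    Tensor<float>& device_output = std::get<3>(host_tensors);

    ck::utils::conv::run_reference_convolution_forward<2>(params, input, weights, host_output);
    test::conv::RunConv<2>(params, input, weights, device_output);
    return ck::utils::check_err(
        device_output.mData, host_output.mData, "Error: incorrect results!", 1e-5f, 1e-4f);
}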
......@@ -5,10 +5,11 @@
#include "data_type.hpp"
#include "element_wise_operation.hpp"
#include "conv_test_util.hpp"
#include "conv_fwd_util.hpp"
#include "conv_util.hpp"
#include "host_tensor.hpp"
#include "tensor_layout.hpp"
#include "test_util.hpp"
#include "check_err.hpp"
// Forward declarations for conv instances.
......@@ -34,10 +35,10 @@ void add_device_conv1d_fwd_xdl_nwc_kxc_nwk_int8_instances(std::vector<DeviceConv
namespace {
bool TestConv1DNWC()
bool test_conv1d_nwc()
{
bool res{true};
ck::conv_util::ConvParams params;
ck::utils::conv::ConvParams params;
params.num_dim_spatial = 1;
params.N = 2;
params.K = 16;
......@@ -49,30 +50,31 @@ bool TestConv1DNWC()
params.input_left_pads = std::vector<ck::index_t>{1};
params.input_right_pads = std::vector<ck::index_t>{1};
auto host_tensors = test::conv::GetHostTensors<float,
float,
float,
ck::tensor_layout::convolution::NWC,
ck::tensor_layout::convolution::KXC,
ck::tensor_layout::convolution::NWK>(params);
auto host_tensors =
ck::utils::conv::get_host_tensors<float,
float,
float,
ck::tensor_layout::convolution::NWC,
ck::tensor_layout::convolution::KXC,
ck::tensor_layout::convolution::NWK>(params);
const Tensor<float>& input = std::get<0>(host_tensors);
const Tensor<float>& weights = std::get<1>(host_tensors);
Tensor<float>& host_output = std::get<2>(host_tensors);
Tensor<float>& device_output = std::get<3>(host_tensors);
test::conv::RunReferenceConv<1>(params, input, weights, host_output);
ck::utils::conv::run_reference_convolution_forward<1>(params, input, weights, host_output);
test::conv::RunConv<1>(params, input, weights, device_output);
res = res &&
test::check_err(
ck::utils::check_err(
device_output.mData, host_output.mData, "Error: incorrect results!", 1e-5f, 1e-4f);
return res;
}
template <typename T>
bool TestConv1DNWCInstances(const std::vector<DeviceConvFwdNoOpPtr>& conv_ptrs)
bool test_conv1d_nwc_instances(const std::vector<DeviceConvFwdNoOpPtr>& conv_ptrs)
{
ck::conv_util::ConvParams params;
ck::utils::conv::ConvParams params;
params.num_dim_spatial = 1;
params.filter_spatial_lengths = std::vector<ck::index_t>{3};
params.input_spatial_lengths = std::vector<ck::index_t>{71};
......@@ -81,51 +83,52 @@ bool TestConv1DNWCInstances(const std::vector<DeviceConvFwdNoOpPtr>& conv_ptrs)
params.input_left_pads = std::vector<ck::index_t>{1};
params.input_right_pads = std::vector<ck::index_t>{1};
auto host_tensors = test::conv::GetHostTensors<T,
T,
T,
ck::tensor_layout::convolution::NWC,
ck::tensor_layout::convolution::KXC,
ck::tensor_layout::convolution::NWK>(params);
auto host_tensors =
ck::utils::conv::get_host_tensors<T,
T,
T,
ck::tensor_layout::convolution::NWC,
ck::tensor_layout::convolution::KXC,
ck::tensor_layout::convolution::NWK>(params);
const Tensor<T>& input = std::get<0>(host_tensors);
const Tensor<T>& weights = std::get<1>(host_tensors);
Tensor<T>& host_output = std::get<2>(host_tensors);
Tensor<T>& device_output = std::get<3>(host_tensors);
test::conv::RunReferenceConv<1>(params, input, weights, host_output);
return test::conv::RunConvInstances<1>(
ck::utils::conv::run_reference_convolution_forward<1>(params, input, weights, host_output);
return ck::utils::conv::run_convolution_forward_instances<1>(
params, conv_ptrs, input, weights, device_output, host_output);
}
bool TestConv1DNWCBF16Instances()
bool test_conv1d_nwc_bf16_instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv1d_fwd_instance::
add_device_conv1d_fwd_xdl_nwc_kxc_nwk_bf16_instances(conv_ptrs);
return TestConv1DNWCInstances<ck::bhalf_t>(conv_ptrs);
return test_conv1d_nwc_instances<ck::bhalf_t>(conv_ptrs);
}
bool TestConv1DNWCF16Instances()
bool test_conv1d_nwc_f16_instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv1d_fwd_instance::
add_device_conv1d_fwd_xdl_nwc_kxc_nwk_f16_instances(conv_ptrs);
return TestConv1DNWCInstances<ck::half_t>(conv_ptrs);
return test_conv1d_nwc_instances<ck::half_t>(conv_ptrs);
}
bool TestConv1DNWCF32Instances()
bool test_conv1d_nwc_f32_instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv1d_fwd_instance::
add_device_conv1d_fwd_xdl_nwc_kxc_nwk_f32_instances(conv_ptrs);
return TestConv1DNWCInstances<float>(conv_ptrs);
return test_conv1d_nwc_instances<float>(conv_ptrs);
}
bool TestConv1DNWCInt8Instances()
bool test_conv1d_nwc_int8_instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv1d_fwd_instance::
add_device_conv1d_fwd_xdl_nwc_kxc_nwk_int8_instances(conv_ptrs);
return TestConv1DNWCInstances<int8_t>(conv_ptrs);
return test_conv1d_nwc_instances<int8_t>(conv_ptrs);
}
} // anonymous namespace
......@@ -133,18 +136,20 @@ bool TestConv1DNWCInt8Instances()
int main()
{
bool res{true};
res = TestConv1DNWC();
std::cout << "TestConv1DNWC ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = test_conv1d_nwc();
std::cout << "test_conv1d_nwc ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = TestConv1DNWCBF16Instances();
res = test_conv1d_nwc_bf16_instances();
std::cout << "\nTestConv1DNWCBF16Instances ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
res = TestConv1DNWCF16Instances();
std::cout << "\nTestConv1DNWCF16Instances ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = TestConv1DNWCF32Instances();
std::cout << "\nTestConv1DNWCF32Instances ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = TestConv1DNWCInt8Instances();
std::cout << "\nTestConv1DNWCInt8Instances ..... " << (res ? "SUCCESS" : "FAILURE")
res = test_conv1d_nwc_f16_instances();
std::cout << "\ntest_conv1d_nwc_f16_instances ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
res = test_conv1d_nwc_f32_instances();
std::cout << "\ntest_conv1d_nwc_f32_instances ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
res = test_conv1d_nwc_int8_instances();
std::cout << "\ntes_tconv1_dnw_cint_8instances ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
return res ? 0 : 1;
......
......@@ -6,10 +6,11 @@
#include "data_type.hpp"
#include "element_wise_operation.hpp"
#include "conv_test_util.hpp"
#include "conv_fwd_util.hpp"
#include "conv_util.hpp"
#include "host_tensor.hpp"
#include "tensor_layout.hpp"
#include "test_util.hpp"
#include "check_err.hpp"
// Forward declarations for conv instances.
using DeviceConvFwdNoOpPtr =
......@@ -36,35 +37,35 @@ void add_device_conv2d_fwd_xdl_nhwc_kyxc_nhwk_int8_instances(std::vector<DeviceC
namespace {
bool TestConv2DNHWC()
bool test_conv2d_nhwc()
{
bool res{true};
ck::conv_util::ConvParams params;
ck::utils::conv::ConvParams params;
params.N = 2;
params.K = 16;
params.C = 4;
params.input_spatial_lengths = std::vector<ck::index_t>{16, 16};
params.conv_filter_strides = std::vector<ck::index_t>{1, 1};
auto host_tensors = test::conv::GetHostTensors(params);
auto host_tensors = ck::utils::conv::get_host_tensors(params);
const Tensor<float>& input = std::get<0>(host_tensors);
const Tensor<float>& weights = std::get<1>(host_tensors);
Tensor<float>& host_output = std::get<2>(host_tensors);
Tensor<float>& device_output = std::get<3>(host_tensors);
test::conv::RunReferenceConv<2>(params, input, weights, host_output);
ck::utils::conv::run_reference_convolution_forward<2>(params, input, weights, host_output);
test::conv::RunConv<2>(params, input, weights, device_output);
res = res &&
test::check_err(
ck::utils::check_err(
device_output.mData, host_output.mData, "Error: incorrect results!", 1e-5f, 1e-4f);
return res;
}
template <typename T>
bool TestConv2DNHWCInstances(const std::vector<DeviceConvFwdNoOpPtr>& conv_ptrs)
bool test_conv2d_nhwc_instances(const std::vector<DeviceConvFwdNoOpPtr>& conv_ptrs)
{
ck::conv_util::ConvParams params;
ck::utils::conv::ConvParams params;
params.num_dim_spatial = 2;
params.filter_spatial_lengths = std::vector<ck::index_t>{3, 3};
params.input_spatial_lengths = std::vector<ck::index_t>{71, 71};
......@@ -73,54 +74,55 @@ bool TestConv2DNHWCInstances(const std::vector<DeviceConvFwdNoOpPtr>& conv_ptrs)
params.input_left_pads = std::vector<ck::index_t>{1, 1};
params.input_right_pads = std::vector<ck::index_t>{1, 1};
auto host_tensors = test::conv::GetHostTensors<T,
T,
T,
ck::tensor_layout::convolution::NHWC,
ck::tensor_layout::convolution::KYXC,
ck::tensor_layout::convolution::NHWK>(params);
auto host_tensors =
ck::utils::conv::get_host_tensors<T,
T,
T,
ck::tensor_layout::convolution::NHWC,
ck::tensor_layout::convolution::KYXC,
ck::tensor_layout::convolution::NHWK>(params);
const Tensor<T>& input = std::get<0>(host_tensors);
const Tensor<T>& weights = std::get<1>(host_tensors);
Tensor<T>& host_output = std::get<2>(host_tensors);
Tensor<T>& device_output = std::get<3>(host_tensors);
test::conv::RunReferenceConv<2>(params, input, weights, host_output);
return test::conv::RunConvInstances<2>(
ck::utils::conv::run_reference_convolution_forward<2>(params, input, weights, host_output);
return ck::utils::conv::run_convolution_forward_instances<2>(
params, conv_ptrs, input, weights, device_output, host_output);
}
bool TestConv2DNHWCBF16Instances()
bool test_conv2d_nhwc_bf16_instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv2d_fwd_instance::
add_device_conv2d_fwd_xdl_nhwc_kyxc_nhwk_bf16_instances(conv_ptrs);
return TestConv2DNHWCInstances<ck::bhalf_t>(conv_ptrs);
return test_conv2d_nhwc_instances<ck::bhalf_t>(conv_ptrs);
}
bool TestConv2DNHWCF16Instances()
bool test_conv2d_nhwc_f16_instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv2d_fwd_instance::
add_device_conv2d_fwd_xdl_nhwc_kyxc_nhwk_f16_instances(conv_ptrs);
ck::tensor_operation::device::device_conv2d_fwd_instance::
add_device_conv2d_fwd_xdl_c_shuffle_nhwc_kyxc_nhwk_f16_instances(conv_ptrs);
return TestConv2DNHWCInstances<ck::half_t>(conv_ptrs);
return test_conv2d_nhwc_instances<ck::half_t>(conv_ptrs);
}
bool TestConv2DNHWCF32Instances()
bool test_conv2d_nhwc_f32_instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv2d_fwd_instance::
add_device_conv2d_fwd_xdl_nhwc_kyxc_nhwk_f32_instances(conv_ptrs);
return TestConv2DNHWCInstances<float>(conv_ptrs);
return test_conv2d_nhwc_instances<float>(conv_ptrs);
}
bool TestConv2DNHWCInt8Instances()
bool test_conv2d_nhwc_int8_instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv2d_fwd_instance::
add_device_conv2d_fwd_xdl_nhwc_kyxc_nhwk_int8_instances(conv_ptrs);
return TestConv2DNHWCInstances<int8_t>(conv_ptrs);
return test_conv2d_nhwc_instances<int8_t>(conv_ptrs);
}
} // anonymous namespace
......@@ -128,19 +130,20 @@ bool TestConv2DNHWCInt8Instances()
int main()
{
bool res{true};
res = TestConv2DNHWC();
std::cout << "TestConv2DNHWC ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = test_conv2d_nhwc();
std::cout << "test_conv2d_nhwc ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = TestConv2DNHWCBF16Instances();
std::cout << "\nTestConv2DNHWCBF16Instances ..... " << (res ? "SUCCESS" : "FAILURE")
res = test_conv2d_nhwc_bf16_instances();
std::cout << "\ntest_conv2d_nhwc_bf16_instances ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
res = TestConv2DNHWCF16Instances();
std::cout << "\nTestConv2DNHWCF16Instances ....." << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = TestConv2DNHWCF32Instances();
std::cout << "\nTestConv2DNHWCF32Instances ..... " << (res ? "SUCCESS" : "FAILURE")
res = test_conv2d_nhwc_f16_instances();
std::cout << "\ntest_conv2d_nhwc_f16_instances ....." << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
res = TestConv2DNHWCInt8Instances();
std::cout << "\nTestConv2DNHWCInt8Instances ..... " << (res ? "SUCCESS" : "FAILURE")
res = test_conv2d_nhwc_f32_instances();
std::cout << "\ntest_conv2d_nhwc_f32_instances ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
res = test_conv2d_nhwc_int8_instances();
std::cout << "\ntest_conv2d_nhwc_int8_instances ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
return res ? 0 : 1;
......
......@@ -6,10 +6,11 @@
#include "data_type.hpp"
#include "element_wise_operation.hpp"
#include "conv_test_util.hpp"
#include "conv_fwd_util.hpp"
#include "conv_util.hpp"
#include "host_tensor.hpp"
#include "tensor_layout.hpp"
#include "test_util.hpp"
#include "check_err.hpp"
// Forward declarations for conv instances.
using DeviceConvFwdNoOpPtr =
......@@ -34,10 +35,10 @@ void add_device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk_int8_instances(std::vector<Devi
namespace {
bool TestConv3DNDHWC()
bool test_conv3d_ndhwc()
{
bool res{true};
ck::conv_util::ConvParams params;
ck::utils::conv::ConvParams params;
params.num_dim_spatial = 3;
params.N = 2;
params.K = 16;
......@@ -49,30 +50,31 @@ bool TestConv3DNDHWC()
params.input_left_pads = std::vector<ck::index_t>{1, 1, 1};
params.input_right_pads = std::vector<ck::index_t>{1, 1, 1};
auto host_tensors = test::conv::GetHostTensors<float,
float,
float,
ck::tensor_layout::convolution::NDHWC,
ck::tensor_layout::convolution::KZYXC,
ck::tensor_layout::convolution::NDHWK>(params);
auto host_tensors =
ck::utils::conv::get_host_tensors<float,
float,
float,
ck::tensor_layout::convolution::NDHWC,
ck::tensor_layout::convolution::KZYXC,
ck::tensor_layout::convolution::NDHWK>(params);
const Tensor<float>& input = std::get<0>(host_tensors);
const Tensor<float>& weights = std::get<1>(host_tensors);
Tensor<float>& host_output = std::get<2>(host_tensors);
Tensor<float>& device_output = std::get<3>(host_tensors);
test::conv::RunReferenceConv<3>(params, input, weights, host_output);
ck::utils::conv::run_reference_convolution_forward<3>(params, input, weights, host_output);
test::conv::RunConv<3>(params, input, weights, device_output);
res = res &&
test::check_err(
ck::utils::check_err(
device_output.mData, host_output.mData, "Error: incorrect results!", 1e-5f, 1e-4f);
return res;
}
bool TestConv3DNDHWC2GBInput()
bool test_conv3d_ndhwc_2gb_input()
{
// >2GB Input
ck::conv_util::ConvParams params;
ck::utils::conv::ConvParams params;
params.num_dim_spatial = 3;
params.N = 2;
params.K = 16;
......@@ -85,12 +87,12 @@ bool TestConv3DNDHWC2GBInput()
params.input_right_pads = std::vector<ck::index_t>{1, 1, 1};
auto host_tensors =
test::conv::GetHostTensors<float,
float,
float,
ck::tensor_layout::convolution::NDHWC,
ck::tensor_layout::convolution::KZYXC,
ck::tensor_layout::convolution::NDHWK>(params, false);
ck::utils::conv::get_host_tensors<float,
float,
float,
ck::tensor_layout::convolution::NDHWC,
ck::tensor_layout::convolution::KZYXC,
ck::tensor_layout::convolution::NDHWK>(params, false);
const Tensor<float>& input = std::get<0>(host_tensors);
const Tensor<float>& weights = std::get<1>(host_tensors);
Tensor<float>& device_output = std::get<3>(host_tensors);
......@@ -113,10 +115,10 @@ bool TestConv3DNDHWC2GBInput()
return false;
}
bool TestConv3DNDHWC2GBFilters()
bool test_conv3d_ndhwc_2gb_filters()
{
// >2GB Filters
ck::conv_util::ConvParams params;
ck::utils::conv::ConvParams params;
params.num_dim_spatial = 3;
params.N = 2;
params.K = 16;
......@@ -129,12 +131,12 @@ bool TestConv3DNDHWC2GBFilters()
params.input_right_pads = std::vector<ck::index_t>{1, 1, 1};
auto host_tensors =
test::conv::GetHostTensors<float,
float,
float,
ck::tensor_layout::convolution::NDHWC,
ck::tensor_layout::convolution::KZYXC,
ck::tensor_layout::convolution::NDHWK>(params, false);
ck::utils::conv::get_host_tensors<float,
float,
float,
ck::tensor_layout::convolution::NDHWC,
ck::tensor_layout::convolution::KZYXC,
ck::tensor_layout::convolution::NDHWK>(params, false);
const Tensor<float>& input = std::get<0>(host_tensors);
const Tensor<float>& weights = std::get<1>(host_tensors);
Tensor<float>& device_output = std::get<3>(host_tensors);
......@@ -157,10 +159,10 @@ bool TestConv3DNDHWC2GBFilters()
return false;
}
bool TestConv3DNDHWC2GBOutput()
bool test_conv3d_ndhwc_2gb_output()
{
// >2GB Output
ck::conv_util::ConvParams params;
ck::utils::conv::ConvParams params;
params.num_dim_spatial = 3;
params.N = 2;
params.K = 16;
......@@ -173,12 +175,12 @@ bool TestConv3DNDHWC2GBOutput()
params.input_right_pads = std::vector<ck::index_t>{2, 2, 2};
auto host_tensors =
test::conv::GetHostTensors<float,
float,
float,
ck::tensor_layout::convolution::NDHWC,
ck::tensor_layout::convolution::KZYXC,
ck::tensor_layout::convolution::NDHWK>(params, false);
ck::utils::conv::get_host_tensors<float,
float,
float,
ck::tensor_layout::convolution::NDHWC,
ck::tensor_layout::convolution::KZYXC,
ck::tensor_layout::convolution::NDHWK>(params, false);
const Tensor<float>& input = std::get<0>(host_tensors);
const Tensor<float>& weights = std::get<1>(host_tensors);
Tensor<float>& device_output = std::get<3>(host_tensors);
......@@ -202,9 +204,9 @@ bool TestConv3DNDHWC2GBOutput()
}
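
The three >2GB tests above pass when the device op rejects the oversized problem: RunConv throws once IsSupportedArgument fails, and the test treats that throw as success. The actual check lives inside the device op; a minimal sketch of the size guard being exercised, with a hypothetical helper name:

#include <cstddef>
#include <cstdint>
#include <limits>

// Hypothetical mirror of the rejection the >2GB tests expect: a buffer whose
// byte size exceeds the 32-bit signed range cannot be addressed by the kernel.
template <typename DataType>
bool buffer_fits_in_2gb(std::size_t element_space)
{
    const std::size_t bytes = sizeof(DataType) * element_space;
    return bytes <= static_cast<std::size_t>(std::numeric_limits<std::int32_t>::max());
}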
template <typename T>
bool TestConv3DNDHWCInstances(const std::vector<DeviceConvFwdNoOpPtr>& conv_ptrs)
bool test_conv3d_ndhwc_instances(const std::vector<DeviceConvFwdNoOpPtr>& conv_ptrs)
{
ck::conv_util::ConvParams params;
ck::utils::conv::ConvParams params;
params.N = 64;
params.num_dim_spatial = 3;
params.filter_spatial_lengths = std::vector<ck::index_t>{3, 3, 2};
......@@ -214,52 +216,53 @@ bool TestConv3DNDHWCInstances(const std::vector<DeviceConvFwdNoOpPtr>& conv_ptrs
params.input_left_pads = std::vector<ck::index_t>{1, 1, 1};
params.input_right_pads = std::vector<ck::index_t>{1, 1, 1};
auto host_tensors = test::conv::GetHostTensors<T,
T,
T,
ck::tensor_layout::convolution::NDHWC,
ck::tensor_layout::convolution::KZYXC,
ck::tensor_layout::convolution::NDHWK>(params);
auto host_tensors =
ck::utils::conv::get_host_tensors<T,
T,
T,
ck::tensor_layout::convolution::NDHWC,
ck::tensor_layout::convolution::KZYXC,
ck::tensor_layout::convolution::NDHWK>(params);
const Tensor<T>& input = std::get<0>(host_tensors);
const Tensor<T>& weights = std::get<1>(host_tensors);
Tensor<T>& host_output = std::get<2>(host_tensors);
Tensor<T>& device_output = std::get<3>(host_tensors);
test::conv::RunReferenceConv<3>(params, input, weights, host_output);
return test::conv::RunConvInstances<3>(
ck::utils::conv::run_reference_convolution_forward<3>(params, input, weights, host_output);
return ck::utils::conv::run_convolution_forward_instances<3>(
params, conv_ptrs, input, weights, device_output, host_output);
}
bool TestConv3DNDHWCBF16Instances()
bool test_conv3d_ndhwc_bf16_instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv3d_fwd_instance::
add_device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk_bf16_instances(conv_ptrs);
return TestConv3DNDHWCInstances<ck::bhalf_t>(conv_ptrs);
return test_conv3d_ndhwc_instances<ck::bhalf_t>(conv_ptrs);
}
bool TestConv3DNDHWCF16Instances()
bool test_conv3d_ndhwc_f16_instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv3d_fwd_instance::
add_device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk_f16_instances(conv_ptrs);
return TestConv3DNDHWCInstances<ck::half_t>(conv_ptrs);
return test_conv3d_ndhwc_instances<ck::half_t>(conv_ptrs);
}
bool TestConv3DNDHWCF32Instances()
bool test_conv3d_ndhwc_f32_instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv3d_fwd_instance::
add_device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk_f32_instances(conv_ptrs);
return TestConv3DNDHWCInstances<float>(conv_ptrs);
return test_conv3d_ndhwc_instances<float>(conv_ptrs);
}
bool TestConv3DNDHWCInt8Instances()
bool test_conv3d_ndhwc_int8_instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv3d_fwd_instance::
add_device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk_int8_instances(conv_ptrs);
return TestConv3DNDHWCInstances<int8_t>(conv_ptrs);
return test_conv3d_ndhwc_instances<int8_t>(conv_ptrs);
}
} // anonymous namespace
......@@ -267,27 +270,30 @@ bool TestConv3DNDHWCInt8Instances()
int main()
{
bool res{true};
res = TestConv3DNDHWC();
std::cout << "TestConv3DNDHWC ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = test_conv3d_ndhwc();
std::cout << "test_conv3d_ndhwc ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = TestConv3DNDHWC2GBInput();
std::cout << "\nTestConv3DNDHWC2GBInput ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = TestConv3DNDHWC2GBFilters();
std::cout << "\nTestConv3DNDHWC2GBFilters ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = TestConv3DNDHWC2GBOutput();
std::cout << "\nTestConv3DNDHWC2GBOutput ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = test_conv3d_ndhwc_2gb_input();
std::cout << "\ntest_conv3d_ndhwc_2gb_input ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
res = test_conv3d_ndhwc_2gb_filters();
std::cout << "\ntest_conv3d_ndhwc_2gb_filters ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
res = test_conv3d_ndhwc_2gb_output();
std::cout << "\ntest_conv3d_ndhwc_2gb_output ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
res = TestConv3DNDHWCBF16Instances();
std::cout << "\nTestConv3DNDHWCBF16Instances ..... " << (res ? "SUCCESS" : "FAILURE")
res = test_conv3d_ndhwc_bf16_instances();
std::cout << "\ntest_conv3d_ndhwc_bf16_instances ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
res = TestConv3DNDHWCF16Instances();
std::cout << "\nTestConv3DNDHWCF16Instances ..... " << (res ? "SUCCESS" : "FAILURE")
res = test_conv3d_ndhwc_f16_instances();
std::cout << "\ntest_conv3d_ndhwc_f16_instances ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
res = TestConv3DNDHWCF32Instances();
std::cout << "\nTestConv3DNDHWCF32Instances ..... " << (res ? "SUCCESS" : "FAILURE")
res = test_conv3d_ndhwc_f32_instances();
std::cout << "\ntest_conv3d_ndhwc_f32_instances ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
res = TestConv3DNDHWCInt8Instances();
std::cout << "\nTestConv3DNDHWCInt8Instances ..... " << (res ? "SUCCESS" : "FAILURE")
res = test_conv3d_ndhwc_int8_instances();
std::cout << "\ntest_conv3d_ndhw_cint_8instances ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
return res ? 0 : 1;
......
#ifndef TEST_CONV_UTIL_HPP
#define TEST_CONV_UTIL_HPP
#include <tuple>
#include "config.hpp"
#include "conv_fwd_util.hpp"
#include "device_convnd_fwd_xdl_nhwc_kyxc_nhwk.hpp"
#include "element_wise_operation.hpp"
#include "host_tensor.hpp"
#include "sequence.hpp"
namespace {
template <ck::index_t... Is>
using S = ck::Sequence<Is...>;
using InElementOp = ck::tensor_operation::element_wise::PassThrough;
using WeiElementOp = ck::tensor_operation::element_wise::PassThrough;
using OutElementOp = ck::tensor_operation::element_wise::PassThrough;
static constexpr auto ConvFwdDefault =
ck::tensor_operation::device::ConvolutionForwardSpecialization::Default;
template <ck::index_t SpatialDims, typename InDataType, typename WeiDataType, typename OutDataType>
using DeviceConvNDFwdInstance = ck::tensor_operation::device::
DeviceConvNDFwdXdl_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_N_Ho_Wo_K<
// clang-format off
InDataType, //
WeiDataType, //
OutDataType, //
InDataType, //
InElementOp, // Input Elementwise Operation
WeiElementOp, // Weights Elementwise Operation
OutElementOp, // Output Elementwise Operation
ConvFwdDefault, // ConvForwardSpecialization
SpatialDims, // SpatialDims
64, // BlockSize
16, // MPerBlock
16, // NPerBlock
4, // K0PerBlock
1, // K1
16, // MPerXDL
16, // NPerXDL
1, // MXdlPerWave
1, // NXdlPerWave
S<1, 16, 1>, // ABlockTransferThreadClusterLengths_K0_M_K1
S<1, 0, 2>, // ABlockTransferThreadClusterArrangeOrder
S<1, 0, 2>, // ABlockTransferSrcAccessOrder
2, // ABlockTransferSrcVectorDim
1, // ABlockTransferSrcScalarPerVector
1, // ABlockTransferDstScalarPerVector_K1
true, // ABlockLdsAddExtraM
S<1, 16, 1>, // BBlockTransferThreadClusterLengths_K0_N_K1
S<1, 0, 2>, // BBlockTransferThreadClusterArrangeOrder
S<1, 0, 2>, // BBlockTransferSrcAccessOrder
2, // BBlockTransferSrcVectorDim
1, // BBlockTransferSrcScalarPerVector
1, // BBlockTransferDstScalarPerVector_K1
true, // BBlockTransferAddExtraN
7, // CThreadTransferSrcDstVectorDim
1>; // CThreadTransferDstScalarPerVector
// clang-format on
} // namespace
namespace test {
namespace conv {
template <ck::index_t NDim,
typename InDataType = float,
typename WeiDataType = float,
typename OutDataType = float>
void RunConv(const ck::utils::conv::ConvParams& params,
const Tensor<InDataType>& input,
const Tensor<WeiDataType>& weights,
Tensor<OutDataType>& output)
{
ck::utils::conv::run_convolution_forward<NDim,
InDataType,
WeiDataType,
OutDataType,
DeviceConvNDFwdInstance>(
params, input, weights, output);
}
} // namespace conv
} // namespace test
#endif
......@@ -19,7 +19,6 @@
#include "element_wise_operation.hpp"
#include "reference_gemm.hpp"
#include "gemm_specialization.hpp"
#include "test_util.hpp"
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
......
......@@ -19,7 +19,6 @@
#include "element_wise_operation.hpp"
#include "reference_gemm.hpp"
#include "gemm_specialization.hpp"
#include "test_util.hpp"
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
......
......@@ -19,7 +19,6 @@
#include "element_wise_operation.hpp"
#include "reference_gemm.hpp"
#include "gemm_specialization.hpp"
#include "test_util.hpp"
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
......
#ifndef GEMM_UTILS_HPP
#define GEMM_UTILS_HPP
#include "check_err.hpp"
#include "config.hpp"
#include "device.hpp"
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "reference_gemm.hpp"
#include "tensor_layout.hpp"
#include "test_util.hpp"
namespace ck {
namespace gemm_util {
......@@ -202,20 +202,17 @@ struct TestGemm
bool res = false;
if(std::is_same<CDataType, float>::value)
{
res = test::check_err(c_device.mData, c_host.mData, "Error: incorrect results!");
res = ck::utils::check_err(c_device.mData, c_host.mData);
std::cout << (res ? "SUCCESS" : "FAILURE") << std::endl;
}
else if(std::is_same<CDataType, ck::half_t>::value)
{
res = test::check_err(c_device.mData, c_host.mData, "Error: incorrect results!");
res = ck::utils::check_err(c_device.mData, c_host.mData);
std::cout << (res ? "SUCCESS" : "FAILURE") << std::endl;
}
else if(std::is_same<CDataType, int8_t>::value)
{
res = test::check_err(c_device.mData, c_host.mData, "Error: incorrect results!");
res = ck::utils::check_err(c_device.mData, c_host.mData);
std::cout << (res ? "SUCCESS" : "FAILURE") << std::endl;
}
......@@ -330,9 +327,8 @@ struct TestGemmBF16
bf16_to_f32_(c_device_bf16, c_device_fp32);
// Assert
bool res = test::check_err(
bool res = ck::utils::check_err(
c_device_fp32.mData, c_host_fp32.mData, "Error: incorrect results!", 1e-2f, 1e-3f);
std::cout << (res ? "SUCCESS" : "FAILURE") << std::endl;
return res;
......
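
TestGemmBF16 widens both result tensors to fp32 (bf16_to_f32_) before comparing, since tolerances are easier to reason about in a single type. Bit-wise, bf16 is the upper half of an IEEE-754 binary32, so the per-element conversion is presumably equivalent to this sketch:

#include <cstdint>
#include <cstring>

// bf16 stores the top 16 bits of a binary32; widening is a 16-bit left shift.
inline float bf16_bits_to_f32(std::uint16_t bf16_bits)
{
    const std::uint32_t f32_bits = static_cast<std::uint32_t>(bf16_bits) << 16;
    float result;
    std::memcpy(&result, &f32_bits, sizeof(result)); // well-defined type pun
    return result;
}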
......@@ -4,6 +4,8 @@
#include <cstdlib>
#include <stdlib.h>
#include <half.hpp>
#include "check_err.hpp"
#include "config.hpp"
#include "print.hpp"
#include "device.hpp"
......@@ -15,7 +17,6 @@
#include "element_wise_operation.hpp"
#include "reference_gemm.hpp"
#include "gemm_specialization.hpp"
#include "test_util.hpp"
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
......@@ -46,24 +47,6 @@ using ALayout = ck::tensor_layout::gemm::RowMajor;
using BLayout = ck::tensor_layout::gemm::ColumnMajor;
using CLayout = ck::tensor_layout::gemm::RowMajor;
template <typename T>
static bool check_err(const Tensor<T>& ref, const Tensor<T>& result)
{
float max_diff = 1e-2;
for(int i = 0; i < ref.mData.size(); ++i)
{
float diff = std::abs(double(ref.mData[i]) - double(result.mData[i]));
if(max_diff < diff)
{
std::cout << double(ref.mData[i]) << "," << double(result.mData[i]) << std::endl;
return false;
}
}
return true;
}
bool TestGroupedGemm(DeviceGroupedGemmPtr_& groupedGemmPtr)
{
int group_count = rand() % 10 + 1;
......@@ -188,7 +171,7 @@ bool TestGroupedGemm(DeviceGroupedGemmPtr_& groupedGemmPtr)
ref_invoker.Run(ref_argument);
bool res = check_err(c_device_tensors[i], c_host_tensors[i]);
bool res = ck::utils::check_err(c_host_tensors[i].mData, c_device_tensors[i].mData);
std::cout << "group_id: " << i << (res ? " SUCCESS" : " FAILURE") << std::endl;
......
#ifndef TEST_CONV_UTIL_HPP
#define TEST_CONV_UTIL_HPP
#include <algorithm>
#include <cstdlib>
#include <numeric>
#include <random>
#include <stdexcept>
#include <tuple>
#include <type_traits>
#include <vector>
#include "config.hpp"
#include "conv_utils.hpp"
#include "device.hpp"
#include "device_tensor.hpp"
#include "device_convnd_fwd_xdl_nhwc_kyxc_nhwk.hpp"
#include "element_wise_operation.hpp"
#include "host_tensor.hpp"
#include "reference_conv_fwd.hpp"
#include "tensor_layout.hpp"
#include "test_util.hpp"
namespace {
template <ck::index_t... Is>
using S = ck::Sequence<Is...>;
using InElementOp = ck::tensor_operation::element_wise::PassThrough;
using WeiElementOp = ck::tensor_operation::element_wise::PassThrough;
using OutElementOp = ck::tensor_operation::element_wise::PassThrough;
static constexpr auto ConvFwdDefault =
ck::tensor_operation::device::ConvolutionForwardSpecialization::Default;
template <ck::index_t SpatialDims, typename InDataType, typename WeiDataType, typename OutDataType>
using DeviceConvNDFwdInstance = ck::tensor_operation::device::
DeviceConvNDFwdXdl_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_N_Ho_Wo_K<
// clang-format off
InDataType, //
WeiDataType, //
OutDataType, //
InDataType, //
InElementOp, // Input Elementwise Operation
WeiElementOp, // Weights Elementwise Operation
OutElementOp, // Output Elementwise Operation
ConvFwdDefault, // ConvForwardSpecialization
SpatialDims, // SpatialDims
64, // BlockSize
16, // MPerBlock
16, // NPerBlock
4, // K0PerBlock
1, // K1
16, // MPerXDL
16, // NPerXDL
1, // MXdlPerWave
1, // NXdlPerWave
S<1, 16, 1>, // ABlockTransferThreadClusterLengths_K0_M_K1
S<1, 0, 2>, // ABlockTransferThreadClusterArrangeOrder
S<1, 0, 2>, // ABlockTransferSrcAccessOrder
2, // ABlockTransferSrcVectorDim
1, // ABlockTransferSrcScalarPerVector
1, // ABlockTransferDstScalarPerVector_K1
true, // ABlockLdsAddExtraM
S<1, 16, 1>, // BBlockTransferThreadClusterLengths_K0_N_K1
S<1, 0, 2>, // BBlockTransferThreadClusterArrangeOrder
S<1, 0, 2>, // BBlockTransferSrcAccessOrder
2, // BBlockTransferSrcVectorDim
1, // BBlockTransferSrcScalarPerVector
1, // BBlockTransferDstScalarPerVector_K1
true, // BBlockTransferAddExtraN
7, // CThreadTransferSrcDstVectorDim
1>; // CThreadTransferDstScalarPerVector
// clang-format on
} // namespace
namespace test {
namespace conv {
using DeviceConvFwdNoOpPtr =
ck::tensor_operation::device::DeviceConvFwdPtr<ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough>;
template <typename InDataType = float,
typename WeiDataType = float,
typename OutDataType = float,
typename InLayout = ck::tensor_layout::convolution::NHWC,
typename WeiLayout = ck::tensor_layout::convolution::KYXC,
typename OutLayout = ck::tensor_layout::convolution::NHWK>
auto GetHostTensors(const ck::conv_util::ConvParams& params, bool init = true)
{
std::vector<std::size_t> input_dims{static_cast<std::size_t>(params.N),
static_cast<std::size_t>(params.C)};
input_dims.insert(std::end(input_dims),
std::begin(params.input_spatial_lengths),
std::end(params.input_spatial_lengths));
std::vector<std::size_t> filter_dims{static_cast<std::size_t>(params.K),
static_cast<std::size_t>(params.C)};
filter_dims.insert(std::end(filter_dims),
std::begin(params.filter_spatial_lengths),
std::end(params.filter_spatial_lengths));
const std::vector<ck::index_t>& output_spatial_lengths = params.GetOutputSpatialLengths();
std::vector<std::size_t> output_dims{static_cast<std::size_t>(params.N),
static_cast<std::size_t>(params.K)};
output_dims.insert(std::end(output_dims),
std::begin(output_spatial_lengths),
std::end(output_spatial_lengths));
Tensor<InDataType> input(ck::conv_util::GetHostTensorDescriptor(input_dims, InLayout{}));
Tensor<WeiDataType> weights(ck::conv_util::GetHostTensorDescriptor(filter_dims, WeiLayout{}));
Tensor<OutDataType> host_output(
ck::conv_util::GetHostTensorDescriptor(output_dims, OutLayout{}));
Tensor<OutDataType> device_output(
ck::conv_util::GetHostTensorDescriptor(output_dims, OutLayout{}));
if(init)
{
std::mt19937 gen(11939);
if constexpr(std::is_same<InDataType, uint8_t>::value)
{
std::uniform_int_distribution<> dis(-5, 5);
std::generate(
input.begin(), input.end(), [&dis, &gen]() { return InDataType(dis(gen)); });
std::generate(
weights.begin(), weights.end(), [&dis, &gen]() { return WeiDataType(dis(gen)); });
}
else
{
std::uniform_real_distribution<> dis(0.f, 1.f);
std::generate(
input.begin(), input.end(), [&dis, &gen]() { return InDataType(dis(gen)); });
std::generate(
weights.begin(), weights.end(), [&dis, &gen]() { return WeiDataType(dis(gen)); });
}
std::fill(host_output.begin(), host_output.end(), OutDataType(0.f));
std::fill(device_output.begin(), device_output.end(), OutDataType(0.f));
}
return std::make_tuple(input, weights, host_output, device_output);
}
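
GetHostTensors sizes the output via params.GetOutputSpatialLengths(); per spatial axis this is the standard forward-convolution arithmetic, sketched here (the in-tree method may validate its inputs differently):

// Presumed per-axis computation behind ConvParams::GetOutputSpatialLengths().
inline int output_spatial_length(
    int in_len, int filter_len, int stride, int dilation, int left_pad, int right_pad)
{
    const int effective_filter = dilation * (filter_len - 1) + 1;
    return (in_len + left_pad + right_pad - effective_filter) / stride + 1;
}

For example, a width-16 input with a 3-wide filter, stride 1, dilation 1 and pads 1/1 keeps its width: (16 + 2 - 3) / 1 + 1 = 16.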
template <ck::index_t NDim,
typename InDataType = float,
typename WeiDataType = float,
typename OutDataType = float>
void RunReferenceConv(const ck::conv_util::ConvParams& params,
const Tensor<InDataType>& input,
const Tensor<WeiDataType>& weights,
Tensor<OutDataType>& output)
{
auto ref_conv = ck::tensor_operation::host::ReferenceConvFwd<InDataType,
WeiDataType,
OutDataType,
InElementOp,
WeiElementOp,
OutElementOp,
NDim>();
auto ref_invoker = ref_conv.MakeInvoker();
auto ref_argument = ref_conv.MakeArgument(input,
weights,
output,
params.conv_filter_strides,
params.conv_filter_dilations,
params.input_left_pads,
params.input_right_pads,
InElementOp{},
WeiElementOp{},
OutElementOp{});
ref_invoker.Run(ref_argument);
}
template <ck::index_t NDim,
typename InDataType = float,
typename WeiDataType = float,
typename OutDataType = float>
void RunConv(const ck::conv_util::ConvParams& params,
const Tensor<InDataType>& input,
const Tensor<WeiDataType>& weights,
Tensor<OutDataType>& output)
{
DeviceMem in_device_buf(sizeof(InDataType) * input.mDesc.GetElementSpace());
DeviceMem wei_device_buf(sizeof(WeiDataType) * weights.mDesc.GetElementSpace());
DeviceMem out_device_buf(sizeof(OutDataType) * output.mDesc.GetElementSpace());
in_device_buf.ToDevice(input.mData.data());
wei_device_buf.ToDevice(weights.mData.data());
const std::vector<ck::index_t>& output_spatial_lengths = params.GetOutputSpatialLengths();
auto conv = DeviceConvNDFwdInstance<NDim, InDataType, WeiDataType, OutDataType>();
auto invoker = conv.MakeInvoker();
auto argument = conv.MakeArgument(static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
static_cast<WeiDataType*>(wei_device_buf.GetDeviceBuffer()),
static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
params.N,
params.K,
params.C,
params.input_spatial_lengths,
params.filter_spatial_lengths,
output_spatial_lengths,
params.conv_filter_strides,
params.conv_filter_dilations,
params.input_left_pads,
params.input_right_pads,
InElementOp{},
WeiElementOp{},
OutElementOp{});
if(!conv.IsSupportedArgument(argument))
{
throw std::runtime_error(
"Error! device_conv with the specified compilation parameters does "
"not support this Conv problem");
}
invoker.Run(argument);
out_device_buf.FromDevice(output.mData.data());
}
template <ck::index_t NDim,
typename InDataType = float,
typename WeiDataType = float,
typename OutDataType = float>
bool RunConvInstances(const ck::conv_util::ConvParams& params,
const std::vector<DeviceConvFwdNoOpPtr>& conv_ptrs,
const Tensor<InDataType>& input,
const Tensor<WeiDataType>& weights,
Tensor<OutDataType>& output,
const Tensor<OutDataType>& host_output)
{
DeviceMem in_device_buf(sizeof(InDataType) * input.mDesc.GetElementSpace());
DeviceMem wei_device_buf(sizeof(WeiDataType) * weights.mDesc.GetElementSpace());
DeviceMem out_device_buf(sizeof(OutDataType) * output.mDesc.GetElementSpace());
in_device_buf.ToDevice(input.mData.data());
wei_device_buf.ToDevice(weights.mData.data());
const std::vector<ck::index_t>& output_spatial_lengths = params.GetOutputSpatialLengths();
bool res{true};
for(auto& conv_ptr : conv_ptrs)
{
auto invoker = conv_ptr->MakeInvokerPointer();
auto argument = conv_ptr->MakeArgumentPointer(
static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
static_cast<WeiDataType*>(wei_device_buf.GetDeviceBuffer()),
static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
params.N,
params.K,
params.C,
params.input_spatial_lengths,
params.filter_spatial_lengths,
output_spatial_lengths,
params.conv_filter_strides,
params.conv_filter_dilations,
params.input_left_pads,
params.input_right_pads,
InElementOp{},
WeiElementOp{},
OutElementOp{});
if(conv_ptr->IsSupportedArgument(argument.get()))
{
float atol{1e-5f};
float rtol{1e-4f};
if constexpr(std::is_same_v<InDataType, ck::half_t>)
{
atol = 1e-4f;
rtol = 2.5e-3f;
}
invoker->Run(argument.get());
out_device_buf.FromDevice(output.mData.data());
res = res &&
test::check_err(
output.mData, host_output.mData, "Error: incorrect results!", atol, rtol);
hipGetErrorString(
hipMemset(out_device_buf.GetDeviceBuffer(), 0, out_device_buf.mMemSize));
}
}
return res;
}
} // namespace conv
} // namespace test
#endif
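
The dtype-dependent atol/rtol pairs above feed the comparison; check_err's exact criterion is not visible in this diff, but the usual mixed absolute/relative test is a reasonable mental model. A sketch, not the library's implementation:

#include <cmath>
#include <cstddef>
#include <vector>

// Pass when |out - ref| <= atol + rtol * |ref| for every element.
template <typename T>
bool approx_equal(const std::vector<T>& out,
                  const std::vector<T>& ref,
                  double atol,
                  double rtol)
{
    if(out.size() != ref.size())
        return false;
    for(std::size_t i = 0; i < out.size(); ++i)
    {
        const double diff =
            std::abs(static_cast<double>(out[i]) - static_cast<double>(ref[i]));
        if(diff > atol + rtol * std::abs(static_cast<double>(ref[i])))
            return false;
    }
    return true;
}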
......@@ -4,6 +4,8 @@
#include <cstdlib>
#include <stdlib.h>
#include <half.hpp>
#include "check_err.hpp"
#include "config.hpp"
#include "magic_division.hpp"
#include "device.hpp"
......@@ -54,29 +56,6 @@ __host__ void cpu_magic_number_division(uint32_t magic_multiplier,
}
}
template <typename T>
T check_error(const std::vector<T>& ref, const std::vector<T>& result)
{
T error = 0;
T max_diff = 0;
T ref_value = 0, result_value = 0;
for(std::size_t i = 0; i < ref.size(); ++i)
{
T diff = std::abs(ref[i] - result[i]);
error += diff;
if(max_diff < diff)
{
max_diff = diff;
ref_value = ref[i];
result_value = result[i];
}
}
return max_diff;
}
int main(int, char*[])
{
uint64_t num_divisor = 4096;
......@@ -135,9 +114,9 @@ int main(int, char*[])
naive_result_dev_buf.FromDevice(naive_result_host.data());
magic_result_dev_buf.FromDevice(magic_result_host.data());
int32_t max_diff = check_error(naive_result_host, magic_result_host);
bool res = ck::utils::check_err(magic_result_host, naive_result_host);
if(max_diff != 0)
if(!res)
{
pass = false;
continue;
......@@ -149,9 +128,9 @@ int main(int, char*[])
magic_result_host2.data(),
num_dividend);
max_diff = check_error(naive_result_host, magic_result_host2);
res = ck::utils::check_err(magic_result_host2, naive_result_host);
if(max_diff != 0)
if(!res)
{
pass = false;
continue;
......
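
For context on the test above: dividing by a fixed divisor can be replaced by a multiply and a shift. A self-contained host-side sketch of the round-up method (Hacker's Delight, ch. 10), restricted to 16-bit dividends so every intermediate fits in uint64_t; the kernel under test presumably uses a 32-bit analogue:

#include <cassert>
#include <cstdint>
#include <initializer_list>

struct MagicDivider
{
    std::uint64_t multiplier; // M = ceil(2^p / d)
    unsigned shift;           // p = 16 + ceil(log2(d))
};

MagicDivider make_magic(std::uint32_t d) // requires d >= 1
{
    unsigned ceil_log2 = 0;
    while((1u << ceil_log2) < d)
        ++ceil_log2;
    const unsigned p = 16 + ceil_log2;
    return {((1ull << p) + d - 1) / d, p};
}

int main()
{
    // n / d == (n * M) >> p holds for all 16-bit n with this choice of M, p.
    for(std::uint32_t d : {1u, 2u, 3u, 7u, 10u, 641u, 999u})
    {
        const MagicDivider md = make_magic(d);
        for(std::uint32_t n = 0; n < (1u << 16); ++n)
            assert((n * md.multiplier) >> md.shift == n / d);
    }
    return 0;
}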
#include "getopt.h"
#include "check_err.hpp"
#include "device_reduce_instance.hpp"
#include "reduction_enums.hpp"
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "host_reduction.hpp"
#include "test_util.hpp"
#include "reduce_util.hpp"
using namespace ck;
......@@ -289,13 +290,13 @@ bool test_reduce_no_index_impl(int init_method,
{
reduce_util::to_f32_vector(out, out_fp32);
reduce_util::to_f32_vector(out_ref, out_ref_fp32);
single_result = test::check_err(
single_result = ck::utils::check_err(
out_fp32.mData, out_ref_fp32.mData, "Error: incorrect data result!");
}
else
{
single_result =
test::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
ck::utils::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
};
if(!single_result)
......@@ -376,13 +377,13 @@ bool test_reduce_no_index_impl(int init_method,
{
reduce_util::to_f32_vector(out, out_fp32);
reduce_util::to_f32_vector(out_ref, out_ref_fp32);
single_result = test::check_err(
single_result = ck::utils::check_err(
out_fp32.mData, out_ref_fp32.mData, "Error: incorrect data result!");
}
else
{
single_result =
test::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
ck::utils::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
};
if(!single_result)
......
......@@ -4,7 +4,7 @@
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "host_reduction.hpp"
#include "test_util.hpp"
#include "check_err.hpp"
#include "reduce_util.hpp"
using namespace ck;
......@@ -273,21 +273,21 @@ bool test_reduce_with_index_impl(int init_method,
{
reduce_util::to_f32_vector(out, out_fp32);
reduce_util::to_f32_vector(out_ref, out_ref_fp32);
single_result = test::check_err(
single_result = ck::utils::check_err(
out_fp32.mData, out_ref_fp32.mData, "Error: incorrect data result!");
}
else
{
single_result =
test::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
ck::utils::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
};
if(NeedIndices)
{
out_indices_dev.FromDevice(out_indices.mData.data());
single_result = single_result && test::check_err(out_indices_ref.mData,
out_indices.mData,
"Error: incorrect index result!");
single_result = single_result && ck::utils::check_err(out_indices_ref.mData,
out_indices.mData,
"Error: incorrect index result!");
};
if(!single_result)
......@@ -370,21 +370,22 @@ bool test_reduce_with_index_impl(int init_method,
{
reduce_util::to_f32_vector(out, out_fp32);
reduce_util::to_f32_vector(out_ref, out_ref_fp32);
single_result = test::check_err(
single_result = ck::utils::check_err(
out_fp32.mData, out_ref_fp32.mData, "Error: incorrect data result!");
}
else
{
single_result =
test::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
ck::utils::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
};
if(NeedIndices)
{
out_indices_dev.FromDevice(out_indices.mData.data());
single_result = single_result && test::check_err(out_indices_ref.mData,
out_indices.mData,
"Error: incorrect index result!");
single_result =
single_result && ck::utils::check_err(out_indices_ref.mData,
out_indices.mData,
"Error: incorrect index result!");
};
if(!single_result)
......
......@@ -6,13 +6,13 @@
#include <type_traits>
#include <vector>
#include "check_err.hpp"
#include "config.hpp"
#include "conv_utils.hpp"
#include "conv_fwd_util.hpp"
#include "element_wise_operation.hpp"
#include "host_tensor.hpp"
#include "reference_conv_fwd.hpp"
#include "tensor_layout.hpp"
#include "test_util.hpp"
namespace {
using InElementOp = ck::tensor_operation::element_wise::PassThrough;
......@@ -57,9 +57,10 @@ template <ck::index_t NDim,
typename OutLayout = ck::tensor_layout::convolution::NHWK,
typename FillInputOp = FillMonotonicSeq<InDataType>,
typename FillWeightsOp = FillConstant<WeiDataType>>
Tensor<OutDataType> RunReferenceConv(const ck::conv_util::ConvParams& params,
const FillInputOp& fill_input_op = FillInputOp{},
const FillWeightsOp& fill_weights_op = FillWeightsOp{0.5f})
Tensor<OutDataType>
run_reference_convolution_forward(const ck::utils::conv::ConvParams& params,
const FillInputOp& fill_input_op = FillInputOp{},
const FillWeightsOp& fill_weights_op = FillWeightsOp{0.5f})
{
std::vector<std::size_t> input_dims{static_cast<std::size_t>(params.N),
static_cast<std::size_t>(params.C)};
......@@ -80,18 +81,16 @@ Tensor<OutDataType> RunReferenceConv(const ck::conv_util::ConvParams& params,
std::begin(output_spatial_lengths),
std::end(output_spatial_lengths));
Tensor<InDataType> input(ck::conv_util::GetHostTensorDescriptor(input_dims, InLayout{}));
Tensor<WeiDataType> weights(ck::conv_util::GetHostTensorDescriptor(filter_dims, WeiLayout{}));
Tensor<InDataType> input(ck::utils::conv::get_host_tensor_descriptor(input_dims, InLayout{}));
Tensor<WeiDataType> weights(
ck::utils::conv::get_host_tensor_descriptor(filter_dims, WeiLayout{}));
Tensor<OutDataType> host_output(
ck::conv_util::GetHostTensorDescriptor(output_dims, OutLayout{}));
ck::utils::conv::get_host_tensor_descriptor(output_dims, OutLayout{}));
fill_input_op(input.begin(), input.end());
fill_weights_op(weights.begin(), weights.end());
std::fill(host_output.begin(), host_output.end(), OutDataType(0.f));
// std::cout <<"input: " << input.mDesc << std::endl << input.mData << std::endl;
// std::cout <<"weight: " << weights.mDesc << std::endl << weights.mData << std::endl;
auto ref_conv = ck::tensor_operation::host::ReferenceConvFwd<InDataType,
WeiDataType,
OutDataType,
......@@ -116,10 +115,10 @@ Tensor<OutDataType> RunReferenceConv(const ck::conv_util::ConvParams& params,
return host_output;
}
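
get_host_tensor_descriptor maps the {N, C, spatial...} dims plus a layout tag to a host tensor descriptor. Presumably the logical lengths stay in NCHW-like order while the layout tag only selects the strides; a 2D sketch with a hypothetical helper (not the in-tree signature):

#include <cstddef>
#include <vector>

// Hypothetical: packed strides for logical dims {N, C, Hi, Wi} stored NHWC,
// i.e. C varies fastest in memory, then Wi, then Hi, then N.
inline std::vector<std::size_t> nhwc_strides(const std::vector<std::size_t>& dims)
{
    const std::size_t C = dims[1], Hi = dims[2], Wi = dims[3];
    return {Hi * Wi * C, 1, Wi * C, C};
}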
bool TestConv2DNHWC()
bool test_conv2d_nhwc()
{
bool res{true};
ck::conv_util::ConvParams params;
ck::utils::conv::ConvParams params;
params.N = 1;
params.K = 1;
params.C = 2;
......@@ -130,7 +129,7 @@ bool TestConv2DNHWC()
params.input_left_pads = std::vector<ck::index_t>{0, 0};
params.input_right_pads = std::vector<ck::index_t>{0, 0};
auto out_tensor = RunReferenceConv<2>(params);
auto out_tensor = run_reference_convolution_forward<2>(params);
std::vector<std::size_t> ref_dims{1, 1, 4, 4};
std::vector<float> ref_data{130.5,
148.5,
......@@ -148,10 +147,10 @@ bool TestConv2DNHWC()
472.5,
490.5,
508.5};
res = res && test::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error: wrong output tensor dimensions!");
res = res && test::check_err(out_tensor.mData, ref_data, "Error: incorrect results!");
res = res && ck::utils::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error: wrong output tensor dimensions!");
res = res && ck::utils::check_err(out_tensor.mData, ref_data, "Error: incorrect results!");
params.N = 1;
params.K = 2;
......@@ -163,7 +162,7 @@ bool TestConv2DNHWC()
params.input_left_pads = std::vector<ck::index_t>{1, 1};
params.input_right_pads = std::vector<ck::index_t>{1, 1};
out_tensor = RunReferenceConv<2>(params);
out_tensor = run_reference_convolution_forward<2>(params);
ref_dims = std::vector<std::size_t>{1, 2, 5, 5};
ref_data = std::vector<float>{
210., 210., 327., 327., 351., 351., 375., 375., 399., 399.,
......@@ -171,18 +170,18 @@ bool TestConv2DNHWC()
747., 747., 1138.5, 1138.5, 1174.5, 1174.5, 1210.5, 1210.5, 1246.5, 1246.5,
1035., 1035., 1570.5, 1570.5, 1606.5, 1606.5, 1642.5, 1642.5, 1678.5, 1678.5,
1323., 1323., 2002.5, 2002.5, 2038.5, 2038.5, 2074.5, 2074.5, 2110.5, 2110.5};
res = res && test::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error: wrong output tensor dimensions!");
res = res && test::check_err(out_tensor.mData, ref_data, "Error: incorrect results!");
res = res && ck::utils::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error: wrong output tensor dimensions!");
res = res && ck::utils::check_err(out_tensor.mData, ref_data, "Error: incorrect results!");
return res;
}
bool TestConv1DNWC()
bool test_conv1d_nwc()
{
bool res{true};
ck::conv_util::ConvParams params;
ck::utils::conv::ConvParams params;
params.num_dim_spatial = 1;
params.N = 1;
params.K = 1;
......@@ -194,19 +193,20 @@ bool TestConv1DNWC()
params.input_left_pads = std::vector<ck::index_t>{0};
params.input_right_pads = std::vector<ck::index_t>{0};
auto out_tensor = RunReferenceConv<1,
float,
float,
float,
ck::tensor_layout::convolution::NWC,
ck::tensor_layout::convolution::KXC,
ck::tensor_layout::convolution::NWK>(params);
auto out_tensor =
run_reference_convolution_forward<1,
float,
float,
float,
ck::tensor_layout::convolution::NWC,
ck::tensor_layout::convolution::KXC,
ck::tensor_layout::convolution::NWK>(params);
std::vector<std::size_t> ref_dims{1, 1, 4};
std::vector<float> ref_data{7.5, 13.5, 19.5, 25.5};
res = res && test::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error: wrong output tensor dimensions!");
res = res && test::check_err(out_tensor.mData, ref_data, "Error: incorrect results!");
res = res && ck::utils::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error: wrong output tensor dimensions!");
res = res && ck::utils::check_err(out_tensor.mData, ref_data, "Error: incorrect results!");
params.num_dim_spatial = 1;
params.N = 1;
......@@ -219,19 +219,19 @@ bool TestConv1DNWC()
params.input_left_pads = std::vector<ck::index_t>{1};
params.input_right_pads = std::vector<ck::index_t>{1};
out_tensor = RunReferenceConv<1,
float,
float,
float,
ck::tensor_layout::convolution::NWC,
ck::tensor_layout::convolution::KXC,
ck::tensor_layout::convolution::NWK>(params);
out_tensor = run_reference_convolution_forward<1,
float,
float,
float,
ck::tensor_layout::convolution::NWC,
ck::tensor_layout::convolution::KXC,
ck::tensor_layout::convolution::NWK>(params);
ref_dims = std::vector<std::size_t>{1, 2, 5};
ref_data = std::vector<float>{9., 9., 19.5, 19.5, 31.5, 31.5, 43.5, 43.5, 55.5, 55.5};
res = res && test::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error: wrong output tensor dimensions!");
res = res && test::check_err(out_tensor.mData, ref_data, "Error: incorrect results!");
res = res && ck::utils::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error: wrong output tensor dimensions!");
res = res && ck::utils::check_err(out_tensor.mData, ref_data, "Error: incorrect results!");
params.num_dim_spatial = 1;
params.N = 2;
......@@ -244,13 +244,13 @@ bool TestConv1DNWC()
params.input_left_pads = std::vector<ck::index_t>{1};
params.input_right_pads = std::vector<ck::index_t>{1};
auto out_tensor2 = RunReferenceConv<1,
float,
float,
float,
ck::tensor_layout::convolution::NWC,
ck::tensor_layout::convolution::KXC,
ck::tensor_layout::convolution::NWK>(
auto out_tensor2 = run_reference_convolution_forward<1,
float,
float,
float,
ck::tensor_layout::convolution::NWC,
ck::tensor_layout::convolution::KXC,
ck::tensor_layout::convolution::NWK>(
params, FillMonotonicSeq<float>{0.f, 0.1f});
ref_dims = std::vector<std::size_t>{2, 16, 16};
......@@ -319,18 +319,18 @@ bool TestConv1DNWC()
72.9, 72.9, 72.9, 72.9, 72.9, 72.9, 72.9, 72.9,
49.4, 49.4, 49.4, 49.4, 49.4, 49.4, 49.4, 49.4,
49.4, 49.4, 49.4, 49.4, 49.4, 49.4, 49.4, 49.4};
res = res && test::check_err(out_tensor2.mDesc.GetLengths(),
ref_dims,
"Error: wrong output tensor dimensions!");
res = res && test::check_err(out_tensor2.mData, ref_data, "Error: incorrect results!");
res = res && ck::utils::check_err(out_tensor2.mDesc.GetLengths(),
ref_dims,
"Error: wrong output tensor dimensions!");
res = res && ck::utils::check_err(out_tensor2.mData, ref_data, "Error: incorrect results!");
return res;
}
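
A quick sanity check of the first 1D expectation above: the input is NWC with C = 2 and width 6 (implied by the width-4 output), filled monotonically so position w holds the channel values 2w and 2w + 1, and the weights are the constant 0.5. The first output is then 0.5 * ((0 + 1) + (2 + 3) + (4 + 5)) = 7.5, matching ref_data.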
bool TestConv3DNCDHW()
bool test_conv3d_ncdhw()
{
bool res{true};
ck::conv_util::ConvParams params;
ck::utils::conv::ConvParams params;
params.num_dim_spatial = 3;
params.N = 1;
params.K = 1;
......@@ -342,13 +342,13 @@ bool TestConv3DNCDHW()
params.input_left_pads = std::vector<ck::index_t>{0, 0, 0};
params.input_right_pads = std::vector<ck::index_t>{0, 0, 0};
auto out_tensor = RunReferenceConv<3,
float,
float,
float,
ck::tensor_layout::convolution::NCDHW,
ck::tensor_layout::convolution::KCZYX,
ck::tensor_layout::convolution::NKDHW>(
auto out_tensor = run_reference_convolution_forward<3,
float,
float,
float,
ck::tensor_layout::convolution::NCDHW,
ck::tensor_layout::convolution::KCZYX,
ck::tensor_layout::convolution::NKDHW>(
params, FillMonotonicSeq<float>{0.f, 0.1f});
std::vector<std::size_t> ref_dims{1, 1, 4, 4, 4};
std::vector<float> ref_data{
......@@ -360,10 +360,11 @@ bool TestConv3DNCDHW()
634.5, 637.2, 639.9, 642.60004, 650.7, 653.4, 656.10004, 658.8,
699.3, 702., 704.7, 707.4, 715.5, 718.2, 720.9, 723.60004,
731.7, 734.4001, 737.10004, 739.8, 747.9001, 750.60004, 753.3, 756.};
res = res && test::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error [case 1]: wrong output tensor dimensions!");
res = res && test::check_err(out_tensor.mData, ref_data, "Error [case 1]: incorrect results!");
res = res && ck::utils::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error [case 1]: wrong output tensor dimensions!");
res = res &&
ck::utils::check_err(out_tensor.mData, ref_data, "Error [case 1]: incorrect results!");
params.N = 1;
params.K = 2;
......@@ -375,13 +376,13 @@ bool TestConv3DNCDHW()
params.input_left_pads = std::vector<ck::index_t>{0, 0, 0};
params.input_right_pads = std::vector<ck::index_t>{0, 0, 0};
out_tensor = RunReferenceConv<3,
float,
float,
float,
ck::tensor_layout::convolution::NCDHW,
ck::tensor_layout::convolution::KCZYX,
ck::tensor_layout::convolution::NKDHW>(
out_tensor = run_reference_convolution_forward<3,
float,
float,
float,
ck::tensor_layout::convolution::NCDHW,
ck::tensor_layout::convolution::KCZYX,
ck::tensor_layout::convolution::NKDHW>(
params, FillMonotonicSeq<float>{0.f, 0.1f});
ref_dims = std::vector<std::size_t>{1, 2, 4, 4, 4};
ref_data = std::vector<float>{
......@@ -401,11 +402,11 @@ bool TestConv3DNCDHW()
5283.9004, 5292., 5300.0996, 5308.2, 5381.0996, 5389.2, 5397.3, 5405.4004,
6255.9004, 6264.0005, 6272.1, 6280.2, 6353.1, 6361.2, 6369.301, 6377.4,
6450.301, 6458.4, 6466.5, 6474.6, 6547.5, 6555.6, 6563.699, 6571.801};
res = res && test::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error [case 2]: wrong output tensor dimensions!");
res = res && ck::utils::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error [case 2]: wrong output tensor dimensions!");
res =
res && test::check_err(
res && ck::utils::check_err(
out_tensor.mData, ref_data, "Error [case 2]: incorrect results!", 1e-4f, 1e-6f);
return res;
......@@ -416,11 +417,11 @@ bool TestConv3DNCDHW()
int main(void)
{
bool res{true};
res = TestConv2DNHWC();
std::cout << "TestConv2DNHWC ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = TestConv1DNWC();
res = test_conv2d_nhwc();
std::cout << "test_conv2d_nhwc ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = test_conv1d_nwc();
std::cout << "TestConv1DNHWC ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = TestConv3DNCDHW();
std::cout << "TestConv3DNCDHW ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = test_conv3d_ncdhw();
std::cout << "test_conv3d_ncdhw ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
return res ? 0 : 1;
}