Commit e72c0c43 authored by carlushuang

Merge remote-tracking branch 'origin/develop' into cpu_avx2

parents d714fa15 313bbea5
#include <iostream>
#include <stdexcept>
#include <tuple>
#include <vector>
#include "data_type.hpp"
#include "element_wise_operation.hpp"
#include "conv_test_util.hpp"
#include "host_tensor.hpp"
#include "tensor_layout.hpp"
#include "test_util.hpp"
// Forward declarations for conv instances.
using DeviceConvFwdNoOpPtr =
ck::tensor_operation::device::DeviceConvFwdPtr<ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough>;
namespace ck {
namespace tensor_operation {
namespace device {
namespace device_conv1d_fwd_instance {
void add_device_conv1d_fwd_xdl_nwc_kxc_nwk_bf16_instances(std::vector<DeviceConvFwdNoOpPtr>&);
void add_device_conv1d_fwd_xdl_nwc_kxc_nwk_f16_instances(std::vector<DeviceConvFwdNoOpPtr>&);
void add_device_conv1d_fwd_xdl_nwc_kxc_nwk_f32_instances(std::vector<DeviceConvFwdNoOpPtr>&);
void add_device_conv1d_fwd_xdl_nwc_kxc_nwk_int8_instances(std::vector<DeviceConvFwdNoOpPtr>&);
} // namespace device_conv1d_fwd_instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
namespace {
bool TestConv1DNWC()
{
bool res{true};
ck::conv_util::ConvParams params;
params.num_dim_spatial = 1;
params.N = 2;
params.K = 16;
params.C = 4;
params.filter_spatial_lengths = std::vector<ck::index_t>{3};
params.input_spatial_lengths = std::vector<ck::index_t>{16};
params.conv_filter_strides = std::vector<ck::index_t>{1};
params.conv_filter_dilations = std::vector<ck::index_t>{1};
params.input_left_pads = std::vector<ck::index_t>{1};
params.input_right_pads = std::vector<ck::index_t>{1};
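// Sanity check on the shapes (assuming the usual convolution output-size formula
// Wo = (Wi + pad_left + pad_right - dilation * (X - 1) - 1) / stride + 1):
// Wo = (16 + 1 + 1 - 2 - 1) / 1 + 1 = 16, so the NWK output here should be 2 x 16 x 16.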
auto host_tensors = test::conv::GetHostTensors<float,
float,
float,
ck::tensor_layout::convolution::NWC,
ck::tensor_layout::convolution::KXC,
ck::tensor_layout::convolution::NWK>(params);
const Tensor<float>& input = std::get<0>(host_tensors);
const Tensor<float>& weights = std::get<1>(host_tensors);
Tensor<float>& host_output = std::get<2>(host_tensors);
Tensor<float>& device_output = std::get<3>(host_tensors);
test::conv::RunReferenceConv<1>(params, input, weights, host_output);
test::conv::RunConv<1>(params, input, weights, device_output);
res = res &&
test::check_err(
device_output.mData, host_output.mData, "Error: incorrect results!", 1e-5f, 1e-4f);
return res;
}
template <typename T>
bool TestConv1DNWCInstances(const std::vector<DeviceConvFwdNoOpPtr>& conv_ptrs)
{
ck::conv_util::ConvParams params;
params.num_dim_spatial = 1;
params.filter_spatial_lengths = std::vector<ck::index_t>{3};
params.input_spatial_lengths = std::vector<ck::index_t>{71};
params.conv_filter_strides = std::vector<ck::index_t>{2};
params.conv_filter_dilations = std::vector<ck::index_t>{1};
params.input_left_pads = std::vector<ck::index_t>{1};
params.input_right_pads = std::vector<ck::index_t>{1};
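// Same output-size formula as above, now with stride 2: Wo = (71 + 1 + 1 - 2 - 1) / 2 + 1 = 36.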
auto host_tensors = test::conv::GetHostTensors<T,
T,
T,
ck::tensor_layout::convolution::NWC,
ck::tensor_layout::convolution::KXC,
ck::tensor_layout::convolution::NWK>(params);
const Tensor<T>& input = std::get<0>(host_tensors);
const Tensor<T>& weights = std::get<1>(host_tensors);
Tensor<T>& host_output = std::get<2>(host_tensors);
Tensor<T>& device_output = std::get<3>(host_tensors);
test::conv::RunReferenceConv<1>(params, input, weights, host_output);
return test::conv::RunConvInstances<1>(
params, conv_ptrs, input, weights, device_output, host_output);
}
bool TestConv1DNWCBF16Instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv1d_fwd_instance::
add_device_conv1d_fwd_xdl_nwc_kxc_nwk_bf16_instances(conv_ptrs);
return TestConv1DNWCInstances<ck::bhalf_t>(conv_ptrs);
}
bool TestConv1DNWCF16Instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv1d_fwd_instance::
add_device_conv1d_fwd_xdl_nwc_kxc_nwk_f16_instances(conv_ptrs);
return TestConv1DNWCInstances<ck::half_t>(conv_ptrs);
}
bool TestConv1DNWCF32Instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv1d_fwd_instance::
add_device_conv1d_fwd_xdl_nwc_kxc_nwk_f32_instances(conv_ptrs);
return TestConv1DNWCInstances<float>(conv_ptrs);
}
bool TestConv1DNWCInt8Instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv1d_fwd_instance::
add_device_conv1d_fwd_xdl_nwc_kxc_nwk_int8_instances(conv_ptrs);
return TestConv1DNWCInstances<int8_t>(conv_ptrs);
}
} // anonymous namespace
int main()
{
bool res{true};
res = TestConv1DNWC();
std::cout << "TestConv1DNWC ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = TestConv1DNWCBF16Instances();
std::cout << "\nTestConv1DNWCBF16Instances ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
res = TestConv1DNWCF16Instances();
std::cout << "\nTestConv1DNWCF16Instances ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = TestConv1DNWCF32Instances();
std::cout << "\nTestConv1DNWCF32Instances ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = TestConv1DNWCInt8Instances();
std::cout << "\nTestConv1DNWCInt8Instances ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
}
#include <half.hpp>
#include <iostream>
#include <stdexcept>
#include <tuple>
#include <vector>
#include "data_type.hpp"
#include "element_wise_operation.hpp"
#include "conv_test_util.hpp"
#include "host_tensor.hpp"
#include "tensor_layout.hpp"
#include "test_util.hpp"
// Forward declarations for conv instances.
using DeviceConvFwdNoOpPtr =
ck::tensor_operation::device::DeviceConvFwdPtr<ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough>;
namespace ck {
namespace tensor_operation {
namespace device {
namespace device_conv2d_fwd_instance {
void add_device_conv2d_fwd_xdl_nhwc_kyxc_nhwk_bf16_instances(std::vector<DeviceConvFwdNoOpPtr>&);
void add_device_conv2d_fwd_xdl_nhwc_kyxc_nhwk_f16_instances(std::vector<DeviceConvFwdNoOpPtr>&);
void add_device_conv2d_fwd_xdl_c_shuffle_nhwc_kyxc_nhwk_f16_instances(
std::vector<DeviceConvFwdNoOpPtr>&);
void add_device_conv2d_fwd_xdl_nhwc_kyxc_nhwk_f32_instances(std::vector<DeviceConvFwdNoOpPtr>&);
void add_device_conv2d_fwd_xdl_nhwc_kyxc_nhwk_int8_instances(std::vector<DeviceConvFwdNoOpPtr>&);
} // namespace device_conv2d_fwd_instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
namespace {
bool TestConv2DNHWC()
{
bool res{true};
ck::conv_util::ConvParams params;
params.N = 2;
params.K = 16;
params.C = 4;
params.input_spatial_lengths = std::vector<ck::index_t>{16, 16};
params.conv_filter_strides = std::vector<ck::index_t>{1, 1};
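// Unlike the 1-D and 3-D tests, the remaining fields (num_dim_spatial, filter sizes,
// dilations and paddings) are left at their ConvParams defaults here, which are
// presumably the 2-D defaults (3x3 filter, unit dilation, unit padding).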
auto host_tensors = test::conv::GetHostTensors(params);
const Tensor<float>& input = std::get<0>(host_tensors);
const Tensor<float>& weights = std::get<1>(host_tensors);
Tensor<float>& host_output = std::get<2>(host_tensors);
Tensor<float>& device_output = std::get<3>(host_tensors);
test::conv::RunReferenceConv<2>(params, input, weights, host_output);
test::conv::RunConv<2>(params, input, weights, device_output);
res = res &&
test::check_err(
device_output.mData, host_output.mData, "Error: incorrect results!", 1e-5f, 1e-4f);
return res;
}
template <typename T>
bool TestConv2DNHWCInstances(const std::vector<DeviceConvFwdNoOpPtr>& conv_ptrs)
{
ck::conv_util::ConvParams params;
params.num_dim_spatial = 2;
params.filter_spatial_lengths = std::vector<ck::index_t>{3, 3};
params.input_spatial_lengths = std::vector<ck::index_t>{71, 71};
params.conv_filter_strides = std::vector<ck::index_t>{2, 2};
params.conv_filter_dilations = std::vector<ck::index_t>{1, 1};
params.input_left_pads = std::vector<ck::index_t>{1, 1};
params.input_right_pads = std::vector<ck::index_t>{1, 1};
auto host_tensors = test::conv::GetHostTensors<T,
T,
T,
ck::tensor_layout::convolution::NHWC,
ck::tensor_layout::convolution::KYXC,
ck::tensor_layout::convolution::NHWK>(params);
const Tensor<T>& input = std::get<0>(host_tensors);
const Tensor<T>& weights = std::get<1>(host_tensors);
Tensor<T>& host_output = std::get<2>(host_tensors);
Tensor<T>& device_output = std::get<3>(host_tensors);
test::conv::RunReferenceConv<2>(params, input, weights, host_output);
return test::conv::RunConvInstances<2>(
params, conv_ptrs, input, weights, device_output, host_output);
}
bool TestConv2DNHWCBF16Instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv2d_fwd_instance::
add_device_conv2d_fwd_xdl_nhwc_kyxc_nhwk_bf16_instances(conv_ptrs);
return TestConv2DNHWCInstances<ck::bhalf_t>(conv_ptrs);
}
bool TestConv2DNHWCF16Instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv2d_fwd_instance::
add_device_conv2d_fwd_xdl_nhwc_kyxc_nhwk_f16_instances(conv_ptrs);
ck::tensor_operation::device::device_conv2d_fwd_instance::
add_device_conv2d_fwd_xdl_c_shuffle_nhwc_kyxc_nhwk_f16_instances(conv_ptrs);
return TestConv2DNHWCInstances<ck::half_t>(conv_ptrs);
}
bool TestConv2DNHWCF32Instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv2d_fwd_instance::
add_device_conv2d_fwd_xdl_nhwc_kyxc_nhwk_f32_instances(conv_ptrs);
return TestConv2DNHWCInstances<float>(conv_ptrs);
}
bool TestConv2DNHWCInt8Instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv2d_fwd_instance::
add_device_conv2d_fwd_xdl_nhwc_kyxc_nhwk_int8_instances(conv_ptrs);
return TestConv2DNHWCInstances<int8_t>(conv_ptrs);
}
} // anonymous namespace
int main()
{
bool res{true};
res = TestConv2DNHWC();
std::cout << "TestConv2DNHWC ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = TestConv2DNHWCBF16Instances();
std::cout << "\nTestConv2DNHWCBF16Instances ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
res = TestConv2DNHWCF16Instances();
std::cout << "\nTestConv2DNHWCF16Instances ....." << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = TestConv2DNHWCF32Instances();
std::cout << "\nTestConv2DNHWCF32Instances ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
res = TestConv2DNHWCInt8Instances();
std::cout << "\nTestConv2DNHWCInt8Instances ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
return 0;
}
#include <half.hpp>
#include <iostream>
#include <stdexcept>
#include <tuple>
#include <vector>
#include "data_type.hpp"
#include "element_wise_operation.hpp"
#include "conv_test_util.hpp"
#include "host_tensor.hpp"
#include "tensor_layout.hpp"
#include "test_util.hpp"
// Forward declarations for conv instances.
using DeviceConvFwdNoOpPtr =
ck::tensor_operation::device::DeviceConvFwdPtr<ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough>;
namespace ck {
namespace tensor_operation {
namespace device {
namespace device_conv3d_fwd_instance {
void add_device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk_bf16_instances(std::vector<DeviceConvFwdNoOpPtr>&);
void add_device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk_f16_instances(std::vector<DeviceConvFwdNoOpPtr>&);
void add_device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk_f32_instances(std::vector<DeviceConvFwdNoOpPtr>&);
void add_device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk_int8_instances(std::vector<DeviceConvFwdNoOpPtr>&);
} // namespace device_conv3d_fwd_instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
namespace {
bool TestConv3DNDHWC()
{
bool res{true};
ck::conv_util::ConvParams params;
params.num_dim_spatial = 3;
params.N = 2;
params.K = 16;
params.C = 4;
params.filter_spatial_lengths = std::vector<ck::index_t>{3, 3, 3};
params.input_spatial_lengths = std::vector<ck::index_t>{16, 16, 16};
params.conv_filter_strides = std::vector<ck::index_t>{1, 1, 1};
params.conv_filter_dilations = std::vector<ck::index_t>{1, 1, 1};
params.input_left_pads = std::vector<ck::index_t>{1, 1, 1};
params.input_right_pads = std::vector<ck::index_t>{1, 1, 1};
auto host_tensors = test::conv::GetHostTensors<float,
float,
float,
ck::tensor_layout::convolution::NDHWC,
ck::tensor_layout::convolution::KZYXC,
ck::tensor_layout::convolution::NDHWK>(params);
const Tensor<float>& input = std::get<0>(host_tensors);
const Tensor<float>& weights = std::get<1>(host_tensors);
Tensor<float>& host_output = std::get<2>(host_tensors);
Tensor<float>& device_output = std::get<3>(host_tensors);
test::conv::RunReferenceConv<3>(params, input, weights, host_output);
test::conv::RunConv<3>(params, input, weights, device_output);
res = res &&
test::check_err(
device_output.mData, host_output.mData, "Error: incorrect results!", 1e-5f, 1e-4f);
return res;
}
bool TestConv3DNDHWC2GBInput()
{
// >2GB Input
ck::conv_util::ConvParams params;
params.num_dim_spatial = 3;
params.N = 2;
params.K = 16;
params.C = 32;
params.filter_spatial_lengths = std::vector<ck::index_t>{3, 3, 3};
params.input_spatial_lengths = std::vector<ck::index_t>{32, 1000, 1000};
params.conv_filter_strides = std::vector<ck::index_t>{1, 1, 1};
params.conv_filter_dilations = std::vector<ck::index_t>{1, 1, 1};
params.input_left_pads = std::vector<ck::index_t>{1, 1, 1};
params.input_right_pads = std::vector<ck::index_t>{1, 1, 1};
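// Rough size check (plain arithmetic, not taken from the test itself): the NDHWC input
// holds N*Di*Hi*Wi*C = 2*32*1000*1000*32 = 2.048e9 floats, roughly 8.2 GB, which is what
// should trigger the "does not support this Conv problem" error expected below.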
auto host_tensors =
test::conv::GetHostTensors<float,
float,
float,
ck::tensor_layout::convolution::NDHWC,
ck::tensor_layout::convolution::KZYXC,
ck::tensor_layout::convolution::NDHWK>(params, false);
const Tensor<float>& input = std::get<0>(host_tensors);
const Tensor<float>& weights = std::get<1>(host_tensors);
Tensor<float>& device_output = std::get<3>(host_tensors);
try
{
test::conv::RunConv<3>(params, input, weights, device_output);
}
catch(const std::runtime_error& err)
{
std::string err_msg{"Error! device_conv with the specified compilation parameters does "
"not support this Conv problem"};
if(err.what() != err_msg)
{
return false;
}
return true;
}
std::cout << "Error: Failure checking oversized tensor!" << std::endl;
return false;
}
bool TestConv3DNDHWC2GBFilters()
{
// >2GB Filters
ck::conv_util::ConvParams params;
params.num_dim_spatial = 3;
params.N = 2;
params.K = 16;
params.C = 32;
params.filter_spatial_lengths = std::vector<ck::index_t>{4, 1000, 1000};
params.input_spatial_lengths = std::vector<ck::index_t>{16, 16, 16};
params.conv_filter_strides = std::vector<ck::index_t>{1, 1, 1};
params.conv_filter_dilations = std::vector<ck::index_t>{1, 1, 1};
params.input_left_pads = std::vector<ck::index_t>{1, 1, 1};
params.input_right_pads = std::vector<ck::index_t>{1, 1, 1};
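// Rough size check: the KZYXC weight tensor holds K*Z*Y*X*C = 16*4*1000*1000*32 = 2.048e9
// floats, roughly 8.2 GB, again far beyond the 2 GB limit this test expects to be rejected.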
auto host_tensors =
test::conv::GetHostTensors<float,
float,
float,
ck::tensor_layout::convolution::NDHWC,
ck::tensor_layout::convolution::KZYXC,
ck::tensor_layout::convolution::NDHWK>(params, false);
const Tensor<float>& input = std::get<0>(host_tensors);
const Tensor<float>& weights = std::get<1>(host_tensors);
Tensor<float>& device_output = std::get<3>(host_tensors);
try
{
test::conv::RunConv<3>(params, input, weights, device_output);
}
catch(const std::runtime_error& err)
{
std::string err_msg{"Error! device_conv with the specified compilation parameters does "
"not support this Conv problem"};
if(err.what() != err_msg)
{
return false;
}
return true;
}
std::cout << "Error: Failure checking oversized tensor!" << std::endl;
return false;
}
bool TestConv3DNDHWC2GBOutput()
{
// >2GB Output
ck::conv_util::ConvParams params;
params.num_dim_spatial = 3;
params.N = 2;
params.K = 16;
params.C = 2;
params.filter_spatial_lengths = std::vector<ck::index_t>{1, 1, 1};
params.input_spatial_lengths = std::vector<ck::index_t>{1000, 1000, 30};
params.conv_filter_strides = std::vector<ck::index_t>{1, 1, 1};
params.conv_filter_dilations = std::vector<ck::index_t>{1, 1, 1};
params.input_left_pads = std::vector<ck::index_t>{2, 2, 2};
params.input_right_pads = std::vector<ck::index_t>{2, 2, 2};
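// Rough size check (assuming the usual output-size formula): with a 1x1x1 filter, unit
// stride and padding of 2 on every side, the output spatial size is 1004 x 1004 x 34,
// so the NDHWK output holds about 2*16*1004*1004*34 ~= 1.1e9 floats, roughly 4.4 GB.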
auto host_tensors =
test::conv::GetHostTensors<float,
float,
float,
ck::tensor_layout::convolution::NDHWC,
ck::tensor_layout::convolution::KZYXC,
ck::tensor_layout::convolution::NDHWK>(params, false);
const Tensor<float>& input = std::get<0>(host_tensors);
const Tensor<float>& weights = std::get<1>(host_tensors);
Tensor<float>& device_output = std::get<3>(host_tensors);
try
{
test::conv::RunConv<3>(params, input, weights, device_output);
}
catch(const std::runtime_error& err)
{
std::string err_msg{"Error! device_conv with the specified compilation parameters does "
"not support this Conv problem"};
if(err.what() != err_msg)
{
return false;
}
return true;
}
std::cout << "Error: Failure checking oversized tensor!" << std::endl;
return false;
}
template <typename T>
bool TestConv3DNDHWCInstances(const std::vector<DeviceConvFwdNoOpPtr>& conv_ptrs)
{
ck::conv_util::ConvParams params;
params.N = 64;
params.num_dim_spatial = 3;
params.filter_spatial_lengths = std::vector<ck::index_t>{3, 3, 2};
params.input_spatial_lengths = std::vector<ck::index_t>{32, 32, 2};
params.conv_filter_strides = std::vector<ck::index_t>{2, 2, 2};
params.conv_filter_dilations = std::vector<ck::index_t>{1, 1, 1};
params.input_left_pads = std::vector<ck::index_t>{1, 1, 1};
params.input_right_pads = std::vector<ck::index_t>{1, 1, 1};
auto host_tensors = test::conv::GetHostTensors<T,
T,
T,
ck::tensor_layout::convolution::NDHWC,
ck::tensor_layout::convolution::KZYXC,
ck::tensor_layout::convolution::NDHWK>(params);
const Tensor<T>& input = std::get<0>(host_tensors);
const Tensor<T>& weights = std::get<1>(host_tensors);
Tensor<T>& host_output = std::get<2>(host_tensors);
Tensor<T>& device_output = std::get<3>(host_tensors);
test::conv::RunReferenceConv<3>(params, input, weights, host_output);
return test::conv::RunConvInstances<3>(
params, conv_ptrs, input, weights, device_output, host_output);
}
bool TestConv3DNDHWCBF16Instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv3d_fwd_instance::
add_device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk_bf16_instances(conv_ptrs);
return TestConv3DNDHWCInstances<ck::bhalf_t>(conv_ptrs);
}
bool TestConv3DNDHWCF16Instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv3d_fwd_instance::
add_device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk_f16_instances(conv_ptrs);
return TestConv3DNDHWCInstances<ck::half_t>(conv_ptrs);
}
bool TestConv3DNDHWCF32Instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv3d_fwd_instance::
add_device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk_f32_instances(conv_ptrs);
return TestConv3DNDHWCInstances<float>(conv_ptrs);
}
bool TestConv3DNDHWCInt8Instances()
{
std::vector<DeviceConvFwdNoOpPtr> conv_ptrs;
ck::tensor_operation::device::device_conv3d_fwd_instance::
add_device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk_int8_instances(conv_ptrs);
return TestConv3DNDHWCInstances<int8_t>(conv_ptrs);
}
} // anonymous namespace
int main()
{
bool res{true};
res = TestConv3DNDHWC();
std::cout << "TestConv3DNDHWC ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = TestConv3DNDHWC2GBInput();
std::cout << "\nTestConv3DNDHWC2GBInput ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = TestConv3DNDHWC2GBFilters();
std::cout << "\nTestConv3DNDHWC2GBFilters ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = TestConv3DNDHWC2GBOutput();
std::cout << "\nTestConv3DNDHWC2GBOutput ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = TestConv3DNDHWCBF16Instances();
std::cout << "\nTestConv3DNDHWCBF16Instances ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
res = TestConv3DNDHWCF16Instances();
std::cout << "\nTestConv3DNDHWCF16Instances ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
res = TestConv3DNDHWCF32Instances();
std::cout << "\nTestConv3DNDHWCF32Instances ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
res = TestConv3DNDHWCInt8Instances();
std::cout << "\nTestConv3DNDHWCInt8Instances ..... " << (res ? "SUCCESS" : "FAILURE")
<< std::endl;
return 0;
}
@@ -2,6 +2,10 @@ add_test_executable(test_gemm_fp32 gemm_fp32.cpp)
target_link_libraries(test_gemm_fp32 PRIVATE host_tensor)
target_link_libraries(test_gemm_fp32 PRIVATE device_gemm_instance)

add_test_executable(test_gemm_fp16 gemm_fp16.cpp)
target_link_libraries(test_gemm_fp16 PRIVATE host_tensor)
target_link_libraries(test_gemm_fp16 PRIVATE device_gemm_instance)

add_test_executable(test_gemm_bf16 gemm_bf16.cpp)
target_link_libraries(test_gemm_bf16 PRIVATE host_tensor)
target_link_libraries(test_gemm_bf16 PRIVATE device_gemm_instance)
@@ -23,7 +23,7 @@
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using DeviceGemmNoOpPtr =
ck::tensor_operation::device::DeviceGemmPtr<ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough>;
@@ -32,132 +32,86 @@ namespace ck {
namespace tensor_operation {
namespace device {
namespace device_gemm_instance {
void add_device_gemm_xdl_c_shuffle_bf16_bf16_bf16_km_kn_mn_instances(
std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_c_shuffle_bf16_bf16_bf16_km_nk_mn_instances(
std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_c_shuffle_bf16_bf16_bf16_mk_nk_mn_instances(
std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_c_shuffle_bf16_bf16_bf16_mk_kn_mn_instances(
std::vector<DeviceGemmNoOpPtr>&);
} // namespace device_gemm_instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
int main()
{
using RowMajor = ck::tensor_layout::gemm::RowMajor;
using ColumnMajor = ck::tensor_layout::gemm::ColumnMajor;
bool res = true;
std::vector<DeviceGemmNoOpPtr> gemmPtrs;
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_c_shuffle_bf16_bf16_bf16_km_kn_mn_instances(gemmPtrs);
for(auto& gemmPtr : gemmPtrs)
{
res &= ck::gemm_util::TestGemmBF16<DeviceGemmNoOpPtr,
ColumnMajor,
RowMajor,
RowMajor,
PassThrough,
PassThrough,
PassThrough>{}(gemmPtr);
}
gemmPtrs.clear();
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_c_shuffle_bf16_bf16_bf16_km_nk_mn_instances(gemmPtrs);
for(auto& gemmPtr : gemmPtrs)
{
res &= ck::gemm_util::TestGemmBF16<DeviceGemmNoOpPtr,
ColumnMajor,
ColumnMajor,
RowMajor,
PassThrough,
PassThrough,
PassThrough>{}(gemmPtr);
}
gemmPtrs.clear();
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_c_shuffle_bf16_bf16_bf16_mk_kn_mn_instances(gemmPtrs);
for(auto& gemmPtr : gemmPtrs)
{
res &= ck::gemm_util::TestGemmBF16<DeviceGemmNoOpPtr,
RowMajor,
RowMajor,
RowMajor,
PassThrough,
PassThrough,
PassThrough>{}(gemmPtr);
}
gemmPtrs.clear();
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_c_shuffle_bf16_bf16_bf16_mk_nk_mn_instances(gemmPtrs);
for(auto& gemmPtr : gemmPtrs)
{
res &= ck::gemm_util::TestGemmBF16<DeviceGemmNoOpPtr,
RowMajor,
ColumnMajor,
RowMajor,
PassThrough,
PassThrough,
PassThrough>{}(gemmPtr);
}
std::cout << "TestGemm ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
return res ? 0 : 1;
}
#include <algorithm>
#include <cstdlib>
#include <half.hpp>
#include <iostream>
#include <numeric>
#include <tuple>
#include <vector>
#include "gemm_util.hpp"
#include "config.hpp"
#include "print.hpp"
#include "device.hpp"
#include "host_gemm.hpp"
#include "device_tensor.hpp"
#include "device_gemm_xdl.hpp"
#include "device_gemm_xdl_c_shuffle.hpp"
#include "element_wise_operation.hpp"
#include "gemm_specialization.hpp"
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using DeviceGemmNoOpPtr =
ck::tensor_operation::device::DeviceGemmPtr<ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough>;
namespace ck {
namespace tensor_operation {
namespace device {
namespace device_gemm_instance {
void add_device_gemm_xdl_f16_f16_f16_km_kn_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_f16_f16_f16_km_nk_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_f16_f16_f16_mk_nk_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_f16_f16_f16_mk_kn_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_splitk_f16_f16_f16_km_kn_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_splitk_f16_f16_f16_km_nk_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_splitk_f16_f16_f16_mk_nk_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_splitk_f16_f16_f16_mk_kn_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_c_shuffle_f16_f16_f16_km_kn_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_c_shuffle_f16_f16_f16_km_nk_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_c_shuffle_f16_f16_f16_mk_nk_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_c_shuffle_f16_f16_f16_mk_kn_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_c_shuffle_2_stage_f16_f16_f16_mk_nk_mn_instances(
std::vector<DeviceGemmNoOpPtr>&);
} // namespace device_gemm_instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
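// Reading of the instance-name suffixes, inferred from how they are paired with layout
// template arguments below: "mk"/"km" describe how A is stored (MxK row-major vs. KxM,
// i.e. column-major MxK), "kn"/"nk" describe B, and "mn" is the row-major MxN output.
// For example, the km_kn_mn instances are exercised with <ColumnMajor, RowMajor, RowMajor>.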
int main()
{
using ADataType = ck::half_t;
using BDataType = ck::half_t;
using CDataType = ck::half_t;
using RowMajor = ck::tensor_layout::gemm::RowMajor;
using ColumnMajor = ck::tensor_layout::gemm::ColumnMajor;
bool res = true;
std::vector<DeviceGemmNoOpPtr> gemmPtrs;
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_f16_f16_f16_km_kn_mn_instances(gemmPtrs);
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_splitk_f16_f16_f16_km_kn_mn_instances(gemmPtrs);
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_c_shuffle_f16_f16_f16_km_kn_mn_instances(gemmPtrs);
for(auto& gemmPtr : gemmPtrs)
{
res &= ck::gemm_util::TestGemm<DeviceGemmNoOpPtr,
ADataType,
BDataType,
CDataType,
ColumnMajor,
RowMajor,
RowMajor,
PassThrough,
PassThrough,
PassThrough>{}(gemmPtr);
}
gemmPtrs.clear();
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_f16_f16_f16_km_nk_mn_instances(gemmPtrs);
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_splitk_f16_f16_f16_km_nk_mn_instances(gemmPtrs);
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_c_shuffle_f16_f16_f16_km_nk_mn_instances(gemmPtrs);
for(auto& gemmPtr : gemmPtrs)
{
res &= ck::gemm_util::TestGemm<DeviceGemmNoOpPtr,
ADataType,
BDataType,
CDataType,
ColumnMajor,
ColumnMajor,
RowMajor,
PassThrough,
PassThrough,
PassThrough>{}(gemmPtr);
}
gemmPtrs.clear();
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_f16_f16_f16_mk_kn_mn_instances(gemmPtrs);
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_splitk_f16_f16_f16_mk_kn_mn_instances(gemmPtrs);
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_c_shuffle_f16_f16_f16_mk_kn_mn_instances(gemmPtrs);
for(auto& gemmPtr : gemmPtrs)
{
res &= ck::gemm_util::TestGemm<DeviceGemmNoOpPtr,
ADataType,
BDataType,
CDataType,
RowMajor,
RowMajor,
RowMajor,
PassThrough,
PassThrough,
PassThrough>{}(gemmPtr);
}
gemmPtrs.clear();
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_f16_f16_f16_mk_nk_mn_instances(gemmPtrs);
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_splitk_f16_f16_f16_mk_nk_mn_instances(gemmPtrs);
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_c_shuffle_f16_f16_f16_mk_nk_mn_instances(gemmPtrs);
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_c_shuffle_2_stage_f16_f16_f16_mk_nk_mn_instances(gemmPtrs);
for(auto& gemmPtr : gemmPtrs)
{
res &= ck::gemm_util::TestGemm<DeviceGemmNoOpPtr,
ADataType,
BDataType,
CDataType,
RowMajor,
ColumnMajor,
RowMajor,
PassThrough,
PassThrough,
PassThrough>{}(gemmPtr);
}
std::cout << "TestGemm ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
return res ? 0 : 1;
}
@@ -23,7 +23,7 @@
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using DeviceGemmNoOpPtr =
ck::tensor_operation::device::DeviceGemmPtr<ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough>;
@@ -32,107 +32,124 @@ namespace ck {
namespace tensor_operation {
namespace device {
namespace device_gemm_instance {
void add_device_gemm_xdl_f32_f32_f32_km_kn_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_f32_f32_f32_km_nk_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_f32_f32_f32_mk_nk_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_f32_f32_f32_mk_kn_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_splitk_f32_f32_f32_km_kn_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_splitk_f32_f32_f32_km_nk_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_splitk_f32_f32_f32_mk_nk_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_splitk_f32_f32_f32_mk_kn_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_c_shuffle_f32_f32_f32_km_kn_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_c_shuffle_f32_f32_f32_km_nk_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_c_shuffle_f32_f32_f32_mk_nk_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_c_shuffle_f32_f32_f32_mk_kn_mn_instances(std::vector<DeviceGemmNoOpPtr>&);
} // namespace device_gemm_instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
int main()
{
using ADataType = float;
using BDataType = float;
using CDataType = float;
using RowMajor = ck::tensor_layout::gemm::RowMajor;
using ColumnMajor = ck::tensor_layout::gemm::ColumnMajor;
bool res = true;
std::vector<DeviceGemmNoOpPtr> gemmPtrs;
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_f32_f32_f32_km_kn_mn_instances(gemmPtrs);
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_splitk_f32_f32_f32_km_kn_mn_instances(gemmPtrs);
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_c_shuffle_f32_f32_f32_km_kn_mn_instances(gemmPtrs);
for(auto& gemmPtr : gemmPtrs)
{
res &= ck::gemm_util::TestGemm<DeviceGemmNoOpPtr,
ADataType,
BDataType,
CDataType,
ColumnMajor,
RowMajor,
RowMajor,
PassThrough,
PassThrough,
PassThrough>{}(gemmPtr);
}
gemmPtrs.clear();
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_f32_f32_f32_km_nk_mn_instances(gemmPtrs);
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_splitk_f32_f32_f32_km_nk_mn_instances(gemmPtrs);
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_c_shuffle_f32_f32_f32_km_nk_mn_instances(gemmPtrs);
for(auto& gemmPtr : gemmPtrs)
{
res &= ck::gemm_util::TestGemm<DeviceGemmNoOpPtr,
ADataType,
BDataType,
CDataType,
ColumnMajor,
ColumnMajor,
RowMajor,
PassThrough,
PassThrough,
PassThrough>{}(gemmPtr);
}
gemmPtrs.clear();
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_f32_f32_f32_mk_kn_mn_instances(gemmPtrs);
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_splitk_f32_f32_f32_mk_kn_mn_instances(gemmPtrs);
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_c_shuffle_f32_f32_f32_mk_kn_mn_instances(gemmPtrs);
for(auto& gemmPtr : gemmPtrs)
{
res &= ck::gemm_util::TestGemm<DeviceGemmNoOpPtr,
ADataType,
BDataType,
CDataType,
RowMajor,
RowMajor,
RowMajor,
PassThrough,
PassThrough,
PassThrough>{}(gemmPtr);
}
gemmPtrs.clear();
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_f32_f32_f32_mk_nk_mn_instances(gemmPtrs);
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_splitk_f32_f32_f32_mk_nk_mn_instances(gemmPtrs);
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_c_shuffle_f32_f32_f32_mk_nk_mn_instances(gemmPtrs);
for(auto& gemmPtr : gemmPtrs)
{
res &= ck::gemm_util::TestGemm<DeviceGemmNoOpPtr,
ADataType,
BDataType,
CDataType,
RowMajor,
ColumnMajor,
RowMajor,
PassThrough,
PassThrough,
PassThrough>{}(gemmPtr);
}
std::cout << "TestGemm ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
return res ? 0 : 1;
}
@@ -23,7 +23,7 @@
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using DeviceGemmNoOpPtr =
ck::tensor_operation::device::DeviceGemmPtr<ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough>;
@@ -32,106 +32,102 @@ namespace ck {
namespace tensor_operation {
namespace device {
namespace device_gemm_instance {
void add_device_gemm_xdl_c_shuffle_int8_int8_int8_km_kn_mn_instances(
std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_c_shuffle_int8_int8_int8_km_nk_mn_instances(
std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_c_shuffle_int8_int8_int8_mk_nk_mn_instances(
std::vector<DeviceGemmNoOpPtr>&);
void add_device_gemm_xdl_c_shuffle_int8_int8_int8_mk_kn_mn_instances(
std::vector<DeviceGemmNoOpPtr>&);
} // namespace device_gemm_instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
int main()
{
using ADataType = int8_t;
using BDataType = int8_t;
using CDataType = int8_t;
using RowMajor = ck::tensor_layout::gemm::RowMajor;
using ColumnMajor = ck::tensor_layout::gemm::ColumnMajor;
std::vector<DeviceGemmNoOpPtr> gemmPtrs;
bool res = true;
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_c_shuffle_int8_int8_int8_km_kn_mn_instances(gemmPtrs);
for(auto& gemmPtr : gemmPtrs)
{
res &= ck::gemm_util::TestGemm<DeviceGemmNoOpPtr,
ADataType,
BDataType,
CDataType,
ColumnMajor,
RowMajor,
RowMajor,
PassThrough,
PassThrough,
PassThrough>{}(gemmPtr);
}
gemmPtrs.clear();
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_c_shuffle_int8_int8_int8_km_nk_mn_instances(gemmPtrs);
for(auto& gemmPtr : gemmPtrs)
{
res &= ck::gemm_util::TestGemm<DeviceGemmNoOpPtr,
ADataType,
BDataType,
CDataType,
ColumnMajor,
ColumnMajor,
RowMajor,
PassThrough,
PassThrough,
PassThrough>{}(gemmPtr);
}
gemmPtrs.clear();
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_c_shuffle_int8_int8_int8_mk_kn_mn_instances(gemmPtrs);
for(auto& gemmPtr : gemmPtrs)
{
res &= ck::gemm_util::TestGemm<DeviceGemmNoOpPtr,
ADataType,
BDataType,
CDataType,
RowMajor,
RowMajor,
RowMajor,
PassThrough,
PassThrough,
PassThrough>{}(gemmPtr);
}
gemmPtrs.clear();
ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_c_shuffle_int8_int8_int8_mk_nk_mn_instances(gemmPtrs);
for(auto& gemmPtr : gemmPtrs)
{
res &= ck::gemm_util::TestGemm<DeviceGemmNoOpPtr,
ADataType,
BDataType,
CDataType,
RowMajor,
ColumnMajor,
RowMajor,
PassThrough,
PassThrough,
PassThrough>{}(gemmPtr);
}
std::cout << "TestGemm ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
return res ? 0 : 1;
}
@@ -4,6 +4,10 @@
#include "config.hpp"
#include "device.hpp"
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "reference_gemm.hpp"
#include "tensor_layout.hpp"
#include "test_util.hpp"
namespace ck {
namespace gemm_util {
@@ -98,6 +102,243 @@ void RunDeviceGEMM(DeviceGemmPtr_& gemmPtr,
c_m_n_device_buf.FromDevice(C.mData.data());
}
template <typename DeviceGemmPtr_,
typename ADataType,
typename BDataType,
typename CDataType,
typename ALayout,
typename BLayout,
typename CLayout,
typename AElementwiseOperation,
typename BElementwiseOperation,
typename CElementwiseOperation>
struct TestGemm
{
auto PrepareGemmTensor(const ck::gemm_util::GemmParams& params)
{
auto f_host_tensor_descriptor =
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
{
return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
std::vector<std::size_t>({stride, 1}));
}
else
{
return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
std::vector<std::size_t>({1, stride}));
}
};
Tensor<ADataType> a_m_k(
f_host_tensor_descriptor(params.M, params.K, params.StrideA, ALayout{}));
Tensor<BDataType> b_k_n(
f_host_tensor_descriptor(params.K, params.N, params.StrideB, BLayout{}));
Tensor<CDataType> c_m_n_host_result(
f_host_tensor_descriptor(params.M, params.N, params.StrideC, CLayout{}));
Tensor<CDataType> c_m_n_device_result(
f_host_tensor_descriptor(params.M, params.N, params.StrideC, CLayout{}));
auto f_generate_tensor_value = [](auto desc, auto type) {
using dataType = decltype(type);
if(std::is_same<dataType, int8_t>::value)
{
desc.GenerateTensorValue(GeneratorTensor_2<int8_t>{-5, 5});
}
else
{
desc.GenerateTensorValue(GeneratorTensor_3<dataType>{-0.5, 0.5});
}
};
f_generate_tensor_value(a_m_k, ADataType{});
f_generate_tensor_value(b_k_n, BDataType{});
return std::make_tuple(a_m_k, b_k_n, c_m_n_host_result, c_m_n_device_result);
}
auto operator()(DeviceGemmPtr_& gemmPtr)
{
std::cout << "ALayout = " << ALayout{}.name << ", BLayout = " << BLayout{}.name
<< ", CLayout = " << CLayout{}.name << std::endl;
std::cout << gemmPtr->GetTypeString() << std::endl;
// Arrange
ck::gemm_util::GemmParams params;
params.M = 1024;
params.N = 1024;
params.K = 1024;
params.StrideA = 1024;
params.StrideB = 1024;
params.StrideC = 1024;
auto host_tensors = PrepareGemmTensor(params);
const Tensor<ADataType>& a = std::get<0>(host_tensors);
const Tensor<BDataType>& b = std::get<1>(host_tensors);
Tensor<CDataType>& c_host = std::get<2>(host_tensors);
Tensor<CDataType>& c_device = std::get<3>(host_tensors);
auto a_element_op = AElementwiseOperation{};
auto b_element_op = BElementwiseOperation{};
auto c_element_op = CElementwiseOperation{};
using ReferenceGemmInstance =
ck::tensor_operation::host::ReferenceGemm<ADataType,
BDataType,
CDataType,
AElementwiseOperation,
BElementwiseOperation,
CElementwiseOperation>;
ck::gemm_util::RunHostGEMM<ReferenceGemmInstance>(
a, b, c_host, a_element_op, b_element_op, c_element_op);
// Act
ck::gemm_util::RunDeviceGEMM(
gemmPtr, params, a, b, c_device, a_element_op, b_element_op, c_element_op);
// Assert
bool res = false;
if(std::is_same<CDataType, float>::value)
{
res = test::check_err(c_device.mData, c_host.mData, "Error: incorrect results!");
std::cout << (res ? "SUCCESS" : "FAILURE") << std::endl;
}
else if(std::is_same<CDataType, ck::half_t>::value)
{
res = test::check_err(c_device.mData, c_host.mData, "Error: incorrect results!");
std::cout << (res ? "SUCCESS" : "FAILURE") << std::endl;
}
else if(std::is_same<CDataType, int8_t>::value)
{
res = test::check_err(c_device.mData, c_host.mData, "Error: incorrect results!");
std::cout << (res ? "SUCCESS" : "FAILURE") << std::endl;
}
return res;
}
};
template <typename DeviceGemmPtr_,
typename ALayout,
typename BLayout,
typename CLayout,
typename AElementwiseOperation,
typename BElementwiseOperation,
typename CElementwiseOperation>
struct TestGemmBF16
{
using BF16 = ck::bhalf_t;
auto PrepareGemmTensorBF16(const ck::gemm_util::GemmParams& params)
{
auto f_host_tensor_descriptor =
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
{
return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
std::vector<std::size_t>({stride, 1}));
}
else
{
return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
std::vector<std::size_t>({1, stride}));
}
};
// use fp32 host kernel to verify bf16 device kernel
Tensor<BF16> a_m_k_bf16(
f_host_tensor_descriptor(params.M, params.K, params.StrideA, ALayout{}));
Tensor<BF16> b_k_n_bf16(
f_host_tensor_descriptor(params.K, params.N, params.StrideB, BLayout{}));
Tensor<BF16> c_m_n_device_bf16(
f_host_tensor_descriptor(params.M, params.N, params.StrideC, CLayout{}));
Tensor<float> a_m_k_fp32(
f_host_tensor_descriptor(params.M, params.K, params.StrideA, ALayout{}));
Tensor<float> b_k_n_fp32(
f_host_tensor_descriptor(params.K, params.N, params.StrideB, BLayout{}));
Tensor<float> c_m_n_host_fp32(
f_host_tensor_descriptor(params.M, params.N, params.StrideC, CLayout{}));
Tensor<float> c_m_n_device_fp32(
f_host_tensor_descriptor(params.M, params.N, params.StrideC, CLayout{}));
a_m_k_bf16.GenerateTensorValue(GeneratorTensor_3<BF16>{-0.5, 0.5});
b_k_n_bf16.GenerateTensorValue(GeneratorTensor_3<BF16>{-0.5, 0.5});
bf16_to_f32_(a_m_k_bf16, a_m_k_fp32);
bf16_to_f32_(b_k_n_bf16, b_k_n_fp32);
return std::make_tuple(a_m_k_bf16,
b_k_n_bf16,
c_m_n_device_bf16,
a_m_k_fp32,
b_k_n_fp32,
c_m_n_host_fp32,
c_m_n_device_fp32);
}
auto operator()(DeviceGemmPtr_& gemmPtr)
{
// Arrange
ck::gemm_util::GemmParams params;
params.M = 1024;
params.N = 1024;
params.K = 1024;
params.StrideA = 1024;
params.StrideB = 1024;
params.StrideC = 1024;
auto host_tensors = PrepareGemmTensorBF16(params);
const Tensor<BF16>& a_bf16 = std::get<0>(host_tensors);
const Tensor<BF16>& b_bf16 = std::get<1>(host_tensors);
Tensor<BF16>& c_device_bf16 = std::get<2>(host_tensors);
Tensor<float>& a_fp32 = std::get<3>(host_tensors);
Tensor<float>& b_fp32 = std::get<4>(host_tensors);
Tensor<float>& c_host_fp32 = std::get<5>(host_tensors);
Tensor<float>& c_device_fp32 = std::get<6>(host_tensors);
auto a_element_op = AElementwiseOperation{};
auto b_element_op = BElementwiseOperation{};
auto c_element_op = CElementwiseOperation{};
// use fp32 host kernel to verify bf16 device kernel
using ReferenceGemmInstance =
ck::tensor_operation::host::ReferenceGemm<float,
float,
float,
AElementwiseOperation,
BElementwiseOperation,
CElementwiseOperation>;
ck::gemm_util::RunHostGEMM<ReferenceGemmInstance>(
a_fp32, b_fp32, c_host_fp32, a_element_op, b_element_op, c_element_op);
// Act
ck::gemm_util::RunDeviceGEMM(gemmPtr,
params,
a_bf16,
b_bf16,
c_device_bf16,
a_element_op,
b_element_op,
c_element_op);
bf16_to_f32_(c_device_bf16, c_device_fp32);
// Assert
bool res = test::check_err(
c_device_fp32.mData, c_host_fp32.mData, "Error: incorrect results!", 1e-2f, 1e-3f);
std::cout << (res ? "SUCCESS" : "FAILURE") << std::endl;
return res;
};
};
} // namespace gemm_util
} // namespace ck
#endif
include_directories(BEFORE
${PROJECT_SOURCE_DIR}/profiler/include
${PROJECT_SOURCE_DIR}/test/include
${PROJECT_SOURCE_DIR}/external/include/half
)
add_test_executable(test_gemm_reduce_fp16 gemm_reduce_fp16.cpp)
target_link_libraries(test_gemm_reduce_fp16 PRIVATE host_tensor)
target_link_libraries(test_gemm_reduce_fp16 PRIVATE device_gemm_reduce_instance)
#include <algorithm>
#include <cstdlib>
#include <half.hpp>
#include <iostream>
#include <numeric>
#include <tuple>
#include <vector>
#include "profile_gemm_reduce_impl.hpp"
int main()
{
using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;
int M = 512;
int N = 256;
int K = 128;
bool pass = true;
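// The last three arguments of each call below are the matrix strides (leading dimensions),
// presumably StrideA, StrideB, StrideC: K or M for A and N or K for B depending on
// row-/column-major storage, and N for the row-major C.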
pass = pass &&
ck::profiler::
profile_gemm_reduce_impl<ck::half_t, ck::half_t, ck::half_t, float, Row, Row, Row>(
true, 1, false, 1, M, N, K, K, N, N);
pass = pass &&
ck::profiler::
profile_gemm_reduce_impl<ck::half_t, ck::half_t, ck::half_t, float, Row, Col, Row>(
true, 1, false, 1, M, N, K, K, K, N);
pass = pass &&
ck::profiler::
profile_gemm_reduce_impl<ck::half_t, ck::half_t, ck::half_t, float, Col, Row, Row>(
true, 1, false, 1, M, N, K, M, N, N);
pass = pass &&
ck::profiler::
profile_gemm_reduce_impl<ck::half_t, ck::half_t, ck::half_t, float, Col, Col, Row>(
true, 1, false, 1, M, N, K, M, K, N);
if(pass)
{
std::cout << "test GEMM+Reduce fp16: Pass" << std::endl;
return 0;
}
else
{
std::cout << "test GEMM+Reduce fp16: Fail" << std::endl;
return -1;
}
}
@@ -12,7 +12,7 @@
 #include "tensor_layout.hpp"
 #include "device_gemm_xdl_splitk.hpp"
 
-enum GemmMatrixLayout
+enum struct GemmMatrixLayout
 {
     MK_KN_MN, // 0
     MK_NK_MN, // 1
@@ -59,7 +59,7 @@ static bool check_out(const Tensor<T>& ref, const Tensor<T>& result)
 
 struct gemmArgs
 {
-    int layout;
+    GemmMatrixLayout layout;
     int M;
     int N;
     int K;
@@ -216,13 +216,13 @@ int main(int argc, char* argv[])
     std::vector<gemmArgs> test_cases;
 
     if(argc == 1)
     {
-        test_cases = {{0, 3, 3, 3, 3, 3, 3, 1}};
+        test_cases = {{GemmMatrixLayout::MK_KN_MN, 3, 3, 3, 3, 3, 3, 1}};
         // JD: Populate with more and meaningful
         return 0;
     }
     else if(argc == 9)
     {
-        const int layout = static_cast<GemmMatrixLayout>(std::stoi(argv[1]));
+        const auto layout = static_cast<GemmMatrixLayout>(std::stoi(argv[1]));
         const int M = std::stoi(argv[2]);
         const int N = std::stoi(argv[3]);
add_test_executable(test_grouped_gemm_fp16 grouped_gemm_fp16.cpp)
target_link_libraries(test_grouped_gemm_fp16 PRIVATE host_tensor)
target_link_libraries(test_grouped_gemm_fp16 PRIVATE device_grouped_gemm_instance)
#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>
#include <stdlib.h>
#include <half.hpp>
#include "config.hpp"
#include "print.hpp"
#include "device.hpp"
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "host_gemm.hpp"
#include "device_tensor.hpp"
#include "device_grouped_gemm_xdl.hpp"
#include "element_wise_operation.hpp"
#include "reference_gemm.hpp"
#include "gemm_specialization.hpp"
#include "test_util.hpp"
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using DeviceGroupedGemmPtr_ = ck::tensor_operation::device::DeviceGroupedGemmPtr<
ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough>;
namespace ck {
namespace tensor_operation {
namespace device {
namespace device_grouped_gemm_instance {
void add_device_grouped_gemm_xdl_f16_f16_f16_mk_nk_mn_instances(
std::vector<DeviceGroupedGemmPtr_>&);
}
} // namespace device
} // namespace tensor_operation
} // namespace ck
namespace {
using ADataType = ck::half_t;
using BDataType = ck::half_t;
using CDataType = ck::half_t;
using AccDataType = float;
using ALayout = ck::tensor_layout::gemm::RowMajor;
using BLayout = ck::tensor_layout::gemm::ColumnMajor;
using CLayout = ck::tensor_layout::gemm::RowMajor;
template <typename T>
static bool check_err(const Tensor<T>& ref, const Tensor<T>& result)
{
float max_diff = 1e-2;
for(int i = 0; i < ref.mData.size(); ++i)
{
float diff = std::abs(double(ref.mData[i]) - double(result.mData[i]));
if(max_diff < diff)
{
std::cout << double(ref.mData[i]) << "," << double(result.mData[i]) << std::endl;
return false;
}
}
return true;
}
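// Note: unlike test_util.hpp's rtol/atol check, this helper uses a fixed absolute
// threshold of 1e-2 and stops at the first mismatching element.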
bool TestGroupedGemm(DeviceGroupedGemmPtr_& groupedGemmPtr)
{
int group_count = 4;
// GEMM shape
std::vector<ck::tensor_operation::device::GemmShape> gemm_shapes;
std::vector<const void*> p_a, p_b;
std::vector<void*> p_c;
gemm_shapes.reserve(group_count);
for(int i = 0; i < group_count; i++)
{
int M = 256 + 256 * i;
int N = 128 + 128 * i;
int K = 128 + 64 * i;
int AStride = std::is_same<ck::tensor_layout::gemm::RowMajor, ALayout>::value ? K : M;
int BStride = std::is_same<ck::tensor_layout::gemm::RowMajor, BLayout>::value ? N : K;
int CStride = std::is_same<ck::tensor_layout::gemm::RowMajor, CLayout>::value ? N : M;
gemm_shapes.push_back({M, N, K, AStride, BStride, CStride});
}
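// With group_count = 4 this builds GEMMs of (M, N, K) = (256, 128, 128), (512, 256, 192),
// (768, 384, 256) and (1024, 512, 320), with strides chosen for row-major A/C and
// column-major B.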
auto f_host_tensor_descriptor =
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
{
return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
std::vector<std::size_t>({stride, 1}));
}
else
{
return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
std::vector<std::size_t>({1, stride}));
}
};
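// row-major stores each row contiguously, hence strides {stride, 1}; column-major stores
// each column contiguously, hence strides {1, stride}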
std::vector<Tensor<ADataType>> a_tensors;
std::vector<Tensor<BDataType>> b_tensors;
std::vector<Tensor<CDataType>> c_host_tensors;
std::vector<Tensor<CDataType>> c_device_tensors;
a_tensors.reserve(group_count);
b_tensors.reserve(group_count);
c_host_tensors.reserve(group_count);
c_device_tensors.reserve(group_count);
using DeviceMemPtr = std::unique_ptr<DeviceMem>;
std::vector<DeviceMemPtr> a_tensors_device, b_tensors_device, c_tensors_device;
a_tensors_device.reserve(group_count);
b_tensors_device.reserve(group_count);
c_tensors_device.reserve(group_count);
for(int i = 0; i < gemm_shapes.size(); i++)
{
a_tensors.emplace_back(Tensor<ADataType>(f_host_tensor_descriptor(
gemm_shapes[i].M, gemm_shapes[i].K, gemm_shapes[i].StrideA, ALayout{})));
b_tensors.emplace_back(Tensor<BDataType>(f_host_tensor_descriptor(
gemm_shapes[i].K, gemm_shapes[i].N, gemm_shapes[i].StrideB, BLayout{})));
c_host_tensors.emplace_back(Tensor<CDataType>(f_host_tensor_descriptor(
gemm_shapes[i].M, gemm_shapes[i].N, gemm_shapes[i].StrideC, CLayout{})));
c_device_tensors.emplace_back(Tensor<CDataType>(f_host_tensor_descriptor(
gemm_shapes[i].M, gemm_shapes[i].N, gemm_shapes[i].StrideC, CLayout{})));
a_tensors[i].GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
b_tensors[i].GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
}
for(int i = 0; i < gemm_shapes.size(); i++)
{
a_tensors_device.emplace_back(
std::make_unique<DeviceMem>(sizeof(ADataType) * a_tensors[i].mDesc.GetElementSize()));
b_tensors_device.emplace_back(
std::make_unique<DeviceMem>(sizeof(BDataType) * b_tensors[i].mDesc.GetElementSize()));
c_tensors_device.emplace_back(std::make_unique<DeviceMem>(
sizeof(CDataType) * c_device_tensors[i].mDesc.GetElementSize()));
a_tensors_device[i]->ToDevice(a_tensors[i].mData.data());
b_tensors_device[i]->ToDevice(b_tensors[i].mData.data());
p_a.push_back(a_tensors_device[i]->GetDeviceBuffer());
p_b.push_back(b_tensors_device[i]->GetDeviceBuffer());
p_c.push_back(c_tensors_device[i]->GetDeviceBuffer());
}
auto a_element_op = PassThrough{};
auto b_element_op = PassThrough{};
auto c_element_op = PassThrough{};
// do GEMM
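// a single argument/invoker pair drives all groups: per-group device pointers are passed
// in p_a/p_b/p_c alongside the matching gemm_shapes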
auto invoker_ptr = groupedGemmPtr->MakeInvokerPointer();
auto argument_ptr = groupedGemmPtr->MakeArgumentPointer(
p_a, p_b, p_c, gemm_shapes, a_element_op, b_element_op, c_element_op);
invoker_ptr->Run(argument_ptr.get());
for(int i = 0; i < gemm_shapes.size(); i++)
{
c_tensors_device[i]->FromDevice(c_device_tensors[i].mData.data());
using ReferenceGemmInstance = ck::tensor_operation::host::
ReferenceGemm<ADataType, BDataType, CDataType, PassThrough, PassThrough, PassThrough>;
auto ref_gemm = ReferenceGemmInstance{};
auto ref_invoker = ref_gemm.MakeInvoker();
auto ref_argument = ref_gemm.MakeArgument(a_tensors[i],
b_tensors[i],
c_host_tensors[i],
a_element_op,
b_element_op,
c_element_op);
ref_invoker.Run(ref_argument);
bool res = check_err(c_device_tensors[i], c_host_tensors[i]);
std::cout << "group_id: " << i << (res ? " SUCCESS" : " FAILURE") << std::endl;
if(!res)
return false;
}
return true;
}
} // anonymous namespace
int main()
{
std::vector<DeviceGroupedGemmPtr_> groupedGemmPtrs;
ck::tensor_operation::device::device_grouped_gemm_instance::
add_device_grouped_gemm_xdl_f16_f16_f16_mk_nk_mn_instances(groupedGemmPtrs);
bool res = true;
for(auto& gemmPtr : groupedGemmPtrs)
{
res &= TestGroupedGemm(gemmPtr);
}
std::cout << "TestGroupedGemm ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
return res ? 0 : -1;
}
#ifndef TEST_CONV_UTIL_HPP
#define TEST_CONV_UTIL_HPP

#include <algorithm>
#include <cstdlib>
#include <numeric>
#include <random>
#include <stdexcept>
#include <tuple>
#include <type_traits>
#include <vector>

#include "config.hpp"
#include "conv_utils.hpp"
#include "device.hpp"
#include "device_tensor.hpp"
#include "device_convnd_fwd_xdl_nhwc_kyxc_nhwk.hpp"
#include "element_wise_operation.hpp"
#include "host_tensor.hpp"
#include "reference_conv_fwd.hpp"
#include "tensor_layout.hpp"
#include "test_util.hpp"

namespace {

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using InElementOp  = ck::tensor_operation::element_wise::PassThrough;
using WeiElementOp = ck::tensor_operation::element_wise::PassThrough;
using OutElementOp = ck::tensor_operation::element_wise::PassThrough;

static constexpr auto ConvFwdDefault =
    ck::tensor_operation::device::ConvolutionForwardSpecialization_t::Default;

template <ck::index_t SpatialDims, typename InDataType, typename WeiDataType, typename OutDataType>
using DeviceConvNDFwdInstance = ck::tensor_operation::device::
    DeviceConvNDFwdXdl_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_N_Ho_Wo_K<
        // clang-format off
        InDataType,         //
        WeiDataType,        //
        OutDataType,        //
        InDataType,         //
        InElementOp,        // Input Elementwise Operation
        WeiElementOp,       // Weights Elementwise Operation
        OutElementOp,       // Output Elementwise Operation
        ConvFwdDefault,     // ConvForwardSpecialization
        SpatialDims,        // SptialDims
        64,                 // BlockSize
        16,                 // MPerBlock
        16,                 // NPerBlock
        4,                  // K0PerBlock
        1,                  // K1
        16,                 // MPerXDL
        16,                 // NPerXDL
        1,                  // MXdlPerWave
        1,                  // NXdlPerWave
        S<1, 16, 1>,        // ABlockTransferThreadClusterLengths_K0_M_K1
        S<1, 0, 2>,         // ABlockTransferThreadClusterArrangeOrder
        S<1, 0, 2>,         // ABlockTransferSrcAccessOrder
        2,                  // ABlockTransferSrcVectorDim
        1,                  // ABlockTransferSrcScalarPerVector
        1,                  // ABlockTransferDstScalarPerVector_K1
        true,               // ABlockLdsAddExtraM
        S<1, 16, 1>,        // BBlockTransferThreadClusterLengths_K0_N_K1
        S<1, 0, 2>,         // BBlockTransferThreadClusterArrangeOrder
        S<1, 0, 2>,         // BBlockTransferSrcAccessOrder
        2,                  // BBlockTransferSrcVectorDim
        1,                  // BBlockTransferSrcScalarPerVector
        1,                  // BBlockTransferDstScalarPerVector_K1
        true,               // BBlockTransferAddExtraN
        7,                  // CThreadTransferSrcDstVectorDim
        1>;                 // CThreadTransferDstScalarPerVector
        // clang-format on

} // namespace

namespace test {
namespace conv {

using DeviceConvFwdNoOpPtr =
    ck::tensor_operation::device::DeviceConvFwdPtr<ck::tensor_operation::element_wise::PassThrough,
                                                   ck::tensor_operation::element_wise::PassThrough,
                                                   ck::tensor_operation::element_wise::PassThrough>;

template <typename InDataType  = float,
          typename WeiDataType = float,
          typename OutDataType = float,
          typename InLayout    = ck::tensor_layout::convolution::NHWC,
          typename WeiLayout   = ck::tensor_layout::convolution::KYXC,
          typename OutLayout   = ck::tensor_layout::convolution::NHWK>
auto GetHostTensors(const ck::conv_util::ConvParams& params, bool init = true)
{
    std::vector<std::size_t> input_dims{static_cast<std::size_t>(params.N),
                                        static_cast<std::size_t>(params.C)};
    input_dims.insert(std::end(input_dims),
                      std::begin(params.input_spatial_lengths),
                      std::end(params.input_spatial_lengths));

    std::vector<std::size_t> filter_dims{static_cast<std::size_t>(params.K),
                                         static_cast<std::size_t>(params.C)};
    filter_dims.insert(std::end(filter_dims),
                       std::begin(params.filter_spatial_lengths),
                       std::end(params.filter_spatial_lengths));

    const std::vector<ck::index_t>& output_spatial_lengths = params.GetOutputSpatialLengths();
    std::vector<std::size_t> output_dims{static_cast<std::size_t>(params.N),
                                         static_cast<std::size_t>(params.K)};
    output_dims.insert(std::end(output_dims),
                       std::begin(output_spatial_lengths),
                       std::end(output_spatial_lengths));

    Tensor<InDataType> input(ck::conv_util::GetHostTensorDescriptor(input_dims, InLayout{}));
    Tensor<WeiDataType> weights(ck::conv_util::GetHostTensorDescriptor(filter_dims, WeiLayout{}));
    Tensor<OutDataType> host_output(
        ck::conv_util::GetHostTensorDescriptor(output_dims, OutLayout{}));
    Tensor<OutDataType> device_output(
        ck::conv_util::GetHostTensorDescriptor(output_dims, OutLayout{}));

    if(init)
    {
        std::mt19937 gen(11939);
        if constexpr(std::is_same<InDataType, uint8_t>::value)
        {
            std::uniform_int_distribution<> dis(-5, 5);
            std::generate(
                input.begin(), input.end(), [&dis, &gen]() { return InDataType(dis(gen)); });
            std::generate(
                weights.begin(), weights.end(), [&dis, &gen]() { return WeiDataType(dis(gen)); });
        }
        else
        {
            std::uniform_real_distribution<> dis(0.f, 1.f);
            std::generate(
                input.begin(), input.end(), [&dis, &gen]() { return InDataType(dis(gen)); });
            std::generate(
                weights.begin(), weights.end(), [&dis, &gen]() { return WeiDataType(dis(gen)); });
        }
        std::fill(host_output.begin(), host_output.end(), OutDataType(0.f));
        std::fill(device_output.begin(), device_output.end(), OutDataType(0.f));
    }
    return std::make_tuple(input, weights, host_output, device_output);
}

template <ck::index_t NDim,
          typename InDataType  = float,
          typename WeiDataType = float,
          typename OutDataType = float>
void RunReferenceConv(const ck::conv_util::ConvParams& params,
                      const Tensor<InDataType>& input,
                      const Tensor<WeiDataType>& weights,
                      Tensor<OutDataType>& output)
{
    auto ref_conv = ck::tensor_operation::host::ReferenceConvFwd<InDataType,
                                                                 WeiDataType,
                                                                 OutDataType,
                                                                 InElementOp,
                                                                 WeiElementOp,
                                                                 OutElementOp,
                                                                 NDim>();
    auto ref_invoker  = ref_conv.MakeInvoker();
    auto ref_argument = ref_conv.MakeArgument(input,
                                              weights,
                                              output,
                                              params.conv_filter_strides,
                                              params.conv_filter_dilations,
                                              params.input_left_pads,
                                              params.input_right_pads,
                                              InElementOp{},
                                              WeiElementOp{},
                                              OutElementOp{});

    ref_invoker.Run(ref_argument);
}

template <ck::index_t NDim,
          typename InDataType  = float,
          typename WeiDataType = float,
          typename OutDataType = float>
void RunConv(const ck::conv_util::ConvParams& params,
             const Tensor<InDataType>& input,
             const Tensor<WeiDataType>& weights,
             Tensor<OutDataType>& output)
{
    DeviceMem in_device_buf(sizeof(InDataType) * input.mDesc.GetElementSpace());
    DeviceMem wei_device_buf(sizeof(WeiDataType) * weights.mDesc.GetElementSpace());
    DeviceMem out_device_buf(sizeof(OutDataType) * output.mDesc.GetElementSpace());

    in_device_buf.ToDevice(input.mData.data());
    wei_device_buf.ToDevice(weights.mData.data());
    const std::vector<ck::index_t>& output_spatial_lengths = params.GetOutputSpatialLengths();

    auto conv     = DeviceConvNDFwdInstance<NDim, InDataType, WeiDataType, OutDataType>();
    auto invoker  = conv.MakeInvoker();
    auto argument = conv.MakeArgument(static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
                                      static_cast<WeiDataType*>(wei_device_buf.GetDeviceBuffer()),
                                      static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
                                      params.N,
                                      params.K,
                                      params.C,
                                      params.input_spatial_lengths,
                                      params.filter_spatial_lengths,
                                      output_spatial_lengths,
                                      params.conv_filter_strides,
                                      params.conv_filter_dilations,
                                      params.input_left_pads,
                                      params.input_right_pads,
                                      InElementOp{},
                                      WeiElementOp{},
                                      OutElementOp{});

    if(!conv.IsSupportedArgument(argument))
    {
        throw std::runtime_error(
            "Error! device_conv with the specified compilation parameters does "
            "not support this Conv problem");
    }

    invoker.Run(argument);
    out_device_buf.FromDevice(output.mData.data());
}

template <ck::index_t NDim,
          typename InDataType  = float,
          typename WeiDataType = float,
          typename OutDataType = float>
bool RunConvInstances(const ck::conv_util::ConvParams& params,
                      const std::vector<DeviceConvFwdNoOpPtr>& conv_ptrs,
                      const Tensor<InDataType>& input,
                      const Tensor<WeiDataType>& weights,
                      Tensor<OutDataType>& output,
                      const Tensor<OutDataType>& host_output)
{
    DeviceMem in_device_buf(sizeof(InDataType) * input.mDesc.GetElementSpace());
    DeviceMem wei_device_buf(sizeof(WeiDataType) * weights.mDesc.GetElementSpace());
    DeviceMem out_device_buf(sizeof(OutDataType) * output.mDesc.GetElementSpace());

    in_device_buf.ToDevice(input.mData.data());
    wei_device_buf.ToDevice(weights.mData.data());
    const std::vector<ck::index_t>& output_spatial_lengths = params.GetOutputSpatialLengths();

    bool res{true};
    for(auto& conv_ptr : conv_ptrs)
    {
        auto invoker  = conv_ptr->MakeInvokerPointer();
        auto argument = conv_ptr->MakeArgumentPointer(
            static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
            static_cast<WeiDataType*>(wei_device_buf.GetDeviceBuffer()),
            static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
            params.N,
            params.K,
            params.C,
            params.input_spatial_lengths,
            params.filter_spatial_lengths,
            output_spatial_lengths,
            params.conv_filter_strides,
            params.conv_filter_dilations,
            params.input_left_pads,
            params.input_right_pads,
            InElementOp{},
            WeiElementOp{},
            OutElementOp{});

        if(conv_ptr->IsSupportedArgument(argument.get()))
        {
            float atol{1e-5f};
            float rtol{1e-4f};
            if constexpr(std::is_same_v<InDataType, ck::half_t>)
            {
                atol = 1e-4f;
                rtol = 2.5e-3f;
            }
            invoker->Run(argument.get());
            out_device_buf.FromDevice(output.mData.data());
            res = res &&
                  test::check_err(
                      output.mData, host_output.mData, "Error: incorrect results!", atol, rtol);
            hipGetErrorString(
                hipMemset(out_device_buf.GetDeviceBuffer(), 0, out_device_buf.mMemSize));
        }
    }
    return res;
}

} // namespace conv
} // namespace test
#endif
 #ifndef TEST_UTIL_HPP
 #define TEST_UTIL_HPP

+#include <algorithm>
 #include <cmath>
 #include <cstdlib>
 #include <iostream>
 #include <iomanip>
+#include <iterator>
 #include <limits>
 #include <type_traits>
 #include <vector>

-namespace test_util {
+#include "data_type.hpp"
+
+namespace test {

 template <typename T>
-typename std::enable_if<std::is_floating_point<T>::value, bool>::type
+typename std::enable_if<std::is_floating_point<T>::value && !std::is_same<T, ck::half_t>::value,
+                        bool>::type
 check_err(const std::vector<T>& out,
           const std::vector<T>& ref,
           const std::string& msg,
-          T rtol = static_cast<T>(1e-5),
-          T atol = static_cast<T>(1e-8))
+          double rtol = 1e-5,
+          double atol = 1e-8)
 {
     if(out.size() != ref.size())
     {
@@ -28,9 +33,9 @@ check_err(const std::vector<T>& out,
     }

     bool res{true};
     int err_count = 0;
-    T err = 0;
-    T max_err = std::numeric_limits<T>::min();
+    double err = 0;
+    double max_err = std::numeric_limits<double>::min();
     for(std::size_t i = 0; i < ref.size(); ++i)
     {
         err = std::abs(out[i] - ref[i]);
@@ -41,7 +46,96 @@ check_err(const std::vector<T>& out,
             if(err_count < 5)
             {
                 std::cout << std::setw(12) << std::setprecision(7) << "out[" << i << "] != ref["
-                          << i << "]: " << out[i] << "!=" << ref[i] << std::endl
+                          << i << "]: " << out[i] << " != " << ref[i] << std::endl
+                          << msg << std::endl;
+            }
+            res = false;
+        }
+    }
+    if(!res)
+    {
+        std::cout << std::setw(12) << std::setprecision(7) << "max err: " << max_err << std::endl;
+    }
+    return res;
+}
+
+template <typename T>
+typename std::enable_if<std::is_same<T, ck::bhalf_t>::value || std::is_same<T, ck::half_t>::value,
+                        bool>::type
+check_err(const std::vector<T>& out,
+          const std::vector<T>& ref,
+          const std::string& msg,
+          double rtol = 1e-5,
+          double atol = 1e-8)
+{
+    if(out.size() != ref.size())
+    {
+        std::cout << "out.size() != ref.size(), :" << out.size() << " != " << ref.size()
+                  << std::endl
+                  << msg << std::endl;
+        return false;
+    }
+
+    bool res{true};
+    int err_count = 0;
+    double err = 0;
+    double max_err = ck::type_convert<float>(ck::NumericLimits<T>::Min());
+    for(std::size_t i = 0; i < ref.size(); ++i)
+    {
+        float o = ck::type_convert<float>(out[i]);
+        float r = ck::type_convert<float>(ref[i]);
+        err = std::abs(o - r);
+        if(err > atol + rtol * std::abs(r) || !std::isfinite(o) || !std::isfinite(r))
+        {
+            max_err = err > max_err ? err : max_err;
+            err_count++;
+            if(err_count < 5)
+            {
+                std::cout << std::setw(12) << std::setprecision(7) << "out[" << i << "] != ref["
+                          << i << "]: " << o << " != " << r << std::endl
+                          << msg << std::endl;
+            }
+            res = false;
+        }
+    }
+    if(!res)
+    {
+        std::cout << std::setw(12) << std::setprecision(7) << "max err: " << max_err << std::endl;
+    }
+    return res;
+}
+
+bool check_err(const std::vector<ck::half_t>& out,
+               const std::vector<ck::half_t>& ref,
+               const std::string& msg,
+               ck::half_t rtol = static_cast<ck::half_t>(1e-3f),
+               ck::half_t atol = static_cast<ck::half_t>(1e-3f))
+{
+    if(out.size() != ref.size())
+    {
+        std::cout << "out.size() != ref.size(), :" << out.size() << " != " << ref.size()
+                  << std::endl
+                  << msg << std::endl;
+        return false;
+    }
+
+    bool res{true};
+    int err_count = 0;
+    double err = 0;
+    double max_err = std::numeric_limits<ck::half_t>::min();
+    for(std::size_t i = 0; i < ref.size(); ++i)
+    {
+        double out_ = double(out[i]);
+        double ref_ = double(ref[i]);
+        err = std::abs(out_ - ref_);
+        if(err > atol + rtol * std::abs(ref_) || !std::isfinite(out_) || !std::isfinite(ref_))
+        {
+            max_err = err > max_err ? err : max_err;
+            err_count++;
+            if(err_count < 5)
+            {
+                std::cout << std::setw(12) << std::setprecision(7) << "out[" << i << "] != ref["
+                          << i << "]: " << out_ << "!=" << ref_ << std::endl
                           << msg << std::endl;
             }
             res = false;
@@ -55,8 +149,13 @@ check_err(const std::vector<T>& out,
 }

 template <typename T>
-typename std::enable_if<std::is_integral<T>::value, bool>::type check_err(
-    const std::vector<T>& out, const std::vector<T>& ref, const std::string& msg, T = 0, T = 0)
+typename std::enable_if<std::is_integral<T>::value && !std::is_same<T, ck::bhalf_t>::value,
+                        bool>::type
+check_err(const std::vector<T>& out,
+          const std::vector<T>& ref,
+          const std::string& msg,
+          double = 0,
+          double = 0)
 {
     if(out.size() != ref.size())
     {
@@ -70,7 +169,7 @@ typename std::enable_if<std::is_integral<T>::value, bool>::type check_err(
     {
         if(out[i] != ref[i])
         {
-            std::cout << "out[" << i << "] != ref[" << i << "]: " << out[i] << "!=" << ref[i]
+            std::cout << "out[" << i << "] != ref[" << i << "]: " << out[i] << " != " << ref[i]
                       << std::endl
                       << msg << std::endl;
             return false;
@@ -79,6 +178,13 @@ typename std::enable_if<std::is_integral<T>::value, bool>::type check_err(
     return true;
 }

-} // namespace test_util
+} // namespace test
+
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const std::vector<T>& v)
+{
+    std::copy(std::begin(v), std::end(v), std::ostream_iterator<T>(os, " "));
+    return os;
+}

 #endif
add_test_executable(test_reduce_no_index reduce_no_index.cpp)
add_test_executable(test_reduce_with_index reduce_with_index.cpp)
target_link_libraries(test_reduce_no_index PRIVATE host_tensor)
target_link_libraries(test_reduce_no_index PRIVATE device_reduce_instance)
target_link_libraries(test_reduce_with_index PRIVATE host_tensor)
target_link_libraries(test_reduce_with_index PRIVATE device_reduce_instance)
#include "getopt.h"
#include "device_reduce_instance.hpp"
#include "reduction_enums.hpp"
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "host_reduction.hpp"
#include "test_util.hpp"
#include "reduce_util.hpp"
using namespace ck;
namespace {
template <index_t Rank, index_t NumReduceDim>
static inline std::vector<int> get_invariant_dims(const std::vector<int>& reduceDims)
{
assert(NumReduceDim == reduceDims.size());
int reduceFlag = 0;
// flag the bits for the reduceDims
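// e.g. Rank = 4, reduceDims = {1, 3} -> reduceFlag = 0b1010, so dims 0 and 2 stay invariant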
for(int i = 0; i < NumReduceDim; i++)
{
reduceFlag |= 1 << reduceDims[i];
};
std::vector<int> invariantDims;
// collect invariant dimensions
for(int i = 0; i < Rank; i++)
if((reduceFlag & (1 << i)) == 0)
{
invariantDims.push_back(i);
};
return invariantDims;
};
// map the data type used by the GPU kernels to the corresponding type used by the host codes
template <typename InType>
struct type_mapping
{
using OutType = InType;
};
template <>
struct type_mapping<ck::half_t>
{
using OutType = half_float::half;
};
constexpr int Rank = 4;
constexpr ReduceTensorOp_t ReduceOpId = ReduceTensorOp_t::AVG;
constexpr NanPropagation_t NanOpt = NanPropagation_t::PROPAGATE_NAN;
constexpr bool PropagateNan = false;
constexpr ReduceTensorIndices_t IndicesOpt = ReduceTensorIndices_t::NO_INDICES;
constexpr bool NeedIndices = false;
template <typename InDataType,
typename AccDataType,
typename OutDataType,
int Rank,
int NumReduceDim>
bool test_reduce_no_index_impl(int init_method,
const std::vector<size_t>& inLengths,
const std::vector<int>& reduceDims,
float alpha,
float beta)
{
using namespace ck::tensor_operation::device;
using namespace ck::tensor_operation::device::device_reduce_instance;
using namespace ck::host_reduce;
constexpr bool out_support_atomic_add = std::is_same<OutDataType, float>::value;
constexpr bool op_support_atomic_add = true;
constexpr bool use_atomic_add = (out_support_atomic_add && op_support_atomic_add);
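// Instance selection: threadwise and blockwise kernels are always candidates; if the
// output type supports atomic add (float here), a single-pass multiblock atomic-add
// variant is added, otherwise a two-pass multiblock partial-reduce + blockwise
// second-call pipeline is used instead.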
Tensor<InDataType> in(inLengths);
std::vector<size_t> outLengths;
const auto invariantDims = get_invariant_dims<Rank, NumReduceDim>(reduceDims);
if(reduceDims.size() == Rank)
outLengths.push_back(1);
else
for(auto dim : invariantDims)
outLengths.push_back(inLengths[dim]);
Tensor<OutDataType> out_ref(outLengths);
Tensor<OutDataType> out(outLengths);
// only used when the OutDataType is bhalf_t
Tensor<float> out_ref_fp32(outLengths);
Tensor<float> out_fp32(outLengths);
auto inStrides = in.mDesc.GetStrides();
auto outStrides = out.mDesc.GetStrides();
size_t invariant_total_length = out.mDesc.GetElementSize();
size_t reduce_total_length = in.mDesc.GetElementSize() / invariant_total_length;
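// invariant_total_length = number of output elements; reduce_total_length = how many
// input elements are folded into each output element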
std::size_t num_thread = std::thread::hardware_concurrency();
switch(init_method)
{
case 0: break;
case 1:
in.GenerateTensorValue(GeneratorTensor_1<InDataType>{1}, num_thread);
if(beta != 0.0f)
out_ref.GenerateTensorValue(GeneratorTensor_1<InDataType>{1}, num_thread);
break;
case 2:
in.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5}, num_thread);
if(beta != 0.0f)
out_ref.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5}, num_thread);
break;
default:
in.GenerateTensorValue(GeneratorTensor_3<InDataType>{-5.0, 5.0}, num_thread);
if(beta != 0.0f)
out_ref.GenerateTensorValue(GeneratorTensor_3<InDataType>{-5.0, 5.0}, num_thread);
}
if(beta != 0.0f)
for(size_t i = 0; i < out_ref.mDesc.GetElementSpace(); i++)
out.mData[i] = out_ref.mData[i];
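// when beta != 0 the prior output contents take part in the final result, so out is
// seeded with the reference values here (presumably the usual alpha/beta blending of the
// reduced value with the existing output)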
// these buffers are usually provided by the user application
DeviceMem in_dev(sizeof(InDataType) * in.mDesc.GetElementSpace());
DeviceMem out_dev(sizeof(OutDataType) * out.mDesc.GetElementSpace());
in_dev.ToDevice(in.mData.data());
if(beta != 0.0f)
out_dev.ToDevice(out.mData.data());
using InElementwiseOperation_0 =
typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::InElementwiseOperation;
using AccElementwiseOperation_0 =
typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::
AccElementwiseOperation;
using InElementwiseOperation_1 =
typename reduce_unary_operator<AccDataType, ReduceOpId, true, false>::
InElementwiseOperation;
using AccElementwiseOperation_1 =
typename reduce_unary_operator<AccDataType, ReduceOpId, true, false>::
AccElementwiseOperation;
using InElementwiseOperation_2 =
typename reduce_unary_operator<AccDataType, ReduceOpId, false, true>::
InElementwiseOperation;
using AccElementwiseOperation_2 =
typename reduce_unary_operator<AccDataType, ReduceOpId, false, true>::
AccElementwiseOperation;
using DeviceReduceInstPtr0 =
DeviceReducePtr<InElementwiseOperation_0, AccElementwiseOperation_0>;
using DeviceReduceInstPtr1 =
DeviceReducePtr<InElementwiseOperation_1, AccElementwiseOperation_1>;
using DeviceReduceInstPtr2 =
DeviceReducePtr<InElementwiseOperation_2, AccElementwiseOperation_2>;
std::vector<DeviceReduceInstPtr0> reduce0_ptrs;
std::vector<DeviceReduceInstPtr1> reduce1_ptrs;
std::vector<DeviceReduceInstPtr2> reduce2_ptrs;
add_device_reduce_instance_threadwise<InDataType,
AccDataType,
OutDataType,
Rank,
NumReduceDim,
ReduceOpId,
NanOpt,
IndicesOpt>(reduce0_ptrs);
add_device_reduce_instance_blockwise<InDataType,
AccDataType,
OutDataType,
Rank,
NumReduceDim,
ReduceOpId,
NanOpt,
IndicesOpt>(reduce0_ptrs);
if constexpr(use_atomic_add)
{
add_device_reduce_instance_multiblock_atomic_add<InDataType,
AccDataType,
OutDataType,
Rank,
NumReduceDim,
ReduceOpId,
NanOpt,
IndicesOpt>(reduce0_ptrs);
}
else
{
add_device_reduce_instance_multiblock_partial_reduce<InDataType,
AccDataType,
OutDataType,
Rank,
NumReduceDim,
ReduceOpId,
NanOpt,
IndicesOpt>(reduce1_ptrs);
};
// used for secondary reduction
if constexpr(!use_atomic_add)
{
add_device_reduce_instance_blockwise_second_call<AccDataType,
AccDataType,
OutDataType,
Rank,
NumReduceDim,
ReduceOpId,
NanOpt,
IndicesOpt>(reduce2_ptrs);
};
if(reduce0_ptrs.empty() && reduce1_ptrs.empty())
{
throw std::runtime_error("Wrong! No device REDUCE instance found");
};
bool result = true;
using HostInDataType = typename type_mapping<InDataType>::OutType;
using HostOutDataType = typename type_mapping<OutDataType>::OutType;
using HostAccDataType = typename type_mapping<AccDataType>::OutType;
ReductionHost<HostInDataType,
HostAccDataType,
HostOutDataType,
ReduceOpId,
Rank,
NumReduceDim,
PropagateNan,
NeedIndices>
hostReduce(in.mDesc, out_ref.mDesc, invariantDims, reduceDims);
hostReduce.Run(alpha,
reinterpret_cast<const HostInDataType*>(in.mData.data()),
beta,
reinterpret_cast<HostOutDataType*>(out_ref.mData.data()),
nullptr);
const auto i_inLengths = to_int_vector(inLengths);
const auto i_inStrides = to_int_vector(inStrides);
const auto i_outLengths = to_int_vector(outLengths);
const auto i_outStrides = to_int_vector(outStrides);
for(auto& reduce_ptr : reduce0_ptrs)
{
auto wsSizeInBytes = reduce_ptr->GetWorkspaceSizeInBytes(i_inLengths, reduceDims);
DeviceMem ws_dev(wsSizeInBytes);
InElementwiseOperation_0 in_elementwise_op_0(static_cast<int32_t>(reduce_total_length));
AccElementwiseOperation_0 acc_elementwise_op_0(static_cast<int32_t>(reduce_total_length));
auto argument_ptr = reduce_ptr->MakeArgumentPointer(i_inLengths,
i_inStrides,
i_outLengths,
i_outStrides,
reduceDims,
alpha,
beta,
in_dev.GetDeviceBuffer(),
out_dev.GetDeviceBuffer(),
nullptr,
ws_dev.GetDeviceBuffer(),
in_elementwise_op_0,
acc_elementwise_op_0);
if(!reduce_ptr->IsSupportedArgument(argument_ptr.get()))
continue;
auto invoker_ptr = reduce_ptr->MakeInvokerPointer();
(void)invoker_ptr->Run(argument_ptr.get());
out_dev.FromDevice(out.mData.data());
bool single_result = true;
if constexpr(std::is_same<OutDataType, ck::half_t>::value ||
std::is_same<OutDataType, ck::bhalf_t>::value)
{
reduce_util::to_f32_vector(out, out_fp32);
reduce_util::to_f32_vector(out_ref, out_ref_fp32);
single_result = test::check_err(
out_fp32.mData, out_ref_fp32.mData, "Error: incorrect data result!");
}
else
{
single_result =
test::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
};
if(!single_result)
{
std::cout << "Fail Info: " << reduce_ptr->GetTypeString() << std::endl;
result = false;
}
};
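// Two-pass path: each multiblock partial-reduce instance writes per-block partial results
// into ws_dev; a blockwise "second call" instance then reduces that workspace (reshaped to
// the 2-D lengths reported by GetWorkspace2dLengths) into the final output.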
for(auto& reduce_ptr : reduce1_ptrs)
{
auto wsSizeInBytes = reduce_ptr->GetWorkspaceSizeInBytes(i_inLengths, reduceDims);
DeviceMem ws_dev(wsSizeInBytes);
InElementwiseOperation_1 in_elementwise_op_1(static_cast<int32_t>(reduce_total_length));
AccElementwiseOperation_1 acc_elementwise_op_1(static_cast<int32_t>(reduce_total_length));
auto argument_ptr = reduce_ptr->MakeArgumentPointer(i_inLengths,
i_inStrides,
i_outLengths,
i_outStrides,
reduceDims,
alpha,
beta,
in_dev.GetDeviceBuffer(),
out_dev.GetDeviceBuffer(),
nullptr,
ws_dev.GetDeviceBuffer(),
in_elementwise_op_1,
acc_elementwise_op_1);
if(!reduce_ptr->IsSupportedArgument(argument_ptr.get()))
continue;
auto invoker_ptr = reduce_ptr->MakeInvokerPointer();
(void)invoker_ptr->Run(argument_ptr.get());
std::vector<int> inLengths2 = reduce_ptr->GetWorkspace2dLengths(argument_ptr.get());
std::vector<int> inStrides2{inLengths2[1], 1};
for(auto& reduce2_ptr : reduce2_ptrs)
{
InElementwiseOperation_2 in_elementwise_op_2(static_cast<int32_t>(reduce_total_length));
AccElementwiseOperation_2 acc_elementwise_op_2(
static_cast<int32_t>(reduce_total_length));
auto argument2_ptr = reduce2_ptr->MakeArgumentPointer(inLengths2,
inStrides2,
i_outLengths,
i_outStrides,
reduceDims,
alpha,
beta,
ws_dev.GetDeviceBuffer(),
out_dev.GetDeviceBuffer(),
nullptr,
ws_dev.GetDeviceBuffer(),
in_elementwise_op_2,
acc_elementwise_op_2);
if(!reduce2_ptr->IsSupportedArgument(argument2_ptr.get()))
continue;
std::string reduce2_name = reduce2_ptr->GetTypeString();
auto invoker2_ptr = reduce2_ptr->MakeInvokerPointer();
(void)invoker2_ptr->Run(argument2_ptr.get());
out_dev.FromDevice(out.mData.data());
bool single_result = true;
if constexpr(std::is_same<OutDataType, ck::half_t>::value ||
std::is_same<OutDataType, ck::bhalf_t>::value)
{
reduce_util::to_f32_vector(out, out_fp32);
reduce_util::to_f32_vector(out_ref, out_ref_fp32);
single_result = test::check_err(
out_fp32.mData, out_ref_fp32.mData, "Error: incorrect data result!");
}
else
{
single_result =
test::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
};
if(!single_result)
{
std::cout << "Fail Info: " << reduce_ptr->GetTypeString() << " => "
<< reduce2_ptr->GetTypeString() << std::endl;
result = false;
}
};
};
return (result);
};
} // anonymous namespace
static struct option long_options[] = {{"inLengths", required_argument, nullptr, 'D'},
{"reduceDimensions", required_argument, nullptr, 'R'},
{"scales", required_argument, nullptr, 'S'},
{"help", no_argument, nullptr, '?'},
{nullptr, 0, nullptr, 0}};
class SimpleAppArgs
{
template <typename T>
static T getSingleValueFromString(const std::string& valueStr)
{
std::istringstream iss(valueStr);
T ret;
iss >> ret;
return (ret);
};
template <typename T>
static std::vector<T> getTypeValuesFromString(const char* cstr_values)
{
std::string valuesStr(cstr_values);
std::vector<T> values;
std::size_t pos = 0;
std::size_t new_pos;
new_pos = valuesStr.find(',', pos);
while(new_pos != std::string::npos)
{
const std::string sliceStr = valuesStr.substr(pos, new_pos - pos);
T val = getSingleValueFromString<T>(sliceStr);
values.push_back(val);
pos = new_pos + 1;
new_pos = valuesStr.find(',', pos);
};
std::string sliceStr = valuesStr.substr(pos);
T val = getSingleValueFromString<T>(sliceStr);
values.push_back(val);
return (values);
};
private:
int option_index = 0;
public:
std::vector<size_t> inLengths;
std::vector<int> reduceDims;
std::vector<float> scales;
int data_type;
int init_method = 1;
public:
void show_usage(const char* cmd)
{
std::cout << "Usage of " << cmd << std::endl;
std::cout << "--inLengths or -D, comma separated list of input tensor dimension lengths "
"(only 4-d tensor supported)"
<< std::endl;
std::cout << "--reduceDimensions or -R, comma separated list of dimension indices to reduce "
"(only 1 or 3 or 4 dimensions supported)"
<< std::endl;
std::cout << "--scales or -S, comma separated two float values for alpha and beta"
<< std::endl;
std::cout << "Arg1 -- data type (0: fp32, 1: fp16, 3: int8, 5: bf16)" << std::endl;
std::cout << "Arg2 -- init method (0=no init, 1=single integer value, 2=random integers "
"in a small range, 3=random decimal values)"
<< std::endl;
};
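// Illustrative invocation (argument meanings as documented above):
//   ./test_reduce_no_index -D 64,4,280,80 -R 0,1,2 -S 1.0,0.0 1 2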
int processArgs(int argc, char* argv[])
{
unsigned int ch;
while(1)
{
ch = getopt_long(argc, argv, "D:R:S:", long_options, &option_index);
if(ch == -1)
break;
switch(ch)
{
case 'D':
if(!optarg)
throw std::runtime_error("Invalid option format!");
inLengths = getTypeValuesFromString<size_t>(optarg);
break;
case 'R':
if(!optarg)
throw std::runtime_error("Invalid option format!");
reduceDims = getTypeValuesFromString<int>(optarg);
break;
case 'S':
if(!optarg)
throw std::runtime_error("Invalid option format!");
scales = getTypeValuesFromString<float>(optarg);
break;
case '?':
if(std::string(long_options[option_index].name) == "help")
{
show_usage(argv[0]);
return (-1);
};
break;
default: show_usage(argv[0]); return (-1);
};
};
if(optind + 2 > argc)
throw std::runtime_error("Invalid cmd-line arguments, more arguments are needed!");
data_type = std::atoi(argv[optind++]);
init_method = std::atoi(argv[optind]);
if(scales.empty())
{
scales.push_back(1.0f);
scales.push_back(0.0f);
};
if(inLengths.size() != 4 ||
(reduceDims.size() != 1 && reduceDims.size() != 3 && reduceDims.size() != 4))
return (-1);
if(data_type != 0 && data_type != 1 && data_type != 3 && data_type != 5)
return (-1);
return (0);
};
};
bool test_reduce_no_index(int data_type,
int init_method,
std::vector<int> reduceDims,
std::vector<size_t> inLengths,
float alpha,
float beta)
{
bool result = true;
if(data_type == 0)
{
switch(reduceDims.size())
{
case 1:
result = test_reduce_no_index_impl<float, float, float, Rank, 1>(
init_method, inLengths, reduceDims, alpha, beta);
break;
case 3:
result = test_reduce_no_index_impl<float, float, float, Rank, 3>(
init_method, inLengths, reduceDims, alpha, beta);
break;
case 4:
result = test_reduce_no_index_impl<float, float, float, Rank, 4>(
init_method, inLengths, reduceDims, alpha, beta);
break;
};
}
else if(data_type == 1)
{
switch(reduceDims.size())
{
case 1:
result = test_reduce_no_index_impl<ck::half_t, float, ck::half_t, Rank, 1>(
init_method, inLengths, reduceDims, alpha, beta);
break;
case 3:
result = test_reduce_no_index_impl<ck::half_t, float, ck::half_t, Rank, 3>(
init_method, inLengths, reduceDims, alpha, beta);
break;
case 4:
result = test_reduce_no_index_impl<ck::half_t, float, ck::half_t, Rank, 4>(
init_method, inLengths, reduceDims, alpha, beta);
break;
};
}
else if(data_type == 3)
{
switch(reduceDims.size())
{
case 1:
result = test_reduce_no_index_impl<int8_t, int32_t, int8_t, Rank, 1>(
init_method, inLengths, reduceDims, alpha, beta);
break;
case 3:
result = test_reduce_no_index_impl<int8_t, int32_t, int8_t, Rank, 3>(
init_method, inLengths, reduceDims, alpha, beta);
break;
case 4:
result = test_reduce_no_index_impl<int8_t, int32_t, int8_t, Rank, 4>(
init_method, inLengths, reduceDims, alpha, beta);
break;
};
}
else if(data_type == 5)
{
switch(reduceDims.size())
{
case 1:
result = test_reduce_no_index_impl<ck::bhalf_t, float, ck::bhalf_t, Rank, 1>(
init_method, inLengths, reduceDims, alpha, beta);
break;
case 3:
result = test_reduce_no_index_impl<ck::bhalf_t, float, ck::bhalf_t, Rank, 3>(
init_method, inLengths, reduceDims, alpha, beta);
break;
case 4:
result = test_reduce_no_index_impl<ck::bhalf_t, float, ck::bhalf_t, Rank, 4>(
init_method, inLengths, reduceDims, alpha, beta);
break;
};
}
return (result);
};
int main(int argc, char* argv[])
{
SimpleAppArgs args;
bool result = true;
if(argc == 1)
{
int data_type = 1;
int init_method = 2;
std::vector<size_t> inLengths{64, 4, 280, 80};
std::vector<std::vector<int>> v_reduceDims{
{0, 1, 2, 3}, {0, 1, 2}, {1, 2, 3}, {0, 1, 3}, {0, 2, 3}, {0}, {1}, {2}, {3}};
for(auto& reduceDims : v_reduceDims)
result = result && test_reduce_no_index(
data_type, init_method, reduceDims, inLengths, 1.0f, 0.0f);
}
else
{
if(args.processArgs(argc, argv) < 0)
{
throw std::runtime_error(
"Invalid input arguments, test_reduce_no_index could not be executed!");
};
result = test_reduce_no_index(args.data_type,
args.init_method,
args.reduceDims,
args.inLengths,
args.scales[0],
args.scales[1]);
}
std::cout << "test_reduce_no_index ..... " << (result ? "SUCCESS" : "FAILURE") << std::endl;
return (result ? 0 : -1);
}
#ifndef REDUCE_UTILS_HPP
#define REDUCE_UTILS_HPP
#include "data_type.hpp"
namespace ck {
namespace reduce_util {
template <typename T>
void to_f32_vector(const Tensor<T>& src, Tensor<float>& dst)
{
for(int i = 0; i < src.mData.size(); ++i)
dst.mData[i] = type_convert<float>(src.mData[i]);
}
} // namespace reduce_util
} // namespace ck
#endif
#include "getopt.h"
#include "device_reduce_instance.hpp"
#include "reduction_enums.hpp"
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "host_reduction.hpp"
#include "test_util.hpp"
#include "reduce_util.hpp"
using namespace ck;
namespace {
template <index_t Rank, index_t NumReduceDim>
static inline std::vector<int> get_invariant_dims(const std::vector<int>& reduceDims)
{
assert(NumReduceDim == reduceDims.size());
int reduceFlag = 0;
// flag the bits for the reduceDims
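// e.g. Rank = 4, reduceDims = {1, 3} -> reduceFlag = 0b1010, so dims 0 and 2 stay invariant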
for(int i = 0; i < NumReduceDim; i++)
{
reduceFlag |= 1 << reduceDims[i];
};
std::vector<int> invariantDims;
// collect invariant dimensions
for(int i = 0; i < Rank; i++)
if((reduceFlag & (1 << i)) == 0)
{
invariantDims.push_back(i);
};
return invariantDims;
};
// map the data type used by the GPU kernels to the corresponding type used by the host codes
template <typename InType>
struct type_mapping
{
using OutType = InType;
};
template <>
struct type_mapping<ck::half_t>
{
using OutType = half_float::half;
};
constexpr int Rank = 4;
constexpr ReduceTensorOp_t ReduceOpId = ReduceTensorOp_t::AMAX;
constexpr NanPropagation_t NanOpt = NanPropagation_t::PROPAGATE_NAN;
constexpr bool PropagateNan = false;
constexpr ReduceTensorIndices_t IndicesOpt = ReduceTensorIndices_t::FLATTENED_INDICES;
constexpr bool NeedIndices = true;
template <typename InDataType,
typename AccDataType,
typename OutDataType,
int Rank,
int NumReduceDim>
bool test_reduce_with_index_impl(int init_method,
const std::vector<size_t>& inLengths,
const std::vector<int>& reduceDims,
float alpha,
float beta)
{
using namespace ck::tensor_operation::device;
using namespace ck::tensor_operation::device::device_reduce_instance;
using namespace ck::host_reduce;
Tensor<InDataType> in(inLengths);
std::vector<size_t> outLengths;
const auto invariantDims = get_invariant_dims<Rank, NumReduceDim>(reduceDims);
if(reduceDims.size() == Rank)
outLengths.push_back(1);
else
for(auto dim : invariantDims)
outLengths.push_back(inLengths[dim]);
Tensor<OutDataType> out_ref(outLengths);
Tensor<OutDataType> out(outLengths);
Tensor<int32_t> out_indices_ref(outLengths);
Tensor<int32_t> out_indices(outLengths);
// only used when the OutDataType is bhalf_t
Tensor<float> out_ref_fp32(outLengths);
Tensor<float> out_fp32(outLengths);
auto inStrides = in.mDesc.GetStrides();
auto outStrides = out.mDesc.GetStrides();
size_t invariant_total_length = out.mDesc.GetElementSize();
size_t reduce_total_length = in.mDesc.GetElementSize() / invariant_total_length;
std::size_t num_thread = std::thread::hardware_concurrency();
switch(init_method)
{
case 0: break;
case 1:
in.GenerateTensorValue(GeneratorTensor_1<InDataType>{1}, num_thread);
if(beta != 0.0f)
out_ref.GenerateTensorValue(GeneratorTensor_1<InDataType>{1}, num_thread);
break;
case 2:
in.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5}, num_thread);
if(beta != 0.0f)
out_ref.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5}, num_thread);
break;
default:
in.GenerateTensorValue(GeneratorTensor_3<InDataType>{-5.0, 5.0}, num_thread);
if(beta != 0.0f)
out_ref.GenerateTensorValue(GeneratorTensor_3<InDataType>{-5.0, 5.0}, num_thread);
}
if(beta != 0.0f)
for(size_t i = 0; i < out_ref.mDesc.GetElementSpace(); i++)
out.mData[i] = out_ref.mData[i];
// these buffers are usually provided by the user application
DeviceMem in_dev(sizeof(InDataType) * in.mDesc.GetElementSpace());
DeviceMem out_dev(sizeof(OutDataType) * out.mDesc.GetElementSpace());
in_dev.ToDevice(in.mData.data());
if(beta != 0.0f)
out_dev.ToDevice(out.mData.data());
size_t indicesSizeInBytes = NeedIndices ? out.mDesc.GetElementSize() * sizeof(int) : 0;
DeviceMem out_indices_dev(indicesSizeInBytes);
using InElementwiseOperation_0 =
typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::InElementwiseOperation;
using AccElementwiseOperation_0 =
typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::
AccElementwiseOperation;
using InElementwiseOperation_1 =
typename reduce_unary_operator<AccDataType, ReduceOpId, true, false>::
InElementwiseOperation;
using AccElementwiseOperation_1 =
typename reduce_unary_operator<AccDataType, ReduceOpId, true, false>::
AccElementwiseOperation;
using InElementwiseOperation_2 =
typename reduce_unary_operator<AccDataType, ReduceOpId, false, true>::
InElementwiseOperation;
using AccElementwiseOperation_2 =
typename reduce_unary_operator<AccDataType, ReduceOpId, false, true>::
AccElementwiseOperation;
using DeviceReduceInstPtr0 =
DeviceReducePtr<InElementwiseOperation_0, AccElementwiseOperation_0>;
using DeviceReduceInstPtr1 =
DeviceReducePtr<InElementwiseOperation_1, AccElementwiseOperation_1>;
using DeviceReduceInstPtr2 =
DeviceReducePtr<InElementwiseOperation_2, AccElementwiseOperation_2>;
std::vector<DeviceReduceInstPtr0> reduce0_ptrs;
std::vector<DeviceReduceInstPtr1> reduce1_ptrs;
std::vector<DeviceReduceInstPtr2> reduce2_ptrs;
add_device_reduce_instance_threadwise<InDataType,
AccDataType,
OutDataType,
Rank,
NumReduceDim,
ReduceOpId,
NanOpt,
IndicesOpt>(reduce0_ptrs);
add_device_reduce_instance_blockwise<InDataType,
AccDataType,
OutDataType,
Rank,
NumReduceDim,
ReduceOpId,
NanOpt,
IndicesOpt>(reduce0_ptrs);
add_device_reduce_instance_multiblock_partial_reduce<InDataType,
AccDataType,
OutDataType,
Rank,
NumReduceDim,
ReduceOpId,
NanOpt,
IndicesOpt>(reduce1_ptrs);
add_device_reduce_instance_blockwise_second_call<AccDataType,
AccDataType,
OutDataType,
Rank,
NumReduceDim,
ReduceOpId,
NanOpt,
IndicesOpt>(reduce2_ptrs);
if(reduce0_ptrs.empty() && reduce1_ptrs.empty())
{
throw std::runtime_error("Wrong! No device REDUCE instance found");
};
bool result = true;
using HostInDataType = typename type_mapping<InDataType>::OutType;
using HostOutDataType = typename type_mapping<OutDataType>::OutType;
using HostAccDataType = typename type_mapping<AccDataType>::OutType;
ReductionHost<HostInDataType,
HostAccDataType,
HostOutDataType,
ReduceOpId,
Rank,
NumReduceDim,
PropagateNan,
NeedIndices>
hostReduce(in.mDesc, out_ref.mDesc, invariantDims, reduceDims);
hostReduce.Run(alpha,
reinterpret_cast<const HostInDataType*>(in.mData.data()),
beta,
reinterpret_cast<HostOutDataType*>(out_ref.mData.data()),
out_indices_ref.mData.data());
const auto i_inLengths = to_int_vector(inLengths);
const auto i_inStrides = to_int_vector(inStrides);
const auto i_outLengths = to_int_vector(outLengths);
const auto i_outStrides = to_int_vector(outStrides);
for(auto& reduce_ptr : reduce0_ptrs)
{
auto wsSizeInBytes = reduce_ptr->GetWorkspaceSizeInBytes(i_inLengths, reduceDims);
DeviceMem ws_dev(wsSizeInBytes);
InElementwiseOperation_0 in_elementwise_op_0(static_cast<int32_t>(reduce_total_length));
AccElementwiseOperation_0 acc_elementwise_op_0(static_cast<int32_t>(reduce_total_length));
auto argument_ptr = reduce_ptr->MakeArgumentPointer(i_inLengths,
i_inStrides,
i_outLengths,
i_outStrides,
reduceDims,
alpha,
beta,
in_dev.GetDeviceBuffer(),
out_dev.GetDeviceBuffer(),
out_indices_dev.GetDeviceBuffer(),
ws_dev.GetDeviceBuffer(),
in_elementwise_op_0,
acc_elementwise_op_0);
if(!reduce_ptr->IsSupportedArgument(argument_ptr.get()))
continue;
auto invoker_ptr = reduce_ptr->MakeInvokerPointer();
(void)invoker_ptr->Run(argument_ptr.get());
out_dev.FromDevice(out.mData.data());
bool single_result = true;
if constexpr(std::is_same<OutDataType, ck::half_t>::value ||
std::is_same<OutDataType, ck::bhalf_t>::value)
{
reduce_util::to_f32_vector(out, out_fp32);
reduce_util::to_f32_vector(out_ref, out_ref_fp32);
single_result = test::check_err(
out_fp32.mData, out_ref_fp32.mData, "Error: incorrect data result!");
}
else
{
single_result =
test::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
};
if(NeedIndices)
{
out_indices_dev.FromDevice(out_indices.mData.data());
single_result = single_result && test::check_err(out_indices_ref.mData,
out_indices.mData,
"Error: incorrect index result!");
};
if(!single_result)
{
std::cout << "Fail Info: " << reduce_ptr->GetTypeString() << std::endl;
result = false;
}
};
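// Two-pass path for instances that cannot finish in one kernel: the partial-reduce output
// in ws_dev is reduced again by a blockwise second-call instance, and both the data and
// (since NeedIndices is true) the flattened index results are read back and checked
// against the host reference.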
for(auto& reduce_ptr : reduce1_ptrs)
{
auto wsSizeInBytes = reduce_ptr->GetWorkspaceSizeInBytes(i_inLengths, reduceDims);
DeviceMem ws_dev(wsSizeInBytes);
InElementwiseOperation_1 in_elementwise_op_1(static_cast<int32_t>(reduce_total_length));
AccElementwiseOperation_1 acc_elementwise_op_1(static_cast<int32_t>(reduce_total_length));
auto argument_ptr = reduce_ptr->MakeArgumentPointer(i_inLengths,
i_inStrides,
i_outLengths,
i_outStrides,
reduceDims,
alpha,
beta,
in_dev.GetDeviceBuffer(),
out_dev.GetDeviceBuffer(),
out_indices_dev.GetDeviceBuffer(),
ws_dev.GetDeviceBuffer(),
in_elementwise_op_1,
acc_elementwise_op_1);
if(!reduce_ptr->IsSupportedArgument(argument_ptr.get()))
continue;
std::string reduce_name = reduce_ptr->GetTypeString();
auto invoker_ptr = reduce_ptr->MakeInvokerPointer();
(void)invoker_ptr->Run(argument_ptr.get());
std::vector<int> inLengths2 = reduce_ptr->GetWorkspace2dLengths(argument_ptr.get());
std::vector<int> inStrides2{inLengths2[1], 1};
for(auto& reduce2_ptr : reduce2_ptrs)
{
InElementwiseOperation_2 in_elementwise_op_2(static_cast<int32_t>(reduce_total_length));
AccElementwiseOperation_2 acc_elementwise_op_2(
static_cast<int32_t>(reduce_total_length));
auto argument2_ptr = reduce2_ptr->MakeArgumentPointer(inLengths2,
inStrides2,
i_outLengths,
i_outStrides,
reduceDims,
alpha,
beta,
ws_dev.GetDeviceBuffer(),
out_dev.GetDeviceBuffer(),
out_indices_dev.GetDeviceBuffer(),
ws_dev.GetDeviceBuffer(),
in_elementwise_op_2,
acc_elementwise_op_2);
if(!reduce2_ptr->IsSupportedArgument(argument2_ptr.get()))
continue;
std::string reduce2_name = reduce2_ptr->GetTypeString();
auto invoker2_ptr = reduce2_ptr->MakeInvokerPointer();
(void)invoker2_ptr->Run(argument2_ptr.get());
out_dev.FromDevice(out.mData.data());
bool single_result = true;
if constexpr(std::is_same<OutDataType, ck::half_t>::value ||
std::is_same<OutDataType, ck::bhalf_t>::value)
{
reduce_util::to_f32_vector(out, out_fp32);
reduce_util::to_f32_vector(out_ref, out_ref_fp32);
single_result = test::check_err(
out_fp32.mData, out_ref_fp32.mData, "Error: incorrect data result!");
}
else
{
single_result =
test::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
};
if(NeedIndices)
{
out_indices_dev.FromDevice(out_indices.mData.data());
single_result = single_result && test::check_err(out_indices_ref.mData,
out_indices.mData,
"Error: incorrect index result!");
};
if(!single_result)
{
std::cout << "Fail Info: " << reduce_ptr->GetTypeString() << " => "
<< reduce2_ptr->GetTypeString() << std::endl;
result = false;
}
};
};
return (result);
};
} // anonymous namespace
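// Command-line options: -D/--inLengths, -R/--reduceDimensions and -S/--scales each
// take a comma-separated value list; the remaining positional arguments select the
// data type and the initialization method.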
static struct option long_options[] = {{"inLengths", required_argument, nullptr, 'D'},
{"reduceDimensions", required_argument, nullptr, 'R'},
{"scales", required_argument, nullptr, 'S'},
{"help", no_argument, nullptr, '?'},
{nullptr, 0, nullptr, 0}};
class SimpleAppArgs
{
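    // Parse a single value of type T from a string via stream extraction.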
template <typename T>
static T getSingleValueFromString(const std::string& valueStr)
{
std::istringstream iss(valueStr);
T ret;
iss >> ret;
return (ret);
};
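    // Split a comma-separated string into a vector of values of type T.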
template <typename T>
static std::vector<T> getTypeValuesFromString(const char* cstr_values)
{
std::string valuesStr(cstr_values);
std::vector<T> values;
std::size_t pos = 0;
std::size_t new_pos;
new_pos = valuesStr.find(',', pos);
while(new_pos != std::string::npos)
{
const std::string sliceStr = valuesStr.substr(pos, new_pos - pos);
T val = getSingleValueFromString<T>(sliceStr);
values.push_back(val);
pos = new_pos + 1;
new_pos = valuesStr.find(',', pos);
};
std::string sliceStr = valuesStr.substr(pos);
T val = getSingleValueFromString<T>(sliceStr);
values.push_back(val);
return (values);
};
private:
int option_index = 0;
public:
std::vector<size_t> inLengths;
std::vector<int> reduceDims;
std::vector<float> scales;
int data_type;
int init_method = 1;
public:
void show_usage(const char* cmd)
{
std::cout << "Usage of " << cmd << std::endl;
std::cout << "--inLengths or -D, comma separated list of input tensor dimension lengths "
"(only 4-d tensor supported)"
<< std::endl;
std::cout << "--reduceDimensions or -R comma seperated list of dimension indexes to reduce "
"(only 1 or 3 or 4 dimensions supported)"
<< std::endl;
std::cout << "--scales or -S, comma separated two float values for alpha and beta"
<< std::endl;
std::cout << "Arg1 -- data type (1: fp32, 3: int8, 5: bp16, 6: fp64)" << std::endl;
std::cout << "Arg2 -- init method(0=no init, 1=single integer value, 2=scope integer "
"value, 3=decimal value)"
<< std::endl;
};
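    // Example invocation (hypothetical binary name; the two trailing positional
    // arguments are the data type and the init method):
    //   ./test_reduce_with_index -D 64,4,280,80 -R 0,1,2 -S 1.0,0.0 1 2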
int processArgs(int argc, char* argv[])
{
int ch; // getopt_long returns int (-1 once option parsing is finished)
while(1)
{
ch = getopt_long(argc, argv, "D:R:S:", long_options, &option_index);
if(ch == -1)
break;
switch(ch)
{
case 'D':
if(!optarg)
throw std::runtime_error("Invalid option format!");
inLengths = getTypeValuesFromString<size_t>(optarg);
break;
case 'R':
if(!optarg)
throw std::runtime_error("Invalid option format!");
reduceDims = getTypeValuesFromString<int>(optarg);
break;
case 'S':
if(!optarg)
throw std::runtime_error("Invalid option format!");
scales = getTypeValuesFromString<float>(optarg);
break;
case '?':
if(std::string(long_options[option_index].name) == "help")
{
show_usage(argv[0]);
return (-1);
};
break;
default: show_usage(argv[0]); return (-1);
};
};
if(optind + 2 > argc)
throw std::runtime_error("Invalid cmd-line arguments, more argumetns are needed!");
data_type = std::atoi(argv[optind++]);
init_method = std::atoi(argv[optind]);
if(scales.empty())
{
scales.push_back(1.0f);
scales.push_back(0.0f);
};
if(inLengths.size() != 4 ||
(reduceDims.size() != 1 && reduceDims.size() != 3 && reduceDims.size() != 4))
return (-1);
if(data_type != 0 && data_type != 1 && data_type != 3 && data_type != 5)
return (-1);
return (0);
};
};
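// Dispatch to the templated test implementation based on the data-type code and
// the number of dimensions being reduced (1, 3 or 4 dimensions of a rank-4 tensor).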
bool test_reduce_with_index(int data_type,
int init_method,
std::vector<int> reduceDims,
std::vector<size_t> inLengths,
float alpha,
float beta)
{
bool result = true;
if(data_type == 0)
{
switch(reduceDims.size())
{
case 1:
result = test_reduce_with_index_impl<float, float, float, Rank, 1>(
init_method, inLengths, reduceDims, alpha, beta);
break;
case 3:
result = test_reduce_with_index_impl<float, float, float, Rank, 3>(
init_method, inLengths, reduceDims, alpha, beta);
break;
case 4:
result = test_reduce_with_index_impl<float, float, float, Rank, 4>(
init_method, inLengths, reduceDims, alpha, beta);
break;
};
}
else if(data_type == 1)
{
switch(reduceDims.size())
{
case 1:
result = test_reduce_with_index_impl<ck::half_t, ck::half_t, ck::half_t, Rank, 1>(
init_method, inLengths, reduceDims, alpha, beta);
break;
case 3:
result = test_reduce_with_index_impl<ck::half_t, ck::half_t, ck::half_t, Rank, 3>(
init_method, inLengths, reduceDims, alpha, beta);
break;
case 4:
result = test_reduce_with_index_impl<ck::half_t, ck::half_t, ck::half_t, Rank, 4>(
init_method, inLengths, reduceDims, alpha, beta);
break;
};
}
else if(data_type == 3)
{
switch(reduceDims.size())
{
case 1:
result = test_reduce_with_index_impl<int8_t, int8_t, int8_t, Rank, 1>(
init_method, inLengths, reduceDims, alpha, beta);
break;
case 3:
result = test_reduce_with_index_impl<int8_t, int8_t, int8_t, Rank, 3>(
init_method, inLengths, reduceDims, alpha, beta);
break;
case 4:
result = test_reduce_with_index_impl<int8_t, int8_t, int8_t, Rank, 4>(
init_method, inLengths, reduceDims, alpha, beta);
break;
};
}
else if(data_type == 5)
{
switch(reduceDims.size())
{
case 1:
result = test_reduce_with_index_impl<ck::bhalf_t, float, ck::bhalf_t, Rank, 1>(
init_method, inLengths, reduceDims, alpha, beta);
break;
case 3:
result = test_reduce_with_index_impl<ck::bhalf_t, float, ck::bhalf_t, Rank, 3>(
init_method, inLengths, reduceDims, alpha, beta);
break;
case 4:
result = test_reduce_with_index_impl<ck::bhalf_t, float, ck::bhalf_t, Rank, 4>(
init_method, inLengths, reduceDims, alpha, beta);
break;
};
}
return (result);
};
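// With no command-line arguments, run the fp16 test on a fixed 64x4x280x80 input
// over a representative set of reduce-dimension combinations; otherwise test the
// configuration parsed from the command line.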
int main(int argc, char* argv[])
{
SimpleAppArgs args;
bool result = true;
if(argc == 1)
{
int data_type = 1;
int init_method = 2;
std::vector<size_t> inLengths{64, 4, 280, 80};
std::vector<std::vector<int>> v_reduceDims{
{0, 1, 2, 3}, {0, 1, 2}, {1, 2, 3}, {0, 1, 3}, {0, 2, 3}, {0}, {1}, {2}, {3}};
for(auto& reduceDims : v_reduceDims)
result = result && test_reduce_with_index(
data_type, init_method, reduceDims, inLengths, 1.0f, 0.0f);
}
else
{
if(args.processArgs(argc, argv) < 0)
{
throw std::runtime_error(
"Invalid input arguments, test_reduce_with_index could not be executed!");
};
result = test_reduce_with_index(args.data_type,
args.init_method,
args.reduceDims,
args.inLengths,
args.scales[0],
args.scales[1]);
}
std::cout << "test_reduce_with_index ..... " << (result ? "SUCCESS" : "FAILURE") << std::endl;
return (result ? 0 : -1);
}