Commit 07a673c6 authored by carlushuang's avatar carlushuang
Browse files

Merge remote-tracking branch 'origin/develop' into cpu_avx2

parents c0f698d5 ac0d8066
#ifndef GEMM_UTILS_HPP
#define GEMM_UTILS_HPP
#include "check_err.hpp"
#include "config.hpp"
#include "device.hpp"
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "reference_gemm.hpp"
#include "tensor_layout.hpp"
#include "test_util.hpp"
namespace ck {
namespace gemm_util {
......@@ -202,20 +202,17 @@ struct TestGemm
bool res = false;
if(std::is_same<CDataType, float>::value)
{
res = test::check_err(c_device.mData, c_host.mData, "Error: incorrect results!");
res = ck::utils::check_err(c_device.mData, c_host.mData);
std::cout << (res ? "SUCCESS" : "FAILURE") << std::endl;
}
else if(std::is_same<CDataType, ck::half_t>::value)
{
res = test::check_err(c_device.mData, c_host.mData, "Error: incorrect results!");
res = ck::utils::check_err(c_device.mData, c_host.mData);
std::cout << (res ? "SUCCESS" : "FAILURE") << std::endl;
}
else if(std::is_same<CDataType, int8_t>::value)
{
res = test::check_err(c_device.mData, c_host.mData, "Error: incorrect results!");
res = ck::utils::check_err(c_device.mData, c_host.mData);
std::cout << (res ? "SUCCESS" : "FAILURE") << std::endl;
}
......@@ -330,9 +327,8 @@ struct TestGemmBF16
bf16_to_f32_(c_device_bf16, c_device_fp32);
// Assert
bool res = test::check_err(
bool res = ck::utils::check_err(
c_device_fp32.mData, c_host_fp32.mData, "Error: incorrect results!", 1e-2f, 1e-3f);
std::cout << (res ? "SUCCESS" : "FAILURE") << std::endl;
return res;
......
......@@ -4,6 +4,8 @@
#include <cstdlib>
#include <stdlib.h>
#include <half.hpp>
#include "check_err.hpp"
#include "config.hpp"
#include "print.hpp"
#include "device.hpp"
......@@ -15,7 +17,6 @@
#include "element_wise_operation.hpp"
#include "reference_gemm.hpp"
#include "gemm_specialization.hpp"
#include "test_util.hpp"
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
......@@ -46,24 +47,6 @@ using ALayout = ck::tensor_layout::gemm::RowMajor;
using BLayout = ck::tensor_layout::gemm::ColumnMajor;
using CLayout = ck::tensor_layout::gemm::RowMajor;
/// Element-wise comparison of two tensors of equal length.
/// Returns false (and prints the first offending pair, with its index)
/// when any absolute difference exceeds a fixed 1e-2 tolerance.
template <typename T>
static bool check_err(const Tensor<T>& ref, const Tensor<T>& result)
{
    // Tolerance kept as double to avoid float/double mixing in the compare.
    constexpr double max_diff = 1e-2;
    for(std::size_t i = 0; i < ref.mData.size(); ++i)
    {
        // Compute the difference in double precision regardless of T.
        const double diff = std::abs(double(ref.mData[i]) - double(result.mData[i]));
        if(diff > max_diff)
        {
            // Report the element index alongside both values so a failure is
            // actionable without re-running under a debugger.
            std::cout << "[" << i << "] " << double(ref.mData[i]) << ","
                      << double(result.mData[i]) << std::endl;
            return false;
        }
    }
    return true;
}
bool TestGroupedGemm(DeviceGroupedGemmPtr_& groupedGemmPtr)
{
int group_count = rand() % 10 + 1;
......@@ -188,7 +171,7 @@ bool TestGroupedGemm(DeviceGroupedGemmPtr_& groupedGemmPtr)
ref_invoker.Run(ref_argument);
bool res = check_err(c_device_tensors[i], c_host_tensors[i]);
bool res = ck::utils::check_err(c_host_tensors[i].mData, c_device_tensors[i].mData);
std::cout << "group_id: " << i << (res ? " SUCCESS" : " FAILURE") << std::endl;
......
#ifndef TEST_CONV_UTIL_HPP
#define TEST_CONV_UTIL_HPP
#include <algorithm>
#include <cstdlib>
#include <numeric>
#include <random>
#include <stdexcept>
#include <tuple>
#include <type_traits>
#include <vector>
#include "config.hpp"
#include "conv_utils.hpp"
#include "device.hpp"
#include "device_tensor.hpp"
#include "device_convnd_fwd_xdl_nhwc_kyxc_nhwk.hpp"
#include "element_wise_operation.hpp"
#include "host_tensor.hpp"
#include "reference_conv_fwd.hpp"
#include "tensor_layout.hpp"
#include "test_util.hpp"
// File-local test fixtures: identity element-wise operations and a minimal
// DeviceConvNDFwdXdl instance configuration used by the helpers below.
namespace {
template <ck::index_t... Is>
using S = ck::Sequence<Is...>;
// All element-wise operations in these tests are identity (PassThrough).
using InElementOp = ck::tensor_operation::element_wise::PassThrough;
using WeiElementOp = ck::tensor_operation::element_wise::PassThrough;
using OutElementOp = ck::tensor_operation::element_wise::PassThrough;
// No forward-convolution specialization is applied.
static constexpr auto ConvFwdDefault =
ck::tensor_operation::device::ConvolutionForwardSpecialization_t::Default;
// Small instance (64-thread block, 16x16 tile) so the tests stay fast.
template <ck::index_t SpatialDims, typename InDataType, typename WeiDataType, typename OutDataType>
using DeviceConvNDFwdInstance = ck::tensor_operation::device::
DeviceConvNDFwdXdl_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_N_Ho_Wo_K<
// clang-format off
InDataType, // InDataType
WeiDataType, // WeiDataType
OutDataType, // OutDataType
InDataType, // presumably AccDataType — confirm against the device op template
InElementOp, // Input Elementwise Operation
WeiElementOp, // Weights Elementwise Operation
OutElementOp, // Output Elementwise Operation
ConvFwdDefault, // ConvForwardSpecialization
SpatialDims, // SpatialDims (number of spatial dimensions)
64, // BlockSize
16, // MPerBlock
16, // NPerBlock
4, // K0PerBlock
1, // K1
16, // MPerXDL
16, // NPerXDL
1, // MXdlPerWave
1, // NXdlPerWave
S<1, 16, 1>, // ABlockTransferThreadClusterLengths_K0_M_K1
S<1, 0, 2>, // ABlockTransferThreadClusterArrangeOrder
S<1, 0, 2>, // ABlockTransferSrcAccessOrder
2, // ABlockTransferSrcVectorDim
1, // ABlockTransferSrcScalarPerVector
1, // ABlockTransferDstScalarPerVector_K1
true, // ABlockLdsAddExtraM
S<1, 16, 1>, // BBlockTransferThreadClusterLengths_K0_N_K1
S<1, 0, 2>, // BBlockTransferThreadClusterArrangeOrder
S<1, 0, 2>, // BBlockTransferSrcAccessOrder
2, // BBlockTransferSrcVectorDim
1, // BBlockTransferSrcScalarPerVector
1, // BBlockTransferDstScalarPerVector_K1
true, // BBlockTransferAddExtraN
7, // CThreadTransferSrcDstVectorDim
1>; // CThreadTransferDstScalarPerVector
// clang-format on
} // namespace
namespace test {
namespace conv {
// Opaque pointer type for forward-convolution device instances whose
// input/weights/output element-wise operations are all identity (PassThrough).
using DeviceConvFwdNoOpPtr =
ck::tensor_operation::device::DeviceConvFwdPtr<ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough,
ck::tensor_operation::element_wise::PassThrough>;
// Builds the four host tensors (input, weights, host-side output,
// device-side output) for the convolution problem described by `params`.
// When `init` is true, input/weights are filled with deterministic
// pseudo-random values and both outputs are zeroed.
template <typename InDataType  = float,
          typename WeiDataType = float,
          typename OutDataType = float,
          typename InLayout    = ck::tensor_layout::convolution::NHWC,
          typename WeiLayout   = ck::tensor_layout::convolution::KYXC,
          typename OutLayout   = ck::tensor_layout::convolution::NHWK>
auto GetHostTensors(const ck::conv_util::ConvParams& params, bool init = true)
{
    // Helper: [d0, d1] followed by the spatial lengths.
    auto make_dims = [](std::size_t d0, std::size_t d1, const std::vector<ck::index_t>& spatial) {
        std::vector<std::size_t> dims{d0, d1};
        dims.insert(std::end(dims), std::begin(spatial), std::end(spatial));
        return dims;
    };

    const auto input_dims  = make_dims(static_cast<std::size_t>(params.N),
                                       static_cast<std::size_t>(params.C),
                                       params.input_spatial_lengths);
    const auto filter_dims = make_dims(static_cast<std::size_t>(params.K),
                                       static_cast<std::size_t>(params.C),
                                       params.filter_spatial_lengths);
    const auto output_dims = make_dims(static_cast<std::size_t>(params.N),
                                       static_cast<std::size_t>(params.K),
                                       params.GetOutputSpatialLengths());

    Tensor<InDataType> input(ck::conv_util::GetHostTensorDescriptor(input_dims, InLayout{}));
    Tensor<WeiDataType> weights(ck::conv_util::GetHostTensorDescriptor(filter_dims, WeiLayout{}));
    Tensor<OutDataType> host_output(
        ck::conv_util::GetHostTensorDescriptor(output_dims, OutLayout{}));
    Tensor<OutDataType> device_output(
        ck::conv_util::GetHostTensorDescriptor(output_dims, OutLayout{}));

    if(init)
    {
        // Fixed seed keeps test inputs reproducible between runs.
        std::mt19937 gen(11939);
        if constexpr(std::is_same<InDataType, uint8_t>::value)
        {
            // NOTE(review): negative samples wrap around when converted to an
            // unsigned InDataType — confirm this is intended.
            std::uniform_int_distribution<> dis(-5, 5);
            std::generate(
                input.begin(), input.end(), [&dis, &gen]() { return InDataType(dis(gen)); });
            std::generate(
                weights.begin(), weights.end(), [&dis, &gen]() { return WeiDataType(dis(gen)); });
        }
        else
        {
            std::uniform_real_distribution<> dis(0.f, 1.f);
            std::generate(
                input.begin(), input.end(), [&dis, &gen]() { return InDataType(dis(gen)); });
            std::generate(
                weights.begin(), weights.end(), [&dis, &gen]() { return WeiDataType(dis(gen)); });
        }
        std::fill(host_output.begin(), host_output.end(), OutDataType(0.f));
        std::fill(device_output.begin(), device_output.end(), OutDataType(0.f));
    }
    return std::make_tuple(input, weights, host_output, device_output);
}
// Computes the host-side (reference) forward convolution for `params`,
// writing the result into `output`.
template <ck::index_t NDim,
          typename InDataType  = float,
          typename WeiDataType = float,
          typename OutDataType = float>
void RunReferenceConv(const ck::conv_util::ConvParams& params,
                      const Tensor<InDataType>& input,
                      const Tensor<WeiDataType>& weights,
                      Tensor<OutDataType>& output)
{
    using RefConv = ck::tensor_operation::host::ReferenceConvFwd<InDataType,
                                                                 WeiDataType,
                                                                 OutDataType,
                                                                 InElementOp,
                                                                 WeiElementOp,
                                                                 OutElementOp,
                                                                 NDim>;

    auto conv     = RefConv{};
    auto argument = conv.MakeArgument(input,
                                      weights,
                                      output,
                                      params.conv_filter_strides,
                                      params.conv_filter_dilations,
                                      params.input_left_pads,
                                      params.input_right_pads,
                                      InElementOp{},
                                      WeiElementOp{},
                                      OutElementOp{});
    conv.MakeInvoker().Run(argument);
}
// Runs the file-local DeviceConvNDFwdInstance on device buffers copied from
// the given host tensors and copies the result back into `output`.
// Throws std::runtime_error if the instance does not support the problem.
template <ck::index_t NDim,
          typename InDataType  = float,
          typename WeiDataType = float,
          typename OutDataType = float>
void RunConv(const ck::conv_util::ConvParams& params,
             const Tensor<InDataType>& input,
             const Tensor<WeiDataType>& weights,
             Tensor<OutDataType>& output)
{
    DeviceMem dev_in(sizeof(InDataType) * input.mDesc.GetElementSpace());
    DeviceMem dev_wei(sizeof(WeiDataType) * weights.mDesc.GetElementSpace());
    DeviceMem dev_out(sizeof(OutDataType) * output.mDesc.GetElementSpace());

    dev_in.ToDevice(input.mData.data());
    dev_wei.ToDevice(weights.mData.data());

    const std::vector<ck::index_t>& out_spatial = params.GetOutputSpatialLengths();

    auto conv = DeviceConvNDFwdInstance<NDim, InDataType, WeiDataType, OutDataType>();
    auto arg  = conv.MakeArgument(static_cast<InDataType*>(dev_in.GetDeviceBuffer()),
                                  static_cast<WeiDataType*>(dev_wei.GetDeviceBuffer()),
                                  static_cast<OutDataType*>(dev_out.GetDeviceBuffer()),
                                  params.N,
                                  params.K,
                                  params.C,
                                  params.input_spatial_lengths,
                                  params.filter_spatial_lengths,
                                  out_spatial,
                                  params.conv_filter_strides,
                                  params.conv_filter_dilations,
                                  params.input_left_pads,
                                  params.input_right_pads,
                                  InElementOp{},
                                  WeiElementOp{},
                                  OutElementOp{});

    if(!conv.IsSupportedArgument(arg))
    {
        throw std::runtime_error(
            "Error! device_conv with the specified compilation parameters does "
            "not support this Conv problem");
    }

    conv.MakeInvoker().Run(arg);
    dev_out.FromDevice(output.mData.data());
}
// Runs every convolution instance in `conv_ptrs` on the problem described by
// `params`, comparing each device result against `host_output`. Returns true
// only when every *supported* instance produces matching results; unsupported
// instances are silently skipped.
template <ck::index_t NDim,
typename InDataType = float,
typename WeiDataType = float,
typename OutDataType = float>
bool RunConvInstances(const ck::conv_util::ConvParams& params,
const std::vector<DeviceConvFwdNoOpPtr>& conv_ptrs,
const Tensor<InDataType>& input,
const Tensor<WeiDataType>& weights,
Tensor<OutDataType>& output,
const Tensor<OutDataType>& host_output)
{
// Device buffers are allocated once and reused across all instances.
DeviceMem in_device_buf(sizeof(InDataType) * input.mDesc.GetElementSpace());
DeviceMem wei_device_buf(sizeof(WeiDataType) * weights.mDesc.GetElementSpace());
DeviceMem out_device_buf(sizeof(OutDataType) * output.mDesc.GetElementSpace());
in_device_buf.ToDevice(input.mData.data());
wei_device_buf.ToDevice(weights.mData.data());
const std::vector<ck::index_t>& output_spatial_lengths = params.GetOutputSpatialLengths();
bool res{true};
for(auto& conv_ptr : conv_ptrs)
{
auto invoker = conv_ptr->MakeInvokerPointer();
auto argument = conv_ptr->MakeArgumentPointer(
static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
static_cast<WeiDataType*>(wei_device_buf.GetDeviceBuffer()),
static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
params.N,
params.K,
params.C,
params.input_spatial_lengths,
params.filter_spatial_lengths,
output_spatial_lengths,
params.conv_filter_strides,
params.conv_filter_dilations,
params.input_left_pads,
params.input_right_pads,
InElementOp{},
WeiElementOp{},
OutElementOp{});
if(conv_ptr->IsSupportedArgument(argument.get()))
{
// Default tolerances; loosened below for half precision.
float atol{1e-5f};
float rtol{1e-4f};
if constexpr(std::is_same_v<InDataType, ck::half_t>)
{
atol = 1e-4f;
rtol = 2.5e-3f;
}
invoker->Run(argument.get());
out_device_buf.FromDevice(output.mData.data());
// Accumulate the verdict: one failing instance fails the whole run.
res = res &&
test::check_err(
output.mData, host_output.mData, "Error: incorrect results!", atol, rtol);
// Zero the shared output buffer so the next instance cannot pass by
// reusing stale results. NOTE(review): hipMemset's error code is
// swallowed via hipGetErrorString — confirm that ignoring a failed
// memset here is acceptable.
hipGetErrorString(
hipMemset(out_device_buf.GetDeviceBuffer(), 0, out_device_buf.mMemSize));
}
}
return res;
}
} // namespace conv
} // namespace test
#endif
......@@ -4,8 +4,10 @@
#include <cstdlib>
#include <stdlib.h>
#include <half.hpp>
#include "check_err.hpp"
#include "config.hpp"
#include "print.hpp"
#include "magic_division.hpp"
#include "device.hpp"
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
......@@ -54,29 +56,6 @@ __host__ void cpu_magic_number_division(uint32_t magic_multiplier,
}
}
/// Returns the largest absolute element-wise difference between `ref` and
/// `result` (0 when every element matches). `result` must contain at least
/// `ref.size()` elements.
template <typename T>
T check_error(const std::vector<T>& ref, const std::vector<T>& result)
{
    T max_diff = 0;
    for(std::size_t i = 0; i < ref.size(); ++i)
    {
        // std::abs keeps this valid for signed integral and floating-point T.
        const T diff = std::abs(ref[i] - result[i]);
        if(max_diff < diff)
        {
            max_diff = diff;
        }
    }
    return max_diff;
}
int main(int, char*[])
{
uint64_t num_divisor = 4096;
......@@ -135,9 +114,9 @@ int main(int, char*[])
naive_result_dev_buf.FromDevice(naive_result_host.data());
magic_result_dev_buf.FromDevice(magic_result_host.data());
int32_t max_diff = check_error(naive_result_host, magic_result_host);
bool res = ck::utils::check_err(magic_result_host, naive_result_host);
if(max_diff != 0)
if(!res)
{
pass = false;
continue;
......@@ -149,9 +128,9 @@ int main(int, char*[])
magic_result_host2.data(),
num_dividend);
max_diff = check_error(naive_result_host, magic_result_host2);
res = ck::utils::check_err(magic_result_host2, naive_result_host);
if(max_diff != 0)
if(!res)
{
pass = false;
continue;
......
#include "getopt.h"
#include "check_err.hpp"
#include "device_reduce_instance.hpp"
#include "reduction_enums.hpp"
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "host_reduction.hpp"
#include "test_util.hpp"
#include "reduce_util.hpp"
using namespace ck;
......@@ -51,11 +52,11 @@ struct type_mapping<ck::half_t>
constexpr int Rank = 4;
constexpr ReduceTensorOp_t ReduceOpId = ReduceTensorOp_t::AVG;
constexpr NanPropagation_t NanOpt = NanPropagation_t::PROPAGATE_NAN;
constexpr bool PropagateNan = false;
constexpr ReduceTensorIndices_t IndicesOpt = ReduceTensorIndices_t::NO_INDICES;
constexpr bool NeedIndices = false;
constexpr ReduceTensorOp ReduceOpId = ReduceTensorOp::AVG;
constexpr NanPropagation NanOpt = NanPropagation::PROPAGATE_NAN;
constexpr bool PropagateNan = false;
constexpr ReduceTensorIndices IndicesOpt = ReduceTensorIndices::NO_INDICES;
constexpr bool NeedIndices = false;
template <typename InDataType,
typename AccDataType,
......@@ -289,13 +290,13 @@ bool test_reduce_no_index_impl(int init_method,
{
reduce_util::to_f32_vector(out, out_fp32);
reduce_util::to_f32_vector(out_ref, out_ref_fp32);
single_result = test::check_err(
single_result = ck::utils::check_err(
out_fp32.mData, out_ref_fp32.mData, "Error: incorrect data result!");
}
else
{
single_result =
test::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
ck::utils::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
};
if(!single_result)
......@@ -376,13 +377,13 @@ bool test_reduce_no_index_impl(int init_method,
{
reduce_util::to_f32_vector(out, out_fp32);
reduce_util::to_f32_vector(out_ref, out_ref_fp32);
single_result = test::check_err(
single_result = ck::utils::check_err(
out_fp32.mData, out_ref_fp32.mData, "Error: incorrect data result!");
}
else
{
single_result =
test::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
ck::utils::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
};
if(!single_result)
......
......@@ -4,7 +4,7 @@
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "host_reduction.hpp"
#include "test_util.hpp"
#include "check_err.hpp"
#include "reduce_util.hpp"
using namespace ck;
......@@ -51,11 +51,11 @@ struct type_mapping<ck::half_t>
constexpr int Rank = 4;
constexpr ReduceTensorOp_t ReduceOpId = ReduceTensorOp_t::AMAX;
constexpr NanPropagation_t NanOpt = NanPropagation_t::PROPAGATE_NAN;
constexpr bool PropagateNan = false;
constexpr ReduceTensorIndices_t IndicesOpt = ReduceTensorIndices_t::FLATTENED_INDICES;
constexpr bool NeedIndices = true;
constexpr ReduceTensorOp ReduceOpId = ReduceTensorOp::AMAX;
constexpr NanPropagation NanOpt = NanPropagation::PROPAGATE_NAN;
constexpr bool PropagateNan = false;
constexpr ReduceTensorIndices IndicesOpt = ReduceTensorIndices::FLATTENED_INDICES;
constexpr bool NeedIndices = true;
template <typename InDataType,
typename AccDataType,
......@@ -273,21 +273,21 @@ bool test_reduce_with_index_impl(int init_method,
{
reduce_util::to_f32_vector(out, out_fp32);
reduce_util::to_f32_vector(out_ref, out_ref_fp32);
single_result = test::check_err(
single_result = ck::utils::check_err(
out_fp32.mData, out_ref_fp32.mData, "Error: incorrect data result!");
}
else
{
single_result =
test::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
ck::utils::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
};
if(NeedIndices)
{
out_indices_dev.FromDevice(out_indices.mData.data());
single_result = single_result && test::check_err(out_indices_ref.mData,
out_indices.mData,
"Error: incorrect index result!");
single_result = single_result && ck::utils::check_err(out_indices_ref.mData,
out_indices.mData,
"Error: incorrect index result!");
};
if(!single_result)
......@@ -370,21 +370,22 @@ bool test_reduce_with_index_impl(int init_method,
{
reduce_util::to_f32_vector(out, out_fp32);
reduce_util::to_f32_vector(out_ref, out_ref_fp32);
single_result = test::check_err(
single_result = ck::utils::check_err(
out_fp32.mData, out_ref_fp32.mData, "Error: incorrect data result!");
}
else
{
single_result =
test::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
ck::utils::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
};
if(NeedIndices)
{
out_indices_dev.FromDevice(out_indices.mData.data());
single_result = single_result && test::check_err(out_indices_ref.mData,
out_indices.mData,
"Error: incorrect index result!");
single_result =
single_result && ck::utils::check_err(out_indices_ref.mData,
out_indices.mData,
"Error: incorrect index result!");
};
if(!single_result)
......
......@@ -6,13 +6,13 @@
#include <type_traits>
#include <vector>
#include "check_err.hpp"
#include "config.hpp"
#include "conv_utils.hpp"
#include "conv_fwd_util.hpp"
#include "element_wise_operation.hpp"
#include "host_tensor.hpp"
#include "reference_conv_fwd.hpp"
#include "tensor_layout.hpp"
#include "test_util.hpp"
namespace {
using InElementOp = ck::tensor_operation::element_wise::PassThrough;
......@@ -57,9 +57,10 @@ template <ck::index_t NDim,
typename OutLayout = ck::tensor_layout::convolution::NHWK,
typename FillInputOp = FillMonotonicSeq<InDataType>,
typename FillWeightsOp = FillConstant<WeiDataType>>
Tensor<OutDataType> RunReferenceConv(const ck::conv_util::ConvParams& params,
const FillInputOp& fill_input_op = FillInputOp{},
const FillWeightsOp& fill_weights_op = FillWeightsOp{0.5f})
Tensor<OutDataType>
run_reference_convolution_forward(const ck::utils::conv::ConvParams& params,
const FillInputOp& fill_input_op = FillInputOp{},
const FillWeightsOp& fill_weights_op = FillWeightsOp{0.5f})
{
std::vector<std::size_t> input_dims{static_cast<std::size_t>(params.N),
static_cast<std::size_t>(params.C)};
......@@ -80,18 +81,16 @@ Tensor<OutDataType> RunReferenceConv(const ck::conv_util::ConvParams& params,
std::begin(output_spatial_lengths),
std::end(output_spatial_lengths));
Tensor<InDataType> input(ck::conv_util::GetHostTensorDescriptor(input_dims, InLayout{}));
Tensor<WeiDataType> weights(ck::conv_util::GetHostTensorDescriptor(filter_dims, WeiLayout{}));
Tensor<InDataType> input(ck::utils::conv::get_host_tensor_descriptor(input_dims, InLayout{}));
Tensor<WeiDataType> weights(
ck::utils::conv::get_host_tensor_descriptor(filter_dims, WeiLayout{}));
Tensor<OutDataType> host_output(
ck::conv_util::GetHostTensorDescriptor(output_dims, OutLayout{}));
ck::utils::conv::get_host_tensor_descriptor(output_dims, OutLayout{}));
fill_input_op(input.begin(), input.end());
fill_weights_op(weights.begin(), weights.end());
std::fill(host_output.begin(), host_output.end(), OutDataType(0.f));
// std::cout <<"input: " << input.mDesc << std::endl << input.mData << std::endl;
// std::cout <<"weight: " << weights.mDesc << std::endl << weights.mData << std::endl;
auto ref_conv = ck::tensor_operation::host::ReferenceConvFwd<InDataType,
WeiDataType,
OutDataType,
......@@ -116,10 +115,10 @@ Tensor<OutDataType> RunReferenceConv(const ck::conv_util::ConvParams& params,
return host_output;
}
bool TestConv2DNHWC()
bool test_conv2d_nhwc()
{
bool res{true};
ck::conv_util::ConvParams params;
ck::utils::conv::ConvParams params;
params.N = 1;
params.K = 1;
params.C = 2;
......@@ -130,7 +129,7 @@ bool TestConv2DNHWC()
params.input_left_pads = std::vector<ck::index_t>{0, 0};
params.input_right_pads = std::vector<ck::index_t>{0, 0};
auto out_tensor = RunReferenceConv<2>(params);
auto out_tensor = run_reference_convolution_forward<2>(params);
std::vector<std::size_t> ref_dims{1, 1, 4, 4};
std::vector<float> ref_data{130.5,
148.5,
......@@ -148,10 +147,10 @@ bool TestConv2DNHWC()
472.5,
490.5,
508.5};
res = res && test::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error: wrong output tensor dimensions!");
res = res && test::check_err(out_tensor.mData, ref_data, "Error: incorrect results!");
res = res && ck::utils::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error: wrong output tensor dimensions!");
res = res && ck::utils::check_err(out_tensor.mData, ref_data, "Error: incorrect results!");
params.N = 1;
params.K = 2;
......@@ -163,7 +162,7 @@ bool TestConv2DNHWC()
params.input_left_pads = std::vector<ck::index_t>{1, 1};
params.input_right_pads = std::vector<ck::index_t>{1, 1};
out_tensor = RunReferenceConv<2>(params);
out_tensor = run_reference_convolution_forward<2>(params);
ref_dims = std::vector<std::size_t>{1, 2, 5, 5};
ref_data = std::vector<float>{
210., 210., 327., 327., 351., 351., 375., 375., 399., 399.,
......@@ -171,18 +170,18 @@ bool TestConv2DNHWC()
747., 747., 1138.5, 1138.5, 1174.5, 1174.5, 1210.5, 1210.5, 1246.5, 1246.5,
1035., 1035., 1570.5, 1570.5, 1606.5, 1606.5, 1642.5, 1642.5, 1678.5, 1678.5,
1323., 1323., 2002.5, 2002.5, 2038.5, 2038.5, 2074.5, 2074.5, 2110.5, 2110.5};
res = res && test::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error: wrong output tensor dimensions!");
res = res && test::check_err(out_tensor.mData, ref_data, "Error: incorrect results!");
res = res && ck::utils::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error: wrong output tensor dimensions!");
res = res && ck::utils::check_err(out_tensor.mData, ref_data, "Error: incorrect results!");
return res;
}
bool TestConv1DNWC()
bool test_conv1d_nwc()
{
bool res{true};
ck::conv_util::ConvParams params;
ck::utils::conv::ConvParams params;
params.num_dim_spatial = 1;
params.N = 1;
params.K = 1;
......@@ -194,19 +193,20 @@ bool TestConv1DNWC()
params.input_left_pads = std::vector<ck::index_t>{0};
params.input_right_pads = std::vector<ck::index_t>{0};
auto out_tensor = RunReferenceConv<1,
float,
float,
float,
ck::tensor_layout::convolution::NWC,
ck::tensor_layout::convolution::KXC,
ck::tensor_layout::convolution::NWK>(params);
auto out_tensor =
run_reference_convolution_forward<1,
float,
float,
float,
ck::tensor_layout::convolution::NWC,
ck::tensor_layout::convolution::KXC,
ck::tensor_layout::convolution::NWK>(params);
std::vector<std::size_t> ref_dims{1, 1, 4};
std::vector<float> ref_data{7.5, 13.5, 19.5, 25.5};
res = res && test::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error: wrong output tensor dimensions!");
res = res && test::check_err(out_tensor.mData, ref_data, "Error: incorrect results!");
res = res && ck::utils::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error: wrong output tensor dimensions!");
res = res && ck::utils::check_err(out_tensor.mData, ref_data, "Error: incorrect results!");
params.num_dim_spatial = 1;
params.N = 1;
......@@ -219,19 +219,19 @@ bool TestConv1DNWC()
params.input_left_pads = std::vector<ck::index_t>{1};
params.input_right_pads = std::vector<ck::index_t>{1};
out_tensor = RunReferenceConv<1,
float,
float,
float,
ck::tensor_layout::convolution::NWC,
ck::tensor_layout::convolution::KXC,
ck::tensor_layout::convolution::NWK>(params);
out_tensor = run_reference_convolution_forward<1,
float,
float,
float,
ck::tensor_layout::convolution::NWC,
ck::tensor_layout::convolution::KXC,
ck::tensor_layout::convolution::NWK>(params);
ref_dims = std::vector<std::size_t>{1, 2, 5};
ref_data = std::vector<float>{9., 9., 19.5, 19.5, 31.5, 31.5, 43.5, 43.5, 55.5, 55.5};
res = res && test::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error: wrong output tensor dimensions!");
res = res && test::check_err(out_tensor.mData, ref_data, "Error: incorrect results!");
res = res && ck::utils::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error: wrong output tensor dimensions!");
res = res && ck::utils::check_err(out_tensor.mData, ref_data, "Error: incorrect results!");
params.num_dim_spatial = 1;
params.N = 2;
......@@ -244,13 +244,13 @@ bool TestConv1DNWC()
params.input_left_pads = std::vector<ck::index_t>{1};
params.input_right_pads = std::vector<ck::index_t>{1};
auto out_tensor2 = RunReferenceConv<1,
float,
float,
float,
ck::tensor_layout::convolution::NWC,
ck::tensor_layout::convolution::KXC,
ck::tensor_layout::convolution::NWK>(
auto out_tensor2 = run_reference_convolution_forward<1,
float,
float,
float,
ck::tensor_layout::convolution::NWC,
ck::tensor_layout::convolution::KXC,
ck::tensor_layout::convolution::NWK>(
params, FillMonotonicSeq<float>{0.f, 0.1f});
ref_dims = std::vector<std::size_t>{2, 16, 16};
......@@ -319,18 +319,18 @@ bool TestConv1DNWC()
72.9, 72.9, 72.9, 72.9, 72.9, 72.9, 72.9, 72.9,
49.4, 49.4, 49.4, 49.4, 49.4, 49.4, 49.4, 49.4,
49.4, 49.4, 49.4, 49.4, 49.4, 49.4, 49.4, 49.4};
res = res && test::check_err(out_tensor2.mDesc.GetLengths(),
ref_dims,
"Error: wrong output tensor dimensions!");
res = res && test::check_err(out_tensor2.mData, ref_data, "Error: incorrect results!");
res = res && ck::utils::check_err(out_tensor2.mDesc.GetLengths(),
ref_dims,
"Error: wrong output tensor dimensions!");
res = res && ck::utils::check_err(out_tensor2.mData, ref_data, "Error: incorrect results!");
return res;
}
bool TestConv3DNCDHW()
bool test_conv3d_ncdhw()
{
bool res{true};
ck::conv_util::ConvParams params;
ck::utils::conv::ConvParams params;
params.num_dim_spatial = 3;
params.N = 1;
params.K = 1;
......@@ -342,13 +342,13 @@ bool TestConv3DNCDHW()
params.input_left_pads = std::vector<ck::index_t>{0, 0, 0};
params.input_right_pads = std::vector<ck::index_t>{0, 0, 0};
auto out_tensor = RunReferenceConv<3,
float,
float,
float,
ck::tensor_layout::convolution::NCDHW,
ck::tensor_layout::convolution::KCZYX,
ck::tensor_layout::convolution::NKDHW>(
auto out_tensor = run_reference_convolution_forward<3,
float,
float,
float,
ck::tensor_layout::convolution::NCDHW,
ck::tensor_layout::convolution::KCZYX,
ck::tensor_layout::convolution::NKDHW>(
params, FillMonotonicSeq<float>{0.f, 0.1f});
std::vector<std::size_t> ref_dims{1, 1, 4, 4, 4};
std::vector<float> ref_data{
......@@ -360,10 +360,11 @@ bool TestConv3DNCDHW()
634.5, 637.2, 639.9, 642.60004, 650.7, 653.4, 656.10004, 658.8,
699.3, 702., 704.7, 707.4, 715.5, 718.2, 720.9, 723.60004,
731.7, 734.4001, 737.10004, 739.8, 747.9001, 750.60004, 753.3, 756.};
res = res && test::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error [case 1]: wrong output tensor dimensions!");
res = res && test::check_err(out_tensor.mData, ref_data, "Error [case 1]: incorrect results!");
res = res && ck::utils::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error [case 1]: wrong output tensor dimensions!");
res = res &&
ck::utils::check_err(out_tensor.mData, ref_data, "Error [case 1]: incorrect results!");
params.N = 1;
params.K = 2;
......@@ -375,13 +376,13 @@ bool TestConv3DNCDHW()
params.input_left_pads = std::vector<ck::index_t>{0, 0, 0};
params.input_right_pads = std::vector<ck::index_t>{0, 0, 0};
out_tensor = RunReferenceConv<3,
float,
float,
float,
ck::tensor_layout::convolution::NCDHW,
ck::tensor_layout::convolution::KCZYX,
ck::tensor_layout::convolution::NKDHW>(
out_tensor = run_reference_convolution_forward<3,
float,
float,
float,
ck::tensor_layout::convolution::NCDHW,
ck::tensor_layout::convolution::KCZYX,
ck::tensor_layout::convolution::NKDHW>(
params, FillMonotonicSeq<float>{0.f, 0.1f});
ref_dims = std::vector<std::size_t>{1, 2, 4, 4, 4};
ref_data = std::vector<float>{
......@@ -401,11 +402,11 @@ bool TestConv3DNCDHW()
5283.9004, 5292., 5300.0996, 5308.2, 5381.0996, 5389.2, 5397.3, 5405.4004,
6255.9004, 6264.0005, 6272.1, 6280.2, 6353.1, 6361.2, 6369.301, 6377.4,
6450.301, 6458.4, 6466.5, 6474.6, 6547.5, 6555.6, 6563.699, 6571.801};
res = res && test::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error [case 2]: wrong output tensor dimensions!");
res = res && ck::utils::check_err(out_tensor.mDesc.GetLengths(),
ref_dims,
"Error [case 2]: wrong output tensor dimensions!");
res =
res && test::check_err(
res && ck::utils::check_err(
out_tensor.mData, ref_data, "Error [case 2]: incorrect results!", 1e-4f, 1e-6f);
return res;
......@@ -416,11 +417,11 @@ bool TestConv3DNCDHW()
int main(void)
{
bool res{true};
res = TestConv2DNHWC();
std::cout << "TestConv2DNHWC ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = TestConv1DNWC();
res = test_conv2d_nhwc();
std::cout << "test_conv2d_nhwc ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = test_conv1d_nwc();
std::cout << "TestConv1DNHWC ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = TestConv3DNCDHW();
std::cout << "TestConv3DNCDHW ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
res = test_conv3d_ncdhw();
std::cout << "test_conv3d_ncdhw ..... " << (res ? "SUCCESS" : "FAILURE") << std::endl;
return res ? 0 : 1;
}
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment