Commit e4e99a49 authored by Po-Yen, Chen

Use new utilities to shorten code

parent 7acbf104
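
In summary, the commit swaps the examples' hand-rolled host-tensor idioms for the newer convenience utilities. A condensed before/after sketch of the recurring patterns (illustrative only; the names are taken from the hunks below, and row/col/stride stand in for whatever dimensions a given example uses):

    // before: sizes, pointers and descriptors spelled out by hand
    DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
    a_device_buf.ToDevice(a_m_k.mData.data());
    Tensor<EDataType> e_m_n_host(e_m_n.mDesc);
    HostTensorDescriptor desc(std::vector<std::size_t>({row, col}),
                              std::vector<std::size_t>({stride, 1}));

    // after: the tensor reports its own memory size, data pointer and descriptor,
    // and ck::literals supplies 1_uz so the brace-init strides deduce std::size_t
    using namespace ck::literals;
    DeviceMem a_device_buf(a_m_k.GetMemorySize());
    a_device_buf.ToDevice(a_m_k.data());
    Tensor<EDataType> e_m_n_host(e_m_n.GetDesc());
    HostTensorDescriptor desc({row, col}, {stride, 1_uz});

ck::utils::check_err likewise now compares the Tensor objects directly instead of their .mData vectors, and the fill functors accept a whole tensor in place of a begin()/end() pair.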
@@ -7,16 +7,17 @@
 #include <cstdlib>
 
 #include "ck/ck.hpp"
-#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
-#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
 #include "ck/tensor_operation/gpu/device/device_gemm_multiple_d_multiple_r_xdl_cshuffle.hpp"
+#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
+#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
 #include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
 
+#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
+#include "ck/library/utility/check_err.hpp"
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
 #include "ck/library/utility/host_tensor_generator.hpp"
-#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
-#include "ck/library/utility/check_err.hpp"
+#include "ck/library/utility/literals.hpp"
 
 template <ck::index_t... Is>
 using S = ck::Sequence<Is...>;
@@ -108,22 +109,21 @@ void DumpPerf(float ave_time, int M, int N, int K)
               << " GB/s, " << std::endl;
 }
 
+using namespace ck::literals;
+
 auto f_host_tensor_descriptor1d = [](std::size_t len, std::size_t stride) {
-    return HostTensorDescriptor(std::vector<std::size_t>({len}),
-                                std::vector<std::size_t>({stride}));
+    return HostTensorDescriptor({len}, {stride});
 };
 
 auto f_host_tensor_descriptor2d =
     [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
-        if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
+        if constexpr(std::is_same_v<decltype(layout), ck::tensor_layout::gemm::RowMajor>)
         {
-            return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
-                                        std::vector<std::size_t>({stride, 1}));
+            return HostTensorDescriptor({row, col}, {stride, 1_uz});
         }
         else
         {
-            return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
-                                        std::vector<std::size_t>({1, stride}));
+            return HostTensorDescriptor({row, col}, {1_uz, stride});
         }
     };
@@ -152,18 +152,18 @@ int main()
     d0_n.GenerateTensorValue(GeneratorTensor_3<D0DataType>{-1, 1});
     d1_m_n.GenerateTensorValue(GeneratorTensor_3<D1DataType>{-1, 1});
 
-    DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
-    DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
-    DeviceMem d0_device_buf(sizeof(D0DataType) * d0_n.mDesc.GetElementSpaceSize());
-    DeviceMem d1_device_buf(sizeof(D1DataType) * d1_m_n.mDesc.GetElementSpaceSize());
-    DeviceMem e_device_buf(sizeof(EDataType) * e_m_n.mDesc.GetElementSpaceSize());
-    DeviceMem r0_device_buf(sizeof(R0DataType) * r0_m.mDesc.GetElementSpaceSize());
-    DeviceMem r1_device_buf(sizeof(R1DataType) * r1_m.mDesc.GetElementSpaceSize());
+    DeviceMem a_device_buf(a_m_k.GetMemorySize());
+    DeviceMem b_device_buf(b_k_n.GetMemorySize());
+    DeviceMem d0_device_buf(d0_n.GetMemorySize());
+    DeviceMem d1_device_buf(d1_m_n.GetMemorySize());
+    DeviceMem e_device_buf(e_m_n.GetMemorySize());
+    DeviceMem r0_device_buf(r0_m.GetMemorySize());
+    DeviceMem r1_device_buf(r1_m.GetMemorySize());
 
-    a_device_buf.ToDevice(a_m_k.mData.data());
-    b_device_buf.ToDevice(b_k_n.mData.data());
-    d0_device_buf.ToDevice(d0_n.mData.data());
-    d1_device_buf.ToDevice(d1_m_n.mData.data());
+    a_device_buf.ToDevice(a_m_k.data());
+    b_device_buf.ToDevice(b_k_n.data());
+    d0_device_buf.ToDevice(d0_n.data());
+    d1_device_buf.ToDevice(d1_m_n.data());
 
     auto a_element_op = AElementOp{};
     auto b_element_op = BElementOp{};
@@ -212,9 +212,9 @@ int main()
         auto I0 = ck::Number<0>{};
         auto I1 = ck::Number<1>{};
 
-        Tensor<EDataType> e_m_n_host(e_m_n.mDesc);
-        Tensor<R0DataType> r0_m_host(r0_m.mDesc);
-        Tensor<R1DataType> r1_m_host(r1_m.mDesc);
+        Tensor<EDataType> e_m_n_host(e_m_n.GetDesc());
+        Tensor<R0DataType> r0_m_host(r0_m.GetDesc());
+        Tensor<R1DataType> r1_m_host(r1_m.GetDesc());
 
         auto ref_gemm    = ReferenceGemmInstance{};
         auto ref_invoker = ref_gemm.MakeInvoker();
@@ -255,16 +255,13 @@ int main()
             r1_m_host(m) = ck::type_convert<R1DataType>(reduce1_acc);
         }
 
-        e_device_buf.FromDevice(e_m_n.mData.data());
-        r0_device_buf.FromDevice(r0_m.mData.data());
-        r1_device_buf.FromDevice(r1_m.mData.data());
+        e_device_buf.FromDevice(e_m_n.data());
+        r0_device_buf.FromDevice(r0_m.data());
+        r1_device_buf.FromDevice(r1_m.data());
 
-        pass = ck::utils::check_err(
-            e_m_n.mData, e_m_n_host.mData, "Error: Incorrect results c", 1e-2, 1e-2);
-        pass &= ck::utils::check_err(
-            r0_m.mData, r0_m_host.mData, "Error: Incorrect results d0", 1e-2, 1e-2);
-        pass &= ck::utils::check_err(
-            r1_m.mData, r1_m_host.mData, "Error: Incorrect results d1", 1e-2, 1e-2);
+        pass = ck::utils::check_err(e_m_n, e_m_n_host, "Error: Incorrect results c", 1e-2, 1e-2);
+        pass &= ck::utils::check_err(r0_m, r0_m_host, "Error: Incorrect results d0", 1e-2, 1e-2);
+        pass &= ck::utils::check_err(r1_m, r1_m_host, "Error: Incorrect results d1", 1e-2, 1e-2);
     }
 
     bool time_kernel = true;
...
@@ -160,25 +160,23 @@ bool run_gemm_reduce_add_addsquare_xdl(ck::index_t M,
     {
     case 0: break;
     case 1:
-        ck::utils::FillUniformDistributionIntegerValue<ADataType>{-5.f, 5.f}(a_m_k.begin(),
-                                                                             a_m_k.end());
-        ck::utils::FillUniformDistributionIntegerValue<BDataType>{-5.f, 5.f}(b_k_n.begin(),
-                                                                             b_k_n.end());
+        ck::utils::FillUniformDistributionIntegerValue<ADataType>{-5.f, 5.f}(a_m_k);
+        ck::utils::FillUniformDistributionIntegerValue<BDataType>{-5.f, 5.f}(b_k_n);
         break;
     default:
-        ck::utils::FillUniformDistribution<ADataType>{-1.f, 1.f}(a_m_k.begin(), a_m_k.end());
-        ck::utils::FillUniformDistribution<BDataType>{-1.f, 1.f}(b_k_n.begin(), b_k_n.end());
+        ck::utils::FillUniformDistribution<ADataType>{-1.f, 1.f}(a_m_k);
+        ck::utils::FillUniformDistribution<BDataType>{-1.f, 1.f}(b_k_n);
         break;
     }
 
-    DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
-    DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
-    DeviceMem e_device_buf(sizeof(EDataType) * e_m_n.mDesc.GetElementSpaceSize());
-    DeviceMem r0_device_buf(sizeof(R0DataType) * r0_m.mDesc.GetElementSpaceSize());
-    DeviceMem r1_device_buf(sizeof(R1DataType) * r1_m.mDesc.GetElementSpaceSize());
+    DeviceMem a_device_buf(a_m_k.GetMemorySize());
+    DeviceMem b_device_buf(b_k_n.GetMemorySize());
+    DeviceMem e_device_buf(e_m_n.GetMemorySize());
+    DeviceMem r0_device_buf(r0_m.GetMemorySize());
+    DeviceMem r1_device_buf(r1_m.GetMemorySize());
 
-    a_device_buf.ToDevice(a_m_k.mData.data());
-    b_device_buf.ToDevice(b_k_n.mData.data());
+    a_device_buf.ToDevice(a_m_k.data());
+    b_device_buf.ToDevice(b_k_n.data());
 
     auto a_element_op = AElementOp{};
     auto b_element_op = BElementOp{};
@@ -226,9 +224,9 @@ bool run_gemm_reduce_add_addsquare_xdl(ck::index_t M,
         auto I0 = ck::Number<0>{};
         auto I1 = ck::Number<1>{};
 
-        Tensor<ReduceAccDataType> e_m_n_host(e_m_n.mDesc);
-        Tensor<R0DataType> r0_m_host(r0_m.mDesc);
-        Tensor<R1DataType> r1_m_host(r1_m.mDesc);
+        Tensor<ReduceAccDataType> e_m_n_host(e_m_n.GetDesc());
+        Tensor<R0DataType> r0_m_host(r0_m.GetDesc());
+        Tensor<R1DataType> r1_m_host(r1_m.GetDesc());
 
         auto ref_gemm    = ReferenceGemmInstance{};
         auto ref_invoker = ref_gemm.MakeInvoker();
@@ -259,20 +257,18 @@ bool run_gemm_reduce_add_addsquare_xdl(ck::index_t M,
             r0_m_host(m) = ck::type_convert<R0DataType>(reduce0_acc);
             r1_m_host(m) = ck::type_convert<R1DataType>(reduce1_acc);
         }
 
-        e_device_buf.FromDevice(e_m_n.mData.data());
+        e_device_buf.FromDevice(e_m_n.data());
         Tensor<EDataType> e_m_n_host_converted(e_m_n_host);
         pass = ck::utils::check_err(
-            e_m_n.mData, e_m_n_host_converted.mData, "Error: Incorrect results c", 1e-2, 1e-2);
+            e_m_n, e_m_n_host_converted, "Error: Incorrect results c", 1e-2, 1e-2);
 
-        r0_device_buf.FromDevice(r0_m.mData.data());
-        r1_device_buf.FromDevice(r1_m.mData.data());
+        r0_device_buf.FromDevice(r0_m.data());
+        r1_device_buf.FromDevice(r1_m.data());
 
-        pass &= ck::utils::check_err(
-            r0_m.mData, r0_m_host.mData, "Error: Incorrect results d0", 1e-2, 1e-2);
-        pass &= ck::utils::check_err(
-            r1_m.mData, r1_m_host.mData, "Error: Incorrect results d1", 1e-2, 1e-2);
+        pass &= ck::utils::check_err(r0_m, r0_m_host, "Error: Incorrect results d0", 1e-2, 1e-2);
+        pass &= ck::utils::check_err(r1_m, r1_m_host, "Error: Incorrect results d1", 1e-2, 1e-2);
 
         if(pass)
         {
...
@@ -134,21 +134,19 @@ auto run_gemm_reduce_max_xdl(ck::index_t M,
     {
     case 0: break;
     case 1:
-        ck::utils::FillUniformDistributionIntegerValue<ADataType>{-5.f, 5.f}(a_m_k.begin(),
-                                                                             a_m_k.end());
-        ck::utils::FillUniformDistributionIntegerValue<BDataType>{-5.f, 5.f}(b_k_n.begin(),
-                                                                             b_k_n.end());
+        ck::utils::FillUniformDistributionIntegerValue<ADataType>{-5.f, 5.f}(a_m_k);
+        ck::utils::FillUniformDistributionIntegerValue<BDataType>{-5.f, 5.f}(b_k_n);
         break;
     default:
-        ck::utils::FillUniformDistribution<ADataType>{-1.f, 1.f}(a_m_k.begin(), a_m_k.end());
-        ck::utils::FillUniformDistribution<BDataType>{-1.f, 1.f}(b_k_n.begin(), b_k_n.end());
+        ck::utils::FillUniformDistribution<ADataType>{-1.f, 1.f}(a_m_k);
+        ck::utils::FillUniformDistribution<BDataType>{-1.f, 1.f}(b_k_n);
         break;
     }
 
-    DeviceMem a_device_buf(sizeof(ADataKernelType) * a_m_k.mDesc.GetElementSpaceSize());
-    DeviceMem b_device_buf(sizeof(BDataKernelType) * b_k_n.mDesc.GetElementSpaceSize());
-    DeviceMem e_device_buf(sizeof(EDataKernelType) * e_m_n.mDesc.GetElementSpaceSize());
-    DeviceMem r0_device_buf(sizeof(R0DataType) * r0_m.mDesc.GetElementSpaceSize());
+    DeviceMem a_device_buf(a_m_k.GetMemorySize());
+    DeviceMem b_device_buf(b_k_n.GetMemorySize());
+    DeviceMem e_device_buf(e_m_n.GetMemorySize());
+    DeviceMem r0_device_buf(r0_m.GetMemorySize());
 
 #ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
     if constexpr(std::is_same_v<ADataType, ck::int4_t>)
@@ -156,14 +154,14 @@ auto run_gemm_reduce_max_xdl(ck::index_t M,
         Tensor<ADataKernelType> a_m_k_converted = a_m_k.template CopyAsType<ADataKernelType>();
         Tensor<BDataKernelType> b_k_n_converted = b_k_n.template CopyAsType<BDataKernelType>();
 
-        a_device_buf.ToDevice(a_m_k_converted.mData.data());
-        b_device_buf.ToDevice(b_k_n_converted.mData.data());
+        a_device_buf.ToDevice(a_m_k_converted.data());
+        b_device_buf.ToDevice(b_k_n_converted.data());
     }
     else
 #endif // CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
     {
-        a_device_buf.ToDevice(a_m_k.mData.data());
-        b_device_buf.ToDevice(b_k_n.mData.data());
+        a_device_buf.ToDevice(a_m_k.data());
+        b_device_buf.ToDevice(b_k_n.data());
     }
 
     auto a_element_op = AElementOp{};
@@ -210,8 +208,8 @@ auto run_gemm_reduce_max_xdl(ck::index_t M,
     {
         auto I0 = ck::Number<0>{};
 
-        Tensor<ReduceAccDataType> e_m_n_host(e_m_n.mDesc);
-        Tensor<R0DataType> r0_m_host(r0_m.mDesc);
+        Tensor<ReduceAccDataType> e_m_n_host(e_m_n.GetDesc());
+        Tensor<R0DataType> r0_m_host(r0_m.GetDesc());
 
         auto ref_gemm    = ReferenceGemmInstance{};
         auto ref_invoker = ref_gemm.MakeInvoker();
@@ -236,15 +234,15 @@ auto run_gemm_reduce_max_xdl(ck::index_t M,
             r0_m_host(m) = ck::type_convert<R0DataType>(reduce0_acc);
         }
 
-        e_device_buf.FromDevice(e_m_n.mData.data());
+        e_device_buf.FromDevice(e_m_n.data());
         Tensor<EDataType> e_m_n_host_converted(e_m_n_host);
 
 #ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
         if constexpr(std::is_same_v<ADataType, ck::int4_t>)
         {
             Tensor<EDataType> e_m_n_device_converted(e_m_n);
-            pass = ck::utils::check_err(e_m_n_device_converted.mData,
-                                        e_m_n_host_converted.mData,
+            pass = ck::utils::check_err(e_m_n_device_converted,
+                                        e_m_n_host_converted,
                                         "Error: Incorrect results c",
                                         1e-2,
                                         1e-2);
@@ -253,12 +251,11 @@ auto run_gemm_reduce_max_xdl(ck::index_t M,
 #endif // CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
         {
             pass = ck::utils::check_err(
-                e_m_n.mData, e_m_n_host_converted.mData, "Error: Incorrect results c", 1e-2, 1e-2);
+                e_m_n, e_m_n_host_converted, "Error: Incorrect results c", 1e-2, 1e-2);
         }
 
-        r0_device_buf.FromDevice(r0_m.mData.data());
-        pass &= ck::utils::check_err(
-            r0_m.mData, r0_m_host.mData, "Error: Incorrect results d0", 1e-2, 1e-2);
+        r0_device_buf.FromDevice(r0_m.data());
+        pass &= ck::utils::check_err(r0_m, r0_m_host, "Error: Incorrect results d0", 1e-2, 1e-2);
 
         if(pass)
         {
@@ -339,22 +336,20 @@ bool run_gemm_reduce_mean_meansquare_xdl(ck::index_t M,
     {
     case 0: break;
     case 1:
-        ck::utils::FillUniformDistributionIntegerValue<ADataType>{-5.f, 5.f}(a_m_k.begin(),
-                                                                             a_m_k.end());
-        ck::utils::FillUniformDistributionIntegerValue<BDataType>{-5.f, 5.f}(b_k_n.begin(),
-                                                                             b_k_n.end());
+        ck::utils::FillUniformDistributionIntegerValue<ADataType>{-5.f, 5.f}(a_m_k);
+        ck::utils::FillUniformDistributionIntegerValue<BDataType>{-5.f, 5.f}(b_k_n);
         break;
     default:
-        ck::utils::FillUniformDistribution<ADataType>{-1.f, 1.f}(a_m_k.begin(), a_m_k.end());
-        ck::utils::FillUniformDistribution<BDataType>{-1.f, 1.f}(b_k_n.begin(), b_k_n.end());
+        ck::utils::FillUniformDistribution<ADataType>{-1.f, 1.f}(a_m_k);
+        ck::utils::FillUniformDistribution<BDataType>{-1.f, 1.f}(b_k_n);
         break;
     }
 
-    DeviceMem a_device_buf(sizeof(ADataKernelType) * a_m_k.mDesc.GetElementSpaceSize());
-    DeviceMem b_device_buf(sizeof(BDataKernelType) * b_k_n.mDesc.GetElementSpaceSize());
-    DeviceMem e_device_buf(sizeof(EDataKernelType) * e_m_n.mDesc.GetElementSpaceSize());
-    DeviceMem r0_device_buf(sizeof(R0DataType) * r0_m.mDesc.GetElementSpaceSize());
-    DeviceMem r1_device_buf(sizeof(R1DataType) * r1_m.mDesc.GetElementSpaceSize());
+    DeviceMem a_device_buf(a_m_k.GetMemorySize());
+    DeviceMem b_device_buf(b_k_n.GetMemorySize());
+    DeviceMem e_device_buf(e_m_n.GetMemorySize());
+    DeviceMem r0_device_buf(r0_m.GetMemorySize());
+    DeviceMem r1_device_buf(r1_m.GetMemorySize());
 
 #ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
     if constexpr(std::is_same_v<ADataType, ck::int4_t>)
@@ -362,14 +357,14 @@ bool run_gemm_reduce_mean_meansquare_xdl(ck::index_t M,
         Tensor<ADataKernelType> a_m_k_converted = a_m_k.template CopyAsType<ADataKernelType>();
         Tensor<BDataKernelType> b_k_n_converted = b_k_n.template CopyAsType<BDataKernelType>();
 
-        a_device_buf.ToDevice(a_m_k_converted.mData.data());
-        b_device_buf.ToDevice(b_k_n_converted.mData.data());
+        a_device_buf.ToDevice(a_m_k_converted.data());
+        b_device_buf.ToDevice(b_k_n_converted.data());
     }
     else
 #endif // CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
     {
-        a_device_buf.ToDevice(a_m_k.mData.data());
-        b_device_buf.ToDevice(b_k_n.mData.data());
+        a_device_buf.ToDevice(a_m_k.data());
+        b_device_buf.ToDevice(b_k_n.data());
     }
 
     auto a_element_op = AElementOp{};
@@ -418,9 +413,9 @@ bool run_gemm_reduce_mean_meansquare_xdl(ck::index_t M,
         auto I0 = ck::Number<0>{};
         auto I1 = ck::Number<1>{};
 
-        Tensor<ReduceAccDataType> e_m_n_host(e_m_n.mDesc);
-        Tensor<R0DataType> r0_m_host(r0_m.mDesc);
-        Tensor<R1DataType> r1_m_host(r1_m.mDesc);
+        Tensor<ReduceAccDataType> e_m_n_host(e_m_n.GetDesc());
+        Tensor<R0DataType> r0_m_host(r0_m.GetDesc());
+        Tensor<R1DataType> r1_m_host(r1_m.GetDesc());
 
         auto ref_gemm    = ReferenceGemmInstance{};
         auto ref_invoker = ref_gemm.MakeInvoker();
@@ -453,15 +448,15 @@ bool run_gemm_reduce_mean_meansquare_xdl(ck::index_t M,
             r0_m_host(m) = ck::type_convert<R0DataType>(reduce0_acc);
             r1_m_host(m) = ck::type_convert<R1DataType>(reduce1_acc);
         }
 
-        e_device_buf.FromDevice(e_m_n.mData.data());
+        e_device_buf.FromDevice(e_m_n.data());
         Tensor<EDataType> e_m_n_host_converted(e_m_n_host);
 
 #ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
         if constexpr(std::is_same_v<ADataType, ck::int4_t>)
         {
             Tensor<EDataType> e_m_n_device_converted(e_m_n);
-            pass = ck::utils::check_err(e_m_n_device_converted.mData,
-                                        e_m_n_host_converted.mData,
+            pass = ck::utils::check_err(e_m_n_device_converted,
+                                        e_m_n_host_converted,
                                         "Error: Incorrect results c",
                                         1e-2,
                                         1e-2);
@@ -470,16 +465,14 @@ bool run_gemm_reduce_mean_meansquare_xdl(ck::index_t M,
 #endif // CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
         {
             pass = ck::utils::check_err(
-                e_m_n.mData, e_m_n_host_converted.mData, "Error: Incorrect results c", 1e-2, 1e-2);
+                e_m_n, e_m_n_host_converted, "Error: Incorrect results c", 1e-2, 1e-2);
         }
 
-        r0_device_buf.FromDevice(r0_m.mData.data());
-        r1_device_buf.FromDevice(r1_m.mData.data());
+        r0_device_buf.FromDevice(r0_m.data());
+        r1_device_buf.FromDevice(r1_m.data());
 
-        pass &= ck::utils::check_err(
-            r0_m.mData, r0_m_host.mData, "Error: Incorrect results d0", 1e-2, 1e-2);
-        pass &= ck::utils::check_err(
-            r1_m.mData, r1_m_host.mData, "Error: Incorrect results d1", 1e-2, 1e-2);
+        pass &= ck::utils::check_err(r0_m, r0_m_host, "Error: Incorrect results d0", 1e-2, 1e-2);
+        pass &= ck::utils::check_err(r1_m, r1_m_host, "Error: Incorrect results d1", 1e-2, 1e-2);
 
         if(pass)
         {
...
@@ -50,9 +50,9 @@ int run_conv_bwd_data(bool do_verification,
     Tensor<WeiDataType> wei(wei_g_k_c_xs_desc);
     Tensor<OutDataType> out(out_g_n_k_wos_desc);
 
-    std::cout << "in: " << in_host.mDesc << std::endl;
-    std::cout << "wei: " << wei.mDesc << std::endl;
-    std::cout << "out: " << out.mDesc << std::endl;
+    std::cout << "in: " << in_host.GetDesc() << std::endl;
+    std::cout << "wei: " << wei.GetDesc() << std::endl;
+    std::cout << "out: " << out.GetDesc() << std::endl;
 
     switch(init_method)
     {
@@ -66,12 +66,12 @@ int run_conv_bwd_data(bool do_verification,
         wei.GenerateTensorValue(GeneratorTensor_3<WeiDataType>{-0.5, 0.5});
     }
 
-    DeviceMem in_device_buf(sizeof(InDataType) * in_device.mDesc.GetElementSpaceSize());
-    DeviceMem wei_device_buf(sizeof(WeiDataType) * wei.mDesc.GetElementSpaceSize());
-    DeviceMem out_device_buf(sizeof(OutDataType) * out.mDesc.GetElementSpaceSize());
+    DeviceMem in_device_buf(in_device.GetMemorySize());
+    DeviceMem wei_device_buf(wei.GetMemorySize());
+    DeviceMem out_device_buf(out.GetMemorySize());
 
-    out_device_buf.ToDevice(out.mData.data());
-    wei_device_buf.ToDevice(wei.mData.data());
+    out_device_buf.ToDevice(out.data());
+    wei_device_buf.ToDevice(wei.data());
 
     // reset input to zero
     in_device_buf.SetZero();
@@ -79,9 +79,9 @@ int run_conv_bwd_data(bool do_verification,
     // do GEMM
     auto conv     = DeviceConvNdBwdDataInstance{};
     auto invoker  = conv.MakeInvoker();
-    auto argument = conv.MakeArgument(static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
-                                      static_cast<WeiDataType*>(wei_device_buf.GetDeviceBuffer()),
-                                      static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
+    auto argument = conv.MakeArgument(in_device_buf.GetDeviceBuffer(),
+                                      wei_device_buf.GetDeviceBuffer(),
+                                      out_device_buf.GetDeviceBuffer(),
                                       conv_param.N_,
                                       conv_param.K_,
                                       conv_param.C_,
@@ -140,9 +140,9 @@ int run_conv_bwd_data(bool do_verification,
         ref_invoker.Run(ref_argument);
 
-        in_device_buf.FromDevice(in_device.mData.data());
+        in_device_buf.FromDevice(in_device.data());
 
-        return ck::utils::check_err(in_device.mData, in_host.mData) ? 0 : 1;
+        return ck::utils::check_err(in_device, in_host) ? 0 : 1;
     }
 
     return 0;
...
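Worth noting in the two convolution examples: the static_cast<...*> wrappers around GetDeviceBuffer() are gone, so the untyped device pointer is handed to MakeArgument directly. A trimmed sketch of how the call reads after this change (argument list shortened; see the hunk above for the full parameter set):

    auto conv     = DeviceConvNdBwdDataInstance{};
    auto invoker  = conv.MakeInvoker();
    auto argument = conv.MakeArgument(in_device_buf.GetDeviceBuffer(),   // was static_cast<InDataType*>(...)
                                      wei_device_buf.GetDeviceBuffer(),  // was static_cast<WeiDataType*>(...)
                                      out_device_buf.GetDeviceBuffer(),  // was static_cast<OutDataType*>(...)
                                      conv_param.N_,
                                      conv_param.K_,
                                      conv_param.C_,
                                      /* remaining conv_param fields as in the hunk above */);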
 // SPDX-License-Identifier: MIT
 // Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
 
+#include <cstdlib>
+#include <initializer_list>
 #include <iostream>
 #include <numeric>
-#include <initializer_list>
-#include <cstdlib>
 
 #include "ck/ck.hpp"
 #include "ck/utility/reduction_operator.hpp"
-#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
 #include "ck/tensor_operation/gpu/device/device_batched_gemm_reduce_xdl_cshuffle.hpp"
 #include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
+#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
 
+#include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"
 #include "ck/library/utility/check_err.hpp"
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
 #include "ck/library/utility/host_tensor_generator.hpp"
-#include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"
+#include "ck/library/utility/literals.hpp"
 
 template <ck::index_t... Is>
 using S = ck::Sequence<Is...>;
@@ -127,20 +128,20 @@ int main(int argc, char* argv[])
         exit(0);
     }
 
+    using namespace ck::literals;
+
     auto f_host_tensor_descriptor = [](std::size_t batch_count,
                                        std::size_t row,
                                        std::size_t col,
                                        std::size_t stride,
                                        auto layout) {
-        if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
+        if constexpr(std::is_same_v<decltype(layout), ck::tensor_layout::gemm::RowMajor>)
         {
-            return HostTensorDescriptor(std::vector<std::size_t>({batch_count, row, col}),
-                                        std::vector<std::size_t>({row * stride, stride, 1}));
+            return HostTensorDescriptor({batch_count, row, col}, {row * stride, stride, 1_uz});
         }
         else
         {
-            return HostTensorDescriptor(std::vector<std::size_t>({batch_count, row, col}),
-                                        std::vector<std::size_t>({col * stride, 1, stride}));
+            return HostTensorDescriptor({batch_count, row, col}, {col * stride, 1_uz, stride});
         }
     };
@@ -149,23 +150,19 @@ int main(int argc, char* argv[])
     Tensor<CDataType> c_g_m_n_host_result(
         f_host_tensor_descriptor(BatchCount, M, N, StrideC, CLayout{}));
-    Tensor<ReduceDataType> d0_g_m_host_result(HostTensorDescriptor(std::vector<std::size_t>(
-        {static_cast<std::size_t>(BatchCount), static_cast<std::size_t>(M)})));
-    Tensor<ReduceDataType> d1_g_m_host_result(HostTensorDescriptor(std::vector<std::size_t>(
-        {static_cast<std::size_t>(BatchCount), static_cast<std::size_t>(M)})));
+    Tensor<ReduceDataType> d0_g_m_host_result(HostTensorDescriptor({BatchCount, M}));
+    Tensor<ReduceDataType> d1_g_m_host_result(HostTensorDescriptor({BatchCount, M}));
 
     Tensor<CDataType> c_g_m_n_device_result(
         f_host_tensor_descriptor(BatchCount, M, N, StrideC, CLayout{}));
-    Tensor<ReduceDataType> d0_g_m_device_result(HostTensorDescriptor(std::vector<std::size_t>(
-        {static_cast<std::size_t>(BatchCount), static_cast<std::size_t>(M)})));
-    Tensor<ReduceDataType> d1_g_m_device_result(HostTensorDescriptor(std::vector<std::size_t>(
-        {static_cast<std::size_t>(BatchCount), static_cast<std::size_t>(M)})));
+    Tensor<ReduceDataType> d0_g_m_device_result(HostTensorDescriptor({BatchCount, M}));
+    Tensor<ReduceDataType> d1_g_m_device_result(HostTensorDescriptor({BatchCount, M}));
 
-    std::cout << "a_g_m_k: " << a_g_m_k.mDesc << std::endl;
-    std::cout << "b_g_k_n: " << b_g_k_n.mDesc << std::endl;
-    std::cout << "c_g_m_n: " << c_g_m_n_host_result.mDesc << std::endl;
-    std::cout << "d0_g_m: " << d0_g_m_host_result.mDesc << std::endl;
-    std::cout << "d1_g_m: " << d1_g_m_host_result.mDesc << std::endl;
+    std::cout << "a_g_m_k: " << a_g_m_k.GetDesc() << std::endl;
+    std::cout << "b_g_k_n: " << b_g_k_n.GetDesc() << std::endl;
+    std::cout << "c_g_m_n: " << c_g_m_n_host_result.GetDesc() << std::endl;
+    std::cout << "d0_g_m: " << d0_g_m_host_result.GetDesc() << std::endl;
+    std::cout << "d1_g_m: " << d1_g_m_host_result.GetDesc() << std::endl;
 
     switch(init_method)
     {
@@ -180,16 +177,14 @@ int main(int argc, char* argv[])
         break;
     }
 
-    DeviceMem a_device_buf(sizeof(ADataType) * a_g_m_k.mDesc.GetElementSpaceSize());
-    DeviceMem b_device_buf(sizeof(BDataType) * b_g_k_n.mDesc.GetElementSpaceSize());
-    DeviceMem c_device_buf(sizeof(CDataType) * c_g_m_n_device_result.mDesc.GetElementSpaceSize());
-    DeviceMem reduce0_device_buf(sizeof(ReduceDataType) *
-                                 d0_g_m_device_result.mDesc.GetElementSpaceSize());
-    DeviceMem reduce1_device_buf(sizeof(ReduceDataType) *
-                                 d1_g_m_device_result.mDesc.GetElementSpaceSize());
+    DeviceMem a_device_buf(a_g_m_k.GetMemorySize());
+    DeviceMem b_device_buf(b_g_k_n.GetMemorySize());
+    DeviceMem c_device_buf(c_g_m_n_device_result.GetMemorySize());
+    DeviceMem reduce0_device_buf(d0_g_m_device_result.GetMemorySize());
+    DeviceMem reduce1_device_buf(d1_g_m_device_result.GetMemorySize());
 
-    a_device_buf.ToDevice(a_g_m_k.mData.data());
-    b_device_buf.ToDevice(b_g_k_n.mData.data());
+    a_device_buf.ToDevice(a_g_m_k.data());
+    b_device_buf.ToDevice(b_g_k_n.data());
 
     auto a_element_op = AElementOp{};
     auto b_element_op = BElementOp{};
@@ -256,9 +251,9 @@ int main(int argc, char* argv[])
     bool pass = true;
 
     if(do_verification)
     {
-        c_device_buf.FromDevice(c_g_m_n_device_result.mData.data());
-        reduce0_device_buf.FromDevice(d0_g_m_device_result.mData.data());
-        reduce1_device_buf.FromDevice(d1_g_m_device_result.mData.data());
+        c_device_buf.FromDevice(c_g_m_n_device_result.data());
+        reduce0_device_buf.FromDevice(d0_g_m_device_result.data());
+        reduce1_device_buf.FromDevice(d1_g_m_device_result.data());
 
         auto ref_batched_gemm = ReferenceBatchedGemmInstance{};
         auto ref_invoker      = ref_batched_gemm.MakeInvoker();
@@ -296,16 +291,15 @@ int main(int argc, char* argv[])
             }
         }
 
-        pass = ck::utils::check_err(c_g_m_n_host_result.mData,
-                                    c_g_m_n_device_result.mData,
-                                    "Error: Incorrect results c") &&
-               ck::utils::check_err(d0_g_m_device_result.mData,
-                                    d0_g_m_host_result.mData,
+        pass = ck::utils::check_err(
+                   c_g_m_n_host_result, c_g_m_n_device_result, "Error: Incorrect results c") &&
+               ck::utils::check_err(d0_g_m_device_result,
+                                    d0_g_m_host_result,
                                     "Error: Incorrect results! D0",
                                     1e-4,
                                     1e-5) &&
-               ck::utils::check_err(d1_g_m_device_result.mData,
-                                    d1_g_m_host_result.mData,
+               ck::utils::check_err(d1_g_m_device_result,
+                                    d1_g_m_host_result,
                                     "Error: Incorrect results! D1",
                                     1e-3,
                                     1e-5);
...
 // SPDX-License-Identifier: MIT
 // Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
 
-#include <iostream>
 #include <cstdlib>
+#include <iostream>
 
 #include "ck/ck.hpp"
-#include "ck/tensor_operation/gpu/element/binary_element_wise_operation.hpp"
 #include "ck/tensor_operation/gpu/device/device_elementwise.hpp"
+#include "ck/tensor_operation/gpu/element/binary_element_wise_operation.hpp"
 
 #include "ck/library/utility/check_err.hpp"
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
 #include "ck/library/utility/host_tensor_generator.hpp"
+#include "ck/library/utility/literals.hpp"
 
 using F16 = ck::half_t;
 using F32 = float;
@@ -71,13 +72,13 @@ int main()
     ck::index_t Stride = 1024;
 
     auto f_host_tensor_descriptor1d = [](std::size_t len, std::size_t stride) {
-        return HostTensorDescriptor(std::vector<std::size_t>({len}),
-                                    std::vector<std::size_t>({stride}));
+        return HostTensorDescriptor({len}, {stride});
     };
 
+    using namespace ck::literals;
+
     auto f_host_tensor_descriptor2d = [](std::size_t row, std::size_t col, std::size_t stride) {
-        return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
-                                    std::vector<std::size_t>({stride, 1}));
+        return HostTensorDescriptor({row, col}, {stride, 1_uz});
     };
 
     Tensor<ABDataType> a_m_n(f_host_tensor_descriptor2d(M, N, Stride));
@@ -87,12 +88,12 @@ int main()
     a_m_n.GenerateTensorValue(GeneratorTensor_3<ABDataType>{0.0, 1.0});
     b_n.GenerateTensorValue(GeneratorTensor_3<ABDataType>{0.0, 1.0});
 
-    DeviceMem a_m_n_device_buf(sizeof(ABDataType) * a_m_n.mDesc.GetElementSpaceSize());
-    DeviceMem b_n_device_buf(sizeof(ABDataType) * b_n.mDesc.GetElementSpaceSize());
-    DeviceMem c_m_n_device_buf(sizeof(CDataType) * c_m_n.mDesc.GetElementSpaceSize());
+    DeviceMem a_m_n_device_buf(a_m_n.GetMemorySize());
+    DeviceMem b_n_device_buf(b_n.GetMemorySize());
+    DeviceMem c_m_n_device_buf(c_m_n.GetMemorySize());
 
-    a_m_n_device_buf.ToDevice(a_m_n.mData.data());
-    b_n_device_buf.ToDevice(b_n.mData.data());
+    a_m_n_device_buf.ToDevice(a_m_n.data());
+    b_n_device_buf.ToDevice(b_n.data());
 
     std::array<const void*, 2> input = {a_m_n_device_buf.GetDeviceBuffer(),
                                         b_n_device_buf.GetDeviceBuffer()};
@@ -122,14 +123,13 @@ int main()
     bool pass = true;
 
     if(do_verification)
     {
-        c_m_n_device_buf.FromDevice(c_m_n.mData.data());
+        c_m_n_device_buf.FromDevice(c_m_n.data());
         Tensor<CDataType> host_c_m_n(f_host_tensor_descriptor2d(M, N, Stride));
 
         host_broadcast2D<Tensor<ABDataType>, Tensor<ABDataType>, Tensor<CDataType>, Add, 0>(
             host_c_m_n, a_m_n, b_n, M, N, Add{});
 
-        pass &= ck::utils::check_err(
-            c_m_n.mData, host_c_m_n.mData, "Error: Incorrect results c", 1e-3, 1e-3);
+        pass &= ck::utils::check_err(c_m_n, host_c_m_n, "Error: Incorrect results c", 1e-3, 1e-3);
     }
 
     return pass ? 0 : 1;
...
 // SPDX-License-Identifier: MIT
 // Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
 
-#include <iostream>
 #include <cstdlib>
+#include <iostream>
 
 #include "ck/ck.hpp"
-#include "ck/tensor_operation/gpu/element/binary_element_wise_operation.hpp"
 #include "ck/tensor_operation/gpu/device/device_elementwise.hpp"
+#include "ck/tensor_operation/gpu/element/binary_element_wise_operation.hpp"
 
+#include "ck/library/utility/algorithm.hpp"
+#include "ck/library/utility/array.hpp"
 #include "ck/library/utility/check_err.hpp"
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
@@ -66,31 +68,27 @@ int main()
     a_m.GenerateTensorValue(GeneratorTensor_3<ABDataType>{0.0, 1.0});
     b_m_n_k.GenerateTensorValue(GeneratorTensor_3<ABDataType>{0.0, 1.0});
 
-    DeviceMem a_m_device_buf(sizeof(ABDataType) * a_m.mDesc.GetElementSpaceSize());
-    DeviceMem b_m_n_k_device_buf(sizeof(ABDataType) * b_m_n_k.mDesc.GetElementSpaceSize());
-    DeviceMem c_m_n_k_device_buf(sizeof(CDataType) * c_m_n_k.mDesc.GetElementSpaceSize());
+    DeviceMem a_m_device_buf(a_m.GetMemorySize());
+    DeviceMem b_m_n_k_device_buf(b_m_n_k.GetMemorySize());
+    DeviceMem c_m_n_k_device_buf(c_m_n_k.GetMemorySize());
 
-    a_m_device_buf.ToDevice(a_m.mData.data());
-    b_m_n_k_device_buf.ToDevice(b_m_n_k.mData.data());
+    a_m_device_buf.ToDevice(a_m.data());
+    b_m_n_k_device_buf.ToDevice(b_m_n_k.data());
 
     std::array<const void*, 2> input = {a_m_device_buf.GetDeviceBuffer(),
                                         b_m_n_k_device_buf.GetDeviceBuffer()};
     std::array<void*, 1> output = {c_m_n_k_device_buf.GetDeviceBuffer()};
 
-    std::array<ck::index_t, 3> abc_lengths;
     std::array<ck::index_t, 3> a_strides = {1, 0, 0};
     std::array<ck::index_t, 3> b_strides;
     std::array<ck::index_t, 3> c_strides;
 
-    std::copy(mnk.begin(), mnk.end(), abc_lengths.begin());
-    std::copy(
-        b_m_n_k.mDesc.GetStrides().begin(), b_m_n_k.mDesc.GetStrides().end(), b_strides.begin());
-    std::copy(
-        c_m_n_k.mDesc.GetStrides().begin(), c_m_n_k.mDesc.GetStrides().end(), c_strides.begin());
+    ck::ranges::copy(b_m_n_k.GetStrides(), b_strides.begin());
+    ck::ranges::copy(c_m_n_k.GetStrides(), c_strides.begin());
 
     auto broadcastAdd = DeviceElementwiseAddInstance{};
     auto argument     = broadcastAdd.MakeArgumentPointer(
-        abc_lengths, {a_strides, b_strides}, {c_strides}, input, output, Add{});
+        ck::utils::to_array(mnk), {a_strides, b_strides}, {c_strides}, input, output, Add{});
 
     if(!broadcastAdd.IsSupportedArgument(argument.get()))
     {
@@ -107,14 +105,14 @@ int main()
     bool pass = true;
 
     if(do_verification)
     {
-        c_m_n_k_device_buf.FromDevice(c_m_n_k.mData.data());
+        c_m_n_k_device_buf.FromDevice(c_m_n_k.data());
         Tensor<CDataType> host_c_m_n_k(mnk);
 
         host_broadcast3D_am_bmnk<Tensor<ABDataType>, Tensor<ABDataType>, Tensor<CDataType>, Add>(
            host_c_m_n_k, a_m, b_m_n_k, mnk, Add{});
 
-        pass &= ck::utils::check_err(
-            c_m_n_k.mData, host_c_m_n_k.mData, "Error: Incorrect results c", 1e-3, 1e-3);
+        pass &=
+            ck::utils::check_err(c_m_n_k, host_c_m_n_k, "Error: Incorrect results c", 1e-3, 1e-3);
     }
 
     return pass ? 0 : 1;
...
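The broadcast examples above also pull in the new algorithm.hpp and array.hpp utilities: ck::ranges::copy takes a whole range instead of a begin()/end() pair, and ck::utils::to_array converts the length vector inline, removing the scratch abc_lengths array. A minimal sketch of the resulting pattern, mirroring the 3-D example above (illustrative, not a complete program):

    std::array<ck::index_t, 3> b_strides;
    std::array<ck::index_t, 3> c_strides;

    // whole-range copy replaces std::copy(v.begin(), v.end(), out)
    ck::ranges::copy(b_m_n_k.GetStrides(), b_strides.begin());
    ck::ranges::copy(c_m_n_k.GetStrides(), c_strides.begin());

    // lengths converted in place, so no separate abc_lengths array is needed
    auto argument = broadcastAdd.MakeArgumentPointer(
        ck::utils::to_array(mnk), {a_strides, b_strides}, {c_strides}, input, output, Add{});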
 // SPDX-License-Identifier: MIT
 // Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
 
-#include <iostream>
 #include <cstdlib>
+#include <iostream>
 
 #include "ck/ck.hpp"
 #include "ck/tensor_operation/gpu/device/device_elementwise.hpp"
@@ -53,8 +53,7 @@ int main()
     ck::index_t M = 1024;
 
     auto f_host_tensor_descriptor1d = [](std::size_t len, std::size_t stride) {
-        return HostTensorDescriptor(std::vector<std::size_t>({len}),
-                                    std::vector<std::size_t>({stride}));
+        return HostTensorDescriptor({len}, {stride});
     };
 
     Tensor<ABDataType> a_m(f_host_tensor_descriptor1d(M, 1));
@@ -64,12 +63,12 @@ int main()
     a_m.GenerateTensorValue(GeneratorTensor_3<ABDataType>{0.0, 1.0});
     b_m.GenerateTensorValue(GeneratorTensor_3<ABDataType>{0.0, 1.0});
 
-    DeviceMem a_m_device_buf(sizeof(ABDataType) * a_m.mDesc.GetElementSpaceSize());
-    DeviceMem b_m_device_buf(sizeof(ABDataType) * b_m.mDesc.GetElementSpaceSize());
-    DeviceMem c_m_device_buf(sizeof(CDataType) * c_m.mDesc.GetElementSpaceSize());
+    DeviceMem a_m_device_buf(a_m.GetMemorySize());
+    DeviceMem b_m_device_buf(b_m.GetMemorySize());
+    DeviceMem c_m_device_buf(c_m.GetMemorySize());
 
-    a_m_device_buf.ToDevice(a_m.mData.data());
-    b_m_device_buf.ToDevice(b_m.mData.data());
+    a_m_device_buf.ToDevice(a_m.data());
+    b_m_device_buf.ToDevice(b_m.data());
 
     std::array<const void*, 2> input = {a_m_device_buf.GetDeviceBuffer(),
                                         b_m_device_buf.GetDeviceBuffer()};
@@ -99,14 +98,13 @@ int main()
     bool pass = true;
 
     if(do_verification)
     {
-        c_m_device_buf.FromDevice(c_m.mData.data());
+        c_m_device_buf.FromDevice(c_m.data());
         Tensor<CDataType> host_c_m(f_host_tensor_descriptor1d(M, 1));
 
         host_elementwise1D<Tensor<ABDataType>, Tensor<ABDataType>, Tensor<CDataType>, Add>(
             host_c_m, a_m, b_m, M, Add{});
 
-        pass &= ck::utils::check_err(
-            c_m.mData, host_c_m.mData, "Error: Incorrect results c", 1e-3, 1e-3);
+        pass &= ck::utils::check_err(c_m, host_c_m, "Error: Incorrect results c", 1e-3, 1e-3);
     }
 
     return pass ? 0 : 1;
...
 // SPDX-License-Identifier: MIT
 // Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
 
-#include <iostream>
 #include <cstdlib>
+#include <iostream>
 
 #include "ck/ck.hpp"
-#include "ck/tensor_operation/gpu/element/binary_element_wise_operation.hpp"
 #include "ck/tensor_operation/gpu/device/device_elementwise.hpp"
+#include "ck/tensor_operation/gpu/element/binary_element_wise_operation.hpp"
 
+#include "ck/library/utility/algorithm.hpp"
+#include "ck/library/utility/array.hpp"
 #include "ck/library/utility/check_err.hpp"
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
@@ -66,30 +68,30 @@ int main()
     a.GenerateTensorValue(GeneratorTensor_3<ABDataType>{0.0, 1.0});
     b.GenerateTensorValue(GeneratorTensor_3<ABDataType>{0.0, 1.0});
 
-    DeviceMem a_device_buf(sizeof(ABDataType) * a.mDesc.GetElementSpaceSize());
-    DeviceMem b_device_buf(sizeof(ABDataType) * b.mDesc.GetElementSpaceSize());
-    DeviceMem c_device_buf(sizeof(CDataType) * c.mDesc.GetElementSpaceSize());
+    DeviceMem a_device_buf(a.GetMemorySize());
+    DeviceMem b_device_buf(b.GetMemorySize());
+    DeviceMem c_device_buf(c.GetMemorySize());
 
-    a_device_buf.ToDevice(a.mData.data());
-    b_device_buf.ToDevice(b.mData.data());
+    a_device_buf.ToDevice(a.data());
+    b_device_buf.ToDevice(b.data());
 
     std::array<const void*, 2> input = {a_device_buf.GetDeviceBuffer(),
                                         b_device_buf.GetDeviceBuffer()};
     std::array<void*, 1> output = {c_device_buf.GetDeviceBuffer()};
 
-    std::array<ck::index_t, 4> abc_lengths;
     std::array<ck::index_t, 4> a_strides;
     std::array<ck::index_t, 4> b_strides;
     std::array<ck::index_t, 4> c_strides;
 
-    std::copy(nchw.begin(), nchw.end(), abc_lengths.begin());
-    std::copy(a.mDesc.GetStrides().begin(), a.mDesc.GetStrides().end(), a_strides.begin());
-    std::copy(b.mDesc.GetStrides().begin(), b.mDesc.GetStrides().end(), b_strides.begin());
-    std::copy(c.mDesc.GetStrides().begin(), c.mDesc.GetStrides().end(), c_strides.begin());
+    using ck::ranges::copy;
+
+    copy(a.GetStrides(), a_strides.begin());
+    copy(b.GetStrides(), b_strides.begin());
+    copy(c.GetStrides(), c_strides.begin());
 
     auto broadcastAdd = DeviceElementwiseAddInstance{};
     auto argument     = broadcastAdd.MakeArgumentPointer(
-        abc_lengths, {a_strides, b_strides}, {c_strides}, input, output, Add{});
+        ck::utils::to_array(nchw), {a_strides, b_strides}, {c_strides}, input, output, Add{});
 
     if(!broadcastAdd.IsSupportedArgument(argument.get()))
     {
@@ -106,14 +108,13 @@ int main()
     bool pass = true;
 
     if(do_verification)
     {
-        c_device_buf.FromDevice(c.mData.data());
+        c_device_buf.FromDevice(c.data());
         Tensor<CDataType> host_c(nchw);
 
         host_elementwise4D<Tensor<ABDataType>, Tensor<ABDataType>, Tensor<CDataType>, Add>(
             host_c, a, b, nchw, Add{});
 
-        pass &=
-            ck::utils::check_err(c.mData, host_c.mData, "Error: Incorrect results c", 1e-3, 1e-3);
+        pass &= ck::utils::check_err(c, host_c, "Error: Incorrect results c", 1e-3, 1e-3);
     }
 
     return pass ? 0 : 1;
...
@@ -51,9 +51,9 @@ int run_conv_bwd_weight(bool do_verification,
     Tensor<WeiDataType> wei_device_result(wei_g_k_c_xs_desc);
     Tensor<OutDataType> out(out_g_n_k_wos_desc);
 
-    std::cout << "in: " << in.mDesc << std::endl;
-    std::cout << "wei: " << wei_host_result.mDesc << std::endl;
-    std::cout << "out: " << out.mDesc << std::endl;
+    std::cout << "in: " << in.GetDesc() << std::endl;
+    std::cout << "wei: " << wei_host_result.GetDesc() << std::endl;
+    std::cout << "out: " << out.GetDesc() << std::endl;
 
     switch(init_method)
     {
@@ -67,12 +67,12 @@ int run_conv_bwd_weight(bool do_verification,
         out.GenerateTensorValue(GeneratorTensor_3<OutDataType>{-0.5, 0.5});
     }
 
-    DeviceMem in_device_buf(sizeof(InDataType) * in.mDesc.GetElementSpaceSize());
-    DeviceMem wei_device_buf(sizeof(WeiDataType) * wei_device_result.mDesc.GetElementSpaceSize());
-    DeviceMem out_device_buf(sizeof(OutDataType) * out.mDesc.GetElementSpaceSize());
+    DeviceMem in_device_buf(in.GetMemorySize());
+    DeviceMem wei_device_buf(wei_device_result.GetMemorySize());
+    DeviceMem out_device_buf(out.GetMemorySize());
 
-    in_device_buf.ToDevice(in.mData.data());
-    out_device_buf.ToDevice(out.mData.data());
+    in_device_buf.ToDevice(in.data());
+    out_device_buf.ToDevice(out.data());
 
     // init to 0
     wei_device_buf.SetZero();
@@ -80,9 +80,9 @@ int run_conv_bwd_weight(bool do_verification,
     // do GEMM
     auto conv     = DeviceConvBwdWeightInstance{};
     auto invoker  = conv.MakeInvoker();
-    auto argument = conv.MakeArgument(static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
-                                      static_cast<WeiDataType*>(wei_device_buf.GetDeviceBuffer()),
-                                      static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
+    auto argument = conv.MakeArgument(in_device_buf.GetDeviceBuffer(),
+                                      wei_device_buf.GetDeviceBuffer(),
+                                      out_device_buf.GetDeviceBuffer(),
                                       conv_param.N_,
                                       conv_param.K_,
                                       conv_param.C_,
@@ -143,9 +143,9 @@ int run_conv_bwd_weight(bool do_verification,
         ref_invoker.Run(ref_argument);
 
-        wei_device_buf.FromDevice(wei_device_result.mData.data());
+        wei_device_buf.FromDevice(wei_device_result.data());
 
-        return ck::utils::check_err(wei_device_result.mData, wei_host_result.mData) ? 0 : 1;
+        return ck::utils::check_err(wei_device_result, wei_host_result) ? 0 : 1;
     }
 
     return 0;
...
 // SPDX-License-Identifier: MIT
 // Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
 
+#include <cstdlib>
+#include <initializer_list>
 #include <iostream>
 #include <numeric>
-#include <initializer_list>
-#include <cstdlib>
 
 #include "ck/ck.hpp"
 #include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
-#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
 #include "ck/tensor_operation/gpu/device/device_gemm_multiple_d_multiple_r_xdl_cshuffle.hpp"
 #include "ck/tensor_operation/gpu/device/device_elementwise.hpp"
+#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
 #include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
 
+#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
+#include "ck/library/utility/check_err.hpp"
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
 #include "ck/library/utility/host_tensor_generator.hpp"
-#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
-#include "ck/library/utility/check_err.hpp"
+#include "ck/library/utility/literals.hpp"
 
 template <ck::index_t... Is>
 using S = ck::Sequence<Is...>;
@@ -108,21 +109,20 @@ using DeviceNormalizeInstance = ck::tensor_operation::device::DeviceElementwise<
     ck::Sequence<8>>; // scalarPerVector: y(layerNorm_out)
 
 auto f_host_tensor_descriptor1d = [](std::size_t len, std::size_t stride) {
-    return HostTensorDescriptor(std::vector<std::size_t>({len}),
-                                std::vector<std::size_t>({stride}));
+    return HostTensorDescriptor({len}, {stride});
 };
 
+using namespace ck::literals;
+
 auto f_host_tensor_descriptor2d =
     [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
-        if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
+        if constexpr(std::is_same_v<decltype(layout), ck::tensor_layout::gemm::RowMajor>)
        {
-            return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
-                                        std::vector<std::size_t>({stride, 1}));
+            return HostTensorDescriptor({row, col}, {stride, 1_uz});
        }
        else
        {
-            return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
-                                        std::vector<std::size_t>({1, stride}));
+            return HostTensorDescriptor({row, col}, {1_uz, stride});
        }
    };
@@ -264,25 +264,23 @@ int main()
     gamma_n.GenerateTensorValue(GeneratorTensor_3<GammaDataType>{-1, 1});
     beta_n.GenerateTensorValue(GeneratorTensor_3<BetaDataType>{-1, 1});
 
-    DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
-    DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
-    DeviceMem bias_device_buf(sizeof(D0DataType) * bias_n.mDesc.GetElementSpaceSize());
-    DeviceMem d1_device_buf(sizeof(D1DataType) * d1_m_n.mDesc.GetElementSpaceSize());
-    DeviceMem e_device_buf(sizeof(EDataType) * e_m_n.mDesc.GetElementSpaceSize());
-    DeviceMem r0_Mean_device_buf(sizeof(R0DataType) * r0_Mean_m.mDesc.GetElementSpaceSize());
-    DeviceMem r1_MeanSquare_device_buf(sizeof(R1DataType) *
-                                       r1_MeanSquare_m.mDesc.GetElementSpaceSize());
-    DeviceMem gamma_device_buf(sizeof(GammaDataType) * gamma_n.mDesc.GetElementSpaceSize());
-    DeviceMem beta_device_buf(sizeof(BetaDataType) * beta_n.mDesc.GetElementSpaceSize());
-    DeviceMem layerNorm_device_buf(sizeof(LayerNormOutDataType) *
-                                   layerNorm_m_n.mDesc.GetElementSpaceSize());
+    DeviceMem a_device_buf(a_m_k.GetMemorySize());
+    DeviceMem b_device_buf(b_k_n.GetMemorySize());
+    DeviceMem bias_device_buf(bias_n.GetMemorySize());
+    DeviceMem d1_device_buf(d1_m_n.GetMemorySize());
+    DeviceMem e_device_buf(e_m_n.GetMemorySize());
+    DeviceMem r0_Mean_device_buf(r0_Mean_m.GetMemorySize());
+    DeviceMem r1_MeanSquare_device_buf(r1_MeanSquare_m.GetMemorySize());
+    DeviceMem gamma_device_buf(gamma_n.GetMemorySize());
+    DeviceMem beta_device_buf(beta_n.GetMemorySize());
+    DeviceMem layerNorm_device_buf(layerNorm_m_n.GetMemorySize());
 
-    a_device_buf.ToDevice(a_m_k.mData.data());
-    b_device_buf.ToDevice(b_k_n.mData.data());
-    bias_device_buf.ToDevice(bias_n.mData.data());
-    d1_device_buf.ToDevice(d1_m_n.mData.data());
-    gamma_device_buf.ToDevice(gamma_n.mData.data());
-    beta_device_buf.ToDevice(beta_n.mData.data());
+    a_device_buf.ToDevice(a_m_k.data());
+    b_device_buf.ToDevice(b_k_n.data());
+    bias_device_buf.ToDevice(bias_n.data());
+    d1_device_buf.ToDevice(d1_m_n.data());
+    gamma_device_buf.ToDevice(gamma_n.data());
+    beta_device_buf.ToDevice(beta_n.data());
 
     auto a_element_op = AElementOp{};
     auto b_element_op = BElementOp{};
@@ -371,9 +369,9 @@ int main()
                                            M,
                                            N);
 
-        layerNorm_device_buf.FromDevice(layerNorm_m_n.mData.data());
-        pass &= ck::utils::check_err(layerNorm_m_n.mData,
-                                     host_layerNorm_m_n.mData,
+        layerNorm_device_buf.FromDevice(layerNorm_m_n.data());
+        pass &= ck::utils::check_err(layerNorm_m_n,
+                                     host_layerNorm_m_n,
                                      "Error: Incorrect results layerNorm_m_n",
                                      1e-2,
                                      1e-2);
...
// SPDX-License-Identifier: MIT // SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. // Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#include <cstdlib>
#include <initializer_list>
#include <iostream> #include <iostream>
#include <numeric> #include <numeric>
#include <initializer_list>
#include <cstdlib>
#include "ck/ck.hpp" #include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp" #include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/device_gemm_multiple_d_multiple_r_xdl_cshuffle.hpp" #include "ck/tensor_operation/gpu/device/device_gemm_multiple_d_multiple_r_xdl_cshuffle.hpp"
#include "ck/tensor_operation/gpu/device/device_elementwise.hpp" #include "ck/tensor_operation/gpu/device/device_elementwise.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" #include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp" #include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp" #include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp" #include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp" #include "ck/library/utility/literals.hpp"
#include "ck/library/utility/check_err.hpp"
template <ck::index_t... Is> template <ck::index_t... Is>
using S = ck::Sequence<Is...>; using S = ck::Sequence<Is...>;
...@@ -107,21 +108,20 @@ using DeviceNormalizeInstance = ck::tensor_operation::device::DeviceElementwise< ...@@ -107,21 +108,20 @@ using DeviceNormalizeInstance = ck::tensor_operation::device::DeviceElementwise<
ck::Sequence<8>>; // scalarPerVector: y(layerNorm_out) ck::Sequence<8>>; // scalarPerVector: y(layerNorm_out)
auto f_host_tensor_descriptor1d = [](std::size_t len, std::size_t stride) { auto f_host_tensor_descriptor1d = [](std::size_t len, std::size_t stride) {
return HostTensorDescriptor(std::vector<std::size_t>({len}), return HostTensorDescriptor({len}, {stride});
std::vector<std::size_t>({stride}));
}; };
using namespace ck::literals;
auto f_host_tensor_descriptor2d = auto f_host_tensor_descriptor2d =
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) { [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value) if constexpr(std::is_same_v<decltype(layout), ck::tensor_layout::gemm::RowMajor>)
{ {
return HostTensorDescriptor(std::vector<std::size_t>({row, col}), return HostTensorDescriptor({row, col}, {stride, 1_uz});
std::vector<std::size_t>({stride, 1}));
} }
else else
{ {
return HostTensorDescriptor(std::vector<std::size_t>({row, col}), return HostTensorDescriptor({row, col}, {1_uz, stride});
std::vector<std::size_t>({1, stride}));
} }
}; };
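Two things change inside this descriptor helper: the layout dispatch moves from a run-time if on std::is_same to if constexpr with std::is_same_v, and the stride lists gain the 1_uz literal from ck/library/utility/literals.hpp so that both elements of each braced list have the same type, std::size_t. Presumably the brace-taking HostTensorDescriptor constructor deduces its element type from the lists, and a mixed {stride, 1} would not deduce to a single type. A hedged sketch of such a literal; the real header's namespace and details may differ:

#include <cstddef>
#include <initializer_list>

namespace sketch_literals {
// user-defined literal: 1_uz is a std::size_t constant
constexpr std::size_t operator""_uz(unsigned long long v)
{
    return static_cast<std::size_t>(v);
}
} // namespace sketch_literals

// stand-in for a constructor that deduces its element type from a braced list
template <typename X>
std::size_t first_of(std::initializer_list<X> l)
{
    return static_cast<std::size_t>(*l.begin());
}

int main()
{
    using namespace sketch_literals;
    std::size_t stride = 16;
    // first_of({stride, 1}) would fail to deduce X (std::size_t vs int);
    // with 1_uz both entries are std::size_t and deduction succeeds.
    return first_of({stride, 1_uz}) == 16 ? 0 : 1;
}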
...@@ -243,21 +243,19 @@ int main() ...@@ -243,21 +243,19 @@ int main()
gamma_n.GenerateTensorValue(GeneratorTensor_3<GammaDataType>{-1, 1}); gamma_n.GenerateTensorValue(GeneratorTensor_3<GammaDataType>{-1, 1});
beta_n.GenerateTensorValue(GeneratorTensor_3<BetaDataType>{-1, 1}); beta_n.GenerateTensorValue(GeneratorTensor_3<BetaDataType>{-1, 1});
DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize()); DeviceMem a_device_buf(a_m_k.GetMemorySize());
DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize()); DeviceMem b_device_buf(b_k_n.GetMemorySize());
DeviceMem e_device_buf(sizeof(EDataType) * e_m_n.mDesc.GetElementSpaceSize()); DeviceMem e_device_buf(e_m_n.GetMemorySize());
DeviceMem r0_Mean_device_buf(sizeof(R0DataType) * r0_Mean_m.mDesc.GetElementSpaceSize()); DeviceMem r0_Mean_device_buf(r0_Mean_m.GetMemorySize());
DeviceMem r1_MeanSquare_device_buf(sizeof(R1DataType) * DeviceMem r1_MeanSquare_device_buf(r1_MeanSquare_m.GetMemorySize());
r1_MeanSquare_m.mDesc.GetElementSpaceSize()); DeviceMem gamma_device_buf(gamma_n.GetMemorySize());
DeviceMem gamma_device_buf(sizeof(GammaDataType) * gamma_n.mDesc.GetElementSpaceSize()); DeviceMem beta_device_buf(beta_n.GetMemorySize());
DeviceMem beta_device_buf(sizeof(BetaDataType) * beta_n.mDesc.GetElementSpaceSize()); DeviceMem layerNorm_device_buf(layerNorm_m_n.GetMemorySize());
DeviceMem layerNorm_device_buf(sizeof(LayerNormOutDataType) *
layerNorm_m_n.mDesc.GetElementSpaceSize()); a_device_buf.ToDevice(a_m_k.data());
b_device_buf.ToDevice(b_k_n.data());
a_device_buf.ToDevice(a_m_k.mData.data()); gamma_device_buf.ToDevice(gamma_n.data());
b_device_buf.ToDevice(b_k_n.mData.data()); beta_device_buf.ToDevice(beta_n.data());
gamma_device_buf.ToDevice(gamma_n.mData.data());
beta_device_buf.ToDevice(beta_n.mData.data());
auto a_element_op = AElementOp{}; auto a_element_op = AElementOp{};
auto b_element_op = BElementOp{}; auto b_element_op = BElementOp{};
...@@ -345,12 +343,9 @@ int main() ...@@ -345,12 +343,9 @@ int main()
M, M,
N); N);
layerNorm_device_buf.FromDevice(layerNorm_m_n.mData.data()); layerNorm_device_buf.FromDevice(layerNorm_m_n.data());
pass &= ck::utils::check_err(layerNorm_m_n.mData, pass &= ck::utils::check_err(
host_layerNorm_m_n.mData, layerNorm_m_n, host_layerNorm_m_n, "Error: Incorrect results d1", 1e-3, 1e-3);
"Error: Incorrect results d1",
1e-3,
1e-3);
} }
{ {
......
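The verification hunks in the files above also stop reaching into .mData: check_err now receives the tensors themselves. That implies a range-based overload along these lines, sketched here under that assumption; the real ck::utils::check_err is considerably more general (type-dependent default tolerances, error statistics, integer paths):

#include <cmath>
#include <iostream>
#include <iterator>
#include <string_view>

// Hedged sketch: elementwise comparison of two iterable ranges against
// relative/absolute tolerances, reporting msg on the first failure.
template <typename Range>
bool check_err_sketch(const Range& result,
                      const Range& reference,
                      std::string_view msg,
                      double rtol = 1e-5,
                      double atol = 3e-6)
{
    auto r = std::begin(result);
    auto e = std::begin(reference);
    for(; r != std::end(result) && e != std::end(reference); ++r, ++e)
    {
        const double diff = std::abs(static_cast<double>(*r) - static_cast<double>(*e));
        const double tol  = atol + rtol * std::abs(static_cast<double>(*e));
        if(diff > tol)
        {
            std::cerr << msg << '\n';
            return false;
        }
    }
    return true;
}

A call shaped like the one above would then read check_err_sketch(layerNorm_m_n, host_layerNorm_m_n, "Error: Incorrect results layerNorm_m_n", 1e-2, 1e-2), with any range-like tensor accepted.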
...@@ -2,20 +2,22 @@ ...@@ -2,20 +2,22 @@
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. // Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#include <iostream> #include <iostream>
#include <numeric>
#include <initializer_list> #include <initializer_list>
#include <numeric>
#include "ck/ck.hpp" #include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/device_gemm_xdl_layernorm_cshuffle.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm_layernorm.hpp"
#include "ck/library/utility/check_err.hpp" #include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp" #include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp" #include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp" #include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" #include "ck/library/utility/literals.hpp"
#include "ck/tensor_operation/gpu/device/device_gemm_xdl_layernorm_cshuffle.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/utility/reduction_operator.hpp" #include "ck/utility/reduction_operator.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm_layernorm.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
// This example demonstrates a single kernel that runs GEMM and layernorm in one fused kernel // This example demonstrates a single kernel that runs GEMM and layernorm in one fused kernel
// //
...@@ -130,17 +132,17 @@ int main(int argc, char* argv[]) ...@@ -130,17 +132,17 @@ int main(int argc, char* argv[])
exit(0); exit(0);
} }
using namespace ck::literals;
auto f_host_tensor_descriptor = auto f_host_tensor_descriptor =
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) { [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value) if constexpr(std::is_same_v<decltype(layout), ck::tensor_layout::gemm::RowMajor>)
{ {
return HostTensorDescriptor(std::vector<std::size_t>({row, col}), return HostTensorDescriptor({row, col}, {stride, 1_uz});
std::vector<std::size_t>({stride, 1}));
} }
else else
{ {
return HostTensorDescriptor(std::vector<std::size_t>({row, col}), return HostTensorDescriptor({row, col}, {1_uz, stride});
std::vector<std::size_t>({1, stride}));
} }
}; };
...@@ -154,13 +156,13 @@ int main(int argc, char* argv[]) ...@@ -154,13 +156,13 @@ int main(int argc, char* argv[])
Tensor<C0DataType> c0_n_gamma(HostTensorDescriptor(std::vector<size_t>({size_t(N)}))); Tensor<C0DataType> c0_n_gamma(HostTensorDescriptor(std::vector<size_t>({size_t(N)})));
Tensor<C0DataType> c0_n_beta(HostTensorDescriptor(std::vector<size_t>({size_t(N)}))); Tensor<C0DataType> c0_n_beta(HostTensorDescriptor(std::vector<size_t>({size_t(N)})));
std::cout << "a_m_k: " << a_m_k.mDesc << std::endl; std::cout << "a_m_k: " << a_m_k.GetDesc() << std::endl;
std::cout << "b_k_n: " << b_k_n.mDesc << std::endl; std::cout << "b_k_n: " << b_k_n.GetDesc() << std::endl;
std::cout << "c_m_n: " << c_m_n_host_result.mDesc << std::endl; std::cout << "c_m_n: " << c_m_n_host_result.GetDesc() << std::endl;
std::cout << "c0_n_bias: " << c0_n_bias.mDesc << std::endl; std::cout << "c0_n_bias: " << c0_n_bias.GetDesc() << std::endl;
std::cout << "c0_m_n_add: " << c0_m_n_add.mDesc << std::endl; std::cout << "c0_m_n_add: " << c0_m_n_add.GetDesc() << std::endl;
std::cout << "c0_n_gamma: " << c0_n_gamma.mDesc << std::endl; std::cout << "c0_n_gamma: " << c0_n_gamma.GetDesc() << std::endl;
std::cout << "c0_n_beta: " << c0_n_beta.mDesc << std::endl; std::cout << "c0_n_beta: " << c0_n_beta.GetDesc() << std::endl;
switch(init_method) switch(init_method)
{ {
...@@ -185,20 +187,20 @@ int main(int argc, char* argv[]) ...@@ -185,20 +187,20 @@ int main(int argc, char* argv[])
c_m_n_host_result.GenerateTensorValue(GeneratorTensor_1<CDataType>{0}); c_m_n_host_result.GenerateTensorValue(GeneratorTensor_1<CDataType>{0});
acc_m_n_host_result.GenerateTensorValue(GeneratorTensor_1<AccDataType>{0}); acc_m_n_host_result.GenerateTensorValue(GeneratorTensor_1<AccDataType>{0});
DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize()); DeviceMem a_device_buf(a_m_k.GetMemorySize());
DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize()); DeviceMem b_device_buf(b_k_n.GetMemorySize());
DeviceMem c_device_buf(sizeof(CDataType) * c_m_n_device_result.mDesc.GetElementSpaceSize()); DeviceMem c_device_buf(c_m_n_device_result.GetMemorySize());
DeviceMem c0_bias_buf(sizeof(C0DataType) * c0_n_bias.mDesc.GetElementSpaceSize()); DeviceMem c0_bias_buf(c0_n_bias.GetMemorySize());
DeviceMem c0_add_buf(sizeof(C0DataType) * c0_m_n_add.mDesc.GetElementSpaceSize()); DeviceMem c0_add_buf(c0_m_n_add.GetMemorySize());
DeviceMem c0_gamma_buf(sizeof(C0DataType) * c0_n_gamma.mDesc.GetElementSpaceSize()); DeviceMem c0_gamma_buf(c0_n_gamma.GetMemorySize());
DeviceMem c0_beta_buf(sizeof(C0DataType) * c0_n_beta.mDesc.GetElementSpaceSize()); DeviceMem c0_beta_buf(c0_n_beta.GetMemorySize());
a_device_buf.ToDevice(a_m_k.mData.data()); a_device_buf.ToDevice(a_m_k.data());
b_device_buf.ToDevice(b_k_n.mData.data()); b_device_buf.ToDevice(b_k_n.data());
c0_bias_buf.ToDevice(c0_n_bias.mData.data()); c0_bias_buf.ToDevice(c0_n_bias.data());
c0_add_buf.ToDevice(c0_m_n_add.mData.data()); c0_add_buf.ToDevice(c0_m_n_add.data());
c0_gamma_buf.ToDevice(c0_n_gamma.mData.data()); c0_gamma_buf.ToDevice(c0_n_gamma.data());
c0_beta_buf.ToDevice(c0_n_beta.mData.data()); c0_beta_buf.ToDevice(c0_n_beta.data());
auto a_element_op = AElementOp{}; auto a_element_op = AElementOp{};
auto b_element_op = BElementOp{}; auto b_element_op = BElementOp{};
...@@ -208,13 +210,13 @@ int main(int argc, char* argv[]) ...@@ -208,13 +210,13 @@ int main(int argc, char* argv[])
// do GEMM // do GEMM
auto gemm = DeviceGemmInstance{}; auto gemm = DeviceGemmInstance{};
auto invoker = gemm.MakeInvoker(); auto invoker = gemm.MakeInvoker();
auto argument = gemm.MakeArgument(static_cast<ADataType*>(a_device_buf.GetDeviceBuffer()), auto argument = gemm.MakeArgument(a_device_buf.GetDeviceBuffer(),
static_cast<BDataType*>(b_device_buf.GetDeviceBuffer()), b_device_buf.GetDeviceBuffer(),
static_cast<CDataType*>(c_device_buf.GetDeviceBuffer()), c_device_buf.GetDeviceBuffer(),
static_cast<C0DataType*>(c0_add_buf.GetDeviceBuffer()), c0_add_buf.GetDeviceBuffer(),
static_cast<C0DataType*>(c0_bias_buf.GetDeviceBuffer()), c0_bias_buf.GetDeviceBuffer(),
static_cast<C0DataType*>(c0_gamma_buf.GetDeviceBuffer()), c0_gamma_buf.GetDeviceBuffer(),
static_cast<C0DataType*>(c0_beta_buf.GetDeviceBuffer()), c0_beta_buf.GetDeviceBuffer(),
M, M,
N, N,
K, K,
...@@ -252,7 +254,7 @@ int main(int argc, char* argv[]) ...@@ -252,7 +254,7 @@ int main(int argc, char* argv[])
bool pass = true; bool pass = true;
if(do_verification) if(do_verification)
{ {
c_device_buf.FromDevice(c_m_n_device_result.mData.data()); c_device_buf.FromDevice(c_m_n_device_result.data());
auto ref_gemm = ReferenceInstance{}; auto ref_gemm = ReferenceInstance{};
auto ref_invoker = ref_gemm.MakeInvoker(); auto ref_invoker = ref_gemm.MakeInvoker();
...@@ -274,15 +276,12 @@ int main(int argc, char* argv[]) ...@@ -274,15 +276,12 @@ int main(int argc, char* argv[])
if constexpr(std::is_same<CShuffleDataType, F32>::value) if constexpr(std::is_same<CShuffleDataType, F32>::value)
{ {
pass &= ck::utils::check_err( pass &= ck::utils::check_err(
c_m_n_device_result.mData, c_m_n_host_result.mData, "Error: Incorrect results c"); c_m_n_device_result, c_m_n_host_result, "Error: Incorrect results c");
} }
else if constexpr(std::is_same<CShuffleDataType, F16>::value) else if constexpr(std::is_same<CShuffleDataType, F16>::value)
{ {
pass &= ck::utils::check_err(c_m_n_device_result.mData, pass &= ck::utils::check_err(
c_m_n_host_result.mData, c_m_n_device_result, c_m_n_host_result, "Error: Incorrect results c", 1e-2, 1e-2);
"Error: Incorrect results c",
1e-2,
1e-2);
} }
} }
return pass ? 0 : 1; return pass ? 0 : 1;
......
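One more simplification shows up in the MakeArgument calls above: the static_cast<...*> wrappers around every GetDeviceBuffer() are gone and the untyped pointer is passed straight through. DeviceMem::GetDeviceBuffer() hands back an untyped buffer, so this reads as the device op accepting void* and casting internally; a sketch under that assumption, with illustrative names only, not the actual CK signatures:

// Stand-in for DeviceMem, reduced to the buffer accessor.
struct DeviceMemSketch
{
    void* p_dev = nullptr;
    void* GetDeviceBuffer() const { return p_dev; }
};

template <typename ADataType, typename BDataType, typename CDataType>
struct DeviceGemmSketch
{
    struct Argument
    {
        const ADataType* p_a;
        const BDataType* p_b;
        CDataType* p_c;
    };

    // one typed cast inside the op instead of one per call site
    static Argument MakeArgument(const void* p_a, const void* p_b, void* p_c)
    {
        return Argument{static_cast<const ADataType*>(p_a),
                        static_cast<const BDataType*>(p_b),
                        static_cast<CDataType*>(p_c)};
    }
};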
// SPDX-License-Identifier: MIT // SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. // Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#include <numeric>
#include <initializer_list>
#include <cstdlib> #include <cstdlib>
#include <initializer_list>
#include <numeric>
#include "ck/ck.hpp" #include "ck/ck.hpp"
#include "ck/stream_config.hpp" #include "ck/stream_config.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/library/utility/check_err.hpp" #include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp" #include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp" #include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp" #include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" #include "ck/library/utility/literals.hpp"
template <ck::index_t... Is> template <ck::index_t... Is>
using S = ck::Sequence<Is...>; using S = ck::Sequence<Is...>;
...@@ -60,17 +62,17 @@ bool run_cgemm_xdl(ck::index_t M, ...@@ -60,17 +62,17 @@ bool run_cgemm_xdl(ck::index_t M,
"sizeof CDataType and KernelCDataType is different!"); "sizeof CDataType and KernelCDataType is different!");
#endif #endif
using namespace ck::literals;
auto f_host_tensor_descriptor = auto f_host_tensor_descriptor =
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) { [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value) if constexpr(std::is_same_v<decltype(layout), ck::tensor_layout::gemm::RowMajor>)
{ {
return HostTensorDescriptor(std::vector<std::size_t>({row, col}), return HostTensorDescriptor({row, col}, {stride, 1_uz});
std::vector<std::size_t>({stride, 1}));
} }
else else
{ {
return HostTensorDescriptor(std::vector<std::size_t>({row, col}), return HostTensorDescriptor({row, col}, {1_uz, stride});
std::vector<std::size_t>({1, stride}));
} }
}; };
...@@ -83,12 +85,12 @@ bool run_cgemm_xdl(ck::index_t M, ...@@ -83,12 +85,12 @@ bool run_cgemm_xdl(ck::index_t M,
Tensor<KernelCDataType> c_m_n_imag_device_result( Tensor<KernelCDataType> c_m_n_imag_device_result(
f_host_tensor_descriptor(M, N, StrideC, CLayout{})); f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
std::cout << "a_m_k_real: " << a_m_k_real.mDesc << std::endl; std::cout << "a_m_k_real: " << a_m_k_real.GetDesc() << std::endl;
std::cout << "a_m_k_imag: " << a_m_k_imag.mDesc << std::endl; std::cout << "a_m_k_imag: " << a_m_k_imag.GetDesc() << std::endl;
std::cout << "b_k_n_real: " << b_k_n_real.mDesc << std::endl; std::cout << "b_k_n_real: " << b_k_n_real.GetDesc() << std::endl;
std::cout << "b_k_n_imag: " << b_k_n_imag.mDesc << std::endl; std::cout << "b_k_n_imag: " << b_k_n_imag.GetDesc() << std::endl;
std::cout << "c_m_n_real: " << c_m_n_real_device_result.mDesc << std::endl; std::cout << "c_m_n_real: " << c_m_n_real_device_result.GetDesc() << std::endl;
std::cout << "c_m_n_imag: " << c_m_n_imag_device_result.mDesc << std::endl; std::cout << "c_m_n_imag: " << c_m_n_imag_device_result.GetDesc() << std::endl;
switch(init_method) switch(init_method)
{ {
...@@ -108,18 +110,12 @@ bool run_cgemm_xdl(ck::index_t M, ...@@ -108,18 +110,12 @@ bool run_cgemm_xdl(ck::index_t M,
auto cgemm = DeviceCGemmInstance{}; auto cgemm = DeviceCGemmInstance{};
DeviceMem a_m_k_real_device_buf(sizeof(KernelADataType) * DeviceMem a_m_k_real_device_buf(a_m_k_real.GetMemorySize());
a_m_k_real.mDesc.GetElementSpaceSize()); DeviceMem a_m_k_imag_device_buf(a_m_k_imag.GetMemorySize());
DeviceMem a_m_k_imag_device_buf(sizeof(KernelADataType) * DeviceMem b_k_n_real_device_buf(b_k_n_real.GetMemorySize());
a_m_k_imag.mDesc.GetElementSpaceSize()); DeviceMem b_k_n_imag_device_buf(b_k_n_imag.GetMemorySize());
DeviceMem b_k_n_real_device_buf(sizeof(KernelBDataType) * DeviceMem c_m_n_real_device_buf(c_m_n_real_device_result.GetMemorySize());
b_k_n_real.mDesc.GetElementSpaceSize()); DeviceMem c_m_n_imag_device_buf(c_m_n_imag_device_result.GetMemorySize());
DeviceMem b_k_n_imag_device_buf(sizeof(KernelBDataType) *
b_k_n_imag.mDesc.GetElementSpaceSize());
DeviceMem c_m_n_real_device_buf(sizeof(KernelCDataType) *
c_m_n_real_device_result.mDesc.GetElementSpaceSize());
DeviceMem c_m_n_imag_device_buf(sizeof(KernelCDataType) *
c_m_n_imag_device_result.mDesc.GetElementSpaceSize());
DeviceMem workspace_device_buf(cgemm.GetWorkspaceSize(M, N, K, StrideA, StrideB, StrideC)); DeviceMem workspace_device_buf(cgemm.GetWorkspaceSize(M, N, K, StrideA, StrideB, StrideC));
#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4 #ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
...@@ -130,18 +126,18 @@ bool run_cgemm_xdl(ck::index_t M, ...@@ -130,18 +126,18 @@ bool run_cgemm_xdl(ck::index_t M,
Tensor<KernelBDataType> b_k_n_real_converted(b_k_n_real); Tensor<KernelBDataType> b_k_n_real_converted(b_k_n_real);
Tensor<KernelBDataType> b_k_n_imag_converted(b_k_n_imag); Tensor<KernelBDataType> b_k_n_imag_converted(b_k_n_imag);
a_m_k_real_device_buf.ToDevice(a_m_k_real_converted.mData.data()); a_m_k_real_device_buf.ToDevice(a_m_k_real_converted.data());
a_m_k_imag_device_buf.ToDevice(a_m_k_imag_converted.mData.data()); a_m_k_imag_device_buf.ToDevice(a_m_k_imag_converted.data());
b_k_n_real_device_buf.ToDevice(b_k_n_real_converted.mData.data()); b_k_n_real_device_buf.ToDevice(b_k_n_real_converted.data());
b_k_n_imag_device_buf.ToDevice(b_k_n_imag_converted.mData.data()); b_k_n_imag_device_buf.ToDevice(b_k_n_imag_converted.data());
} }
else else
#endif // CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4 #endif // CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
{ {
a_m_k_real_device_buf.ToDevice(a_m_k_real.mData.data()); a_m_k_real_device_buf.ToDevice(a_m_k_real.data());
a_m_k_imag_device_buf.ToDevice(a_m_k_imag.mData.data()); a_m_k_imag_device_buf.ToDevice(a_m_k_imag.data());
b_k_n_real_device_buf.ToDevice(b_k_n_real.mData.data()); b_k_n_real_device_buf.ToDevice(b_k_n_real.data());
b_k_n_imag_device_buf.ToDevice(b_k_n_imag.mData.data()); b_k_n_imag_device_buf.ToDevice(b_k_n_imag.data());
} }
auto a_element_op = AElementwiseOperation{}; auto a_element_op = AElementwiseOperation{};
...@@ -150,14 +146,13 @@ bool run_cgemm_xdl(ck::index_t M, ...@@ -150,14 +146,13 @@ bool run_cgemm_xdl(ck::index_t M,
// do GEMM // do GEMM
auto invoker = cgemm.MakeInvoker(); auto invoker = cgemm.MakeInvoker();
auto argument = auto argument = cgemm.MakeArgument(a_m_k_real_device_buf.GetDeviceBuffer(),
cgemm.MakeArgument(static_cast<KernelADataType*>(a_m_k_real_device_buf.GetDeviceBuffer()), a_m_k_imag_device_buf.GetDeviceBuffer(),
static_cast<KernelADataType*>(a_m_k_imag_device_buf.GetDeviceBuffer()), b_k_n_real_device_buf.GetDeviceBuffer(),
static_cast<KernelBDataType*>(b_k_n_real_device_buf.GetDeviceBuffer()), b_k_n_imag_device_buf.GetDeviceBuffer(),
static_cast<KernelBDataType*>(b_k_n_imag_device_buf.GetDeviceBuffer()), c_m_n_real_device_buf.GetDeviceBuffer(),
static_cast<KernelCDataType*>(c_m_n_real_device_buf.GetDeviceBuffer()), c_m_n_imag_device_buf.GetDeviceBuffer(),
static_cast<KernelCDataType*>(c_m_n_imag_device_buf.GetDeviceBuffer()), workspace_device_buf.GetDeviceBuffer(),
static_cast<KernelCDataType*>(workspace_device_buf.GetDeviceBuffer()),
M, M,
N, N,
K, K,
...@@ -209,8 +204,8 @@ bool run_cgemm_xdl(ck::index_t M, ...@@ -209,8 +204,8 @@ bool run_cgemm_xdl(ck::index_t M,
ref_invoker.Run(ref_argument); ref_invoker.Run(ref_argument);
c_m_n_real_device_buf.FromDevice(c_m_n_real_device_result.mData.data()); c_m_n_real_device_buf.FromDevice(c_m_n_real_device_result.data());
c_m_n_imag_device_buf.FromDevice(c_m_n_imag_device_result.mData.data()); c_m_n_imag_device_buf.FromDevice(c_m_n_imag_device_result.data());
bool result = true; bool result = true;
#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4 #ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
...@@ -219,14 +214,14 @@ bool run_cgemm_xdl(ck::index_t M, ...@@ -219,14 +214,14 @@ bool run_cgemm_xdl(ck::index_t M,
const Tensor<CDataType> c_m_n_real_device_result_converted(c_m_n_real_device_result); const Tensor<CDataType> c_m_n_real_device_result_converted(c_m_n_real_device_result);
const Tensor<CDataType> c_m_n_imag_device_result_converted(c_m_n_imag_device_result); const Tensor<CDataType> c_m_n_imag_device_result_converted(c_m_n_imag_device_result);
result = ck::utils::check_err(c_m_n_real_device_result_converted.mData, result = ck::utils::check_err(c_m_n_real_device_result_converted,
c_m_n_real_host_result.mData, c_m_n_real_host_result,
"Verification error: incorrect results in real part!", "Verification error: incorrect results in real part!",
1e-2f, 1e-2f,
1e-1f); 1e-1f);
result = result && ck::utils::check_err( result = result && ck::utils::check_err(
c_m_n_imag_device_result_converted.mData, c_m_n_imag_device_result_converted,
c_m_n_imag_host_result.mData, c_m_n_imag_host_result,
"Verification error: incorrect results in imaginary part!", "Verification error: incorrect results in imaginary part!",
1e-2f, 1e-2f,
1e-1f); 1e-1f);
...@@ -234,14 +229,14 @@ bool run_cgemm_xdl(ck::index_t M, ...@@ -234,14 +229,14 @@ bool run_cgemm_xdl(ck::index_t M,
else else
#endif // CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4 #endif // CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
{ {
result = ck::utils::check_err(c_m_n_real_device_result.mData, result = ck::utils::check_err(c_m_n_real_device_result,
c_m_n_real_host_result.mData, c_m_n_real_host_result,
"Verification error: incorrect results in real part!", "Verification error: incorrect results in real part!",
1e-2f, 1e-2f,
1e-1f); 1e-1f);
result = result && ck::utils::check_err( result = result && ck::utils::check_err(
c_m_n_imag_device_result.mData, c_m_n_imag_device_result,
c_m_n_imag_host_result.mData, c_m_n_imag_host_result,
"Verification error: incorrect results in imaginary part!", "Verification error: incorrect results in imaginary part!",
1e-2f, 1e-2f,
1e-1f); 1e-1f);
......
// SPDX-License-Identifier: MIT // SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. // Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#include <cstdlib>
#include <initializer_list>
#include <iostream> #include <iostream>
#include <numeric> #include <numeric>
#include <initializer_list>
#include <cstdlib>
#include <getopt.h> #include <getopt.h>
#include "ck/ck.hpp" #include "ck/ck.hpp"
...@@ -13,10 +14,12 @@ ...@@ -13,10 +14,12 @@
#include "ck/tensor_operation/gpu/device/reduction_operator_mapping.hpp" #include "ck/tensor_operation/gpu/device/reduction_operator_mapping.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" #include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_softmax.hpp"
#include "ck/library/utility/algorithm.hpp"
#include "ck/library/utility/check_err.hpp" #include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp" #include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_common_util.hpp" #include "ck/library/utility/host_common_util.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_softmax.hpp" #include "ck/library/utility/ranges.hpp"
using namespace ck::tensor_operation::device; using namespace ck::tensor_operation::device;
...@@ -148,14 +151,13 @@ int main(int argc, char* argv[]) ...@@ -148,14 +151,13 @@ int main(int argc, char* argv[])
Tensor<OutDataType> out_ref(args.inLengths); Tensor<OutDataType> out_ref(args.inLengths);
Tensor<OutDataType> out(args.inLengths); Tensor<OutDataType> out(args.inLengths);
auto inStrides = in.mDesc.GetStrides(); auto inStrides = in.GetStrides();
auto outStrides = out.mDesc.GetStrides();
AccDataType alpha = args.scales[0]; AccDataType alpha = args.scales[0];
AccDataType beta = args.scales[1]; AccDataType beta = args.scales[1];
std::cout << "in: " << in.mDesc << std::endl; std::cout << "in: " << in.GetDesc() << std::endl;
std::cout << "out: " << out.mDesc << std::endl; std::cout << "out: " << out.GetDesc() << std::endl;
std::size_t num_thread = 1; std::size_t num_thread = 1;
...@@ -181,21 +183,22 @@ int main(int argc, char* argv[]) ...@@ -181,21 +183,22 @@ int main(int argc, char* argv[])
} }
if(beta != 0.0f) if(beta != 0.0f)
for(size_t i = 0; i < out_ref.mDesc.GetElementSpaceSize(); i++) {
out.mData[i] = out_ref.mData[i]; ck::ranges::copy(out_ref, out.begin());
}
}; };
// std::cout << "beta = " << beta << std::endl; // std::cout << "beta = " << beta << std::endl;
// LogRangeAsType<float>(std::cout << "tensor in: " , in.mData, ",") << std::endl; // LogRangeAsType<float>(std::cout << "tensor in: " , in, ",") << std::endl;
// LogRangeAsType<float>(std::cout << "tensor prior out: " , out.mData, ",") << std::endl; // LogRangeAsType<float>(std::cout << "tensor prior out: " , out, ",") << std::endl;
// these buffers are usually provided by the user application // these buffers are usually provided by the user application
DeviceMem in_dev(sizeof(InDataType) * in.mDesc.GetElementSpaceSize()); DeviceMem in_dev(in.GetMemorySize());
DeviceMem out_dev(sizeof(OutDataType) * out.mDesc.GetElementSpaceSize()); DeviceMem out_dev(out.GetMemorySize());
in_dev.ToDevice(in.mData.data()); in_dev.ToDevice(in.data());
if(beta != 0.0f) if(beta != 0.0f)
out_dev.ToDevice(out.mData.data()); out_dev.ToDevice(out.data());
if(args.do_verification) if(args.do_verification)
{ {
...@@ -205,21 +208,17 @@ int main(int argc, char* argv[]) ...@@ -205,21 +208,17 @@ int main(int argc, char* argv[])
auto ref_arg = ref.MakeArgument(in, out_ref, alpha, beta, reduceDims); auto ref_arg = ref.MakeArgument(in, out_ref, alpha, beta, reduceDims);
auto invoker = ref.MakeInvoker(); auto invoker = ref.MakeInvoker();
invoker.Run(ref_arg); invoker.Run(ref_arg);
// LogRangeAsType<float>(std::cout << "tensor out_ref: ", out_ref.mData, ",") << std::endl; // LogRangeAsType<float>(std::cout << "tensor out_ref: ", out_ref, ",") << std::endl;
}; };
std::vector<ck::index_t> i_inLengths; using Indices = std::vector<ck::index_t>;
std::vector<ck::index_t> i_inStrides;
i_inLengths.assign(args.inLengths.begin(), args.inLengths.end());
i_inStrides.assign(inStrides.begin(), inStrides.end());
auto device_instance = DeviceInstance{}; auto device_instance = DeviceInstance{};
std::cout << i_inLengths.size() << ", " << i_inStrides.size() << std::endl; std::cout << args.inLengths.size() << ", " << inStrides.size() << std::endl;
auto argument_ptr = device_instance.MakeArgumentPointer(i_inLengths, auto argument_ptr = device_instance.MakeArgumentPointer(ck::ranges::to<Indices>(args.inLengths),
i_inStrides, ck::ranges::to<Indices>(inStrides),
reduceDims, reduceDims,
&alpha, &alpha,
&beta, &beta,
...@@ -244,16 +243,15 @@ int main(int argc, char* argv[]) ...@@ -244,16 +243,15 @@ int main(int argc, char* argv[])
if(args.do_verification) if(args.do_verification)
{ {
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false}); invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false});
out_dev.FromDevice(out.mData.data()); out_dev.FromDevice(out.data());
// LogRangeAsType<float>(std::cout << "tensor out: " , out.mData, ",") << std::endl; // LogRangeAsType<float>(std::cout << "tensor out: " , out, ",") << std::endl;
pass = pass && ck::utils::check_err(out.mData, out_ref.mData); pass = pass && ck::utils::check_err(out, out_ref);
}; };
float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, args.time_kernel}); float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, args.time_kernel});
std::size_t num_bytes = std::size_t num_bytes = in.GetElementSize() * sizeof(InDataType) +
in.mDesc.GetElementSize() * sizeof(InDataType) + (beta == 0.0f ? 1 : 2) * out.GetElementSize() * sizeof(OutDataType);
(beta == 0.0f ? 1 : 2) * out.mDesc.GetElementSize() * sizeof(OutDataType);
float gb_per_sec = num_bytes / 1.E6 / avg_time; float gb_per_sec = num_bytes / 1.E6 / avg_time;
......
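The softmax example picks up two small range utilities: ck::ranges::copy replaces the hand-written element copy loop, and ck::ranges::to<Indices> replaces the assign() dance that converted size_t lengths and strides into ck::index_t vectors. Hedged sketches of both, mirroring how the new call sites use them rather than the actual headers:

#include <algorithm>
#include <cstdint>
#include <iterator>
#include <vector>

namespace sketch {

// copy a whole input range to an output iterator, as ranges::copy is used above
template <typename Range, typename OutIt>
OutIt copy(const Range& r, OutIt out)
{
    return std::copy(std::begin(r), std::end(r), out);
}

// materialize a range as a new container, converting element types on the way
template <typename Container, typename Range>
Container to(const Range& r)
{
    return Container(std::begin(r), std::end(r));
}

} // namespace sketch

int main()
{
    using Indices = std::vector<std::int32_t>; // stand-in for std::vector<ck::index_t>
    std::vector<std::size_t> lengths{2, 3, 4};

    auto i_lengths = sketch::to<Indices>(lengths); // size_t -> index_t per element

    std::vector<std::size_t> lengths_copy(lengths.size());
    sketch::copy(lengths, lengths_copy.begin());

    return (i_lengths.size() == 3 && lengths_copy == lengths) ? 0 : 1;
}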
#include <iostream> // SPDX-License-Identifier: MIT
#include <numeric> // Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#include <initializer_list>
#include <cstdlib> #include "common.hpp"
#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/device_batched_gemm_multi_d_xdl.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"
#include "ck/library/utility/literals.hpp"
template <ck::index_t... Is>
using S = ck::Sequence<Is...>;
using BF16 = ck::bhalf_t;
using F32 = float;
using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using ADataType = BF16; using ADataType = BF16;
using BDataType = BF16; using BDataType = BF16;
......
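The remaining hunks, this one and the four below, all make the same move: the repeated preamble of each batched_gemm example collapses into a single #include "common.hpp". The header's contents are inferred here from the blocks each file drops, so treat this sketch as an assumption about what it collects, not its actual text; the per-file element aliases (F16, F32, BF16) also vanish, so presumably they live there too:

// common.hpp (sketch; contents inferred from the removed blocks above)
#pragma once

#include <cstdlib>
#include <initializer_list>
#include <iostream>
#include <numeric>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/device_batched_gemm_multi_d_xdl.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

// element-type aliases shared by the examples (assumed to move here)
using F16  = ck::half_t;
using F32  = float;
using BF16 = ck::bhalf_t;

using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;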
#include <iostream> // SPDX-License-Identifier: MIT
#include <numeric> // Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#include <initializer_list>
#include <cstdlib> #include "common.hpp"
#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/device_batched_gemm_multi_d_xdl.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"
#include "ck/library/utility/literals.hpp"
template <ck::index_t... Is>
using S = ck::Sequence<Is...>;
using F16 = ck::half_t;
using F32 = float;
using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using ADataType = F16; using ADataType = F16;
using BDataType = F16; using BDataType = F16;
......
#include <iostream> // SPDX-License-Identifier: MIT
#include <numeric> // Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#include <initializer_list>
#include <cstdlib>
#include "ck/ck.hpp" #include "common.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/device_batched_gemm_multi_d_xdl.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"
#include "ck/library/utility/literals.hpp"
template <ck::index_t... Is>
using S = ck::Sequence<Is...>;
using F32 = float;
using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using ADataType = F32; using ADataType = F32;
using BDataType = F32; using BDataType = F32;
......
#include <iostream> // SPDX-License-Identifier: MIT
#include <numeric> // Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#include <initializer_list>
#include <cstdlib>
#include "ck/ck.hpp" #include "common.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/device_batched_gemm_multi_d_xdl.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"
#include "ck/library/utility/literals.hpp"
template <ck::index_t... Is>
using S = ck::Sequence<Is...>;
using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using ADataType = ck::int4_t; using ADataType = ck::int4_t;
using BDataType = ck::int4_t; using BDataType = ck::int4_t;
......
#include <iostream> // SPDX-License-Identifier: MIT
#include <numeric> // Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#include <initializer_list>
#include <cstdlib> #include "common.hpp"
#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/device_batched_gemm_multi_d_xdl.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"
#include "ck/library/utility/literals.hpp"
template <ck::index_t... Is>
using S = ck::Sequence<Is...>;
using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using ADataType = int8_t; using ADataType = int8_t;
using BDataType = int8_t; using BDataType = int8_t;
......