#include &lt;iostream&gt;
#include &lt;cstdlib&gt;

#include "check_err.hpp"
#include "config.hpp"
#include "device.hpp"
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "device_tensor.hpp"
#include "binary_element_wise_operation.hpp"
#include "device_binary_elementwise.hpp"

using F16 = ck::half_t;
using F32 = float;

using ABDataType             = F16;
using CDataType              = F16;
using EltwiseComputeDataType = F32;

using Add = ck::tensor_operation::binary_element_wise::Add;

// Device kernel instance. The trailing integer template arguments are tuning
// parameters (number of dimensions, M per thread, and scalar-per-vector widths
// for A, B, and C); the values below are representative choices.
using DeviceElementwiseAddInstance = ck::tensor_operation::device::
    DeviceBinaryElementwise&lt;ABDataType,
                            ABDataType,
                            CDataType,
                            EltwiseComputeDataType,
                            Add,
                            2,  // NumDim
                            8,  // MPerThread
                            8,  // AScalarPerVector
                            8,  // BScalarPerVector
                            8&gt;; // CScalarPerVector

// Host reference: C(m, n) = A(m, n) op B(n) when broadcastDim == 0,
// otherwise C(m, n) = A(m, n) op B(m).
template &lt;typename HostTensorA,
          typename HostTensorB,
          typename HostTensorC,
          typename ComputeDataType,
          typename Functor,
          int broadcastDim&gt;
void host_broadcast2D(
    HostTensorC& C, const HostTensorA& A, const HostTensorB& B, int M, int N, Functor functor)
{
    using ctype = ck::remove_reference_t&lt;decltype(C(0, 0))&gt;;

    for(int m = 0; m &lt; M; ++m)
    {
        for(int n = 0; n &lt; N; ++n)
        {
            ComputeDataType Amn = static_cast&lt;ComputeDataType&gt;(A(m, n));
            ComputeDataType Cmn = 0;
            if constexpr(broadcastDim == 0)
            {
                ComputeDataType Bn = static_cast&lt;ComputeDataType&gt;(B(n));
                functor(Cmn, Amn, Bn);
            }
            else
            {
                ComputeDataType Bm = static_cast&lt;ComputeDataType&gt;(B(m));
                functor(Cmn, Amn, Bm);
            }
            C(m, n) = static_cast&lt;ctype&gt;(Cmn);
        }
    }
}

int main()
{
    bool do_verification = true;
    bool time_kernel     = false;

    ck::index_t M      = 1024;
    ck::index_t N      = 1024;
    ck::index_t Stride = 1024;

    auto f_host_tensor_descriptor1d = [](std::size_t len, std::size_t stride) {
        return HostTensorDescriptor(std::vector&lt;std::size_t&gt;({len}),
                                    std::vector&lt;std::size_t&gt;({stride}));
    };

    auto f_host_tensor_descriptor2d = [](std::size_t row, std::size_t col, std::size_t stride) {
        return HostTensorDescriptor(std::vector&lt;std::size_t&gt;({row, col}),
                                    std::vector&lt;std::size_t&gt;({stride, 1}));
    };

    // A is an M x N matrix; B is a length-N vector broadcast along M.
    Tensor&lt;ABDataType&gt; a_m_n(f_host_tensor_descriptor2d(M, N, Stride));
    Tensor&lt;ABDataType&gt; b_n(f_host_tensor_descriptor1d(N, 1));
    Tensor&lt;CDataType&gt; c_m_n(f_host_tensor_descriptor2d(M, N, Stride));

    a_m_n.GenerateTensorValue(GeneratorTensor_3&lt;ABDataType&gt;{0.0, 1.0});
    b_n.GenerateTensorValue(GeneratorTensor_3&lt;ABDataType&gt;{0.0, 1.0});

    DeviceMem a_m_n_device_buf(sizeof(ABDataType) * a_m_n.mDesc.GetElementSpace());
    DeviceMem b_n_device_buf(sizeof(ABDataType) * b_n.mDesc.GetElementSpace());
    DeviceMem c_m_n_device_buf(sizeof(CDataType) * c_m_n.mDesc.GetElementSpace());

    a_m_n_device_buf.ToDevice(a_m_n.mData.data());
    b_n_device_buf.ToDevice(b_n.mData.data());

    auto broadcastAdd = DeviceElementwiseAddInstance{};
    auto argument     = broadcastAdd.MakeArgumentPointer(a_m_n_device_buf.GetDeviceBuffer(),
                                                     b_n_device_buf.GetDeviceBuffer(),
                                                     c_m_n_device_buf.GetDeviceBuffer(),
                                                     {M, N},
                                                     {Stride, 1},
                                                     {0, 1}, // broadcast in first dimension
                                                     {Stride, 1},
                                                     Add{});

    if(!broadcastAdd.IsSupportedArgument(argument.get()))
    {
        throw std::runtime_error("The runtime parameters are not supported by the "
                                 "DeviceBinaryElementwise_2D instance, exiting!");
    }

    auto broadcastAdd_invoker_ptr = broadcastAdd.MakeInvokerPointer();

    float ave_time =
        broadcastAdd_invoker_ptr-&gt;Run(argument.get(), StreamConfig{nullptr, time_kernel});

    std::cout &lt;&lt; "Perf: " &lt;&lt; ave_time &lt;&lt; " ms" &lt;&lt; std::endl;

    bool pass = true;

    if(do_verification)
    {
        c_m_n_device_buf.FromDevice(c_m_n.mData.data());

        Tensor&lt;CDataType&gt; host_c_m_n(f_host_tensor_descriptor2d(M, N, Stride));

        host_broadcast2D&lt;Tensor&lt;ABDataType&gt;,
                         Tensor&lt;ABDataType&gt;,
                         Tensor&lt;CDataType&gt;,
                         EltwiseComputeDataType,
                         Add,
                         0&gt;(host_c_m_n, a_m_n, b_n, M, N, Add{});

        pass &= ck::utils::check_err(
            c_m_n.mData, host_c_m_n.mData, "Error: Incorrect results d1", 1e-3, 1e-3);
    }

    return pass ? 0 : 1;
}