Commit 4af77e1f authored by rocking

Support 1 dimension

parent 0d26477a
-add_example_executable(example_broadcast_add broadcast_add.cpp)
\ No newline at end of file
+add_example_executable(example_broadcast_add_2d broadcast_add_2d.cpp)
+add_example_executable(example_elementwise_add_1d elementwise_add_1d.cpp)
\ No newline at end of file
@@ -67,6 +67,11 @@ int main()
     ck::index_t N      = 1024;
     ck::index_t Stride = 1024;
 
+    auto f_host_tensor_descriptor1d = [](std::size_t len, std::size_t stride) {
+        return HostTensorDescriptor(std::vector<std::size_t>({len}),
+                                    std::vector<std::size_t>({stride}));
+    };
+
     auto f_host_tensor_descriptor2d = [](std::size_t row, std::size_t col, std::size_t stride) {
         return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
                                     std::vector<std::size_t>({stride, 1}));
@@ -74,8 +79,7 @@ int main()
     Tensor<ABDataType> a_m_n(f_host_tensor_descriptor2d(M, N, Stride));
-    Tensor<ABDataType> b_n(std::vector<std::size_t>({static_cast<std::size_t>(N)}),
-                           std::vector<std::size_t>({1}));
+    Tensor<ABDataType> b_n(f_host_tensor_descriptor1d(N, 1));
     Tensor<CDataType> c_m_n(f_host_tensor_descriptor2d(M, N, Stride));
...
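For clarity, the new helper is just a thinner spelling of the vector-based constructor it replaces; a minimal equivalence sketch (b_n_explicit is a hypothetical name for illustration):

    // Both lines build the same 1-D descriptor: length N, unit stride.
    Tensor<ABDataType> b_n(f_host_tensor_descriptor1d(N, 1));
    Tensor<ABDataType> b_n_explicit(std::vector<std::size_t>({static_cast<std::size_t>(N)}),
                                    std::vector<std::size_t>({1}));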
#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>
#include <half.hpp>
#include <math.h>
#include "check_err.hpp"
#include "config.hpp"
#include "device.hpp"
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "device_tensor.hpp"
#include "binary_element_wise_operation.hpp"
#include "device_binary_elementwise.hpp"
using F16 = ck::half_t;
using F32 = float;
using ABDataType = F16;
using CDataType = F16;
using EltwiseComputeDataType = F32;
using Add = ck::tensor_operation::binary_element_wise::Add;
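// Template arguments (matching the MakeDescriptor logic in the header change below):
// A data type, B data type, C data type, compute type, the elementwise functor,
// rank Dim = 1, and ScalarPerVector = 8 (presumably the width of each vectorized access).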
using DeviceElementwiseAddInstance = ck::tensor_operation::device::
DeviceBinaryElementwise<F16, F16, CDataType, EltwiseComputeDataType, Add, 1, 8>;
template <typename HostTensorA,
          typename HostTensorB,
          typename HostTensorC,
          typename ComputeDataType,
          typename Functor>
void host_elementwise1D(
HostTensorC& C, const HostTensorA& A, const HostTensorB& B, int M, Functor functor)
{
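    // Host reference: a plain 1-D elementwise pass; inputs are read per element
    // and accumulated in ComputeDataType before converting to the output type.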
for(int m = 0; m < M; ++m)
{
ComputeDataType Am = static_cast<ComputeDataType>(A(m));
ComputeDataType Bm = static_cast<ComputeDataType>(B(m));
ComputeDataType Cm = 0;
functor(Cm, Am, Bm);
        C(m) = static_cast<CDataType>(Cm); // convert the accumulated value to the output type
}
}
int main()
{
bool do_verification = true;
bool time_kernel = false;
ck::index_t M = 1024;
auto f_host_tensor_descriptor1d = [](std::size_t len, std::size_t stride) {
return HostTensorDescriptor(std::vector<std::size_t>({len}),
std::vector<std::size_t>({stride}));
};
Tensor<ABDataType> a_m(f_host_tensor_descriptor1d(M, 1));
Tensor<ABDataType> b_m(f_host_tensor_descriptor1d(M, 1));
Tensor<ABDataType> c_m(f_host_tensor_descriptor1d(M, 1));
a_m.GenerateTensorValue(GeneratorTensor_3<ABDataType>{0.0, 1.0});
b_m.GenerateTensorValue(GeneratorTensor_3<ABDataType>{0.0, 1.0});
DeviceMem a_m_device_buf(sizeof(ABDataType) * a_m.mDesc.GetElementSpace());
DeviceMem b_m_device_buf(sizeof(ABDataType) * b_m.mDesc.GetElementSpace());
DeviceMem c_m_device_buf(sizeof(CDataType) * c_m.mDesc.GetElementSpace());
a_m_device_buf.ToDevice(a_m.mData.data());
b_m_device_buf.ToDevice(b_m.mData.data());
auto broadcastAdd = DeviceElementwiseAddInstance{};
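    // The argument list here is assumed to mirror the 2-D broadcast_add example:
    // the a, b and c buffers, the shape {M}, one unit stride per tensor, the Add
    // functor, and 256 as the threadPerBlock value forwarded to the descriptor builder.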
auto argument = broadcastAdd.MakeArgumentPointer(a_m_device_buf.GetDeviceBuffer(),
b_m_device_buf.GetDeviceBuffer(),
c_m_device_buf.GetDeviceBuffer(),
{M},
{1},
{1},
{1},
Add{},
256);
if(!broadcastAdd.IsSupportedArgument(argument.get()))
{
        throw std::runtime_error("The runtime parameters are not supported by the "
                                 "DeviceBinaryElementwise instance, exiting!");
    }
auto broadcastAdd_invoker_ptr = broadcastAdd.MakeInvokerPointer();
float ave_time =
broadcastAdd_invoker_ptr->Run(argument.get(), StreamConfig{nullptr, time_kernel});
std::cout << "Perf: " << ave_time << " ms" << std::endl;
bool pass = true;
if(do_verification)
{
c_m_device_buf.FromDevice(c_m.mData.data());
Tensor<CDataType> host_c_m(f_host_tensor_descriptor1d(M, 1));
        host_elementwise1D<Tensor<ABDataType>,
                           Tensor<ABDataType>,
                           Tensor<CDataType>,
                           EltwiseComputeDataType,
                           Add>(host_c_m, a_m, b_m, M, Add{});
        pass &= ck::utils::check_err(
            c_m.mData, host_c_m.mData, "Error: Incorrect results", 1e-3, 1e-3);
}
return pass ? 0 : 1;
}
@@ -21,6 +21,27 @@ struct DeviceBinaryElementwise : public BaseOperator
 {
     static constexpr auto I0 = Number<0>{};
 
+    static auto MakeDescriptor_M0_1d(const std::vector<int>& shape,
+                                     const std::vector<int>& stride,
+                                     index_t gridSize,
+                                     index_t threadPerBlock)
+    {
+        // 1d desc - [m]
+        const auto desc_m0 =
+            make_naive_tensor_descriptor(make_tuple(shape[0]), make_tuple(stride[0]));
+
+        // pad
+        const auto m0           = desc_m0.GetLength(I0);
+        const index_t loop_step = gridSize * threadPerBlock * ScalarPerVector;
+        const auto pad          = math::integer_least_multiple(m0, loop_step) - m0;
+        const auto desc_m0_pad =
+            transform_tensor_descriptor(desc_m0,
+                                        make_tuple(make_right_pad_transform(m0, pad)),
+                                        make_tuple(Sequence<0>{}),
+                                        make_tuple(Sequence<0>{}));
+        return desc_m0_pad;
+    }
+
     static auto MakeDescriptor_M0_2d(const std::vector<int>& shape,
                                      const std::vector<int>& stride,
                                      index_t gridSize,
@@ -57,7 +78,9 @@ struct DeviceBinaryElementwise : public BaseOperator
                                 index_t gridSize,
                                 index_t threadPerBlock)
     {
-        if constexpr(Dim == 2)
+        if constexpr(Dim == 1)
+            return MakeDescriptor_M0_1d(shape, stride, gridSize, threadPerBlock);
+        else if constexpr(Dim == 2)
             return MakeDescriptor_M0_2d(shape, stride, gridSize, threadPerBlock);
         else
             return make_naive_tensor_descriptor(make_tuple(0), make_tuple(0));
...
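The only subtle step in MakeDescriptor_M0_1d is the right-padding, which rounds the length up so that whole gridSize * threadPerBlock * ScalarPerVector strides never step out of bounds. A standalone sketch of the arithmetic with hypothetical launch numbers (integer_least_multiple is spelled out as a plain round-up):

#include <cstdio>

int main()
{
    // Hypothetical values: 1000 elements, 2 blocks of 64 threads, 8 scalars per vector.
    const int m0        = 1000;
    const int loop_step = 2 * 64 * 8; // gridSize * threadPerBlock * ScalarPerVector = 1024
    // math::integer_least_multiple(m0, loop_step), written out:
    const int padded = (m0 + loop_step - 1) / loop_step * loop_step; // 1024
    const int pad    = padded - m0;                                  // 24
    // make_right_pad_transform(m0, pad) extends the descriptor by `pad` elements,
    // so the vectorized loop runs without a scalar tail over the last 24 slots.
    std::printf("padded length = %d, pad = %d\n", padded, pad);
    return 0;
}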