Commit 506df423 authored by Chao Liu's avatar Chao Liu
Browse files

refactor

parent b6bfde53
......@@ -2,9 +2,9 @@
#define CK_DUMMY_DYNAMIC_TRANSFORM_V1_HPP
#include "common_header.hpp"
#include "dynamic_tensor_descriptor.hpp"
#include "dynamic_tensor_descriptor_helper.hpp"
#include "dynamic_tensor_coordinate.hpp"
#include "dynamic_tensor_descriptor_v1.hpp"
#include "dynamic_tensor_descriptor_helper_v1.hpp"
#include "dynamic_tensor_coordinate_v1.hpp"
namespace ck {
......@@ -565,7 +565,7 @@ struct DummyDynamicTransform_v1
const index_t niter = p_wei_global[10];
auto in_gemmk_gemmn_coord =
make_dynamic_tensor_coordinate<2>(in_gemmk_gemmn_global_desc, idx);
make_dynamic_tensor_coordinate_v1(in_gemmk_gemmn_global_desc, idx);
for(index_t iter = 0; iter < niter; ++iter)
{
......
......@@ -2,20 +2,20 @@
#define CK_DUMMY_DYNAMIC_TRANSFORM_V2_HPP
#include "common_header.hpp"
#include "dynamic_tensor_descriptor_v2.hpp"
#include "dynamic_tensor_descriptor_helper_v2.hpp"
#include "dynamic_tensor_descriptor.hpp"
#include "dynamic_tensor_descriptor_helper.hpp"
namespace ck {
template <typename... Wei, typename... In, typename... Out>
__host__ __device__ constexpr auto map_convolution_into_gemm_fwd_v4r4(
const DynamicTensorDescriptor_v2<Wei...>& wei_k_c_y_x_global_desc,
const DynamicTensorDescriptor_v2<In...>& in_n_c_hi_wi_global_desc,
const DynamicTensorDescriptor_v2<Out...>& out_n_k_ho_wo_global_desc,
const MultiIndex<2> conv_strides,
const MultiIndex<2> conv_dilations,
const MultiIndex<2> in_left_pads,
const MultiIndex<2> in_right_pads)
__host__ __device__ constexpr auto
map_convolution_into_gemm_fwd_v4r4(const DynamicTensorDescriptor<Wei...>& wei_k_c_y_x_global_desc,
const DynamicTensorDescriptor<In...>& in_n_c_hi_wi_global_desc,
const DynamicTensorDescriptor<Out...>& out_n_k_ho_wo_global_desc,
const MultiIndex<2> conv_strides,
const MultiIndex<2> conv_dilations,
const MultiIndex<2> in_left_pads,
const MultiIndex<2> in_right_pads)
{
constexpr auto I0 = Number<0>{};
constexpr auto I1 = Number<1>{};
......@@ -47,8 +47,8 @@ __host__ __device__ constexpr auto map_convolution_into_gemm_fwd_v4r4(
const index_t InRightPadW = in_right_pads[I1];
// input tensor
const auto in_n_c_hip_wip_global_desc = transform_dynamic_tensor_descriptor_v2(
transform_dynamic_tensor_descriptor_v2(
const auto in_n_c_hip_wip_global_desc = transform_dynamic_tensor_descriptor(
transform_dynamic_tensor_descriptor(
in_n_c_hi_wi_global_desc,
make_tuple(DynamicPassThrough{N},
DynamicPassThrough{C},
......@@ -66,7 +66,7 @@ __host__ __device__ constexpr auto map_convolution_into_gemm_fwd_v4r4(
const index_t Hip = in_n_c_hip_wip_global_desc.GetLength(I2);
const index_t Wip = in_n_c_hip_wip_global_desc.GetLength(I3);
const auto in_n_c_y_ho_x_wo_global_desc = transform_dynamic_tensor_descriptor_v2(
const auto in_n_c_y_ho_x_wo_global_desc = transform_dynamic_tensor_descriptor(
in_n_c_hip_wip_global_desc,
make_tuple(
DynamicPassThrough{N},
......@@ -76,7 +76,7 @@ __host__ __device__ constexpr auto map_convolution_into_gemm_fwd_v4r4(
make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2>{}, Sequence<3>{}),
make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2, 3>{}, Sequence<4, 5>{}));
const auto in_gemmktotal_gemmn_global_desc = transform_dynamic_tensor_descriptor_v2(
const auto in_gemmktotal_gemmn_global_desc = transform_dynamic_tensor_descriptor(
in_n_c_y_ho_x_wo_global_desc,
make_tuple(DynamicMerge<3>{make_multi_index(C, Y, X)},
DynamicMerge<3>{make_multi_index(N, Ho, Wo)}),
......@@ -89,7 +89,7 @@ __host__ __device__ constexpr auto map_convolution_into_gemm_fwd_v4r4(
constexpr index_t GemmKPack = 8;
const index_t gemmk = gemmktotal / GemmKPack;
const auto in_gemmk_gemmn_gemmkpack_global_desc = transform_dynamic_tensor_descriptor_v2(
const auto in_gemmk_gemmn_gemmkpack_global_desc = transform_dynamic_tensor_descriptor(
in_gemmktotal_gemmn_global_desc,
make_tuple(DynamicUnMerge<2>{make_multi_index(gemmk, GemmKPack)},
DynamicPassThrough{gemmn}),
......@@ -105,9 +105,9 @@ __host__ __device__ constexpr auto map_convolution_into_gemm_fwd_v4r4(
#if 0
template <typename... Wei, typename... In, typename... Out>
__host__ __device__ constexpr auto map_convolution_into_gemm_bwd_v4r1(
const DynamicTensorDescriptor_v2<Wei...>& wei_k_c_y_x_global_desc,
const DynamicTensorDescriptor_v2<In...>& in_n_c_hi_wi_global_desc,
const DynamicTensorDescriptor_v2<Out...>& out_n_k_ho_wo_global_desc,
const DynamicTensorDescriptor<Wei...>& wei_k_c_y_x_global_desc,
const DynamicTensorDescriptor<In...>& in_n_c_hi_wi_global_desc,
const DynamicTensorDescriptor<Out...>& out_n_k_ho_wo_global_desc,
const MultiIndex<2> conv_strides,
const MultiIndex<2> conv_dilations,
const MultiIndex<2> in_left_pads,
......@@ -148,7 +148,7 @@ __host__ __device__ constexpr auto map_convolution_into_gemm_bwd_v4r1(
constexpr bool out_skip_out_of_bound_check = true;
#endif
constexpr auto out_n_k_ydot_htilda_xdot_wtilda_global_desc = transform_tensor_descriptor_v2(
constexpr auto out_n_k_ydot_htilda_xdot_wtilda_global_desc = transform_tensor_descriptor(
out_n_k_ho_wo_global_desc,
make_tuple(PassThrough{N},
PassThrough{K},
......@@ -158,7 +158,7 @@ __host__ __device__ constexpr auto map_convolution_into_gemm_bwd_v4r1(
make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2, 3>{}, Sequence<4, 5>{}));
constexpr auto out_n_k_ydot_htildaslice_xdot_wtildaslice_global_desc =
transform_tensor_descriptor_v2(
transform_tensor_descriptor(
out_n_k_ydot_htilda_xdot_wtilda_global_desc,
make_tuple(PassThrough{N},
PassThrough{K},
......@@ -230,15 +230,14 @@ struct DummyDynamicTransform_v2_1
const index_t niter = p_wei_global[10];
auto in_gemmk_gemmn_coord =
make_dynamic_tensor_coordinate_v2(in_gemmk_gemmn_global_desc, idx);
auto in_gemmk_gemmn_coord = make_dynamic_tensor_coordinate(in_gemmk_gemmn_global_desc, idx);
const auto in_gemmk_gemmn_coord_step = make_dynamic_tensor_coordinate_step_v2(
in_gemmk_gemmn_global_desc, make_multi_index(1, 0));
const auto in_gemmk_gemmn_coord_step =
make_dynamic_tensor_coordinate_step(in_gemmk_gemmn_global_desc, make_multi_index(1, 0));
for(index_t iter = 0; iter < niter; ++iter)
{
move_dynamic_tensor_coordinate_v2(
move_dynamic_tensor_coordinate(
in_gemmk_gemmn_global_desc, in_gemmk_gemmn_coord, in_gemmk_gemmn_coord_step);
// write
......@@ -308,7 +307,7 @@ struct DummyDynamicTransform_v2_1
const index_t InRightPadW = in_right_pads[i1];
#if 0
const auto in_n_c_hip_wip_global_desc = transform_dynamic_tensor_descriptor_v2(
const auto in_n_c_hip_wip_global_desc = transform_dynamic_tensor_descriptor(
move(in_n_c_hi_wi_global_desc),
make_tuple(DynamicPassThrough{N},
DynamicPassThrough{C},
......@@ -317,7 +316,7 @@ struct DummyDynamicTransform_v2_1
make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2>{}, Sequence<3>{}),
make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2>{}, Sequence<3>{}));
#elif 0
const auto in_n_c_hip_wip_global_desc = transform_dynamic_tensor_descriptor_v2(
const auto in_n_c_hip_wip_global_desc = transform_dynamic_tensor_descriptor(
move(in_n_c_hi_wi_global_desc),
make_tuple(DynamicPassThrough{N},
DynamicPassThrough{C},
......@@ -326,8 +325,8 @@ struct DummyDynamicTransform_v2_1
make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2>{}, Sequence<3>{}),
make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2>{}, Sequence<3>{}));
#else
const auto in_n_c_hip_wip_global_desc = transform_dynamic_tensor_descriptor_v2(
transform_dynamic_tensor_descriptor_v2(
const auto in_n_c_hip_wip_global_desc = transform_dynamic_tensor_descriptor(
transform_dynamic_tensor_descriptor(
move(in_n_c_hi_wi_global_desc),
make_tuple(DynamicPassThrough{N},
DynamicPassThrough{C},
......@@ -351,14 +350,14 @@ struct DummyDynamicTransform_v2_1
#if 1
const index_t niter = p_wei_global[10];
auto in_coord = make_dynamic_tensor_coordinate_v2(in_n_c_hip_wip_global_desc, idx);
auto in_coord = make_dynamic_tensor_coordinate(in_n_c_hip_wip_global_desc, idx);
const auto in_coord_step = make_dynamic_tensor_coordinate_step_v2(
const auto in_coord_step = make_dynamic_tensor_coordinate_step(
in_n_c_hip_wip_global_desc, make_multi_index(1, 0, 0, 0));
for(index_t iter = 0; iter < niter; ++iter)
{
move_dynamic_tensor_coordinate_v2(in_n_c_hip_wip_global_desc, in_coord, in_coord_step);
move_dynamic_tensor_coordinate(in_n_c_hip_wip_global_desc, in_coord, in_coord_step);
// write
float value = 1;
......@@ -381,7 +380,7 @@ struct DummyDynamicTransform_v2_1
}
#else
// write
// auto in_coord = make_dynamic_tensor_coordinate_v2(in_n_c_hi_wi_global_desc, idx);
// auto in_coord = make_dynamic_tensor_coordinate(in_n_c_hi_wi_global_desc, idx);
p_out_global[in_n_c_hip_wip_global_desc.CalculateOffset(idx)] = 1;
#endif
......@@ -429,26 +428,23 @@ struct DummyDynamicTransform_v2_fwd_v4r4
const index_t niter = p_wei_global[10];
auto in_gemmk_gemmn_gemmkpack_coord =
make_dynamic_tensor_coordinate_v2(in_gemmk_gemmn_gemmkpack_global_desc, idx);
make_dynamic_tensor_coordinate(in_gemmk_gemmn_gemmkpack_global_desc, idx);
const auto in_gemmk_gemmn_gemmkpack_coord_step_0_0_1 =
make_dynamic_tensor_coordinate_step_v2(in_gemmk_gemmn_gemmkpack_global_desc,
make_multi_index(0, 0, 1));
const auto in_gemmk_gemmn_gemmkpack_coord_step_0_0_1 = make_dynamic_tensor_coordinate_step(
in_gemmk_gemmn_gemmkpack_global_desc, make_multi_index(0, 0, 1));
const auto in_gemmk_gemmn_gemmkpack_coord_step_0_1_0 =
make_dynamic_tensor_coordinate_step_v2(in_gemmk_gemmn_gemmkpack_global_desc,
make_multi_index(0, 1, 0));
const auto in_gemmk_gemmn_gemmkpack_coord_step_0_1_0 = make_dynamic_tensor_coordinate_step(
in_gemmk_gemmn_gemmkpack_global_desc, make_multi_index(0, 1, 0));
const auto in_gemmk_gemmn_gemmkpack_coord_step_1_0_0 =
make_dynamic_tensor_coordinate_step_v2(in_gemmk_gemmn_gemmkpack_global_desc,
make_multi_index(1, 0, 0));
const auto in_gemmk_gemmn_gemmkpack_coord_step_1_0_0 = make_dynamic_tensor_coordinate_step(
in_gemmk_gemmn_gemmkpack_global_desc, make_multi_index(1, 0, 0));
// move (0, 0, 1)
for(index_t iter = 0; iter < niter; ++iter)
{
move_dynamic_tensor_coordinate_v2(in_gemmk_gemmn_gemmkpack_global_desc,
in_gemmk_gemmn_gemmkpack_coord,
in_gemmk_gemmn_gemmkpack_coord_step_0_0_1);
move_dynamic_tensor_coordinate(in_gemmk_gemmn_gemmkpack_global_desc,
in_gemmk_gemmn_gemmkpack_coord,
in_gemmk_gemmn_gemmkpack_coord_step_0_0_1);
// write
float value = 1;
......@@ -478,9 +474,9 @@ struct DummyDynamicTransform_v2_fwd_v4r4
// move (0, 1, 0)
for(index_t iter = 0; iter < niter; ++iter)
{
move_dynamic_tensor_coordinate_v2(in_gemmk_gemmn_gemmkpack_global_desc,
in_gemmk_gemmn_gemmkpack_coord,
in_gemmk_gemmn_gemmkpack_coord_step_0_1_0);
move_dynamic_tensor_coordinate(in_gemmk_gemmn_gemmkpack_global_desc,
in_gemmk_gemmn_gemmkpack_coord,
in_gemmk_gemmn_gemmkpack_coord_step_0_1_0);
// write
float value = 1;
......@@ -510,9 +506,9 @@ struct DummyDynamicTransform_v2_fwd_v4r4
// move (1, 0, 0)
for(index_t iter = 0; iter < niter; ++iter)
{
move_dynamic_tensor_coordinate_v2(in_gemmk_gemmn_gemmkpack_global_desc,
in_gemmk_gemmn_gemmkpack_coord,
in_gemmk_gemmn_gemmkpack_coord_step_1_0_0);
move_dynamic_tensor_coordinate(in_gemmk_gemmn_gemmkpack_global_desc,
in_gemmk_gemmn_gemmkpack_coord,
in_gemmk_gemmn_gemmkpack_coord_step_1_0_0);
// write
float value = 1;
......
#ifndef CK_DYNAMIC_TENSOR_COORDINATE_HPP
#define CK_DYNAMIC_TENSOR_COORDINATE_HPP
#ifndef CK_DYNAMIC_TENSOR_COORDINATE_V1_HPP
#define CK_DYNAMIC_TENSOR_COORDINATE_V1_HPP
#include "common_header.hpp"
#include "dynamic_tensor_descriptor.hpp"
#include "dynamic_tensor_descriptor_v1.hpp"
namespace ck {
......@@ -19,20 +19,20 @@ namespace ck {
// 1. Given step size in each dimension, update itself, or return a new tensor coordinate, so user
// can freely move the "point of location" inside the tensor
// wrapper class for DynamicNativeTensorCoordinate and DynamicTransformedTensorCoordinate
// wrapper class for DynamicNativeTensorCoordinate_v1 and DynamicTransformedTensorCoordinate_v1
template <typename TensorDesc>
struct DynamicTensorCoordinate;
struct DynamicTensorCoordinate_v1;
// tensor coordinate for native tensor
template <typename TensorDesc>
struct DynamicNativeTensorCoordinate
struct DynamicNativeTensorCoordinate_v1
{
using type = DynamicNativeTensorCoordinate;
using type = DynamicNativeTensorCoordinate_v1;
using tensor_desc_type = TensorDesc;
static constexpr index_t NDim = tensor_desc_type::GetNumOfDimension();
using Index = MultiIndex<NDim>;
__host__ __device__ explicit constexpr DynamicNativeTensorCoordinate(
__host__ __device__ explicit constexpr DynamicNativeTensorCoordinate_v1(
const tensor_desc_type& tensor_desc, const Index& idx)
: tensor_desc_{tensor_desc}, idx_{idx}, offset_{tensor_desc.CalculateOffset(idx)}
{
......@@ -118,17 +118,17 @@ struct DynamicNativeTensorCoordinate
// tensor coordinate for transformed tensor
template <typename TensorDesc>
struct DynamicTransformedTensorCoordinate
struct DynamicTransformedTensorCoordinate_v1
{
static constexpr index_t NDimUp = TensorDesc::GetNumOfDimension();
using UpperDesc = TensorDesc;
using UpperCoord = DynamicTransformedTensorCoordinate;
using UpperCoord = DynamicTransformedTensorCoordinate_v1;
using UpperIndex = MultiIndex<NDimUp>;
using LowerDesc = typename UpperDesc::LowerDesc;
using LowerCoord = typename DynamicTensorCoordinate<LowerDesc>::type;
using LowerCoord = typename DynamicTensorCoordinate_v1<LowerDesc>::type;
__host__ __device__ explicit constexpr DynamicTransformedTensorCoordinate(
__host__ __device__ explicit constexpr DynamicTransformedTensorCoordinate_v1(
const UpperDesc& tensor_desc_up, const UpperIndex& idx_up)
: tensor_desc_up_{tensor_desc_up},
idx_up_{idx_up},
......@@ -240,30 +240,32 @@ struct DynamicTransformedTensorCoordinate
template <index_t NDim>
__host__ __device__ constexpr auto
make_dynamic_tensor_coordinate(const DynamicNativeTensorDescriptor<NDim>& tensor_desc,
const MultiIndex<NDim>& idx)
make_dynamic_tensor_coordinate_v1(const DynamicNativeTensorDescriptor_v1<NDim>& tensor_desc,
const MultiIndex<NDim>& idx)
{
return DynamicNativeTensorCoordinate<DynamicNativeTensorDescriptor<NDim>>{tensor_desc, idx};
return DynamicNativeTensorCoordinate_v1<DynamicNativeTensorDescriptor_v1<NDim>>{tensor_desc,
idx};
}
template <index_t NDim, typename... Ts>
__host__ __device__ constexpr auto
make_dynamic_tensor_coordinate(const DynamicTransformedTensorDescriptor<Ts...>& tensor_desc,
const MultiIndex<NDim>& idx)
make_dynamic_tensor_coordinate_v1(const DynamicTransformedTensorDescriptor_v1<Ts...>& tensor_desc,
const MultiIndex<NDim>& idx)
{
static_assert(DynamicTransformedTensorDescriptor<Ts...>::GetNumOfDimension() == NDim,
static_assert(DynamicTransformedTensorDescriptor_v1<Ts...>::GetNumOfDimension() == NDim,
"wrong! inconsistent # of dimensions");
return DynamicTransformedTensorCoordinate<DynamicTransformedTensorDescriptor<Ts...>>{
return DynamicTransformedTensorCoordinate_v1<DynamicTransformedTensorDescriptor_v1<Ts...>>{
tensor_desc, idx};
}
template <typename TensorDesc>
struct DynamicTensorCoordinate
struct DynamicTensorCoordinate_v1
{
static constexpr index_t NDim = TensorDesc::GetNumOfDimension();
using type = decltype(make_dynamic_tensor_coordinate<NDim>(TensorDesc{}, MultiIndex<NDim>{}));
using type =
decltype(make_dynamic_tensor_coordinate_v1<NDim>(TensorDesc{}, MultiIndex<NDim>{}));
};
} // namespace ck
......
......@@ -6,29 +6,46 @@
namespace ck {
template <typename Lengths, typename Strides>
__host__ __device__ constexpr auto make_dynamic_native_tensor_descriptor(const Lengths& lengths,
const Strides& strides)
template <index_t N>
__host__ __device__ constexpr auto
make_dynamic_native_tensor_descriptor_packed(const MultiIndex<N>& lengths)
{
static_assert(Lengths::Size() == Strides::Size(), "wrong! Size not the same");
return DynamicNativeTensorDescriptor<Lengths::Size()>(lengths, strides);
const auto transforms = make_tuple(DynamicUnMerge<N>{lengths});
constexpr auto low_dim_hidden_idss = make_tuple(Sequence<0>{});
constexpr auto up_dim_hidden_idss =
make_tuple(typename arithmetic_sequence_gen<1, N + 1, 1>::type{});
constexpr auto visible_dim_hidden_ids = typename arithmetic_sequence_gen<1, N + 1, 1>::type{};
const index_t element_space_size =
container_reduce(lengths, math::multiplies<index_t>{}, index_t{1});
return DynamicTensorDescriptor<decltype(transforms),
decltype(low_dim_hidden_idss),
decltype(up_dim_hidden_idss),
decltype(visible_dim_hidden_ids)>{transforms,
element_space_size};
}
template <typename LowTensorDescriptor,
typename Transforms,
typename LowDimensionIds,
typename UpDimensionIds>
template <index_t N>
__host__ __device__ constexpr auto
transform_dynamic_tensor_descriptor(const LowTensorDescriptor& low_tensor_desc,
const Transforms& transforms,
LowDimensionIds,
UpDimensionIds)
make_dynamic_native_tensor_descriptor(const MultiIndex<N>& lengths, const MultiIndex<N>& strides)
{
return DynamicTransformedTensorDescriptor<LowTensorDescriptor,
Transforms,
LowDimensionIds,
UpDimensionIds>{low_tensor_desc, transforms};
const auto transforms = make_tuple(DynamicEmbed<N>{lengths, strides});
constexpr auto low_dim_hidden_idss = make_tuple(Sequence<0>{});
constexpr auto up_dim_hidden_idss =
make_tuple(typename arithmetic_sequence_gen<1, N + 1, 1>::type{});
constexpr auto visible_dim_hidden_ids = typename arithmetic_sequence_gen<1, N + 1, 1>::type{};
index_t element_space_size = 1;
static_for<0, N, 1>{}([&](auto i) { element_space_size += (lengths[i] - 1) * strides[i]; });
return DynamicTensorDescriptor<decltype(transforms),
decltype(low_dim_hidden_idss),
decltype(up_dim_hidden_idss),
decltype(visible_dim_hidden_ids)>{transforms,
element_space_size};
}
} // namespace ck
......
#ifndef CK_DYNAMIC_TENSOR_DESCRIPTOR_HELPER_V1_HPP
#define CK_DYNAMIC_TENSOR_DESCRIPTOR_HELPER_V1_HPP
#include "common_header.hpp"
#include "dynamic_tensor_descriptor_v1.hpp"
namespace ck {
// Build a native (lengths + strides) v1 tensor descriptor from the given
// per-dimension lengths and strides. Lengths and Strides must report the
// same compile-time size.
template <typename Lengths, typename Strides>
__host__ __device__ constexpr auto make_dynamic_native_tensor_descriptor_v1(const Lengths& lengths,
                                                                            const Strides& strides)
{
    constexpr index_t ndim = Lengths::Size();

    static_assert(ndim == Strides::Size(), "wrong! Size not the same");

    return DynamicNativeTensorDescriptor_v1<ndim>(lengths, strides);
}
// Stack a tuple of multi-index transforms on top of an existing (v1) tensor
// descriptor, producing a transformed descriptor. LowDimensionIds and
// UpDimensionIds are tag types mapping each transform to the lower/upper
// dimensions it touches; only their types matter, so they are passed by value
// and left unnamed.
template <typename LowTensorDescriptor,
          typename Transforms,
          typename LowDimensionIds,
          typename UpDimensionIds>
__host__ __device__ constexpr auto
transform_dynamic_tensor_descriptor_v1(const LowTensorDescriptor& low_tensor_desc,
                                       const Transforms& transforms,
                                       LowDimensionIds,
                                       UpDimensionIds)
{
    using TransformedDesc = DynamicTransformedTensorDescriptor_v1<LowTensorDescriptor,
                                                                  Transforms,
                                                                  LowDimensionIds,
                                                                  UpDimensionIds>;

    return TransformedDesc{low_tensor_desc, transforms};
}
} // namespace ck
#endif
#ifndef CK_DYNAMIC_TENSOR_DESCRIPTOR_HELPER_V2_HPP
#define CK_DYNAMIC_TENSOR_DESCRIPTOR_HELPER_V2_HPP
#include "common_header.hpp"
#include "dynamic_tensor_descriptor_v2.hpp"
namespace ck {
// Make a v2 descriptor for a packed (contiguous) tensor of the given lengths:
// a single UnMerge transform maps the N visible dimensions onto hidden
// dimension 0 (the linear offset).
template <index_t N>
__host__ __device__ constexpr auto
make_dynamic_native_tensor_descriptor_packed_v2(const MultiIndex<N>& lengths)
{
    // hidden dim 0 is the offset; hidden dims 1..N carry the visible dims
    using VisibleIds = typename arithmetic_sequence_gen<1, N + 1, 1>::type;

    const auto transforms = make_tuple(DynamicUnMerge<N>{lengths});

    constexpr auto low_dim_hidden_idss    = make_tuple(Sequence<0>{});
    constexpr auto up_dim_hidden_idss     = make_tuple(VisibleIds{});
    constexpr auto visible_dim_hidden_ids = VisibleIds{};

    // packed layout: element space is the product of all lengths
    index_t element_space_size = 1;

    static_for<0, N, 1>{}([&](auto i) { element_space_size *= lengths[i]; });

    return DynamicTensorDescriptor_v2<decltype(transforms),
                                      decltype(low_dim_hidden_idss),
                                      decltype(up_dim_hidden_idss),
                                      decltype(visible_dim_hidden_ids)>{transforms,
                                                                        element_space_size};
}
// Make a v2 descriptor for a strided tensor: a single Embed transform maps
// visible index i through strides[i] onto hidden dimension 0 (the linear
// offset).
template <index_t N>
__host__ __device__ constexpr auto
make_dynamic_native_tensor_descriptor_v2(const MultiIndex<N>& lengths, const MultiIndex<N>& strides)
{
    // hidden dim 0 is the offset; hidden dims 1..N carry the visible dims
    using VisibleIds = typename arithmetic_sequence_gen<1, N + 1, 1>::type;

    const auto transforms = make_tuple(DynamicEmbed<N>{lengths, strides});

    constexpr auto low_dim_hidden_idss    = make_tuple(Sequence<0>{});
    constexpr auto up_dim_hidden_idss     = make_tuple(VisibleIds{});
    constexpr auto visible_dim_hidden_ids = VisibleIds{};

    // element space = 1 + sum_i (lengths[i] - 1) * strides[i]
    // i.e. the largest reachable offset, plus one
    index_t space = 1;

    static_for<0, N, 1>{}([&](auto idim) { space += (lengths[idim] - 1) * strides[idim]; });

    return DynamicTensorDescriptor_v2<decltype(transforms),
                                      decltype(low_dim_hidden_idss),
                                      decltype(up_dim_hidden_idss),
                                      decltype(visible_dim_hidden_ids)>{transforms, space};
}
} // namespace ck
#endif
#ifndef CK_DYNAMIC_TENSOR_DESCRIPTOR_V1_HPP
#define CK_DYNAMIC_TENSOR_DESCRIPTOR_V1_HPP
#include "common_header.hpp"
#include "dynamic_multi_index_transform.hpp"
namespace ck {
// Tensor descriptor (v1) for a "native" tensor: one runtime length and one
// runtime stride per dimension; the offset of a multi-index is the
// stride-weighted sum of its components.
template <index_t NDim>
struct DynamicNativeTensorDescriptor_v1
{
    using Index = MultiIndex<NDim>;

    const Index lengths_;
    const Index strides_;

    // construct from explicit per-dimension lengths and strides
    __host__ __device__ explicit constexpr DynamicNativeTensorDescriptor_v1(const Index& lengths,
                                                                            const Index& strides)
        : lengths_{lengths}, strides_{strides}
    {
    }

    // default-construct with all-zero lengths and strides
    __host__ __device__ explicit constexpr DynamicNativeTensorDescriptor_v1()
        : lengths_{make_zero_multi_index<NDim>()}, strides_{make_zero_multi_index<NDim>()}
    {
    }

    __host__ __device__ static constexpr index_t GetNumOfDimension() { return NDim; }

    __host__ __device__ constexpr auto GetLengths() const { return lengths_; }

    __host__ __device__ constexpr auto GetStrides() const { return strides_; }

    template <index_t IDim>
    __host__ __device__ constexpr index_t GetLength(Number<IDim>) const
    {
        return lengths_[Number<IDim>{}];
    }

    template <index_t IDim>
    __host__ __device__ constexpr index_t GetStride(Number<IDim>) const
    {
        return strides_[Number<IDim>{}];
    }

    // number of elements = product of all lengths
    __host__ __device__ constexpr index_t GetElementSize() const
    {
        return container_reduce(GetLengths(), math::multiplies<index_t>{}, index_t{1});
    }

    // addressable span: largest reachable offset, plus one
    __host__ __device__ constexpr index_t GetElementSpace() const
    {
        index_t space = 1;

        static_for<0, NDim, 1>{}(
            [&](auto idim) { space += (GetLength(idim) - 1) * GetStride(idim); });

        return space;
    }

    // linear offset of a multi-index: sum_i idx[i] * stride[i]
    template <typename Idx>
    __host__ __device__ constexpr index_t CalculateOffset(const Idx& idx) const
    {
        index_t offset = 0;

        static_for<0, NDim, 1>{}([&](auto idim) { offset += idx[idim] * GetStride(idim); });

        return offset;
    }

    // an offset difference is linear in the index difference
    template <typename IdxDiff>
    __host__ __device__ constexpr index_t CalculateOffsetDiff(const IdxDiff& idx_diff) const
    {
        return CalculateOffset(idx_diff);
    }

    // true iff 0 <= idx[i] < length[i] for every dimension
    template <typename Idx>
    __host__ __device__ constexpr bool IsUpperIndexValid(const Idx& idx) const
    {
        bool is_valid = true;

        static_for<0, NDim, 1>{}([&](auto idim) {
            is_valid = is_valid && idx[idim] >= 0 && idx[idim] < GetLength(idim);
        });

        return is_valid;
    }
};
// Tensor descriptor (v1) for a transformed tensor: wraps a lower tensor
// descriptor together with a tuple of multi-index transforms. Each transform
// maps a subset of the upper (visible) dimensions, named by UpDimensionIds,
// onto a subset of the lower dimensions, named by LowDimensionIds. Index and
// offset calculations are delegated transform-by-transform to the lower
// descriptor.
template <typename LowTensorDescriptor, // DynamicNativeTensorDescriptor_v1 or
                                        // DynamicTransformedTensorDescriptor_v1
          typename Transforms,          // Tuple<MultiIndexTransforms...>
          typename LowDimensionIds,     // Tuple<Sequence<...>>
          typename UpDimensionIds>      // Tuple<Sequence<...>>
struct DynamicTransformedTensorDescriptor_v1
{
    using LowerDesc = LowTensorDescriptor;
    using UpperDesc = DynamicTransformedTensorDescriptor_v1;

    // number of transforms stacked onto the lower descriptor
    static constexpr index_t NTransform = Transforms::Size();

    const LowerDesc low_tensor_desc_;
    const Transforms transforms_;

    __host__ __device__ static constexpr index_t GetNumOfLowerDimension()
    {
        return LowerDesc::GetNumOfDimension();
    }

    // upper dimension count = total number of upper ids across all transforms
    __host__ __device__ static constexpr index_t GetNumOfUpperDimension()
    {
        index_t ndim_up = 0;

        static_for<0, NTransform, 1>{}([&](auto i) constexpr {
            constexpr auto tmp = UpDimensionIds{}.At(i);
            ndim_up += decltype(tmp)::Size();
        });

        return ndim_up;
    }

    static constexpr index_t NDimUp  = GetNumOfUpperDimension();
    static constexpr index_t NDimLow = GetNumOfLowerDimension();

    using UpperIndex = MultiIndex<NDimUp>;
    using LowerIndex = MultiIndex<NDimLow>;

    // functor: concatenate several Sequences into one
    struct lambda_merge_sequences
    {
        template <typename... Xs>
        __host__ __device__ constexpr auto operator()(Xs... xs) const
        {
            return merge_sequences(xs...);
        }
    };

    // functor: concatenate several containers into one
    struct lambda_merge_arrays
    {
        template <typename... Xs>
        __host__ __device__ constexpr auto operator()(Xs... xs) const
        {
            return container_cat(xs...);
        }
    };

    // Construct from the lower descriptor and the transform tuple; the
    // static_asserts validate that the dimension-id configuration is a
    // complete, duplicate-free cover of both dimension sets.
    __host__ __device__ explicit constexpr DynamicTransformedTensorDescriptor_v1(
        const LowerDesc& low_tensor_desc, const Transforms& transforms)
        : low_tensor_desc_{low_tensor_desc}, transforms_{transforms}
    {
        static_assert(NTransform == Transforms::Size() && NTransform == LowDimensionIds::Size() &&
                          NTransform == UpDimensionIds::Size(),
                      "wrong! # of transformations not the same");

        // sanity check:
        //   LowDimensionIds should include all low-dimensions,
        //   UpDimensionIds should include all up-dimensions
        using unsorted_up_dimension_ids =
            decltype(unpack(lambda_merge_sequences{}, UpDimensionIds{}));

        using sorted_up_dimension_ids =
            typename sequence_sort<unsorted_up_dimension_ids, math::less<index_t>>::type;

        static_assert(sorted_up_dimension_ids::Size() == NDimUp &&
                          is_valid_sequence_map<sorted_up_dimension_ids>{},
                      "wrong! UpDimensionIds is not configured correctly");

        using unsorted_low_dimension_ids =
            decltype(unpack(lambda_merge_sequences{}, LowDimensionIds{}));

        using sorted_low_dimension_ids =
            typename sequence_sort<unsorted_low_dimension_ids, math::less<index_t>>::type;

        static_assert(sorted_low_dimension_ids::Size() == NDimLow &&
                          is_valid_sequence_map<sorted_low_dimension_ids>{},
                      "wrong! LowDimensionIds is not configured correctly");

        // TODO: sanity check: while an up-dimension may be associated with multiple
        // transformations, a low-dimension should be associated with only one
        // transformation

        // TODO: sanity check: GetLowerLengths of each transform should be consistent
        // with the lengths of the lower tensor descriptor
    }

    // default constructor: default-construct the lower descriptor and transforms
    __host__ __device__ explicit constexpr DynamicTransformedTensorDescriptor_v1()
        : low_tensor_desc_{}, transforms_{}
    {
    }

    // visible dimensionality is the upper dimensionality
    __host__ __device__ static constexpr index_t GetNumOfDimension()
    {
        return GetNumOfUpperDimension();
    }

    // Gather per-transform upper lengths and reorder them so they line up
    // with the sorted upper dimension ids (i.e. visible dimension order).
    __host__ __device__ constexpr auto GetUpperLengths() const
    {
        // sort upper-dimension-ids
        constexpr auto unsorted_up_dimension_ids =
            unpack(lambda_merge_sequences{}, UpDimensionIds{});

        using sort_up_dimension_ids = sequence_unique_sort<decltype(unsorted_up_dimension_ids),
                                                           math::less<index_t>,
                                                           math::equal<index_t>>;

        constexpr auto sorted2unsorted_map = typename sort_up_dimension_ids::sorted2unsorted_map{};

        // sort upper-lengths
        const auto tuple_of_up_lengths =
            transform_tuples([](const auto& tran) constexpr { return tran.GetUpperLengths(); },
                             transforms_);

        const auto unsorted_up_lengths = unpack(lambda_merge_arrays{}, tuple_of_up_lengths);

        const auto sorted_up_lengths =
            container_reorder_given_new2old(unsorted_up_lengths, sorted2unsorted_map);

        return sorted_up_lengths;
    }

    __host__ __device__ constexpr auto GetLengths() const { return GetUpperLengths(); }

    template <index_t IDim>
    __host__ __device__ constexpr index_t GetLength(Number<IDim>) const
    {
        return GetLengths()[Number<IDim>{}];
    }

    // number of (visible) elements = product of upper lengths
    __host__ __device__ constexpr index_t GetElementSize() const
    {
        return container_reduce(GetLengths(), math::multiplies<index_t>{}, index_t{1});
    }

    // transforms do not change the underlying storage footprint
    __host__ __device__ constexpr index_t GetElementSpace() const
    {
        return low_tensor_desc_.GetElementSpace();
    }

    __host__ __device__ constexpr auto GetLowerTensorDescriptor() const { return low_tensor_desc_; }

    // Compute the lower index from an upper index, one transform at a time.
    // idx_low is written in place through per-transform element views.
    template <typename LowIdx, typename UpIdx>
    __host__ __device__ void CalculateLowerIndex(LowIdx& idx_low, const UpIdx& idx_up) const
    {
        static_for<0, NTransform, 1>{}([&](auto itran) constexpr {
            const auto tran = transforms_.At(itran);

            const auto idx_up_part = pick_container_element(idx_up, UpDimensionIds{}.At(itran));
            auto idx_low_part      = pick_container_element(idx_low, LowDimensionIds{}.At(itran));

            tran.CalculateLowerIndex(idx_low_part, idx_up_part);
        });
    }

    // Compute the change in the lower index caused by a change in the upper
    // index, handing each transform the old upper/lower indices it may need.
    template <typename LowIdxDiff, typename UpIdxDiff, typename LowIdx, typename UpIdx>
    __host__ __device__ void CalculateLowerIndexDiff(LowIdxDiff& idx_low_diff,
                                                     const UpIdxDiff& idx_up_diff,
                                                     const LowIdx& idx_low_old,
                                                     const UpIdx& idx_up_old) const
    {
        static_for<0, NTransform, 1>{}([&](auto itran) {
            const auto tran = transforms_.At(itran);

            const auto idx_up_diff_part =
                pick_container_element(idx_up_diff, UpDimensionIds{}.At(itran));

            const auto idx_up_old_part =
                pick_container_element(idx_up_old, UpDimensionIds{}.At(itran));

            const auto idx_low_old_part =
                pick_container_element(idx_low_old, LowDimensionIds{}.At(itran));

            auto idx_low_diff_part =
                pick_container_element(idx_low_diff, LowDimensionIds{}.At(itran));

            tran.CalculateLowerIndexDiff(
                idx_low_diff_part, idx_up_diff_part, idx_low_old_part, idx_up_old_part);
        });
    }

    // value-returning convenience overload
    template <typename UpIdx>
    __host__ __device__ constexpr auto CalculateLowerIndex(const UpIdx& idx_up) const
    {
        LowerIndex idx_low;

        CalculateLowerIndex(idx_low, idx_up);

        return idx_low;
    }

    // value-returning convenience overload
    template <typename UpIdxDiff, typename LowIdx, typename UpIdx>
    __host__ __device__ constexpr auto CalculateLowerIndexDiff(const UpIdxDiff& idx_up_diff,
                                                               const LowIdx& idx_low_old,
                                                               const UpIdx& idx_up_old) const
    {
        LowerIndex idx_low_diff;

        CalculateLowerIndexDiff(idx_low_diff, idx_up_diff, idx_low_old, idx_up_old);

        return idx_low_diff;
    }

    // linear offset = lower descriptor's offset of the mapped lower index
    __host__ __device__ constexpr index_t CalculateOffset(const UpperIndex& idx_up) const
    {
        return low_tensor_desc_.CalculateOffset(CalculateLowerIndex(idx_up));
    }

    // true iff 0 <= idx_up[i] < GetLength(i) for every upper dimension
    __host__ __device__ constexpr bool IsUpperIndexValid(const UpperIndex& idx_up) const
    {
        bool flag = true;

        static_for<0, NDimUp, 1>{}(
            [&](auto i) { flag = flag && idx_up[i] >= 0 && idx_up[i] < GetLength(i); });

        return flag;
    }

    // A valid upper index may still map outside the valid lower region (e.g.
    // through padding transforms); query only the transforms whose mapping
    // is not always valid.
    __host__ __device__ constexpr bool
    IsValidUpperIndexMappedToValidLowerIndex(const UpperIndex& idx_up) const
    {
        bool flag = true;

        static_for<0, NTransform, 1>{}([&](auto itran) {
            const auto tran = Transforms{}.At(itran);

            // skip transforms that always map valid upper to valid lower
            constexpr bool is_valid_up_always_mapped_to_valid_low =
                decltype(tran)::IsValidUpperIndexAlwaysMappedToValidLowerIndex();

            if constexpr(!is_valid_up_always_mapped_to_valid_low)
            {
                const auto up_dims_part = UpDimensionIds{}.At(itran);
                const auto idx_up_part  = pick_container_element(idx_up, up_dims_part);

                flag = flag && tran.IsValidUpperIndexMappedToValidLowerIndex(idx_up_part);
            }
        });

        return flag;
    }
};
} // namespace ck
#endif
......@@ -28,11 +28,11 @@ void device_dummy_dynamic_transform_v1(InDesc,
using TDevice = typename conditional<is_same<half_float::half, T>::value, half_t, T>::type;
const auto in_nchw_desc = make_dynamic_native_tensor_descriptor(
const auto in_nchw_desc = make_dynamic_native_tensor_descriptor_v1(
to_multi_index(InDesc::GetLengths()), to_multi_index(InDesc::GetStrides()));
const auto wei_kcyx_desc = make_dynamic_native_tensor_descriptor(
const auto wei_kcyx_desc = make_dynamic_native_tensor_descriptor_v1(
to_multi_index(WeiDesc::GetLengths()), to_multi_index(WeiDesc::GetStrides()));
const auto out_nkhw_desc = make_dynamic_native_tensor_descriptor(
const auto out_nkhw_desc = make_dynamic_native_tensor_descriptor_v1(
to_multi_index(OutDesc::GetLengths()), to_multi_index(OutDesc::GetStrides()));
const auto conv_strides = to_multi_index(ConvStrides{});
......@@ -52,7 +52,7 @@ void device_dummy_dynamic_transform_v1(InDesc,
const auto in_gemmk_gemmn_global_desc = tensor_descs.At(Number<0>{});
auto in_gemmk_gemmn_coord =
make_dynamic_tensor_coordinate<2>(in_gemmk_gemmn_global_desc, make_multi_index(0, 0));
make_dynamic_tensor_coordinate(in_gemmk_gemmn_global_desc, make_multi_index(0, 0));
for(index_t iter = 0; iter < 10; ++iter)
{
......@@ -112,9 +112,9 @@ void device_dummy_dynamic_transform_v1(InDesc,
index_t* const,
float* const,
float* const,
const DynamicNativeTensorDescriptor<4>,
const DynamicNativeTensorDescriptor<4>,
const DynamicNativeTensorDescriptor<4>,
const DynamicNativeTensorDescriptor_v1<4>,
const DynamicNativeTensorDescriptor_v1<4>,
const DynamicNativeTensorDescriptor_v1<4>,
const MultiIndex<2>,
const MultiIndex<2>,
const MultiIndex<2>,
......
......@@ -28,11 +28,11 @@ void device_dummy_dynamic_transform_v2(InDesc,
using TDevice = typename conditional<is_same<half_float::half, T>::value, half_t, T>::type;
const auto in_nchw_desc = make_dynamic_native_tensor_descriptor_v2<4>(
const auto in_nchw_desc = make_dynamic_native_tensor_descriptor<4>(
to_multi_index(InDesc::GetLengths()), to_multi_index(InDesc::GetStrides()));
const auto wei_kcyx_desc = make_dynamic_native_tensor_descriptor_v2<4>(
const auto wei_kcyx_desc = make_dynamic_native_tensor_descriptor<4>(
to_multi_index(WeiDesc::GetLengths()), to_multi_index(WeiDesc::GetStrides()));
const auto out_nkhw_desc = make_dynamic_native_tensor_descriptor_v2<4>(
const auto out_nkhw_desc = make_dynamic_native_tensor_descriptor<4>(
to_multi_index(OutDesc::GetLengths()), to_multi_index(OutDesc::GetStrides()));
const auto conv_strides = to_multi_index(ConvStrides{});
......@@ -52,12 +52,11 @@ void device_dummy_dynamic_transform_v2(InDesc,
// test on cpu
{
auto in_gemmk_gemmn_gemmkpack_coord = make_dynamic_tensor_coordinate_v2(
auto in_gemmk_gemmn_gemmkpack_coord = make_dynamic_tensor_coordinate(
in_gemmk_gemmn_gemmkpack_global_desc, make_multi_index(0, 0, 0));
const auto in_gemmk_gemmn_gemmkpack_coord_step_0_0_1 =
make_dynamic_tensor_coordinate_step_v2(in_gemmk_gemmn_gemmkpack_global_desc,
make_multi_index(0, 0, 1));
const auto in_gemmk_gemmn_gemmkpack_coord_step_0_0_1 = make_dynamic_tensor_coordinate_step(
in_gemmk_gemmn_gemmkpack_global_desc, make_multi_index(0, 0, 1));
print_array_v2("do_tansforms 0 0 1: ",
in_gemmk_gemmn_gemmkpack_coord_step_0_0_1.do_transforms_);
......@@ -70,19 +69,18 @@ void device_dummy_dynamic_transform_v2(InDesc,
printf("offset: %d\n", in_gemmk_gemmn_gemmkpack_coord.GetOffset());
printf("\n");
move_dynamic_tensor_coordinate_v2(in_gemmk_gemmn_gemmkpack_global_desc,
in_gemmk_gemmn_gemmkpack_coord,
in_gemmk_gemmn_gemmkpack_coord_step_0_0_1);
move_dynamic_tensor_coordinate(in_gemmk_gemmn_gemmkpack_global_desc,
in_gemmk_gemmn_gemmkpack_coord,
in_gemmk_gemmn_gemmkpack_coord_step_0_0_1);
}
}
{
auto in_gemmk_gemmn_gemmkpack_coord = make_dynamic_tensor_coordinate_v2(
auto in_gemmk_gemmn_gemmkpack_coord = make_dynamic_tensor_coordinate(
in_gemmk_gemmn_gemmkpack_global_desc, make_multi_index(0, 0, 0));
const auto in_gemmk_gemmn_gemmkpack_coord_step_0_1_0 =
make_dynamic_tensor_coordinate_step_v2(in_gemmk_gemmn_gemmkpack_global_desc,
make_multi_index(0, 1, 0));
const auto in_gemmk_gemmn_gemmkpack_coord_step_0_1_0 = make_dynamic_tensor_coordinate_step(
in_gemmk_gemmn_gemmkpack_global_desc, make_multi_index(0, 1, 0));
print_array_v2("do_tansforms 0 1 0: ",
in_gemmk_gemmn_gemmkpack_coord_step_0_1_0.do_transforms_);
......@@ -95,19 +93,18 @@ void device_dummy_dynamic_transform_v2(InDesc,
printf("offset: %d\n", in_gemmk_gemmn_gemmkpack_coord.GetOffset());
printf("\n");
move_dynamic_tensor_coordinate_v2(in_gemmk_gemmn_gemmkpack_global_desc,
in_gemmk_gemmn_gemmkpack_coord,
in_gemmk_gemmn_gemmkpack_coord_step_0_1_0);
move_dynamic_tensor_coordinate(in_gemmk_gemmn_gemmkpack_global_desc,
in_gemmk_gemmn_gemmkpack_coord,
in_gemmk_gemmn_gemmkpack_coord_step_0_1_0);
}
}
{
auto in_gemmk_gemmn_gemmkpack_coord = make_dynamic_tensor_coordinate_v2(
auto in_gemmk_gemmn_gemmkpack_coord = make_dynamic_tensor_coordinate(
in_gemmk_gemmn_gemmkpack_global_desc, make_multi_index(0, 0, 0));
const auto in_gemmk_gemmn_gemmkpack_coord_step_1_0_0 =
make_dynamic_tensor_coordinate_step_v2(in_gemmk_gemmn_gemmkpack_global_desc,
make_multi_index(1, 0, 0));
const auto in_gemmk_gemmn_gemmkpack_coord_step_1_0_0 = make_dynamic_tensor_coordinate_step(
in_gemmk_gemmn_gemmkpack_global_desc, make_multi_index(1, 0, 0));
print_array_v2("do_tansforms 1 0 0: ",
in_gemmk_gemmn_gemmkpack_coord_step_1_0_0.do_transforms_);
......@@ -120,9 +117,9 @@ void device_dummy_dynamic_transform_v2(InDesc,
printf("offset: %d\n", in_gemmk_gemmn_gemmkpack_coord.GetOffset());
printf("\n");
move_dynamic_tensor_coordinate_v2(in_gemmk_gemmn_gemmkpack_global_desc,
in_gemmk_gemmn_gemmkpack_coord,
in_gemmk_gemmn_gemmkpack_coord_step_1_0_0);
move_dynamic_tensor_coordinate(in_gemmk_gemmn_gemmkpack_global_desc,
in_gemmk_gemmn_gemmkpack_coord,
in_gemmk_gemmn_gemmkpack_coord_step_1_0_0);
}
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment