Commit 670d2d32 authored by Bartlomiej Kocot

tmp

parent f741895f
ck/wrapper/layout.hpp
@@ -3,7 +3,7 @@
 #pragma once

-#include "ck/wrapper/layout_utils.hpp"
+#include "ck/wrapper/utils/layout_utils.hpp"

 namespace ck {
 namespace wrapper {
@@ -232,6 +232,8 @@ struct Layout
     using NaiveDescriptorType =
         remove_cvref_t<decltype(MakeNaiveDescriptor(Shape{}, DeducedStrides{}))>;

+    constexpr auto GetElementSpaceSize() const { return descriptor_.GetElementSpaceSize(); }
+
     /**
      * \brief Layout constructor.
      *
...
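The added GetElementSpaceSize() simply forwards the descriptor's element space size; the new Tensor wrapper below uses it to size its DynamicBuffer. A minimal host-side sketch of the accessor (the function name SizeFromLayout and the 4x2 shape are illustrative, not part of the commit):

#include <vector>
#include "ck/wrapper/layout.hpp"

void SizeFromLayout()
{
    const auto layout = ck::wrapper::make_layout(ck::make_tuple(4, 2));
    // A dense 4x2 layout addresses 8 elements, so this covers the whole tensor.
    std::vector<float> storage(layout.GetElementSpaceSize());
}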
ck/wrapper/tensor.hpp (new file)
// SPDX-License-Identifier: MIT
// Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
#pragma once
#include "layout.hpp"
#include "utils/tensor_utils.hpp"
namespace ck {
namespace wrapper {
/**
 * \brief Tensor wrapper that couples a raw pointer with a Layout and performs
 * indexed reads through a DynamicBuffer in the chosen address space.
 */
template <AddressSpaceEnum BufferAddressSpace,
          typename ElementType,
          typename Shape,
          typename Strides>
struct Tensor
{
    using ElementSpaceSize =
        decltype(Layout<Shape, Strides>{Shape{}, Strides{}}.GetElementSpaceSize());
__host__ __device__ Tensor() = delete;
__host__ __device__ Tensor(ElementType* pointer, const Layout<Shape, Strides>& layout)
: layout_(layout),
dynamic_buffer_(
make_dynamic_buffer<BufferAddressSpace>(pointer, layout.GetElementSpaceSize()))
{
}
    template <typename... Ts>
    __host__ __device__ ElementType operator[](const Tuple<Ts...>& idx) const
    {
        // Padding is not supported, so the read can be assumed to be valid.
        return dynamic_buffer_.template Get<ElementType>(layout_(idx), true /*is_valid*/);
    }

    template <typename... Ts>
    __host__ __device__ ElementType operator()(const Tuple<Ts...>& idx) const
    {
        return dynamic_buffer_.template Get<ElementType>(layout_(idx), true /*is_valid*/);
    }

    template <typename... Idxs>
    __host__ __device__ ElementType operator()(Idxs... idxs) const
    {
        const auto idxs_tuple = make_tuple(idxs...);
        return dynamic_buffer_.template Get<ElementType>(layout_(idxs_tuple), true /*is_valid*/);
    }
private:
const Layout<Shape, Strides>& layout_;
DynamicBuffer<BufferAddressSpace,
ElementType,
ElementSpaceSize,
true /*InvalidElementUseNumericalZeroValue*/>
dynamic_buffer_;
};
} // namespace wrapper
} // namespace ck
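How the wrapper is meant to be used once the make_layout/make_tensor helpers (further down in this commit) are in place; a hedged host-side sketch that mirrors the new tests rather than adding any behavior:

#include <array>
#include <numeric>
#include "ck/utility/common_header.hpp"
#include "ck/wrapper/layout.hpp"
#include "ck/wrapper/tensor.hpp"

void TensorReadSketch()
{
    std::array<ck::index_t, 8> data{};
    std::iota(data.begin(), data.end(), 0); // each element stores its own offset

    const auto layout = ck::wrapper::make_layout(ck::make_tuple(4, 2));
    const auto tensor = ck::wrapper::make_tensor(data.data(), layout);

    // operator() routes the multi-index through the layout to a linear offset,
    // so with iota-initialized memory the read value equals that offset.
    const bool match = tensor(1, 0) == layout(ck::make_tuple(1, 0));
    (void)match;
}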
ck/wrapper/utils/layout_utils.hpp
@@ -22,7 +22,7 @@ namespace wrapper {
 // Disable from doxygen docs generation
 /// @cond
 // forward declaration
-template <typename Shape, typename Strides = Tuple<>>
+template <typename Shape, typename Strides>
 struct Layout;

 template <typename T>
@@ -52,9 +52,9 @@ __host__ __device__ constexpr Layout<Shape, Strides> make_layout(const Shape& sh
  * \return Constructed layout.
  */
 template <typename Shape>
-__host__ __device__ constexpr Layout<Shape> make_layout(const Shape& shape)
+__host__ __device__ constexpr Layout<Shape, Tuple<>> make_layout(const Shape& shape)
 {
-    return Layout<Shape>(shape);
+    return Layout<Shape, Tuple<>>(shape);
 }

 // Layout helpers
...
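With the default Strides argument now living only on the definition, the two make_layout overloads are explicit about both call shapes. A short sketch (shape and stride values are illustrative; how the strideless overload deduces strides is internal to Layout):

// Explicit strides for a (4, 2) shape.
const auto strided = ck::wrapper::make_layout(ck::make_tuple(4, 2),
                                              ck::make_tuple(1, 4));
// Strides omitted: returns Layout<Shape, Tuple<>> with strides deduced internally.
const auto packed = ck::wrapper::make_layout(ck::make_tuple(4, 2));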
ck/wrapper/utils/tensor_utils.hpp (new file)
#pragma once

#include "ck/ck.hpp"
#include "ck/utility/tuple.hpp"
#include "ck/utility/tuple_helper.hpp"
#include "ck/utility/dynamic_buffer.hpp"
#include "ck/utility/amd_address_space.hpp"
namespace ck {
namespace wrapper {
// Disable from doxygen docs generation
/// @cond
// forward declarations
template <typename Shape, typename Strides>
struct Layout;
template <AddressSpaceEnum BufferAddressSpace,
typename ElementType,
typename Shape,
typename Strides>
struct Tensor;
/// @endcond
template <typename PointerElementType>
struct MemoryPointerTag
{
    __host__ __device__ constexpr MemoryPointerTag(PointerElementType* pointer)
        : pointer_(pointer)
    {
    }

    using ElementType = PointerElementType;
    // Static so the address space can be used as a template argument in make_tensor.
    static constexpr AddressSpaceEnum buffer_address_space_ = AddressSpaceEnum::Generic;

    ElementType* pointer_;
};

template <typename ElementType>
struct GlobalMemoryPointerTag : public MemoryPointerTag<ElementType>
{
    using MemoryPointerTag<ElementType>::MemoryPointerTag;
    static constexpr AddressSpaceEnum buffer_address_space_ = AddressSpaceEnum::Global;
};

template <typename ElementType>
struct SharedMemoryPointerTag : public MemoryPointerTag<ElementType>
{
    using MemoryPointerTag<ElementType>::MemoryPointerTag;
    static constexpr AddressSpaceEnum buffer_address_space_ = AddressSpaceEnum::Lds;
};

template <typename ElementType>
struct SgprMemoryPointerTag : public MemoryPointerTag<ElementType>
{
    using MemoryPointerTag<ElementType>::MemoryPointerTag;
    static constexpr AddressSpaceEnum buffer_address_space_ = AddressSpaceEnum::Sgpr;
};

template <typename ElementType>
struct VgprMemoryPointerTag : public MemoryPointerTag<ElementType>
{
    using MemoryPointerTag<ElementType>::MemoryPointerTag;
    static constexpr AddressSpaceEnum buffer_address_space_ = AddressSpaceEnum::Vgpr;
};
// The tags store a mutable ElementType*, so these take the pointer non-const.
template <typename ElementType>
constexpr auto make_gmem_ptr(ElementType* pointer)
{
    return GlobalMemoryPointerTag<ElementType>(pointer);
}

template <typename ElementType>
constexpr auto make_smem_ptr(ElementType* pointer)
{
    return SharedMemoryPointerTag<ElementType>(pointer);
}

template <typename ElementType>
constexpr auto make_sgprmem_ptr(ElementType* pointer)
{
    return SgprMemoryPointerTag<ElementType>(pointer);
}

template <typename ElementType>
constexpr auto make_vgprmem_ptr(ElementType* pointer)
{
    return VgprMemoryPointerTag<ElementType>(pointer);
}
template <typename ElementType, typename Shape, typename Strides>
constexpr auto make_tensor(ElementType* pointer, const Layout<Shape, Strides>& layout)
{
    return Tensor<AddressSpaceEnum::Generic, ElementType, Shape, Strides>(pointer, layout);
}

// Deduce the address space from the tag type; each tag exposes its address
// space as a static constexpr member usable as a template argument.
template <typename MemoryPointerTagType, typename Shape, typename Strides>
constexpr auto make_tensor(const MemoryPointerTagType& mem_tag,
                           const Layout<Shape, Strides>& layout)
{
    return Tensor<MemoryPointerTagType::buffer_address_space_,
                  typename MemoryPointerTagType::ElementType,
                  Shape,
                  Strides>(mem_tag.pointer_, layout);
}
} // namespace wrapper
} // namespace ck
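The pointer tags let a call site state the address space once and have make_tensor pick the matching Tensor specialization. A hedged device-side sketch (the kernel name and float element type are illustrative only):

__global__ void tensor_gmem_example(float* p_global)
{
    const auto layout = ck::wrapper::make_layout(ck::make_tuple(4, 2));
    // make_gmem_ptr tags the pointer as AddressSpaceEnum::Global, so this
    // builds a Tensor<AddressSpaceEnum::Global, float, ...>.
    const auto tensor =
        ck::wrapper::make_tensor(ck::wrapper::make_gmem_ptr(p_global), layout);
    const float value = tensor(0, 1);
    (void)value;
}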
test/wrapper/CMakeLists.txt
 add_gtest_executable(test_layout test_layout.cpp)
 target_link_libraries(test_layout PRIVATE utility)
+add_gtest_executable(test_tensor_read_write test_tensor_read_write.cpp)
+target_link_libraries(test_tensor_read_write PRIVATE utility)
test/wrapper/test_tensor_read_write.cpp (new file)
// SPDX-License-Identifier: MIT
// Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
#include <array>
#include <cstdlib>
#include <initializer_list>
#include <iostream>
#include <numeric>
#include <vector>

#include <gtest/gtest.h>
#include "ck/utility/common_header.hpp"
#include "ck/wrapper/layout.hpp"
#include "ck/wrapper/tensor.hpp"
// Compare tensor values against the offsets computed by the layout.
// They match when the underlying physical memory has been initialized with
// sequentially increasing values.
// Host-only: std::vector is not usable in device code.
template <typename TensorType, typename LayoutType, typename Idxs>
__host__ bool TestTensorReadWriteCheckCustom(const TensorType& tensor,
                                             const LayoutType& layout,
                                             const std::vector<Idxs>& idxs)
{
    for(size_t i = 0; i < idxs.size(); i++)
    {
        if(tensor(idxs[i]) != layout(idxs[i]))
        {
            return false;
        }
    }
    return true;
}
template <typename TensorType, typename LayoutType>
__host__ __device__ bool TestTensorReadWriteCheck1d(const TensorType& tensor,
const LayoutType& layout)
{
for(ck::index_t w = 0; w < ck::wrapper::size<0>(layout); w++)
{
if(tensor(w) != layout(ck::make_tuple(w)))
{
return false;
}
}
return true;
}
TEST(TestTensorReadWrite, HostMemory)
{
constexpr ck::index_t nelems = 8;
std::array<ck::index_t, nelems> data;
std::iota(data.begin(), data.end(), 0);
const auto layout = ck::wrapper::make_layout(ck::make_tuple(4, 2));
const auto tensor = ck::wrapper::make_tensor(&data[0], layout);
std::vector<ck::Tuple<ck::index_t, ck::index_t>> idxs;
for(ck::index_t h = 0; h < ck::wrapper::size<0>(layout); h++)
{
for(ck::index_t w = 0; w < ck::wrapper::size<1>(layout); w++)
{
idxs.emplace_back(h, w);
}
}
EXPECT_TRUE(TestTensorReadWriteCheck1d(tensor, layout));
EXPECT_TRUE(TestTensorReadWriteCheckCustom(tensor, layout, idxs));
}
TEST(TestTensorReadWrite, HostMemoryNested)
{
constexpr ck::index_t nelems = 8;
std::array<ck::index_t, nelems> data;
std::iota(data.begin(), data.end(), 0);
const auto layout = ck::wrapper::make_layout(ck::make_tuple(ck::make_tuple(2, 2), 2));
const auto tensor = ck::wrapper::make_tensor(&data[0], layout);
std::vector<ck::Tuple<ck::Tuple<ck::index_t, ck::index_t>, ck::index_t>> idxs;
for(ck::index_t d = 0; d < ck::wrapper::size<0>(ck::wrapper::get<0>(layout)); d++)
{
for(ck::index_t h = 0; h < ck::wrapper::size<1>(ck::wrapper::get<0>(layout)); h++)
{
for(ck::index_t w = 0; w < ck::wrapper::size<1>(layout); w++)
{
idxs.emplace_back(ck::make_tuple(d, h), w);
}
}
}
EXPECT_TRUE(TestTensorReadWriteCheck1d(tensor, layout));
EXPECT_TRUE(TestTensorReadWriteCheckCustom(tensor, layout, idxs));
}