Commit 5899b0fc authored by Alan Turner's avatar Alan Turner
Browse files

Formatting

parent e72ecc75
...@@ -131,7 +131,8 @@ struct find_ck_batched_gemm ...@@ -131,7 +131,8 @@ struct find_ck_batched_gemm
void apply(module_pass_manager& mpm, const match::matcher_result& r) const void apply(module_pass_manager& mpm, const match::matcher_result& r) const
{ {
auto ins = r.result; auto ins = r.result;
mpm.get_module().replace_instruction(ins, ck_batched_gemm{ins->get_operator()}, ins->inputs()); mpm.get_module().replace_instruction(
ins, ck_batched_gemm{ins->get_operator()}, ins->inputs());
} }
}; };
......
...@@ -131,10 +131,7 @@ static std::size_t get_tuning_for(const std::vector<shape>& inputs) ...@@ -131,10 +131,7 @@ static std::size_t get_tuning_for(const std::vector<shape>& inputs)
return it->second; return it->second;
} }
static std::size_t get_batch_stride(const shape& s) static std::size_t get_batch_stride(const shape& s) { return s.strides()[s.strides().size() - 3]; }
{
return s.strides()[s.strides().size() - 3];
}
struct ck_batched_gemm_compiler : compiler<ck_batched_gemm_compiler> struct ck_batched_gemm_compiler : compiler<ck_batched_gemm_compiler>
{ {
...@@ -209,7 +206,8 @@ struct ck_batched_gemm_compiler : compiler<ck_batched_gemm_compiler> ...@@ -209,7 +206,8 @@ struct ck_batched_gemm_compiler : compiler<ck_batched_gemm_compiler>
options.kernel_name = "ck_batched_gemm_kernel"; options.kernel_name = "ck_batched_gemm_kernel";
options.virtual_inputs = inputs; options.virtual_inputs = inputs;
auto src = interpolate_string(ck_batched_gemm_kernel, {{"instance", join_strings(instance, ",")}}); auto src =
interpolate_string(ck_batched_gemm_kernel, {{"instance", join_strings(instance, ",")}});
return compile_hip_code_object(src, options); return compile_hip_code_object(src, options);
} }
......
...@@ -111,9 +111,10 @@ constexpr F for_each(Iterator first, Iterator last, F f) ...@@ -111,9 +111,10 @@ constexpr F for_each(Iterator first, Iterator last, F f)
} }
// Assigns `val` to every element in the half-open range [first, last).
// Hand-rolled constexpr equivalent of std::fill for contexts where
// <algorithm> is unavailable (e.g. device-side kernel code).
template <class Iterator, class T>
constexpr void fill(Iterator first, Iterator last, const T& val)
{
    for(; first != last; ++first)
        *first = val;
}
......
...@@ -94,15 +94,17 @@ struct ComputePtrOffsetOfStridedBatch ...@@ -94,15 +94,17 @@ struct ComputePtrOffsetOfStridedBatch
ck::index_t BatchStrideE_; ck::index_t BatchStrideE_;
}; };
template <class G, class Settings, class A, class B, class E, class... Ds> template <class G, class Settings, class A, class B, class E, class... Ds>
__device__ void ck_batched_gemm(Settings s, A a, B b, E e, Ds... ds) __device__ void ck_batched_gemm(Settings s, A a, B b, E e, Ds... ds)
{ {
constexpr const G gemm{}; constexpr const G gemm{};
constexpr const auto a_grid_desc_m_k = gemm.matrix_padder.PadADescriptor_M_K(to_ck_batched_tensor<A>()); constexpr const auto a_grid_desc_m_k =
constexpr const auto b_grid_desc_n_k = gemm.matrix_padder.PadBDescriptor_N_K(to_ck_batched_tensor<B>()); gemm.matrix_padder.PadADescriptor_M_K(to_ck_batched_tensor<A>());
constexpr const auto e_grid_desc_m_n = gemm.matrix_padder.PadCDescriptor_M_N(to_ck_batched_tensor<E>()); constexpr const auto b_grid_desc_n_k =
gemm.matrix_padder.PadBDescriptor_N_K(to_ck_batched_tensor<B>());
constexpr const auto e_grid_desc_m_n =
gemm.matrix_padder.PadCDescriptor_M_N(to_ck_batched_tensor<E>());
constexpr const auto ds_grid_desc_m_n = constexpr const auto ds_grid_desc_m_n =
ck::make_tuple(gemm.matrix_padder.PadCDescriptor_M_N(to_ck_batched_tensor<Ds>())...); ck::make_tuple(gemm.matrix_padder.PadCDescriptor_M_N(to_ck_batched_tensor<Ds>())...);
constexpr const auto block_2_etile_map = gemm.MakeDefaultBlock2ETileMap(e_grid_desc_m_n); constexpr const auto block_2_etile_map = gemm.MakeDefaultBlock2ETileMap(e_grid_desc_m_n);
...@@ -127,14 +129,15 @@ __device__ void ck_batched_gemm(Settings s, A a, B b, E e, Ds... ds) ...@@ -127,14 +129,15 @@ __device__ void ck_batched_gemm(Settings s, A a, B b, E e, Ds... ds)
static constexpr ck::index_t NumDTensor = gemm.NumDTensor; static constexpr ck::index_t NumDTensor = gemm.NumDTensor;
std::array<ck::index_t, NumDTensor> batchStrideDs; std::array<ck::index_t, NumDTensor> batchStrideDs;
ck::static_for<0, NumDTensor, 1>{}( ck::static_for<0, NumDTensor, 1>{}([&](auto i) { batchStrideDs[i] = s.batchStrideC; });
[&](auto i) { batchStrideDs[i] = s.batchStrideC; }); const ComputePtrOffsetOfStridedBatch<NumDTensor> compute_ptr_offset_of_batch{
const ComputePtrOffsetOfStridedBatch<NumDTensor> compute_ptr_offset_of_batch{s.batchStrideA, s.batchStrideB, batchStrideDs, s.batchStrideC}; s.batchStrideA, s.batchStrideB, batchStrideDs, s.batchStrideC};
auto batch_count = s.batch_count; auto batch_count = s.batch_count;
const ck::index_t num_blocks_per_batch = const ck::index_t num_blocks_per_batch =
__builtin_amdgcn_readfirstlane(ck::get_grid_size() / batch_count); __builtin_amdgcn_readfirstlane(ck::get_grid_size() / batch_count);
const ck::index_t g_idx = __builtin_amdgcn_readfirstlane(ck::get_block_1d_id() / num_blocks_per_batch); const ck::index_t g_idx =
__builtin_amdgcn_readfirstlane(ck::get_block_1d_id() / num_blocks_per_batch);
const ck::long_index_t a_batch_offset = __builtin_amdgcn_readfirstlane( const ck::long_index_t a_batch_offset = __builtin_amdgcn_readfirstlane(
static_cast<ck::long_index_t>(compute_ptr_offset_of_batch.GetAPtrOffset(g_idx))); static_cast<ck::long_index_t>(compute_ptr_offset_of_batch.GetAPtrOffset(g_idx)));
......
...@@ -38,8 +38,8 @@ struct ck_batched_gemm : verify_program<ck_batched_gemm> ...@@ -38,8 +38,8 @@ struct ck_batched_gemm : verify_program<ck_batched_gemm>
std::size_t n = 3; std::size_t n = 3;
std::size_t k = 3; std::size_t k = 3;
migraphx::shape m1_shape{migraphx::shape::half_type, {b, m, k}}; migraphx::shape m1_shape{migraphx::shape::half_type, {b, m, k}};
std::vector<float> v1(b*m*k, 1); std::vector<float> v1(b * m * k, 1);
std::vector<float> v2(b*k*n, 1);//{1, 2, 3, 4, 5, 6, 7, 8}; std::vector<float> v2(b * k * n, 1); //{1, 2, 3, 4, 5, 6, 7, 8};
// auto l1 = mm->add_parameter("1", m1_shape); // auto l1 = mm->add_parameter("1", m1_shape);
// auto l2 = mm->add_parameter("2", m1_shape); // auto l2 = mm->add_parameter("2", m1_shape);
auto l1 = mm->add_literal(migraphx::literal{m1_shape, v1}); auto l1 = mm->add_literal(migraphx::literal{m1_shape, v1});
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment