/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ck_gemm_instances.hpp" namespace migraphx { inline namespace MIGRAPHX_INLINE_NS { namespace gpu { // NOLINTNEXTLINE static const char* const ck_gemm_kernel = R"__migraphx__( #include #include #include namespace migraphx { using gemm_t = CKDeviceGemm<${instance}, ${m}, ${k}, ${n}, ${sa}, ${sb}, ${sc}>; constexpr __device__ gemm_t ckdg{}; using GridwiseGemm = decltype(ckdg.gridwisegemm); extern "C" { __global__ void ck_gemm_kernel(void* a_p, void* b_p, void* c_p) { make_tensors()(a_p, b_p, c_p)([&](auto a_t, auto b_t, auto c_t) { constexpr ck::index_t shared_block_size = GridwiseGemm::GetSharedMemoryNumberOfByte(); __shared__ char p_shared_block[shared_block_size]; make_tensors()(p_shared_block)([&](auto p_t) { ck_gemm(a_t, b_t, c_t, p_t); }); }); } } } // namespace migraphx )__migraphx__"; static std::size_t int_div_ceil(std::size_t x, std::size_t y) { return (x + y - 1) / y; } static std::size_t block_size_index = 13; static std::size_t get_block_size(const std::vector& s) { return std::stoull(s[block_size_index]); } static std::size_t get_grid_size(const std::vector& s, std::size_t m, std::size_t n) { auto mpb = std::stoull(s[block_size_index + 1]); auto npb = std::stoull(s[block_size_index + 2]); return int_div_ceil(m, mpb) * int_div_ceil(n, npb); } struct ck_gemm_compiler : compiler { static std::string get_layout(const shape& s) { return s.transposed() ? 
"ck::tensor_layout::gemm::ColumnMajor" : "ck::tensor_layout::gemm::RowMajor"; } static std::string get_type(const shape& s) { if(s.type() == shape::half_type) return "ck::half_t"; return shape::cpp_type(s.type()); } std::vector names() const { return {"ck_gemm", "gpu::ck_gemm"}; } operation compile_op(context& /* ctx */, const std::vector& inputs, const value& v) const { auto a_shape = inputs[0]; auto b_shape = inputs[1]; auto c_shape = inputs[2]; auto m = c_shape.lens().front(); auto n = c_shape.lens().back(); auto k = a_shape.lens().back(); auto sa = a_shape.strides().front(); auto sb = b_shape.strides().front(); auto sc = c_shape.strides().front(); int i = v.get("tuning_val", 4); const auto& instance = get_instance(i, [&](const auto& x) -> bool { return get_layout(a_shape) == x[0] and get_layout(b_shape) == x[1] and get_layout(c_shape) == x[2] and get_type(a_shape) == x[3] and get_type(b_shape) == x[4] and get_type(c_shape) == x[5]; }); hip_compile_options options; options.set_launch_params(v, get_grid_size(instance, m, n), get_block_size(instance)); options.inputs = inputs; options.output = c_shape; options.kernel_name = "ck_gemm_kernel"; options.virtual_inputs = inputs; auto src = interpolate_string(ck_gemm_kernel, {{"instance", join_strings(instance, ",")}, {"m", to_string(m)}, {"k", to_string(k)}, {"n", to_string(n)}, {"sa", to_string(sa)}, {"sb", to_string(sb)}, {"sc", to_string(sc)}}); return compile_hip_code_object(src, options); } compiler_replace compile(context& ctx, instruction_ref ins, const operation& op) const { return replace(compile_op(ctx, to_shapes(ins->inputs()), op.to_value())); } }; } // namespace gpu } // namespace MIGRAPHX_INLINE_NS } // namespace migraphx