/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <algorithm>
#include <cassert>
#include <cmath>
#include <iostream>
#include <limits>
#include <numeric>

#include <migraphx/env.hpp>
#include <migraphx/errors.hpp>
#include <migraphx/file_buffer.hpp>
#include <migraphx/filesystem.hpp>
#include <migraphx/gpu/compile_gen.hpp>
#include <migraphx/gpu/compile_hip_code_object.hpp>
#include <migraphx/gpu/compiler.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/json.hpp>
#include <migraphx/module.hpp>
#include <migraphx/permutation.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/stringutils.hpp>

#include "ck/include/device_gemm_multiple_d.hpp"

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

using namespace migraphx::gpu::gen; // NOLINT

MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_LOG_CK_GEMM);
MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_CK_TUNING);
MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_CK_TUNING_VALUE);
MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_CK_DEBUG);

// Template for the generated HIP source; the ${...} placeholders are filled
// in by interpolate_string() in compile_op() below.
// NOLINTNEXTLINE
static const char* const ck_gemm_kernel = R"__migraphx__(
#include <args.hpp>
#include <migraphx/kernels/ck_gemm.hpp>
#include <migraphx/kernels/pointwise.hpp>
#include <${include}>

namespace migraphx {

${preamble}

extern "C" {

__global__ void ${kernel}(${params})
{
    transform_args(make_tensors(), rotate_last())(${args})([](auto... xs) {
        ck_gemm<${solution}, ${blocks_per_batch}>(xs...);
    });
}

}

} // namespace migraphx

)__migraphx__";
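// For orientation, a sketch of what the interpolated source looks like for a
// three-argument gemm, assuming enum_params() expands to numbered names
// (private_p0, private_p1, ...) and using placeholder values for the CK
// solution type and blocks-per-batch count:
//
//   extern "C" __global__ void ck_gemm_kernel(void * private_p0, void * private_p1, void * private_p2)
//   {
//       transform_args(make_tensors(), rotate_last())(private_p0, private_p1, private_p2)(
//           [](auto... xs) { ck_gemm<CkSolution /* ${solution} */, 2>(xs...); });
//   }
//
// rotate_last() rotates the tensor arguments so that the output buffer, which
// the runtime passes last, is the first tensor handed to ck_gemm.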
// A matrix input is "transposed" (column-major) when its innermost stride is not 1.
static bool transposed_matrix(const shape& s) { return s.strides().back() != 1; }

// Wrap a callable so that action() runs each time before f is invoked.
template <class F, class Action>
auto action_decorate(F f, Action action)
{
    return [=](auto&&... xs) {
        action();
        f(std::forward<decltype(xs)>(xs)...);
    };
}

using tuning_entry = std::pair<std::vector<shape>, size_t>;
static std::vector<tuning_entry> read_tuning(const std::string& s)
{
    if(not fs::exists(s))
        return {};
    return from_value<std::vector<tuning_entry>>(from_json_string(read_string(s)));
}

// Euclidean distance between the trailing (matrix) dimensions of two shapes.
// Shapes of different type or layout are treated as infinitely far apart.
static float matrix_distance(const shape& x, const shape& y)
{
    if(x.type() != y.type())
        return std::numeric_limits<float>::max();
    if(transposed_matrix(x) != transposed_matrix(y))
        return std::numeric_limits<float>::max();
    auto sum_squared = std::inner_product(x.lens().rbegin(),
                                          x.lens().rbegin() + 2,
                                          y.lens().rbegin(),
                                          0,
                                          std::plus<>{},
                                          [](auto a, auto b) { return (a - b) * (a - b); });
    return std::sqrt(sum_squared);
}

static std::size_t get_tuning_for(const std::vector<shape>& inputs)
{
    static auto tuning = read_tuning(string_value_of(MIGRAPHX_CK_TUNING{}, ""));
    if(tuning.empty())
    {
        std::cout << "*********** Warning: No CK tuning! for config:" << std::endl;
        std::cout << " " << inputs[0] << std::endl;
        std::cout << " " << inputs[1] << std::endl;
        std::cout << " " << inputs[2] << std::endl;
    }
    auto it = std::find_if(
        tuning.begin(), tuning.end(), [&](const auto& p) { return p.first == inputs; });
    if(it == tuning.end())
    {
        std::cout << "*********** Warning: CK tuning missing for config!" << std::endl;
        std::cout << " " << inputs[0] << std::endl;
        std::cout << " " << inputs[1] << std::endl;
        std::cout << " " << inputs[2] << std::endl;
        // No exact match: rank every tuned config by its average distance to
        // this one and fall back to the nearest neighbor's solution index.
        std::vector<std::pair<float, std::size_t>> w;
        std::transform(tuning.begin(), tuning.end(), std::back_inserter(w), [&](const auto& p) {
            if(inputs.size() < 3 or p.first.size() < 3)
                MIGRAPHX_THROW("Invalid CK config");
            auto avg_distance = std::inner_product(
                p.first.begin(),
                p.first.begin() + 3,
                inputs.begin(),
                0.0f,
                std::plus<>{},
                [](const auto& x, const auto& y) { return matrix_distance(x, y) / 3.0f; });
            return std::make_pair(avg_distance, p.second);
        });
        std::sort(w.begin(), w.end());
        std::size_t default_value = 4;
        if(not w.empty())
            default_value = w.front().second;
        auto tuning_val = value_of(MIGRAPHX_CK_TUNING_VALUE{}, default_value);
        std::cout << "*********** Warning: CK try tuning: " << tuning_val << std::endl;
        return tuning_val;
    }
    return it->second;
}
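// A sketch (assumed serialization, for illustration only) of the tuning file
// consumed by read_tuning() above: a JSON array of [input shapes, solution
// index] pairs, each shape in its serialized {type, lens, strides} form.
// Roughly:
//
//   [[[{"type": "half_type", "lens": [64, 384, 768], "strides": [...]},
//      {...B shape...}, {...C shape...}], 4],
//    ...]
//
// get_tuning_for() first looks for an exact match of (A, B, C); failing that,
// it picks the nearest tuned config by matrix_distance and lets
// MIGRAPHX_CK_TUNING_VALUE override the resulting solution index.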
struct ck_gemm_compiler : compiler<ck_gemm_compiler>
{
    static std::string get_layout(const shape& s)
    {
        return transposed_matrix(s) ? "ck::tensor_layout::gemm::ColumnMajor"
                                    : "ck::tensor_layout::gemm::RowMajor";
    }

    static std::string get_type(const shape& s)
    {
        if(s.type() == shape::half_type)
            return "ck::half_t";
        return shape::cpp_type(s.type());
    }

    template <class Iterator, class F>
    static std::string ck_tuple(Iterator start, Iterator last, F f)
    {
        std::vector<std::string> s;
        std::transform(start, last, std::back_inserter(s), f);
        return "ck::Tuple<" + join_strings(s, ",") + ">";
    }

    // If the output is column-major, swap the last two dimensions of every
    // input so the kernel can treat the output as row-major.
    static std::vector<shape> adjust_inputs(std::vector<shape> inputs, bool& swap_inputs)
    {
        swap_inputs  = false;
        auto c_shape = inputs.back();
        if(not transposed_matrix(c_shape))
            return inputs;
        std::vector<int64_t> perm(c_shape.lens().size());
        std::iota(perm.begin(), perm.end(), 0);
        std::swap(perm[perm.size() - 1], perm[perm.size() - 2]);
        std::transform(inputs.begin(), inputs.end(), inputs.begin(), [&](shape s) {
            return reorder_shape(s, perm);
        });
        swap_inputs = true;
        return inputs;
    }

    static std::size_t get_batch_count(const shape& s)
    {
        return std::accumulate(
            s.lens().rbegin() + 2, s.lens().rend(), std::size_t{1}, std::multiplies<std::size_t>());
    }

    // Fold the batch dimensions into M (row-major) or N (column-major) so a
    // batched gemm can be expressed as one large gemm.
    static void fold_batch_dims(shape& s)
    {
        auto lens = s.lens();
        if(lens.size() <= 2)
            return;
        auto batch_count = get_batch_count(s);
        auto m1          = lens.at(lens.size() - 2);
        auto m2          = lens.at(lens.size() - 1);
        if(transposed_matrix(s))
            s = shape{s.type(), {m1, m2 * batch_count}};
        else
            s = shape{s.type(), {m1 * batch_count, m2}};
    }

    static void remove_batch_dims(shape& s)
    {
        auto lens = s.lens();
        if(lens.size() <= 2)
            return;
        auto m1 = lens.at(lens.size() - 2);
        auto m2 = lens.at(lens.size() - 1);
        s       = shape{s.type(), {m1, m2}};
    }

    std::vector<std::string> names() const
    {
        return {"ck_gemm", "gpu::ck_gemm", "ck_gemm_int8", "gpu::ck_gemm_int8"};
    }

    operation compile_op(context& /* ctx */, const std::vector<shape>& inputs, const value& v) const
    {
        auto a_shape      = inputs[0];
        auto b_shape      = inputs[1];
        auto c_shape      = inputs.back();
        auto tuning_value = v.get("tuning_val", get_tuning_for({a_shape, b_shape, c_shape}));

        auto rank      = a_shape.lens().size();
        auto b_strides = b_shape.strides();
        // When B is broadcast across the batch (stride 0 on its batch
        // dimension), the batch can be folded into M and run as one gemm.
        bool can_fold_batch = rank >= 3 and b_strides[rank - 3] == 0;

        auto batch_count = get_batch_count(c_shape);
        auto m           = c_shape.lens()[rank - 2];
        m                = can_fold_batch ? m * batch_count : m;
        auto n           = c_shape.lens().back();
        auto k           = a_shape.lens().back();

        const bool transA = transposed_matrix(a_shape);
        const bool transB = transposed_matrix(b_shape);
        const bool transE = transposed_matrix(c_shape);
        const auto a_type = get_type(a_shape);
        const auto b_type = get_type(b_shape);
        const auto e_type = get_type(c_shape);

        // Every input between A/B and the output is a fused "D" tensor.
        std::vector<bool> ds_layout;
        std::transform(inputs.begin() + 2,
                       inputs.end() - 1,
                       std::back_inserter(ds_layout),
                       [](const auto& i) { return transposed_matrix(i); });
        std::vector<std::string> ds_type;
        std::transform(inputs.begin() + 2,
                       inputs.end() - 1,
                       std::back_inserter(ds_type),
                       [](const auto& i) { return get_type(i); });

        std::string ck_passthrough = "ck_passthrough";
        std::string cde_op         = ck_passthrough;
        assert(inputs.size() < 4 or v.contains("post"));
        if(v.contains("post"))
        {
            cde_op = v.at("post").to<std::string>();
        }

        // Describe the problem to CK's host API and pick a solution.
        auto problem = ck::tensor_operation::device::device_gemm_multiple_d::Problem{
            static_cast<std::size_t>(m),
            static_cast<std::size_t>(n),
            static_cast<std::size_t>(k),
            transA,
            transB,
            transE,
            ds_layout,
            a_type,
            b_type,
            e_type,
            ds_type,
            ck_passthrough,
            ck_passthrough,
            cde_op};

        const auto include_header   = problem.GetIncludeHeader();
        const auto ck_headers       = problem.GetHeaders();
        const auto solutions        = problem.GetSolutions("gfx90a");
        const auto solution         = solutions.at(tuning_value);
        const auto template_str     = solution.template_str;
        const auto blocks_per_batch = solution.grid_size;
        const auto block_size       = solution.block_size;

        hip_compile_options options;
        options.embedded_headers = ck_headers;
        auto grid_size = can_fold_batch ? blocks_per_batch : batch_count * blocks_per_batch;
        options.set_launch_params(v, grid_size * block_size, block_size);
        options.inputs         = inputs;
        options.output         = c_shape;
        options.kernel_name    = v.get("kernel", "ck_gemm_kernel");
        options.virtual_inputs = inputs;
        if(can_fold_batch)
        {
            auto vinputs = inputs;
            fold_batch_dims(vinputs[0]);
            remove_batch_dims(vinputs[1]);
            std::for_each(vinputs.begin() + 2, vinputs.end(), fold_batch_dims);
            options.virtual_inputs = vinputs;
        }

        if(v.get("check", false) or enabled(MIGRAPHX_CK_DEBUG{}))
            options.params += " -DMIGRAPHX_CK_CHECK=1";

        auto src = interpolate_string(ck_gemm_kernel,
                                      {{"solution", template_str},
                                       {"include", include_header},
                                       {"params", enum_params(inputs.size(), "void * private_p")},
                                       {"args", enum_params(inputs.size(), "private_p")},
                                       {"blocks_per_batch", to_string(blocks_per_batch)},
                                       {"preamble", v.get("preamble", std::string{})},
                                       {"kernel", options.kernel_name}});

        std::cout << "instances: " << solutions.size() << ", val: " << tuning_value << std::endl;
        std::cout << template_str << std::endl;

        return compile_hip_code_object(src, options);
    }

    compiler_replace compile(context& ctx, instruction_ref ins, const operation& op) const
    {
        auto v      = op.to_value();
        v["kernel"] = "ck_gemm_kernel";
        if(not ins->module_inputs().empty())
        {
            // A fused pointwise module becomes the CDE elementwise operator.
            auto* pm      = ins->module_inputs().front();
            v["preamble"] = generate_pointwise(*pm, "post_ck_gemm_function") +
                            "\nMIGRAPHX_LIFT_CLASS(post_ck_gemm, post_ck_gemm_function);";
            v["post"]   = "ck_function_adaptor<post_ck_gemm>";
            v["kernel"] = "ck_gemm_" + generate_name_from_ops(*pm) + "_kernel";
        }
        auto shapes = to_shapes(ins->inputs());
        return action_decorate(replace(compile_op(ctx, shapes, v)), [=] {
            if(enabled(MIGRAPHX_LOG_CK_GEMM{}))
            {
                std::vector<shape> gemm_shapes{
                    shapes[0], shapes[1], shapes.back().with_type(shapes[0].type())};
                std::cout << "ck_gemm: " << to_json_string(to_value(gemm_shapes)) << std::endl;
            }
        });
    }
};

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
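// Environment variables used above, summarized from the code:
//   MIGRAPHX_LOG_CK_GEMM=1        log each compiled gemm's shapes as JSON
//                                 (useful for building a tuning database).
//   MIGRAPHX_CK_TUNING=<path>     JSON tuning file mapping input shapes to a
//                                 solution index (loaded once; see read_tuning).
//   MIGRAPHX_CK_TUNING_VALUE=<n>  force solution index n when no exact tuning
//                                 entry matches the current config.
//   MIGRAPHX_CK_DEBUG=1           compile kernels with -DMIGRAPHX_CK_CHECK=1.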