// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

bool run_gemm(const ProblemSize& problem_size, const ExecutionConfig& config)
{
#if defined(BUILD_INT4_EXAMPLE) && defined(CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4)
    // int4 host data occupies the same number of bytes as the int8 kernel data,
    // so host tensors can be bit-copied to and from the device buffers.
    static_assert(sizeof(ck::int4_t) == sizeof(int8_t));
#endif

    using namespace ck::literals;

    auto& [M, N, K, StrideA, StrideB, StrideC] = problem_size;

    // Build a host tensor descriptor whose strides match the requested layout.
    auto f_host_tensor_descriptor =
        [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
            if constexpr(std::is_same_v<decltype(layout), ck::tensor_layout::gemm::RowMajor>)
            {
                return HostTensorDescriptor({row, col}, {stride, 1_uz});
            }
            else
            {
                return HostTensorDescriptor({row, col}, {1_uz, stride});
            }
        };

    Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
    Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));

    // Initialize the input tensors according to the selected init method.
    switch(config.init_method)
    {
    case 0:
        ck::utils::FillConstant<ADataType>{1.f}(a_m_k);
        ck::utils::FillConstant<BDataType>{0.f}(b_k_n);

        // for(ck::index_t m = 0; m < M; ++m)
        // {
        //     for(ck::index_t k = 0; k < K; ++k)
        //     {
        //         a_m_k(m, k) = (m * M + k) % 5;
        //     }
        // }

        // On top of the zero fill, make B a diagonal matrix with entries 2*n
        // so the result is easy to inspect.
        for(ck::index_t n = 0; n < N; ++n)
        {
            for(ck::index_t k = 0; k < K; ++k)
            {
                if(n == k)
                    b_k_n(k, n) = n * 2;
            }
        }
        break;
    case 1:
        ck::utils::FillUniformDistributionIntegerValue<ADataType>{-1.f, 3.f}(a_m_k);
        ck::utils::FillUniformDistributionIntegerValue<BDataType>{-1.f, 3.f}(b_k_n);
        break;
    default:
        ck::utils::FillUniformDistribution<ADataType>{-1.f, 1.f}(a_m_k);
        ck::utils::FillUniformDistribution<BDataType>{-1.f, 1.f}(b_k_n);
    }

    Tensor<CDataType> c_m_n_host_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
    Tensor<CDataType> c_m_n_device_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));

    std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
    std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
    std::cout << "c_m_n: " << c_m_n_host_result.mDesc << std::endl;

#ifdef BUILD_INT4_EXAMPLE
    DeviceMem a_m_k_device_buf(sizeof(KernelADataType) * a_m_k.mDesc.GetElementSpaceSize());
    DeviceMem b_k_n_device_buf(sizeof(KernelBDataType) * b_k_n.mDesc.GetElementSpaceSize());
    DeviceMem c_m_n_device_buf(sizeof(KernelCDataType) *
                               c_m_n_device_result.mDesc.GetElementSpaceSize());

    // Convert the int4 host tensors to the kernel's storage type before copying to the device.
    const Tensor<KernelADataType> a_m_k_converted(a_m_k);
    const Tensor<KernelBDataType> b_k_n_converted(b_k_n);

    a_m_k_device_buf.ToDevice(a_m_k_converted.mData.data());
    b_k_n_device_buf.ToDevice(b_k_n_converted.mData.data());
#else
    DeviceMem a_m_k_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
    DeviceMem b_k_n_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
    DeviceMem c_m_n_device_buf(sizeof(CDataType) * c_m_n_device_result.mDesc.GetElementSpaceSize());

    a_m_k_device_buf.ToDevice(a_m_k.mData.data());
    b_k_n_device_buf.ToDevice(b_k_n.mData.data());
    c_m_n_device_buf.SetZero();
#endif

    auto a_element_op = AElementOp{};
    auto b_element_op = BElementOp{};
    auto c_element_op = CElementOp{};

    // do GEMM
    auto gemm     = DeviceGemmInstance{};
    auto invoker  = gemm.MakeInvoker();
    auto argument = gemm.MakeArgument(
#ifdef BUILD_INT4_EXAMPLE
        static_cast<KernelADataType*>(a_m_k_device_buf.GetDeviceBuffer()),
        static_cast<KernelBDataType*>(b_k_n_device_buf.GetDeviceBuffer()),
        static_cast<KernelCDataType*>(c_m_n_device_buf.GetDeviceBuffer()),
#else
        static_cast<ADataType*>(a_m_k_device_buf.GetDeviceBuffer()),
        static_cast<BDataType*>(b_k_n_device_buf.GetDeviceBuffer()),
        static_cast<CDataType*>(c_m_n_device_buf.GetDeviceBuffer()),
#endif
        M,
        N,
        K,
        StrideA,
        StrideB,
        StrideC,
        a_element_op,
        b_element_op,
        c_element_op);

    if(!gemm.IsSupportedArgument(argument))
    {
        std::cerr << gemm.GetTypeString() << " does not support this problem" << std::endl;

        // An unsupported problem is not treated as a failure of the example.
        return true;
    }

    float ave_time = invoker.Run(argument, StreamConfig{nullptr, config.time_kernel});

    // 2*M*N*K floating-point operations; bytes moved for A, B and C.
    std::size_t flop = 2_uz * M * N * K;
    std::size_t num_btype =
        sizeof(ADataType) * M * K + sizeof(BDataType) * K * N + sizeof(CDataType) * M * N;

    float tflops = static_cast<float>(flop) / 1.E9 / ave_time;

    float gb_per_sec = num_btype / 1.E6 / ave_time;

    std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s, "
              << gemm.GetTypeString() << std::endl;

    bool result = true;
    if(config.do_verification)
    {
        // Compute the reference result on the host and compare it with the device result.
        auto ref_gemm    = ReferenceGemmInstance{};
        auto ref_invoker = ref_gemm.MakeInvoker();

        auto ref_argument = ref_gemm.MakeArgument(
            a_m_k, b_k_n, c_m_n_host_result, a_element_op, b_element_op, c_element_op);

        ref_invoker.Run(ref_argument);

#ifdef BUILD_INT4_EXAMPLE
        Tensor<CDataType> c_m_n_device_result_converted(c_m_n_host_result.mDesc);

        c_m_n_device_buf.FromDevice(c_m_n_device_result_converted.mData.data());

        c_m_n_device_result = c_m_n_device_result_converted.CopyAsType<CDataType>();

        result = result && ck::utils::check_err(c_m_n_device_result_converted, c_m_n_host_result);
#else
        c_m_n_device_buf.FromDevice(c_m_n_device_result.mData.data());

        result = result && ck::utils::check_err(c_m_n_device_result, c_m_n_host_result);
#endif
    }

    if(config.do_log)
    {
        LogRangeAsType<float>(std::cout << "a:\n", a_m_k.mData, ",", 32) << std::endl;
        LogRangeAsType<float>(std::cout << "b:\n", b_k_n.mData, ",", 32) << std::endl;
        LogRangeAsType<float>(std::cout << "c_host:\n", c_m_n_host_result.mData, ",", 32)
            << std::endl;
        LogRangeAsType<float>(std::cout << "c_device:\n", c_m_n_device_result.mData, ",", 32)
            << std::endl;
    }

    return result;
}

bool run_gemm_example(int argc, char* argv[])
{
    ProblemSize problem_size;
    ExecutionConfig config;

    return !parse_cmd_args(argc, argv, problem_size, config) || run_gemm(problem_size, config);
}
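
// Usage sketch (an assumption for illustration, not part of the upstream example): this header is
// meant to be included by a translation unit that has already defined the names used above, e.g.
// ADataType/BDataType/CDataType (plus the Kernel*DataType aliases when BUILD_INT4_EXAMPLE is set),
// ALayout/BLayout/CLayout, AElementOp/BElementOp/CElementOp, DeviceGemmInstance,
// ReferenceGemmInstance, ProblemSize, ExecutionConfig and parse_cmd_args. A typical driver then
// only needs a main() along the lines of:
//
//   int main(int argc, char* argv[]) { return !run_gemm_example(argc, argv); }
//
// which maps a successful run (run_gemm_example returning true) to exit code 0.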