Unverified commit dc8edad8, authored by Chris Austen, committed by GitHub

Merge branch 'develop' into check-mlir-perf

parents b9cbfd8e 32f0b028
@@ -136,12 +136,14 @@ rocmtest clang_debug: rocmnode('mi100+') { cmake_build ->
         }
     }, mlir_debug: rocmnode('mi100+') { cmake_build ->
         stage('MLIR Debug') {
-            withEnv(['MIGRAPHX_ENABLE_EXTRA_MLIR=1']) {
+            withEnv(['MIGRAPHX_ENABLE_EXTRA_MLIR=1', 'MIGRAPHX_MLIR_USE_SPECIFIC_OPS=fused,attention,convolution,dot']) {
                 def sanitizers = "undefined"
                 // Note: the -fno-sanitize= is copied from upstream LLVM_UBSAN_FLAGS.
                 def debug_flags_cxx = "-g -O2 -fsanitize=${sanitizers} -fno-sanitize=vptr,function -fno-sanitize-recover=${sanitizers}"
                 def debug_flags = "-g -O2 -fsanitize=${sanitizers} -fno-sanitize=vptr -fno-sanitize-recover=${sanitizers}"
                 def gpu_targets = getgputargets()
+                // Since the purpose of this run is to verify all things MLIR supports,
+                // enable all possible types of offloads.
                 cmake_build(flags: "-DCMAKE_BUILD_TYPE=debug -DMIGRAPHX_ENABLE_PYTHON=Off -DMIGRAPHX_ENABLE_MLIR=On -DCMAKE_CXX_FLAGS_DEBUG='${debug_flags_cxx}' -DCMAKE_C_FLAGS_DEBUG='${debug_flags}' -DGPU_TARGETS='${gpu_targets}'")
             }
         }
......
@@ -89,7 +89,7 @@ requests==2.28.2
     # via
     #   pygithub
     #   sphinx
-rocm-docs-core==0.29.0
+rocm-docs-core==0.30.0
     # via -r requirements.in
 smmap==5.0.0
     # via gitdb
......
@@ -4,13 +4,13 @@ Environment Variables
 For parsing
 ---------------
-**MIGRAPHX_TRACE_ONNX_PARSER**
+.. envvar:: MIGRAPHX_TRACE_ONNX_PARSER
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Print debugging traces for the ONNX parser.
 Prints: initializers (if used), ONNX node operators, added MIGraphX instructions.
-**MIGRAPHX_DISABLE_FP16_INSTANCENORM_CONVERT**
+.. envvar:: MIGRAPHX_DISABLE_FP16_INSTANCENORM_CONVERT
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Disables the conversion from fp16 to fp32 for the InstanceNormalization ONNX operator that MIGraphX does as a workaround for accuracy issues with reduce_mean/variance.
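Most of the switches in this file accept the same truthy spellings ("1", "enable", "enabled", "yes", "true"). A minimal C++ sketch of that convention, with a hypothetical helper name (this is not MIGraphX's internal API):

#include <algorithm>
#include <array>
#include <cctype>
#include <cstdlib>
#include <string>

// Hypothetical illustration of the truthy-value convention used by these
// variables; MIGraphX's real parsing lives in its environment utilities.
static bool is_truthy(const char* name)
{
    const char* v = std::getenv(name);
    if(v == nullptr)
        return false;
    std::string s{v};
    std::transform(s.begin(), s.end(), s.begin(), [](unsigned char c) {
        return static_cast<char>(std::tolower(c));
    });
    const std::array<std::string, 5> truthy = {"1", "enable", "enabled", "yes", "true"};
    return std::find(truthy.begin(), truthy.end(), s) != truthy.end();
}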
@@ -20,16 +20,16 @@ See ``parse_instancenorm.cpp`` for more details.
 Matchers
 ------------
-**MIGRAPHX_TRACE_MATCHES**
+.. envvar:: MIGRAPHX_TRACE_MATCHES
 Set to "1" to print the matcher that matches an instruction and the matched instruction.
 Set to "2" and use the ``MIGRAPHX_TRACE_MATCHES_FOR`` flag to filter out results.
-**MIGRAPHX_TRACE_MATCHES_FOR**
+.. envvar:: MIGRAPHX_TRACE_MATCHES_FOR
 Set to the name of any matcher, and only traces for that matcher will be printed out.
-**MIGRAPHX_VALIDATE_MATCHES**
+.. envvar:: MIGRAPHX_VALIDATE_MATCHES
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Validate the module after finding the matches (runs ``module.validate()``).
@@ -37,7 +37,7 @@ Validate the module after finding the matches (runs ``module.validate()``).
 Program Execution
 ---------------------
-**MIGRAPHX_TRACE_EVAL**
+.. envvar:: MIGRAPHX_TRACE_EVAL
 Set to "1", "2", or "3" to use.
 "1" prints the instruction run and the time taken.
@@ -48,7 +48,7 @@ Set to "1", "2", or "3" to use.
 Program Verification
 ------------------------
-**MIGRAPHX_VERIFY_ENABLE_ALLCLOSE**
+.. envvar:: MIGRAPHX_VERIFY_ENABLE_ALLCLOSE
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Uses ``allclose`` with the given ``atol`` and ``rtol`` for verifying ranges with ``driver verify`` or the tests that use ``migraphx/verify.hpp``.
@@ -57,76 +57,76 @@ Uses ``allclose`` with the given ``atol`` and ``rtol`` for verifying ranges with
 Pass debugging or Pass controls
 -----------------------------------
-**MIGRAPHX_TRACE_ELIMINATE_CONTIGUOUS**
+.. envvar:: MIGRAPHX_TRACE_ELIMINATE_CONTIGUOUS
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Debug print the instructions that have input ``contiguous`` instructions removed.
-**MIGRAPHX_DISABLE_POINTWISE_FUSION**
+.. envvar:: MIGRAPHX_DISABLE_POINTWISE_FUSION
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Disables the ``fuse_pointwise`` compile pass.
-**MIGRAPHX_DEBUG_MEMORY_COLORING**
+.. envvar:: MIGRAPHX_DEBUG_MEMORY_COLORING
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Print debug statements for the ``memory_coloring`` pass.
-**MIGRAPHX_TRACE_SCHEDULE**
+.. envvar:: MIGRAPHX_TRACE_SCHEDULE
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Print debug statements for the ``schedule`` pass.
-**MIGRAPHX_TRACE_PROPAGATE_CONSTANT**
+.. envvar:: MIGRAPHX_TRACE_PROPAGATE_CONSTANT
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Traces instructions replaced with a constant.
-**MIGRAPHX_INT8_QUANTIZATION_PARAMS**
+.. envvar:: MIGRAPHX_INT8_QUANTIZATION_PARAMS
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Print the quantization parameters in only the main module.
-**MIGRAPHX_DISABLE_DNNL_POST_OPS_WORKAROUND**
+.. envvar:: MIGRAPHX_DISABLE_DNNL_POST_OPS_WORKAROUND
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Disable the DNNL post ops workaround.
-**MIGRAPHX_DISABLE_MIOPEN_FUSION**
+.. envvar:: MIGRAPHX_DISABLE_MIOPEN_FUSION
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Disable MIOpen fusions.
-**MIGRAPHX_DISABLE_SCHEDULE_PASS**
+.. envvar:: MIGRAPHX_DISABLE_SCHEDULE_PASS
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Disable the ``schedule`` pass.
-**MIGRAPHX_DISABLE_REDUCE_FUSION**
+.. envvar:: MIGRAPHX_DISABLE_REDUCE_FUSION
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Disable the ``fuse_reduce`` pass.
-**MIGRAPHX_ENABLE_NHWC**
+.. envvar:: MIGRAPHX_ENABLE_NHWC
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Enable the ``layout_nhwc`` pass.
-**MIGRAPHX_ENABLE_CK**
+.. envvar:: MIGRAPHX_ENABLE_CK
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Enable using the Composable Kernels library.
 Should be used in conjunction with ``MIGRAPHX_DISABLE_MLIR=1``.
-**MIGRAPHX_DISABLE_MLIR**
+.. envvar:: MIGRAPHX_DISABLE_MLIR
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Disable using the rocMLIR library.
-**MIGRAPHX_ENABLE_EXTRA_MLIR**
+.. envvar:: MIGRAPHX_ENABLE_EXTRA_MLIR
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Enables additional opportunities to use MLIR that may improve performance.
-**MIGRAPHX_COPY_LITERALS**
+.. envvar:: MIGRAPHX_COPY_LITERALS
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Use ``hip_copy_to_gpu`` with a new ``literal`` instruction rather than use ``hip_copy_literal{}``.
@@ -134,22 +134,22 @@ Use ``hip_copy_to_gpu`` with a new ``literal`` instruction rather than use ``hip
 Compilation traces
 ----------------------
-**MIGRAPHX_TRACE_FINALIZE**
+.. envvar:: MIGRAPHX_TRACE_FINALIZE
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Debug print instructions during the ``module.finalize()`` step.
-**MIGRAPHX_TRACE_COMPILE**
+.. envvar:: MIGRAPHX_TRACE_COMPILE
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Print trace information for the graph compilation process.
-**MIGRAPHX_TRACE_PASSES**
+.. envvar:: MIGRAPHX_TRACE_PASSES
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Print the compile pass and the program after the pass.
-**MIGRAPHX_TIME_PASSES**
+.. envvar:: MIGRAPHX_TIME_PASSES
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Time the compile passes.
@@ -158,77 +158,77 @@ Time the compile passes.
 GPU Kernels JIT compilation debugging (applicable for both hiprtc and hipclang)
 --------------------------------------------------------------------------------
-**MIGRAPHX_TRACE_CMD_EXECUTE**
+.. envvar:: MIGRAPHX_TRACE_CMD_EXECUTE
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Print commands executed by the MIGraphX ``process``.
-**MIGRAPHX_TRACE_HIPRTC**
+.. envvar:: MIGRAPHX_TRACE_HIPRTC
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Print HIPRTC options and the C++ file executed.
-**MIGRAPHX_DEBUG_SAVE_TEMP_DIR**
+.. envvar:: MIGRAPHX_DEBUG_SAVE_TEMP_DIR
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Prevent the created temporary directories from being deleted.
-**MIGRAPHX_GPU_DEBUG**
+.. envvar:: MIGRAPHX_GPU_DEBUG
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Internally, this adds the option ``-DMIGRAPHX_DEBUG`` when compiling GPU kernels. It enables assertions and capture of source locations for errors.
-**MIGRAPHX_GPU_DEBUG_SYM**
+.. envvar:: MIGRAPHX_GPU_DEBUG_SYM
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Adds the option ``-g`` when compiling HIPRTC.
-**MIGRAPHX_GPU_DUMP_SRC**
+.. envvar:: MIGRAPHX_GPU_DUMP_SRC
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Dump the compiled HIPRTC source files.
-**MIGRAPHX_GPU_DUMP_ASM**
+.. envvar:: MIGRAPHX_GPU_DUMP_ASM
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Dump the hip-clang assembly.
-**MIGRAPHX_GPU_OPTIMIZE**
+.. envvar:: MIGRAPHX_GPU_OPTIMIZE
 Set the optimization mode for GPU compilation (the ``-O`` option).
 Defaults to ``-O3``.
-**MIGRAPHX_GPU_COMPILE_PARALLEL**
+.. envvar:: MIGRAPHX_GPU_COMPILE_PARALLEL
 Set to the number of threads to use.
 Compile GPU code in parallel with the given number of threads.
-**MIGRAPHX_TRACE_NARY**
+.. envvar:: MIGRAPHX_TRACE_NARY
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Print the ``nary`` device functions used.
-**MIGRAPHX_ENABLE_HIPRTC_WORKAROUNDS**
+.. envvar:: MIGRAPHX_ENABLE_HIPRTC_WORKAROUNDS
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Enable workarounds for bugs in HIPRTC.
-**MIGRAPHX_USE_FAST_SOFTMAX**
+.. envvar:: MIGRAPHX_USE_FAST_SOFTMAX
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Use the fast softmax optimization.
-**MIGRAPHX_ENABLE_NULL_STREAM**
+.. envvar:: MIGRAPHX_ENABLE_NULL_STREAM
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Allow using the null stream for MIOpen and hipStream.
-**MIGRAPHX_NSTREAMS**
+.. envvar:: MIGRAPHX_NSTREAMS
 Set to the number of streams to use.
 Defaults to 1.
-**MIGRAPHX_TRACE_BENCHMARKING**
+.. envvar:: MIGRAPHX_TRACE_BENCHMARKING
 Set to "1" to print the benchmarking trace.
 Set to "2" to print the benchmarking trace with more detail.
@@ -236,45 +236,49 @@ Set to "2" to print the benchmarking trace with more detail.
 MLIR vars
 -------------
-**MIGRAPHX_TRACE_MLIR**
+.. envvar:: MIGRAPHX_TRACE_MLIR
 Set to "1" to trace MLIR and print any failures.
 Set to "2" to additionally print all MLIR operations.
-**MIGRAPHX_MLIR_USE_SPECIFIC_OPS**
+.. envvar:: MIGRAPHX_MLIR_USE_SPECIFIC_OPS
 Set to the names of the operations for which MLIR should always be used, regardless of GPU architecture.
 Accepts a comma-separated list of operators (ex: "fused", "convolution", "dot").
-**MIGRAPHX_MLIR_TUNING_DB**
+.. envvar:: MIGRAPHX_MLIR_TUNING_DB
 Set to the path of the MLIR tuning database to load.
-**MIGRAPHX_MLIR_TUNING_CFG**
+.. envvar:: MIGRAPHX_MLIR_TUNING_CFG
 Set to the path of the tuning configuration.
 Appends to the tuning cfg file, which can be used with rocMLIR tuning scripts.
-**MIGRAPHX_MLIR_TUNE_EXHAUSTIVE**
+.. envvar:: MIGRAPHX_MLIR_TUNE_EXHAUSTIVE
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Do exhaustive tuning for MLIR.
+.. envvar:: MIGRAPHX_MLIR_TUNE_LIMIT
+Set to an integer greater than 1.
+Limits the number of solutions that MLIR will use for tuning.
 CK vars
 -----------
-**MIGRAPHX_LOG_CK_GEMM**
+.. envvar:: MIGRAPHX_LOG_CK_GEMM
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Print Composable Kernels GEMM traces.
-**MIGRAPHX_CK_DEBUG**
+.. envvar:: MIGRAPHX_CK_DEBUG
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Always add the ``-DMIGRAPHX_CK_CHECK=1`` option when compiling Composable Kernels operators.
-**MIGRAPHX_TUNE_CK**
+.. envvar:: MIGRAPHX_TUNE_CK
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Use tuning for Composable Kernels.
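Operationally, a comma-separated variable such as MIGRAPHX_MLIR_USE_SPECIFIC_OPS is just split on commas and matched against known names; a self-contained C++ sketch of that idea (the helper name is hypothetical; the real logic appears later in this diff as is_requested):

#include <cstdlib>
#include <sstream>
#include <string>

// Hypothetical sketch: was one option requested via the comma-separated
// MIGRAPHX_MLIR_USE_SPECIFIC_OPS list?
static bool option_requested(const std::string& option)
{
    const char* raw = std::getenv("MIGRAPHX_MLIR_USE_SPECIFIC_OPS");
    if(raw == nullptr)
        return false;
    std::stringstream ss{std::string{raw}};
    std::string item;
    while(std::getline(ss, item, ','))
        if(item == option)
            return true;
    return false;
}
// option_requested("dot") is true when the variable is set to e.g. "fused,dot".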
@@ -282,19 +286,19 @@ Use tuning for Composable Kernels.
 Testing
 ------------
-**MIGRAPHX_TRACE_TEST_COMPILE**
+.. envvar:: MIGRAPHX_TRACE_TEST_COMPILE
 Set to the target whose compilation you want to trace (ex. "gpu", "cpu").
 Prints the compile trace for the given target for the verify tests.
 This flag shouldn't be used in conjunction with ``MIGRAPHX_TRACE_COMPILE``.
 For the verify tests, use only ``MIGRAPHX_TRACE_TEST_COMPILE``.
-**MIGRAPHX_TRACE_TEST**
+.. envvar:: MIGRAPHX_TRACE_TEST
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Prints the reference and target programs even if the verification passes.
-**MIGRAPHX_DUMP_TEST**
+.. envvar:: MIGRAPHX_DUMP_TEST
 Set to "1", "enable", "enabled", "yes", or "true" to use.
 Dumps verify tests to ``.mxr`` files.
@@ -241,6 +241,7 @@ register_migraphx_ops(
     transpose
     unary_not
     undefined
+    unique
     unknown
     unsqueeze
     where
@@ -290,6 +291,7 @@ find_package(TBB QUIET)
 if(TBB_FOUND)
     check_execution_par(TBB_HAS_EXECUTION_PAR TBB::tbb)
     if(TBB_HAS_EXECUTION_PAR)
+        list(APPEND PACKAGE_DEPENDS PACKAGE TBB)
         target_link_libraries(migraphx PUBLIC TBB::tbb)
         set(MIGRAPHX_HAS_EXECUTORS_DEFAULT On)
         message(STATUS "Using TBB for parallel execution")
......
@@ -21,10 +21,13 @@
  * ************************************************************************ */
 #ifndef MIGRAPHX_GUARD_RTGLIB_BITCAST_HPP
 #define MIGRAPHX_GUARD_RTGLIB_BITCAST_HPP
+#include <type_traits>
 #if defined(__GNUC__) && !defined(__clang__)
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wstrict-aliasing"
 #endif
+#include <migraphx/requires.hpp>
 #include <migraphx/config.hpp>
 // NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
@@ -32,7 +35,10 @@
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
-template <typename To, typename From>
+template <typename To,
+          typename From,
+          MIGRAPHX_REQUIRES(std::is_trivially_copyable<To>{} and
+                            std::is_trivially_copyable<From>{})>
 inline constexpr To bit_cast(From fr) noexcept
 {
     static_assert(sizeof(To) == sizeof(From));
......
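For reference, a minimal sketch of what a memcpy-based bit_cast with the trivially-copyable constraints added above typically looks like (illustrative only; the MIGraphX implementation's body is elided in this diff):

#include <cstring>
#include <type_traits>

// Illustrative pre-C++20 bit_cast fallback; the enable_if mirrors the
// MIGRAPHX_REQUIRES constraints added in the header above.
template <typename To,
          typename From,
          typename = std::enable_if_t<std::is_trivially_copyable<To>{} and
                                      std::is_trivially_copyable<From>{}>>
inline To bit_cast_sketch(From fr) noexcept
{
    static_assert(sizeof(To) == sizeof(From), "sizes must match");
    To to;
    std::memcpy(&to, &fr, sizeof(To));
    return to;
}
// Example: bit_cast_sketch<unsigned int>(1.0f) == 0x3f800000u.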
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_OPERATORS_UNIQUE_HPP
#define MIGRAPHX_GUARD_OPERATORS_UNIQUE_HPP
#include <migraphx/shape_for_each.hpp>
#include <migraphx/check_shapes.hpp>
#include <migraphx/config.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/tune_axis.hpp>
#include <utility>
#include <map>
#include <limits>
#include <optional>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {
// https://onnx.ai/onnx/operators/onnx__Unique.html
// The Onnx spec refers to numpy specification, used as a reference:
// https://numpy.org/doc/stable/reference/generated/numpy.unique.html
// Input : Given an array of elements : X.
// Output(s) :
// 1. Find the unique elements (Y) of input (X).
//
// There are three outputs in addition to the unique elements in Y:
// 2. the indices of the input array that give the unique values
// 3. the indices of the unique array that reconstruct the input array
// 4. the number of times each unique value comes up in the input array
// Optional Attribute: 'Sorted' = 1 for sorted; = 0 for unsorted.
// Onnx specification makes 'sorted' a default, while Numpy always sorts.
//
// Optional Attribute: 'Axis' is 'None' (default) or a valid int < rank(X).
// Negative values are allowed.
//
// Numpy has the following important note on Axis:
// ------------------------------------------------------------------
// When an axis is specified the subarrays indexed by the axis are
// sorted. This is done by making the specified axis the first
// dimension of the array (move the axis to the first dimension to
// keep the order of the other axes) and then flattening the subarrays
// in C order. The flattened subarrays are then viewed as a structured
// type with each element given a label, with the effect that we end
// up with a 1-D array of structured types that can be treated in the
// same way as any other 1-D array. The result is that the flattened
// subarrays are sorted in lexicographic order starting with the first
// element.
// ------------------------------------------------------------------
struct unique
{
template <class T>
auto make_idx_less_fn(const T& data, size_t chunk_sz) const
{
return [&data, chunk_sz](auto idx1, auto idx2) {
return std::lexicographical_compare(data.begin() + idx1,
data.begin() + idx1 + chunk_sz,
data.begin() + idx2,
data.begin() + idx2 + chunk_sz);
};
}
// CASE SORTED:
//
// To process into a sorted unique series of elements/chunks:
// Chunk size == 1 means a simple element; >1 means a flat representation.
// Steps: first go through the input elements/chunks for uniqueness.
// At the end of this processing, per the sorted sequence of unique elements:
// update/create data structures: y, y_indices, x_rev_indices, y_count
//
// INPUT x: [2, 1, 1, 3, 4, 3], attr_sorted = 1;
// OUTPUT(s): indices..
// y_indices: [1, 0, 3, 4] --- first occurrence, in terms of index in sequence x
// x_rev_indices: [1, 0, 0, 2, 3, 2] --- x seen in terms of indices of unique sequence y
// y_count: [2, 1, 2, 1] -- count at each y_index. sum = len(x)
// NOTE: y [1, 2, 3, 4] --- the unique output is constructed from x[y_indices[...]]
template <class T>
auto sorted_uniq_indices(const T& input_data, size_t chunk_sz) const
{
struct y_info
{
size_t y_idx;
size_t x_idx;
size_t ct = 0;
};
auto idx_less_fn = make_idx_less_fn(input_data, chunk_sz);
std::map<size_t, y_info, decltype(idx_less_fn)> uniq_val_map(idx_less_fn);
std::tuple<std::vector<std::size_t>, std::vector<std::size_t>, std::vector<std::size_t>> rv;
auto& [y_indices, x_rev_indices, y_count] = rv;
// go through all the elements and find the unique elements..
size_t count_x = input_data.size();
for(size_t f_idx = 0, x_idx = 0; f_idx < count_x; f_idx += chunk_sz, x_idx++)
{
y_info entry = {.y_idx = uniq_val_map.size(), .x_idx = x_idx};
auto [itr, added_new] = uniq_val_map.insert({f_idx, entry});
itr->second.ct++;
x_rev_indices.push_back(itr->second.y_idx);
}
std::vector<std::size_t> y2x_indices(uniq_val_map.size());
y_indices.resize(uniq_val_map.size());
y_count.resize(uniq_val_map.size());
size_t idx = 0;
// the unique elements are now sorted:
// post-processing for all the return indices.
for(const auto& v : uniq_val_map)
{
y2x_indices[v.second.y_idx] = idx;
y_indices[idx] = v.second.x_idx;
y_count[idx] = v.second.ct;
idx++;
}
// update x_rev_indices as per the sorted order of y_indices
for(auto& i : x_rev_indices)
i = y2x_indices[i];
return rv;
}
// CASE UNSORTED:
//
// To process into an unsorted unique series of elements/chunks:
// Chunk size == 1 means a simple element; otherwise use a flat representation of a tensor object.
// Go through the input elements/chunks one by one with inline processing of indices..
// INPUT x: [2, 1, 1, 3, 4, 3], attr_sorted = 0;
// OUTPUT(s): indices..
// y_indices: [0, 1, 3, 4] --- first occurrence, in terms of index in sequence x
// x_rev_indices: [0, 1, 1, 2, 3, 2] --- x seen in terms of indices of unique sequence y
// y_count: [1, 2, 2, 1] -- count at each y_index. sum = len(x)
// NOTE: y [2, 1, 3, 4] --- the unique output is constructed from x[y_indices[...]]
// Output data structures: y_indices, x_rev_indices, y_count are processed inline.
template <class T>
auto unsorted_uniq_indices(const T& input_data, size_t chunk_sz) const
{
auto idx_less_fn = make_idx_less_fn(input_data, chunk_sz);
std::map<size_t, size_t, decltype(idx_less_fn)> uniq_val_map(idx_less_fn);
// rv is used for NRVO (named return value optimization) below.
std::tuple<std::vector<std::size_t>, std::vector<std::size_t>, std::vector<std::size_t>> rv;
auto& [y_indices, x_rev_indices, y_count] = rv;
// go through all the elements and add the unique elements into the map..
// inline processing for outputs: y_indices, x_rev_indices, y_count
size_t count_x = input_data.size();
for(size_t f_idx = 0; f_idx < count_x; f_idx += chunk_sz)
{
auto [itr, added_new] = uniq_val_map.insert({f_idx, y_indices.size()});
if(added_new)
{
y_count.push_back(0);
y_indices.push_back(x_rev_indices.size());
}
y_count[itr->second]++;
x_rev_indices.push_back(itr->second);
}
return rv;
}
// Axis. Default: none. Range: [-rank, rank-1]
std::optional<int64_t> axis;
// Sorted, Default: 1= sorted. 0 = unsorted.
bool sorted = true;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.axis, "axis"), f(self.sorted, "sorted"));
}
std::string name() const { return "unique"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs, *this}.has(1);
auto& sh_x = inputs[0];
auto lens_x = sh_x.lens();
size_t dim_x = sh_x.ndim();
size_t max_uniq_ct = sh_x.elements();
std::vector<shape::dynamic_dimension> d_out;
if(axis)
{
int64_t t_axis = migraphx::tune_axis(dim_x, *axis, name());
if(t_axis != 0)
MIGRAPHX_THROW("Unique: Only supports axis = 0 or None");
d_out = sh_x.to_dynamic().dyn_dims();
// only axis = 0 is supported:
max_uniq_ct = lens_x[0];
// min = 1 unique element; max = full dimension along axis 0
d_out[0] = {1, max_uniq_ct};
}
else
{
d_out.push_back({1, max_uniq_ct});
}
shape sh_y = {sh_x.type(), d_out};
// The three outputted Indices are just 1-D:
shape sh_idx{shape::int64_type, {d_out[0]}};
return {{sh_y, sh_idx, sh_idx, sh_idx}};
}
argument compute(const dyn_output& dyn_out, std::vector<argument> args) const
{
auto sh_x = args.front().get_shape();
auto lens_x = sh_x.lens();
shape output_shape = dyn_out.computed_shape;
auto vec_ss = output_shape.sub_shapes();
auto ct_x = sh_x.elements();
shape sh_y = {vec_ss[0].type(), {ct_x}};
shape sh_idx = {vec_ss[1].type(), {ct_x}};
shape sh_x_idx = {vec_ss[1].type(), {ct_x}};
argument res_y{sh_y};
argument res_y_idx{sh_idx};
argument res_x_rev_idx{sh_idx};
argument res_y_ct_idx{sh_idx};
std::vector<size_t> out_y_idx;
std::vector<size_t> out_x_rev_idx;
std::vector<size_t> out_y_ct;
// If axis is not none, then for >1-D tensors we must consider the uniqueness
// of chunks of sub-tensors: a subsequence of built-in elements.
// For a built-in type, chunk_sz is of course 1.
size_t chunk_sz = 1;
if(axis)
chunk_sz = ct_x / lens_x[0]; // axis = 0 is supported.
visit_all(args.front(), res_y)([&](auto x, auto y_flat) {
using o_type = typename decltype(x)::value_type;
std::vector<o_type> x_in(x.begin(), x.end());
std::tie(out_y_idx, out_x_rev_idx, out_y_ct) =
sorted ? sorted_uniq_indices(x_in, chunk_sz)
: unsorted_uniq_indices(x_in, chunk_sz);
const auto uniq_ct = out_y_idx.size();
// construct y from x[indices] in flattened form
// later we reshape y to the final shape..
auto y_dst = y_flat.begin();
for(size_t idx = 0; idx < uniq_ct; idx++)
y_dst = copy_n(x_in.begin() + out_y_idx[idx] * chunk_sz, chunk_sz, y_dst);
std::vector<size_t> lens_y;
// if axis is specified:
// the output shape keeps the n-1 dimensions of x
if(axis)
{
lens_y = lens_x;
lens_y[0] = uniq_ct;
}
else
{
lens_y = {uniq_ct};
}
sh_y = {sh_y.type(), lens_y};
sh_idx = {sh_idx.type(), {uniq_ct}};
});
visit_all(res_y_idx, res_x_rev_idx, res_y_ct_idx)(
[&](auto y_indices, auto x_rev_indices, auto y_count) {
std::copy(out_y_idx.begin(), out_y_idx.end(), y_indices.begin());
std::copy(out_x_rev_idx.begin(), out_x_rev_idx.end(), x_rev_indices.begin());
std::copy(out_y_ct.begin(), out_y_ct.end(), y_count.begin());
sh_x_idx = {sh_idx.type(), {out_x_rev_idx.size()}};
});
return {{res_y.reshape(sh_y),
res_y_idx.reshape(sh_idx),
res_x_rev_idx.reshape(sh_x_idx),
res_y_ct_idx.reshape(sh_idx)}};
}
};
} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
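To make the index bookkeeping above concrete, here is a standalone C++ sketch (not MIGraphX code) that reproduces the unsorted case from the operator comments for x = [2, 1, 1, 3, 4, 3]:

#include <cstddef>
#include <iostream>
#include <map>
#include <vector>

int main()
{
    const std::vector<int> x = {2, 1, 1, 3, 4, 3};

    std::map<int, std::size_t> seen; // element value -> its index in y
    std::vector<int> y;
    std::vector<std::size_t> y_indices;     // first occurrence of each y in x
    std::vector<std::size_t> x_rev_indices; // x rewritten as indices into y
    std::vector<std::size_t> y_count;       // multiplicity of each y

    for(std::size_t i = 0; i < x.size(); ++i)
    {
        auto [it, added] = seen.insert({x[i], y.size()});
        if(added)
        {
            y.push_back(x[i]); // y grows in first-seen order
            y_indices.push_back(i);
            y_count.push_back(0);
        }
        y_count[it->second]++;
        x_rev_indices.push_back(it->second);
    }

    // Expected, matching the operator comments:
    //   y             = [2, 1, 3, 4]
    //   y_indices     = [0, 1, 3, 4]
    //   x_rev_indices = [0, 1, 1, 2, 3, 2]
    //   y_count       = [1, 2, 2, 1]
    // The sorted case (attr_sorted = 1) instead gives y = [1, 2, 3, 4],
    // y_indices = [1, 0, 3, 4], x_rev_indices = [1, 0, 0, 2, 3, 2], y_count = [2, 1, 2, 1].
    for(auto v : x_rev_indices)
        std::cout << v << ' ';
    std::cout << '\n';
}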
@@ -139,6 +139,7 @@
 #include <migraphx/op/unary.hpp>
 #include <migraphx/op/unary_not.hpp>
 #include <migraphx/op/undefined.hpp>
+#include <migraphx/op/unique.hpp>
 #include <migraphx/op/unknown.hpp>
 #include <migraphx/op/unsqueeze.hpp>
 #include <migraphx/op/where.hpp>
......
 /*
  * The MIT License (MIT)
  *
- * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
@@ -24,21 +24,21 @@
 #ifndef MIGRAPHX_GUARD_OPERATORS_TUNE_AXIS_HPP
 #define MIGRAPHX_GUARD_OPERATORS_TUNE_AXIS_HPP
+#include <utility>
+#include <cstdint>
 #include <migraphx/stringutils.hpp>
 #include <migraphx/errors.hpp>
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
-inline int tune_axis(const int n_dim, const int axis, const std::string& op_name = "OPERATOR")
+inline int tune_axis(int n_dim, int axis, const std::string& op_name = "OPERATOR")
 {
-    if(axis >= n_dim or std::abs(axis) > n_dim)
-    {
-        MIGRAPHX_THROW(to_upper(op_name) + ": axis is out of range.");
-    }
-    return (axis < 0) ? axis + n_dim : axis;
+    if(axis < 0)
+        axis += n_dim;
+    if(axis < 0 or axis >= n_dim)
+        MIGRAPHX_THROW(to_upper(op_name) + ": axis is out of range.");
+    return axis;
 }
 } // namespace MIGRAPHX_INLINE_NS
......
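The reworked tune_axis above first normalizes a negative axis, then range-checks. A tiny self-contained mirror of that control flow (illustrative; MIGraphX throws via MIGRAPHX_THROW instead):

#include <stdexcept>

// Mirror of the updated tune_axis logic above.
inline int tune_axis_sketch(int n_dim, int axis)
{
    if(axis < 0)
        axis += n_dim; // e.g. axis -1 with n_dim 3 becomes 2
    if(axis < 0 or axis >= n_dim)
        throw std::out_of_range("axis is out of range.");
    return axis;
}
// tune_axis_sketch(3, -1) == 2; tune_axis_sketch(3, 3) and
// tune_axis_sketch(3, -4) both throw.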
@@ -127,9 +127,9 @@ struct parse_multinomial : op_parser<parse_multinomial>
         // use literal. The array populated by random_uniform may have any shape, as long as its
         // number of elements is batch_size * sample_size.
         size_t batch_size = s0.lens().front();
-        auto rand_dummy = info.add_literal(
-            migraphx::literal{migraphx::shape::float_type, {batch_size * sample_size}});
+        auto rand_dummy = info.add_literal(migraphx::literal{
+            migraphx::shape{migraphx::shape::float_type, {batch_size, sample_size}},
+            std::vector<float>(batch_size * sample_size)});
         randoms =
             info.add_instruction(migraphx::make_op("random_uniform"), seed_input, rand_dummy);
     }
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/tune_axis.hpp>
#include <optional>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
// generate unique output stream y, given input stream x;
//
// case unsorted:
// input x: [2, 1, 1, 3, 4, 3], attr_sorted = 0;
// output(s):
// y: [2, 1, 3, 4] --- the unique output
// y_indices: [0, 1, 3, 4] --- first occurrence, in terms of indices of x
// x_rev_indices: [0, 1, 1, 2, 3, 2] --- x seen in terms of indices of y
// y_count: [1, 2, 2, 1] -- count at each y_index. sum = len(x)
//
// case sorted:
// input x: [2, 1, 1, 3, 4, 3], attr_sorted = 1;
// output(s):
// y: [1, 2, 3, 4] --- the unique output
// y_indices: [1, 0, 3, 4] --- first occurrence, in terms of indices of x
// x_rev_indices: [1, 0, 0, 2, 3, 2] --- x seen in terms of indices of y
// y_count: [2, 1, 2, 1] -- count at each y_index. sum = len(x)
struct parse_unique : op_parser<parse_unique>
{
std::vector<op_desc> operators() const { return {{"Unique"}}; }
std::vector<instruction_ref> parse(const op_desc& opd,
const onnx_parser& parser,
const onnx_parser::node_info& info,
std::vector<instruction_ref> args) const
{
int64_t sorted = 1; // default = sorted.
if(contains(info.attributes, "sorted"))
sorted = parser.parse_value(info.attributes.at("sorted")).at<int>();
std::optional<int64_t> axis;
if(contains(info.attributes, "axis"))
{
auto n_dim = args[0]->get_shape().ndim();
axis = parser.parse_value(info.attributes.at("axis")).at<int>();
axis = tune_axis(n_dim, *axis, opd.op_name);
}
migraphx::argument data_arg = args.back()->eval();
auto opr = axis ? migraphx::make_op("unique", {{"axis", *axis}, {"sorted", sorted}})
: migraphx::make_op("unique", {{"sorted", sorted}});
auto u_opr = info.add_instruction(opr, args.at(0));
auto i_y = info.add_instruction(make_op("get_tuple_elem", {{"index", 0}}), u_opr);
auto i_y_idx = info.add_instruction(make_op("get_tuple_elem", {{"index", 1}}), u_opr);
auto i_x_idx = info.add_instruction(make_op("get_tuple_elem", {{"index", 2}}), u_opr);
auto i_count = info.add_instruction(make_op("get_tuple_elem", {{"index", 3}}), u_opr);
return {i_y, i_y_idx, i_x_idx, i_count};
}
};
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
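For orientation, a hedged sketch of driving the new operator through MIGraphX's internal C++ API, much as the parser above effectively does (header paths and exact signatures are assumed from the public sources; treat as illustrative):

#include <cstdint>
#include <vector>
#include <migraphx/program.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/literal.hpp>

int main()
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::int64_type, {6}};
    auto x = mm->add_literal(migraphx::literal{s, std::vector<int64_t>{2, 1, 1, 3, 4, 3}});
    // "unique" returns a tuple: y, y_indices, x_rev_indices, y_count.
    auto u = mm->add_instruction(migraphx::make_op("unique", {{"sorted", 1}}), x);
    auto y = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), u);
    mm->add_return({y});
    return 0;
}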
@@ -68,6 +68,7 @@ dnnl::memory::data_type to_dnnl_memory_data_type(shape::type_t t)
     case st::int32_type: return dt::s32;
     case st::int8_type: return dt::s8;
     case st::uint8_type: return dt::u8;
+    case st::fp8e4m3fnuz_type: MIGRAPHX_THROW("fp8e4m3fnuz unsupported in DNNL");
     default: MIGRAPHX_THROW("Unsupported data type");
     }
 }
......
@@ -340,7 +340,6 @@ struct cpu_apply
             {"reduce_min", "reduction_min"},
             {"reduce_sum", "reduction_sum"},
         });
-
         extend_op("concat", "dnnl::concat");
         extend_op("contiguous", "dnnl::reorder");
         extend_op("convolution", "dnnl::convolution");
@@ -376,6 +375,12 @@ struct cpu_apply
         // Apply these operators first so the inputs can be const folded
         for(auto it : iterator_for(*modl))
         {
+            // Skip lowering if fp8 is one of the inputs, since oneDNN doesn't
+            // support fp8 yet.
+            if(std::any_of(it->inputs().begin(), it->inputs().end(), [](const auto& i) {
+                   return i->get_shape().type() == migraphx::shape::fp8e4m3fnuz_type;
+               }))
+                continue;
             if(it->name() == "pow")
             {
                 apply_pow(it);
@@ -383,6 +388,12 @@ struct cpu_apply
         }
         for(auto it : iterator_for(*modl))
         {
+            // Skip lowering if fp8 is one of the inputs, since oneDNN doesn't
+            // support fp8 yet.
+            if(std::any_of(it->inputs().begin(), it->inputs().end(), [](const auto& i) {
+                   return i->get_shape().type() == migraphx::shape::fp8e4m3fnuz_type;
+               }))
+                continue;
             if(it->name() == "pooling")
             {
                 apply_pooling(it);
......
@@ -54,6 +54,11 @@ vectorize vectorize::elements(std::size_t axis,
                               const std::vector<shape>& inputs,
                               const std::vector<std::size_t>& sizes)
 {
+    // disable vectorization for fp8 types
+    if(std::any_of(inputs.begin(), inputs.end(), [&](auto ishape) {
+           return ishape.type() == migraphx::shape::fp8e4m3fnuz_type;
+       }))
+        return {1, axis};
     if(std::all_of(
            inputs.begin(), inputs.end(), [&](const auto& s) { return s.lens()[axis] == 1; }))
         return {1, axis};
@@ -86,6 +91,11 @@ vectorize vectorize::elements(std::size_t axis,
 vectorize vectorize::elements(context& ctx, std::size_t axis, const std::vector<shape>& inputs)
 {
+    // disable vectorization for fp8 types
+    if(std::any_of(inputs.begin(), inputs.end(), [&](auto ishape) {
+           return ishape.type() == migraphx::shape::fp8e4m3fnuz_type;
+       }))
+        return {1, axis};
     if(inputs.empty())
         return {1, axis};
     std::size_t n = std::max_element(inputs.begin(),
......
@@ -38,6 +38,18 @@ namespace gpu {
 MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_ENABLE_EXTRA_MLIR);
 MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_DISABLE_MLIR);
+/**
+ * @brief Declares a new MIGraphX environment variable which forces to generate
+ * only specific MLIR operations.
+ *
+ * The variable, if defined, forces MIGraphX to use only specific operations
+ * with MLIR regardless of the underlying GPU architecture. The variable accepts
+ * a list of operations separated by comma. The variable recognizes the following
+ * operations: "fused", "convolution", "dot". If the variable is not defined MIGraphX
+ * will decide by itself which operations to delegate to MLIR. The variable is
+ * intended to be primarily used by rocMLIR developers.
+ */
+MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_MLIR_USE_SPECIFIC_OPS);
 bool mlir_enabled()
 {
@@ -49,6 +61,26 @@ bool mlir_enabled()
 #endif
 }
+static bool is_requested(std::string_view option, bool fallback = false)
+{
+    auto string_value = string_value_of(MIGRAPHX_MLIR_USE_SPECIFIC_OPS{}, "");
+    if(string_value.empty())
+        return fallback;
+    const auto options = split_string(string_value, ',');
+    return contains(options, option);
+}
+
+bool mlir_attention_enabled()
+{
+#ifdef MIGRAPHX_MLIR
+    if(not mlir_enabled())
+        return false;
+    return is_requested("attention");
+#else
+    return false;
+#endif
+}
 #ifdef MIGRAPHX_MLIR
 struct mlir_op
@@ -62,31 +94,20 @@ struct mlir_op
         return pack(f(self.op, "op"));
     }
-    shape compute_shape(std::vector<shape> inputs, const std::vector<module_ref>& mods) const
+    shape compute_shape(const std::vector<shape>& inputs, const std::vector<module_ref>& mods) const
     {
+        module_ref mod = mods[0];
         check_shapes{inputs, *this}.packed_or_broadcasted();
         if(mods.size() != 1)
             MIGRAPHX_THROW("should have one submodule.");
         if(inputs.size() < 2)
             MIGRAPHX_THROW("should have at least two inputs.");
-        module_ref mod = mods[0];
-        auto type      = mod->get_output_shapes().front().type();
+        auto type = mod->get_output_shapes().front().type();
         std::unordered_map<instruction_ref, shape> ins_shapes;
-        size_t param_cnt = 0;
-        std::vector<std::string> names = mod->get_parameter_names();
-        std::sort(names.begin(), names.end());
-        for(const std::string& param_name : names)
-        {
-            ins_shapes[mod->get_parameter(param_name)] = inputs[param_cnt++];
-        }
         for(auto ins : iterator_for(*mod))
         {
-            if(ins->name() == "@param")
-            {
-                continue;
-            }
-            if(ins->name() == "@literal")
+            if(ins->name() == "@literal" or ins->name() == "@param")
             {
                 ins_shapes[ins] = ins->get_shape();
                 continue;
@@ -112,38 +133,48 @@ struct mlir_op
 MIGRAPHX_REGISTER_OP(mlir_op);
 namespace {
+std::tuple<instruction_ref, std::vector<operation>>
+get_fusable_input_op_stream(instruction_ref lower_input)
+{
+    instruction_ref upper_input = lower_input;
+    std::vector<operation> op_stream;
+    while(contains(
+        {"slice", "transpose", "contiguous", "reshape", "squeeze", "flatten", "unsqueeze"},
+        upper_input->name()))
+    {
+        operation op = upper_input->get_operator();
+        if(contains({"squeeze", "flatten", "unsqueeze"}, upper_input->name()))
+        {
+            op = migraphx::make_op("reshape", {{"dims", upper_input->get_shape().lens()}});
+        }
+        op_stream.push_back(op);
+        upper_input = upper_input->inputs().at(0);
+    }
+    return {upper_input, op_stream};
+}
+
 std::tuple<instruction_ref, std::vector<instruction_ref>>
-fuse_input_ops_and_gemm_based_op(module_ref mm, instruction_ref gemm_based_op)
+fuse_input_ops_and_gemm_based_op(module_ref mm,
+                                 const std::vector<instruction_ref>& gemm_based_op_inputs,
+                                 const operation& gemm_based_op)
 {
     std::vector<instruction_ref> top_inputs;
     std::vector<instruction_ref> imm_inputs;
     size_t input_cnt = 0;
-    for(instruction_ref input : gemm_based_op->inputs())
+    for(instruction_ref input : gemm_based_op_inputs)
     {
-        std::vector<operation> op_stream;
-        while(contains(
-            {"slice", "transpose", "contiguous", "reshape", "squeeze", "flatten", "unsqueeze"},
-            input->name()))
-        {
-            operation op = input->get_operator();
-            if(contains({"squeeze", "flatten", "unsqueeze"}, input->name()))
-            {
-                op = migraphx::make_op("reshape", {{"dims", input->get_shape().lens()}});
-            }
-            op_stream.push_back(op);
-            input = input->inputs().at(0);
-        }
-        top_inputs.push_back(input);
+        auto [upper_input, op_stream] = get_fusable_input_op_stream(input);
+        top_inputs.push_back(upper_input);
         instruction_ref prev_input =
-            mm->add_parameter("y" + std::to_string(input_cnt++), input->get_shape());
+            mm->add_parameter("y" + std::to_string(input_cnt++), upper_input->get_shape());
         for(const auto& op : reverse(op_stream))
         {
            prev_input = mm->add_instruction(op, {prev_input});
         }
         imm_inputs.push_back(prev_input);
     }
-    instruction_ref new_gemm_based_op =
-        mm->add_instruction(gemm_based_op->get_operator(), imm_inputs);
+    instruction_ref new_gemm_based_op = mm->add_instruction(gemm_based_op, imm_inputs);
     return {new_gemm_based_op, top_inputs};
 }
...@@ -205,102 +236,135 @@ auto is_mlir_conv(mlir_mode mode) ...@@ -205,102 +236,135 @@ auto is_mlir_conv(mlir_mode mode)
}); });
} }
struct find_mlir_fused_ops std::unordered_map<instruction_ref, instruction_ref>
create_param_map_with_literals(module_ref mm, const module* pm, const shape& shape)
{ {
mlir_mode conv_mode = mlir_mode::none; std::unordered_map<instruction_ref, instruction_ref> ins_map;
mlir_mode dot_mode = mlir_mode::none; for(auto ins : iterator_for(*pm))
auto matcher() const
{ {
auto dot_or_conv = match::skip(match::name("contiguous"))( if(ins->name() != "@literal")
match::any_of(is_mlir_dot(dot_mode), is_mlir_conv(conv_mode)).bind("gemm_based_op"));
return match::name("pointwise")(match::any_of[match::inputs()](dot_or_conv.bind("x")));
}
std::unordered_map<instruction_ref, instruction_ref>
create_param_map_with_literals(module_ref mm, const module* pm, const shape& shape) const
{
std::unordered_map<instruction_ref, instruction_ref> ins_map;
for(auto ins : iterator_for(*pm))
{ {
if(ins->name() != "@literal") continue;
{
continue;
}
literal r = ins->get_literal();
instruction_ref literal = mm->add_literal(r);
instruction_ref mbcast = mm->add_instruction(
make_op("multibroadcast", {{"out_lens", shape.lens()}}), literal);
ins_map[ins] = mbcast;
} }
return ins_map; literal r = ins->get_literal();
instruction_ref literal = mm->add_literal(r);
instruction_ref mbcast =
mm->add_instruction(make_op("multibroadcast", {{"out_lens", shape.lens()}}), literal);
ins_map[ins] = mbcast;
} }
return ins_map;
}
// Whitelist supported fusion options, including imposing type constraints std::vector<instruction_ref>
// for cases where MLIR only supports an operation (usually a pointwise function) fold_pointwise_mod(instruction_ref pm_ins,
// on particular types. module_ref parent_mod,
bool is_pointwise_op_supported_by_mlir(const instruction& i) const const std::unordered_map<instruction_ref, instruction_ref>& ins_map)
{
auto* pm = pm_ins->module_inputs().front();
auto names = pm->get_parameter_names();
std::sort(names.begin(), names.end());
std::unordered_map<instruction_ref, instruction_ref> param_map =
create_param_map_with_literals(parent_mod, pm, pm_ins->get_shape());
std::transform(names.begin(),
names.end(),
pm_ins->inputs().begin(),
std::inserter(param_map, param_map.end()),
[&](auto name, auto input) {
if(ins_map.count(input))
return std::make_pair(pm->get_parameter(name), ins_map.at(input));
return std::make_pair(pm->get_parameter(name),
parent_mod->add_parameter(name, input->get_shape()));
});
return parent_mod->insert_instructions(parent_mod->end(), pm, param_map);
}
// Whitelist supported fusion options, including imposing type constraints
// for cases where MLIR only supports an operation (usually a pointwise function)
// on particular types.
bool is_pointwise_op_supported_by_mlir(const instruction& i)
{
using type_t = shape::type_t;
const auto& name = i.name();
const auto result_type = i.get_shape().type();
const std::initializer_list<type_t> allowed_types = {type_t::float_type,
type_t::half_type,
type_t::int8_type,
type_t::int32_type,
type_t::bool_type};
// Preliminary type check.
if(not contains(allowed_types, result_type))
{ {
using type_t = shape::type_t;
const auto& name = i.name();
const auto result_type = i.get_shape().type();
const std::initializer_list<type_t> allowed_types = {type_t::float_type,
type_t::half_type,
type_t::int8_type,
type_t::int32_type,
type_t::bool_type};
// Preliminary type check.
if(not contains(allowed_types, result_type))
{
return false;
}
const std::initializer_list<std::string> any_type_ops = {"@literal", "@param", "@return"};
const std::initializer_list<std::string> no_bool_ops = {
"convolution",
"quant_convolution",
"dot",
"quant_dot",
"add",
"clip",
"relu",
"sub",
"mul",
"div",
"pow",
"where",
"quantizelinear",
"dequantizelinear",
"abs",
"neg",
};
const std::initializer_list<std::string> fp_only_ops = {
"ceil",
"erf",
"exp",
"floor",
"log",
"recip",
"rsqrt",
"sigmoid",
"softmax",
"tanh",
};
bool is_float = contains({type_t::float_type, type_t::half_type}, result_type);
if(contains(any_type_ops, name))
return true;
if(result_type != type_t::bool_type and contains(no_bool_ops, name))
return true;
if(is_float and contains(fp_only_ops, name))
return true;
// Only conversions between floating types are known to be unambigiously
// supported.
if(is_float and name == "convert")
{
return std::all_of(i.inputs().begin(), i.inputs().end(), [](const auto& arg) {
return contains({type_t::float_type, type_t::half_type}, arg->get_shape().type());
});
}
return false; return false;
} }
const std::initializer_list<std::string> any_type_ops = {"@literal", "@param", "@return"};
const std::initializer_list<std::string> no_bool_ops = {
"convolution",
"quant_convolution",
"dot",
"quant_dot",
"add",
"clip",
"relu",
"sub",
"mul",
"div",
"pow",
"where",
"quantizelinear",
"dequantizelinear",
"abs",
"neg",
};
const std::initializer_list<std::string> fp_only_ops = {
"ceil",
"erf",
"exp",
"floor",
"log",
"recip",
"rsqrt",
"sigmoid",
"softmax",
"tanh",
};
bool is_float = contains({type_t::float_type, type_t::half_type}, result_type);
if(contains(any_type_ops, name))
return true;
if(result_type != type_t::bool_type and contains(no_bool_ops, name))
return true;
if(is_float and contains(fp_only_ops, name))
return true;
// Only conversions between floating types are known to be unambigiously
// supported.
if(is_float and name == "convert")
{
return std::all_of(i.inputs().begin(), i.inputs().end(), [](const auto& arg) {
return contains({type_t::float_type, type_t::half_type}, arg->get_shape().type());
});
}
return false;
}
MIGRAPHX_PRED_MATCHER(mlir_pointwise, instruction_ref ins)
{
if(ins->name() != "pointwise")
return false;
auto* pm = ins->module_inputs().front();
return std::all_of(pm->begin(), pm->end(), [&](const auto& i) {
return is_pointwise_op_supported_by_mlir(i);
});
}
struct find_mlir_fused_ops
{
mlir_mode conv_mode = mlir_mode::none;
mlir_mode dot_mode = mlir_mode::none;
auto matcher() const
{
auto dot_or_conv = match::skip(match::name("contiguous"))(
match::any_of(is_mlir_dot(dot_mode), is_mlir_conv(conv_mode)).bind("gemm_based_op"));
return mlir_pointwise()(match::any_of[match::inputs()](dot_or_conv.bind("x")));
}
void apply(module_pass_manager& mpm, const match::matcher_result& r) const
{
...@@ -309,29 +373,12 @@ struct find_mlir_fused_ops
auto x_ins = r.instructions["x"]; // input after contiguous
auto* pm = ins->module_inputs().front();
auto names = pm->get_parameter_names();
std::sort(names.begin(), names.end());
module_ref mm = mpm.create_module("mlir_" + pm->name());
mm->set_bypass();
auto [anchor_op, top_inputs] = fuse_input_ops_and_gemm_based_op(
mm, gemm_based_op->inputs(), gemm_based_op->get_operator());
mm->add_return(fold_pointwise_mod(ins, mm, {{x_ins, anchor_op}}));
std::vector<instruction_ref> inputs;
std::copy_if(ins->inputs().begin(),
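An aside on the helpers used above: fuse_input_ops_and_gemm_based_op and fold_pointwise_mod are defined earlier in the file, outside this hunk. From the call sites alone, a plausible declaration for fold_pointwise_mod would be the following sketch (an inference for orientation, not the PR's code):

// Inferred from the call sites: inline the pointwise module of `ins` into `mm`,
// substituting inputs already present in `map_ins` (e.g. the fused gemm/conv
// result) and adding the remaining inputs as new parameters; the returned
// instructions are what the callers hand to mm->add_return(...).
std::vector<instruction_ref>
fold_pointwise_mod(instruction_ref ins,
                   module_ref mm,
                   const std::unordered_map<instruction_ref, instruction_ref>& map_ins);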
...@@ -349,52 +396,103 @@ struct find_mlir_standalone_op
{
mlir_mode mode = mlir_mode::none;
auto matcher() const { return Matcher(mode); }

void apply(module_pass_manager& mpm, const match::matcher_result& r) const
{
auto gemm_based_op = r.result;
// enable only for fp32/fp16/i8 types
if(std::any_of(gemm_based_op->inputs().begin(), gemm_based_op->inputs().end(), [&](auto i) {
return not contains(
{shape::type_t::float_type, shape::type_t::half_type, shape::type_t::int8_type},
i->get_shape().type());
}))
return;
static size_t counter = 0;
module_ref mm =
mpm.create_module("mlir_" + gemm_based_op->name() + std::to_string(counter++));
mm->set_bypass();
auto [anchor_op, top_inputs] = fuse_input_ops_and_gemm_based_op(
mm, gemm_based_op->inputs(), gemm_based_op->get_operator());
mm->add_return({anchor_op});
mpm.get_module().replace_instruction(
gemm_based_op, mlir_op{gemm_based_op->get_operator()}, top_inputs, {mm});
}
};

using find_mlir_standalone_convolution_op = find_mlir_standalone_op<&is_mlir_conv>;
using find_mlir_standalone_dot_op = find_mlir_standalone_op<&is_mlir_dot>;
/**
 * @brief Declares a new MIGraphX environment variable which forces MIGraphX to
 * generate only specific MLIR operations.
 *
 * The variable, if defined, forces MIGraphX to use only specific operations
 * with MLIR regardless of the underlying GPU architecture. The variable accepts
 * a comma-separated list of operations and recognizes the following values:
 * "fused", "convolution", "dot". If the variable is not defined, MIGraphX
 * decides by itself which operations to delegate to MLIR. The variable is
 * intended primarily for rocMLIR developers.
 */
MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_MLIR_USE_SPECIFIC_OPS);

bool is_requested(std::string_view option, bool fallback = false)
{
auto string_value = string_value_of(MIGRAPHX_MLIR_USE_SPECIFIC_OPS{}, "");
if(string_value.empty())
return fallback;
const auto options = split_string(string_value, ',');
return contains(options, option);
}

struct find_mlir_standalone_attention_op
{
auto matcher() const
{
return match::name("gpu::pre_gemm_softmax_gemm").bind("gemm_softmax_gemm");
}

void apply(module_pass_manager& mpm, const match::matcher_result& r) const
{
static size_t counter = 0;
module_ref mm = mpm.create_module("mlir_" + std::to_string(counter++));
auto gemm_softmax_gemm = r.instructions["gemm_softmax_gemm"];
std::vector<instruction_ref> inputs;
mm->set_bypass();
std::unordered_map<instruction_ref, instruction_ref> ins_map;
auto gemm0_inputs = gemm_softmax_gemm->inputs();
gemm0_inputs.pop_back();
auto [gemm0, top_gemm0_inputs] =
fuse_input_ops_and_gemm_based_op(mm, gemm0_inputs, make_op("dot"));
inputs.insert(inputs.begin(), top_gemm0_inputs.begin(), top_gemm0_inputs.end());
// handle scale
auto v = gemm_softmax_gemm->get_operator().to_value();
assert(v.contains("scale"));
auto scale = v.at("scale").to<float>();
auto scale_lit = mm->add_literal(literal{shape{gemm0->get_shape().type()}, {scale}});
instruction_ref scale_lit_mbcast = mm->add_instruction(
make_op("multibroadcast", {{"out_lens", gemm0->get_shape().lens()}}), scale_lit);
auto scaled_gemm0 = mm->add_instruction(make_op("mul"), gemm0, scale_lit_mbcast);
auto softmax = mm->add_instruction(
make_op("softmax", {{"axis", gemm0->get_shape().lens().size() - 1}}), scaled_gemm0);
auto [old_upper_v, upper_v_op_stream] =
get_fusable_input_op_stream(gemm_softmax_gemm->inputs()[2]);
instruction_ref new_upper_v = mm->add_parameter("z", old_upper_v->get_shape());
for(const auto& op : reverse(upper_v_op_stream))
{
new_upper_v = mm->add_instruction(op, {new_upper_v});
}
inputs.push_back(old_upper_v);
auto gemm1 = mm->add_instruction(make_op("dot"), {softmax, new_upper_v});
ins_map[gemm_softmax_gemm] = gemm1;
auto ins_to_replace = gemm1;
auto ins_to_be_replaced = gemm_softmax_gemm;
if(r.instructions.find("trailing_pm") != r.instructions.end())
{
ins_to_replace = fold_pointwise_mod(r.instructions["trailing_pm"], mm, ins_map)[0];
std::copy_if(r.instructions["trailing_pm"]->inputs().begin(),
r.instructions["trailing_pm"]->inputs().end(),
std::back_inserter(inputs),
[&](auto input) { return input != gemm_softmax_gemm; });
ins_to_be_replaced = r.instructions["trailing_pm"];
}
mm->add_return({ins_to_replace});
mpm.get_module().replace_instruction(
ins_to_be_replaced, mlir_op{gemm1->get_operator()}, inputs, {mm});
}
};
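For orientation, the module assembled by apply above evaluates softmax(scale * dot(a, b)) followed by a second dot with the value tensor, i.e. scaled-dot-product attention. A minimal sketch of the same instruction sequence, using only API calls already visible in this hunk (the 64x64 half-precision parameter names and shapes are illustrative, not from the PR):

// Build softmax(scale * dot(q, k)) * v with hypothetical 64x64 half inputs;
// `mpm` is assumed to be a module_pass_manager as in the pass above.
module_ref mm = mpm.create_module("mlir_attention_sketch");
auto q      = mm->add_parameter("q", shape{shape::half_type, {64, 64}});
auto k      = mm->add_parameter("k", shape{shape::half_type, {64, 64}});
auto v      = mm->add_parameter("v", shape{shape::half_type, {64, 64}});
auto gemm0  = mm->add_instruction(make_op("dot"), q, k);
auto scale  = mm->add_literal(literal{shape{gemm0->get_shape().type()}, {0.125f}});
auto bscale = mm->add_instruction(
    make_op("multibroadcast", {{"out_lens", gemm0->get_shape().lens()}}), scale);
auto scaled = mm->add_instruction(make_op("mul"), gemm0, bscale);
auto soft   = mm->add_instruction(make_op("softmax", {{"axis", 1}}), scaled);
mm->add_return({mm->add_instruction(make_op("dot"), soft, v)});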
struct find_mlir_attention_fused_ops : public find_mlir_standalone_attention_op
{
auto matcher() const
{
auto standalone_matcher = find_mlir_standalone_attention_op::matcher();
return mlir_pointwise()(
match::any_of[match::inputs()](standalone_matcher).bind("trailing_pm"));
}
};

} // namespace
#endif // MIGRAPHX_MLIR
...@@ -416,6 +514,13 @@ void fuse_mlir::apply(module_pass_manager& mpm) const
mlir_mode mode =
(enabled(MIGRAPHX_ENABLE_EXTRA_MLIR{}) or enable_extra) ? mlir_mode::fast : mlir_mode::none;
// Attention offloads; default disabled
if(mlir_attention_enabled())
{
match::find_matches(mpm, find_mlir_attention_fused_ops{});
match::find_matches(mpm, find_mlir_standalone_attention_op{});
}
match::find_matches(mpm,
find_mlir_fused_ops{.conv_mode = get_mode("fused", mlir_mode::fast),
.dot_mode = get_mode("fused", mode)});
...
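mlir_attention_enabled() is declared in the header change below, but its body falls outside the hunks shown here. A plausible sketch, assuming it keys off the "attention" entry of MIGRAPHX_MLIR_USE_SPECIFIC_OPS via the is_requested helper above (an assumption, not the merged source):

bool mlir_attention_enabled()
{
    // Assumption: attention offload stays off unless MLIR is enabled and the
    // user explicitly lists "attention" in MIGRAPHX_MLIR_USE_SPECIFIC_OPS.
    if(not mlir_enabled())
        return false;
    return is_requested("attention");
}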
...@@ -34,10 +34,11 @@ struct module_pass_manager;
namespace gpu {

MIGRAPHX_GPU_EXPORT bool mlir_enabled();
MIGRAPHX_GPU_EXPORT bool mlir_attention_enabled();

struct MIGRAPHX_GPU_EXPORT fuse_mlir
{
context* ctx = nullptr;
bool enable_extra = false;
std::string name() const { return "gpu::fuse_mlir"; }
void apply(module_pass_manager& mpm) const;
...
...@@ -66,6 +66,10 @@ struct gemm_softmax_gemm
}

static bool is_ck_supported_type(shape::type_t t) { return contains({shape::half_type}, t); }
static bool is_mlir_supported_type(shape::type_t t)
{
return contains({shape::type_t::float_type, shape::half_type}, t);
}
};

} // namespace gpu
...
/* ************************************************************************
* Copyright (C) 2016-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
* ies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
* PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
* CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* ************************************************************************ */
#ifndef MIGRAPHX_GUARD_KERNELS_BITCAST_HPP
#define MIGRAPHX_GUARD_KERNELS_BITCAST_HPP
#include <migraphx/kernels/type_traits.hpp>
namespace migraphx {
template <typename To,
typename From,
MIGRAPHX_REQUIRES(is_trivially_copyable<To>{} and is_trivially_copyable<From>{})>
inline constexpr To bit_cast(From fr) noexcept
{
static_assert(sizeof(To) == sizeof(From));
return __builtin_bit_cast(To, fr);
}
} // namespace migraphx
#endif // MIGRAPHX_GUARD_KERNELS_BITCAST_HPP
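A short usage sketch for the header above (illustrative, not part of the file): because bit_cast copies the object representation, a float's IEEE-754 bits can be inspected and round-tripped exactly.

#include <migraphx/kernels/bit_cast.hpp>
#include <migraphx/kernels/types.hpp>

namespace migraphx {
// Round-trip a float through its bit pattern; 1.0f corresponds to 0x3F800000.
__device__ inline bool bit_cast_roundtrips(float f)
{
    auto bits = bit_cast<uint32_t>(f);
    return bit_cast<float>(bits) == f; // exact for any non-NaN input
}
} // namespace migraphx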
/* ************************************************************************
* Copyright (C) 2016-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
* ies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
* PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
* CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* ************************************************************************ */
#ifndef MIGRAPHX_GUARD_KERNELS_FLOAT8_HPP
#define MIGRAPHX_GUARD_KERNELS_FLOAT8_HPP
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wfloat-equal"
#pragma clang diagnostic ignored "-Wc++20-extensions" // required for "asm" inside constexpr
#endif // __clang__
// We are clipping in down conversion by default
#define MIGRAPHX_F8_DOWNCAST_CLIPPING 1 // NOLINT
#include <migraphx/kernels/types.hpp>
#include <migraphx/kernels/type_traits.hpp>
#include <migraphx/kernels/float8_impl.hpp>
namespace migraphx {
namespace fp8 {
enum class rounding_mode
{
standard, // standard rounding is doing RNE -- round to nearest even
stochastic
};
enum class f8_type
{
bf8 = 0, // s1e5m2
fp8 = 1 // s1e4m3
};
template <typename T>
class numeric_limits;
template <migraphx::fp8::f8_type T = migraphx::fp8::f8_type::fp8, bool FNUZ = true>
struct float8
{
uint8_t data;
// default constructor
__device__ constexpr float8() = default;
// default copy constructor
__device__ constexpr float8(const float8& y) = default;
struct from_bits_t
{
};
static constexpr __device__ from_bits_t from_bits() { return from_bits_t(); }
__device__ explicit constexpr float8(uint8_t bits, from_bits_t) : data(bits) {}
#if defined(__gfx940__) || defined(__gfx941__) || defined(__gfx942__)
// device specific optimized F8 down-conversion code
template <bool stochastic_rounding = false>
static __device__ uint8_t cast_to_f8_from_f32(float v, uint32_t rng = 0)
{
uint8_t i8data = 0x00;
union
{
float fval;
uint32_t i32val;
uint8_t i8val[4]; // NOTE: not endian independent
} val;
uint32_t ival = 0;
val.fval = v;
#ifdef MIGRAPHX_F8_DOWNCAST_CLIPPING
if constexpr(T == migraphx::fp8::f8_type::fp8)
{
if((val.i32val & 0x7F800000) != 0x7F800000) /// propagate NAN/INF, no clipping
val.fval = __builtin_amdgcn_fmed3f(val.fval, 240.0, -240.0);
}
else
{
if((val.i32val & 0x7F800000) != 0x7F800000) // propagate NAN/INF, no clipping
val.fval = __builtin_amdgcn_fmed3f(val.fval, 57344.0, -57344.0);
}
#endif
if(stochastic_rounding)
{
if constexpr(T == migraphx::fp8::f8_type::fp8)
{
ival = __builtin_amdgcn_cvt_sr_fp8_f32(val.fval, rng, ival, 0); // 0 pos
}
else
{
ival = __builtin_amdgcn_cvt_sr_bf8_f32(val.fval, rng, ival, 0); // 0 pos
}
}
else // RNE CVT
{
if constexpr(T == migraphx::fp8::f8_type::fp8)
{
ival = __builtin_amdgcn_cvt_pk_fp8_f32(
val.fval, val.fval, ival, false); // false -> WORD0
}
else
{
ival = __builtin_amdgcn_cvt_pk_bf8_f32(
val.fval, val.fval, ival, false); // false -> WORD0
}
}
val.i32val = ival;
i8data = val.i8val[0]; // little endian
return i8data;
}
#endif // __gfx940__
// constructor from float
#if defined(__gfx940__) || defined(__gfx941__) || defined(__gfx942__)
// NOTE: ON-DEVICE... always optimal bias
explicit constexpr __device__
float8(const float v,
migraphx::fp8::rounding_mode rm = migraphx::fp8::rounding_mode::standard,
uint32_t rng = 0)
{
if(__builtin_is_constant_evaluated())
{
if constexpr(T == migraphx::fp8::f8_type::fp8)
{
#ifdef MIGRAPHX_F8_DOWNCAST_CLIPPING
data = migraphx::fp8::impl::
cast_to_f8<3, 4, float, FNUZ /*negative_zero_nan*/, true /*clip*/>(
v, (rm == migraphx::fp8::rounding_mode::stochastic), rng);
#else // MIGRAPHX_F8_DOWNCAST_CLIPPING
data = migraphx::fp8::impl::
cast_to_f8<3, 4, float, FNUZ /*negative_zero_nan*/, false /*clip*/>(
v, (rm == migraphx::fp8::rounding_mode::stochastic), rng);
#endif // MIGRAPHX_F8_DOWNCAST_CLIPPING
}
else
{
#ifdef MIGRAPHX_F8_DOWNCAST_CLIPPING
data = migraphx::fp8::impl::
cast_to_f8<2, 5, float, FNUZ /*negative_zero_nan*/, true /*clip*/>(
v, (rm == migraphx::fp8::rounding_mode::stochastic), rng);
#else // MIGRAPHX_F8_DOWNCAST_CLIPPING
data = migraphx::fp8::impl::
cast_to_f8<2, 5, float, FNUZ /*negative_zero_nan*/, false /*clip*/>(
v, (rm == migraphx::fp8::rounding_mode::stochastic), rng);
#endif // MIGRAPHX_F8_DOWNCAST_CLIPPING
}
}
else
{
// runtime branch, use cast_to_f8_from_f32 if want to avoid it
if(rm == migraphx::fp8::rounding_mode::stochastic)
data = cast_to_f8_from_f32<true>(v, rng);
else
data = cast_to_f8_from_f32<false>(v);
}
}
#else
// DEVICE for non-gfx940 using s/w simulation
explicit constexpr __device__
float8(const float v,
migraphx::fp8::rounding_mode rm = migraphx::fp8::rounding_mode::standard,
uint32_t rng = 0)
{
if constexpr(T == migraphx::fp8::f8_type::fp8)
{
#ifdef MIGRAPHX_F8_DOWNCAST_CLIPPING
data = migraphx::fp8::impl::
cast_to_f8<3, 4, float, FNUZ /*negative_zero_nan*/, true /*clip*/>(
v, (rm == migraphx::fp8::rounding_mode::stochastic), rng);
#else // MIGRAPHX_F8_DOWNCAST_CLIPPING
data = migraphx::fp8::impl::
cast_to_f8<3, 4, float, FNUZ /*negative_zero_nan*/, false /*clip*/>(
v, (rm == migraphx::fp8::rounding_mode::stochastic), rng);
#endif // MIGRAPHX_F8_DOWNCAST_CLIPPING
}
else
{
#ifdef MIGRAPHX_F8_DOWNCAST_CLIPPING
data = migraphx::fp8::impl::
cast_to_f8<2, 5, float, FNUZ /*negative_zero_nan*/, true /*clip*/>(
v, (rm == migraphx::fp8::rounding_mode::stochastic), rng);
#else // MIGRAPHX_F8_DOWNCAST_CLIPPING
data = migraphx::fp8::impl::
cast_to_f8<2, 5, float, FNUZ /*negative_zero_nan*/, false /*clip*/>(
v, (rm == migraphx::fp8::rounding_mode::stochastic), rng);
#endif // MIGRAPHX_F8_DOWNCAST_CLIPPING
}
}
#endif // __gfx940__
// Constructor from half
explicit constexpr __device__
float8(const _Float16 v, rounding_mode rm = rounding_mode::standard, uint32_t rng = 0)
: float8(static_cast<float>(v), rm, rng)
{
}
// constructor from int
explicit constexpr __device__
float8(const int v, rounding_mode rm = rounding_mode::standard, uint32_t rng = 0)
: float8(static_cast<float>(v), rm, rng)
{
}
// constructor from uint
explicit constexpr __device__
float8(const uint32_t v, rounding_mode rm = rounding_mode::standard, uint32_t rng = 0)
: float8(static_cast<float>(v), rm, rng)
{
}
// constructor from double
explicit constexpr __device__
float8(const double v, rounding_mode rm = rounding_mode::standard, uint32_t rng = 0)
: float8(static_cast<float>(v), rm, rng)
{
}
// constructor from bool
explicit constexpr __device__
float8(const bool v, rounding_mode rm = rounding_mode::standard, uint32_t rng = 0)
: float8(static_cast<float>(v), rm, rng)
{
}
// convert to float
#if defined(__gfx940__) || defined(__gfx941__) || defined(__gfx942__) // NOLINT
// upcast using device specific intrinsic
inline constexpr __device__ operator float() const
{
if(__builtin_is_constant_evaluated())
{
if constexpr(T == migraphx::fp8::f8_type::fp8)
{
return migraphx::fp8::impl::cast_from_f8<3, 4, float, FNUZ /*negative_zero_nan*/>(
data);
} // else
return migraphx::fp8::impl::cast_from_f8<2, 5, float, FNUZ /*negative_zero_nan*/>(data);
}
else
{
float fval = 0;
uint32_t i32val = static_cast<uint32_t>(data);
// upcast
if constexpr(T == migraphx::fp8::f8_type::fp8)
{
__asm__ volatile("v_cvt_f32_fp8 %0, %1 src0_sel:BYTE_0" : "=v"(fval) : "v"(i32val));
}
else
{
__asm__ volatile("v_cvt_f32_bf8 %0, %1 src0_sel:BYTE_0" : "=v"(fval) : "v"(i32val));
}
return fval;
}
}
#else // non gfx940
inline constexpr __device__ operator float() const
{
if constexpr(T == migraphx::fp8::f8_type::fp8)
{
return migraphx::fp8::impl::cast_from_f8<3, 4, float, FNUZ /*negative_zero_nan*/>(data);
} // else
return migraphx::fp8::impl::cast_from_f8<2, 5, float, FNUZ /*negative_zero_nan*/>(data);
}
#endif
inline constexpr explicit __device__ operator bool() const { return not is_zero(); }
// check for zero
inline __device__ constexpr bool is_zero() const
{
if constexpr(FNUZ)
{
return data == 0x00;
}
else
{
return (data == 0x00) || (data == 0x80);
}
}
// check for nan
inline __device__ constexpr bool is_nan() const
{
if constexpr(FNUZ)
{
return data == 0x80;
}
else
{
if(T == migraphx::fp8::f8_type::bf8)
{
return (data == 0x7D) or (data == 0x7E) or (data == 0x7F) or (data == 0xFD) or
(data == 0xFE) or (data == 0xFF);
}
else
{
return (data == 0x7F) or (data == 0xFF);
}
}
}
// check for inf
inline __device__ constexpr bool is_inf() const
{
if constexpr(FNUZ)
{
return data == 0x80;
}
else
{
if(T == migraphx::fp8::f8_type::bf8)
{
return (data == 0x7C) or (data == 0xFC);
}
else
{
// no infinities in e4m3fn, represent them as NaNs
return (data == 0x7F) or (data == 0xFF);
}
}
}
// NOLINTNEXTLINE
#define MIGRAPHX_FP8_SHORT_UNARY_OP(unary_op, binary_op) \
constexpr float8& __device__ operator unary_op(const float8& rhs) \
{ \
const auto tmp = static_cast<float>(*this) binary_op static_cast<float>(rhs); \
*this = static_cast<float8>(tmp); \
return *this; \
} \
constexpr float8& __device__ operator unary_op(const float& rhs) \
{ \
const auto tmp = static_cast<float>(*this) binary_op static_cast<float>(rhs); \
*this = static_cast<float8>(tmp); \
return *this; \
}
MIGRAPHX_FP8_SHORT_UNARY_OP(*=, *)
MIGRAPHX_FP8_SHORT_UNARY_OP(-=, -)
MIGRAPHX_FP8_SHORT_UNARY_OP(+=, +)
MIGRAPHX_FP8_SHORT_UNARY_OP(/=, /)
inline __device__ constexpr float8& operator=(const float8& rhs) = default;
inline __device__ constexpr float8& operator=(float8&& rhs) noexcept = default;
inline __device__ constexpr bool operator<(const float8& rhs) const
{
const auto we = static_cast<float>(*this);
const auto them = static_cast<float>(rhs);
return we < them;
}
inline __device__ constexpr bool operator>(const float8& rhs) const
{
const auto we = static_cast<float>(*this);
const auto them = static_cast<float>(rhs);
return we > them;
}
};
// https://onnx.ai/onnx/technical/float8.html
using fp8e4m3fn = float8<migraphx::fp8::f8_type::fp8, false>;
using fp8e5m2 = float8<migraphx::fp8::f8_type::bf8, false>;
using fp8e4m3fnuz = float8<migraphx::fp8::f8_type::fp8, true>;
using fp8e5m2fnuz = float8<migraphx::fp8::f8_type::bf8, true>;
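As a quick illustration of the four aliases above (a sketch, not part of the header): construction from float uses RNE rounding by default, and because MIGRAPHX_F8_DOWNCAST_CLIPPING is defined, out-of-range values clip to the format's maximum, which is 240 for e4m3fnuz.

__device__ inline void fp8_roundtrip_demo()
{
    migraphx::fp8::fp8e4m3fnuz a{1.5f};   // exactly representable in e4m3
    float back = static_cast<float>(a);   // back == 1.5f
    migraphx::fp8::fp8e4m3fnuz b{1.0e6f}; // clips to +240 (bits 0x7F) under
                                          // MIGRAPHX_F8_DOWNCAST_CLIPPING
    (void)back;
    (void)b;
}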
// NOLINTNEXTLINE
#define MIGRAPHX_FP8_BINARY_OP(binary_op, T, U) \
inline constexpr U __device__ operator binary_op(const T& lhs, const T& rhs) \
{ \
return U(static_cast<float>(lhs) binary_op static_cast<float>(rhs)); \
}
// NOLINTNEXTLINE
#define MIGRAPHX_FP8_OTHER_OPS(T) \
inline constexpr __device__ T fabs(T v) \
{ \
/*NOLINTNEXTLINE*/ \
v.data = v.data & 0x7f; \
return v; \
} \
inline __device__ constexpr bool operator==(const T& lhs, const T& rhs) \
{ \
if(rhs.is_nan() or rhs.is_inf() or lhs.is_nan() or lhs.is_inf()) \
return false; \
else if((rhs.is_zero() and lhs.is_zero()) or (lhs.data == rhs.data)) \
return true; \
return false; \
}
// NOLINTNEXTLINE
#define MIGRAPHX_FP8_GEN_OP_OVERLOADS(T) \
MIGRAPHX_FP8_BINARY_OP(*, T, T) \
MIGRAPHX_FP8_BINARY_OP(-, T, T) \
MIGRAPHX_FP8_BINARY_OP(/, T, T) \
MIGRAPHX_FP8_BINARY_OP(+, T, T) \
MIGRAPHX_FP8_BINARY_OP(>=, T, bool) \
MIGRAPHX_FP8_BINARY_OP(<=, T, bool) \
MIGRAPHX_FP8_BINARY_OP(!=, T, bool) \
MIGRAPHX_FP8_OTHER_OPS(T)
MIGRAPHX_FP8_GEN_OP_OVERLOADS(fp8e5m2)
MIGRAPHX_FP8_GEN_OP_OVERLOADS(fp8e5m2fnuz)
MIGRAPHX_FP8_GEN_OP_OVERLOADS(fp8e4m3fn)
MIGRAPHX_FP8_GEN_OP_OVERLOADS(fp8e4m3fnuz)
template <>
class numeric_limits<fp8e4m3fnuz>
{
public:
static constexpr bool has_infinity = false;
static constexpr __device__ fp8e4m3fnuz epsilon()
{
return fp8e4m3fnuz(0x28, fp8e4m3fnuz::from_bits());
}
// NOLINTNEXTLINE
static constexpr __device__ fp8e4m3fnuz quiet_NaN()
{
return fp8e4m3fnuz(0x80, fp8e4m3fnuz::from_bits());
}
static constexpr __device__ fp8e4m3fnuz max()
{
return fp8e4m3fnuz(0x7F, fp8e4m3fnuz::from_bits());
}
// this is min value that is not DeNormalized(DeNorm). DeNorm min is 0x01
static constexpr __device__ fp8e4m3fnuz min()
{
return fp8e4m3fnuz(0x08, fp8e4m3fnuz::from_bits());
}
static constexpr __device__ fp8e4m3fnuz lowest()
{
return fp8e4m3fnuz(0xFF, fp8e4m3fnuz::from_bits());
}
};
template <>
class numeric_limits<fp8e4m3fn>
{
public:
static constexpr bool has_infinity = false;
static constexpr __device__ fp8e4m3fn epsilon()
{
return fp8e4m3fn(0x20, fp8e4m3fn::from_bits());
}
// NOLINTNEXTLINE
static constexpr __device__ fp8e4m3fn quiet_NaN()
{
return fp8e4m3fn(0x7F, fp8e4m3fn::from_bits());
}
static constexpr __device__ fp8e4m3fn max() { return fp8e4m3fn(0x7E, fp8e4m3fn::from_bits()); }
// this is min value that is not DeNormalized(DeNorm). DeNorm min is 0x01
static constexpr __device__ fp8e4m3fn min() { return fp8e4m3fn(0x08, fp8e4m3fn::from_bits()); }
static constexpr __device__ fp8e4m3fn lowest()
{
return fp8e4m3fn(0xFE, fp8e4m3fn::from_bits());
}
};
template <>
class numeric_limits<fp8e5m2fnuz>
{
public:
static constexpr bool has_infinity = false;
static constexpr __device__ fp8e5m2fnuz epsilon()
{
return fp8e5m2fnuz(0x34, fp8e5m2fnuz::from_bits());
}
static constexpr __device__ fp8e5m2fnuz quiet_NaN() // NOLINT
{
return fp8e5m2fnuz(0x80, fp8e5m2fnuz::from_bits());
}
static constexpr __device__ fp8e5m2fnuz max()
{
return fp8e5m2fnuz(0x7F, fp8e5m2fnuz::from_bits());
}
// this is min value that is not DeNormalized(DeNorm). DeNorm min is 0x01. I am not sure if we
// want to make this distinction. For the floating points we would end up using lowest most of
// the times.
static constexpr __device__ fp8e5m2fnuz min()
{
return fp8e5m2fnuz(0x4, fp8e5m2fnuz::from_bits());
}
static constexpr __device__ fp8e5m2fnuz lowest()
{
return fp8e5m2fnuz(0xFF, fp8e5m2fnuz::from_bits());
}
};
template <>
class numeric_limits<fp8e5m2>
{
public:
static constexpr bool has_infinity = true;
static constexpr __device__ fp8e5m2 epsilon() { return fp8e5m2(0x34, fp8e5m2::from_bits()); }
// 7D, 7E, 7F are positive NaNs and FD, FE, FF are negative NaNs
static constexpr __device__ fp8e5m2 quiet_NaN() // NOLINT
{
return fp8e5m2(0xFF, fp8e5m2::from_bits());
}
static constexpr __device__ fp8e5m2 max() { return fp8e5m2(0x7B, fp8e5m2::from_bits()); }
// this is min value that is not DeNormalized(DeNorm). DeNorm min is 0x01. I am not sure if we
// want to make this distinction. For the floating points we would end up using lowest most of
// the times.
static constexpr __device__ fp8e5m2 min() { return fp8e5m2(0x4, fp8e5m2::from_bits()); }
static constexpr __device__ fp8e5m2 lowest() { return fp8e5m2(0xFB, fp8e5m2::from_bits()); }
// 7C and FC both are infinity
static constexpr __device__ fp8e5m2 infinity() { return fp8e5m2(0x7C, fp8e5m2::from_bits()); }
};
} // namespace fp8
template <class T,
MIGRAPHX_REQUIRES(is_same<T, fp8::fp8e4m3fnuz>{} or is_same<T, fp8::fp8e5m2fnuz>{} or
is_same<T, fp8::fp8e4m3fn>{} or is_same<T, fp8::fp8e5m2>{})>
constexpr T numeric_max(migraphx::fp8::f8_type unused = migraphx::fp8::f8_type::fp8)
{
// unused parameter is added to make this numeric_max different overload definition
// compared to numeric_max defined in type_traits.hpp
(void)(unused);
return fp8::numeric_limits<T>::max();
}
template <class T,
MIGRAPHX_REQUIRES(is_same<T, fp8::fp8e4m3fnuz>{} or is_same<T, fp8::fp8e5m2fnuz>{} or
is_same<T, fp8::fp8e4m3fn>{} or is_same<T, fp8::fp8e5m2>{})>
constexpr T numeric_lowest(migraphx::fp8::f8_type unused = migraphx::fp8::f8_type::fp8)
{
// unused parameter is added to make this numeric_lowest different overload definition
// compared to numeric_lowest defined in type_traits.hpp
(void)(unused);
return fp8::numeric_limits<T>::lowest();
}
} // namespace migraphx
// =================================================================================================
#if defined(__clang__)
#pragma clang diagnostic pop
#endif // __clang__
#endif // MIGRAPHX_GUARD_KERNELS_FLOAT8_HPP
/* ************************************************************************
* Copyright (C) 2016-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
* ies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
* PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
* CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* ************************************************************************ */
#ifndef MIGRAPHX_GUARD_KERNELS_FP8_IMPL_HPP
#define MIGRAPHX_GUARD_KERNELS_FP8_IMPL_HPP
#include <migraphx/kernels/bit_cast.hpp>
#include <migraphx/kernels/type_traits.hpp>
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wreserved-identifier"
#endif
namespace migraphx {
namespace fp8 {
namespace impl {
// NOLINTBEGIN
template <int Wm, int We, typename T, bool NegativeZeroNan, bool Clip>
__device__ constexpr uint8_t cast_to_f8(T f_x, bool stoch = false, uint32_t rng = 0)
{
constexpr bool is_float = true;
// half is not supported for now
constexpr bool is_half = false;
static_assert(Wm + We == 7, "Wm+We==7");
static_assert(is_float or is_half, "Only float can be cast to f8");
const uint32_t mfmt = (sizeof(T) == 4) ? 23 : 10;
typename migraphx::conditional_t<sizeof(T) == 2, uint16_t, uint32_t> x;
if constexpr(sizeof(T) == 4)
x = migraphx::bit_cast<uint32_t>(f_x);
else
x = migraphx::bit_cast<uint16_t>(f_x);
uint32_t head = 0;
uint32_t mantissa = 0;
int exponent = 0;
uint32_t bias = 0;
uint32_t sign = 0;
if constexpr(sizeof(T) == 4)
{
head = x & 0xFF800000;
mantissa = x & 0x7FFFFF;
exponent = (head >> 23) & 0xFF;
sign = head >> 31;
bias = 127;
}
else
{
head = x & 0xFC00;
mantissa = x & 0x3FF;
exponent = (head >> 10) & 0x1F;
sign = head >> 15;
bias = 15;
}
uint32_t signed_inf = (sign << 7) + (((1 << We) - 1) << Wm);
uint32_t signed_all_ones = (sign << 7) + ((((1 << We) - 1) << Wm) + ((1 << Wm) - 1));
// Calculate the maximum signed value (the f8 analogue of FLT_MAX)
uint32_t signed_max = signed_all_ones;
if(not NegativeZeroNan)
signed_max = (Wm == 2) ? (signed_max - 4) : (signed_max - 1);
// Deal with inf and NaNs
if(NegativeZeroNan) // For the FNUZ cases, it is simple just return NaNs
{
if((sizeof(T) == 4 and ((x & 0x7F800000) == 0x7F800000)) or
(sizeof(T) == 2 and ((x & 0x7C00) == 0x7C00)))
return 0x80;
}
else
{
// calculate most common NaN mantissa for FP8, which is all Ones in binary
uint32_t nan_mantissa = 1;
for(auto i = 1; i < Wm; ++i)
{
nan_mantissa |= (nan_mantissa << 1);
}
if((sizeof(T) == 4 and ((x & 0x7F800000) == 0x7F800000)) or
(sizeof(T) == 2 and ((x & 0x7C00) == 0x7C00)))
{
// infinity
if(mantissa == 0)
{
if(sign == 0)
return (Wm == 2) ? 0x7B : 0x7E;
else
return (Wm == 2) ? 0xFB : 0xFE;
}
else // NaNs
return signed_inf + nan_mantissa;
}
}
// handle positive zero
if(x == 0)
return 0;
// handle negative zero
else if((sizeof(T) == 4 and x == 0x80000000) or (sizeof(T) == 2 and x == 0x8000))
{
return NegativeZeroNan ? 0 : 0x80; // For FNUZ types neg zero is just positive zero
}
/* First check whether the number is normal or denormal, since that changes the implicit 1.
Then adjust the exponent to align with the F8 exponent, shifting the mantissa accordingly.
For stochastic rounding, add rng to the mantissa and truncate; for RNE nothing is added.
Finally, check whether there is a carry and adjust the exponent and mantissa again. */
// For IEEE bias mode, the bias is 2^(k-1) -1 where k is the width of exponent bits
const int f8_bias = (1 << (We - 1u)) - 1 + (NegativeZeroNan ? 1 : 0);
const int f8_denormal_act_exponent = 1 - f8_bias; // actual exponent of f8 denormal
/* act_exponent is the actual exponent of fp32/fp16 (after subtracting bias)
f8_exponent is the converted f8 exponent with bias encoding
exponent_diff is the diff between fp32/fp16 exponent and f8 exponent,
the difference needs to be adjusted and mantissa shifted*/
int act_exponent = 0;
int f8_exponent = 0;
int exponent_diff = 0;
if(exponent == 0 and mantissa != 0)
{ // fp32/fp16 is in denormal.
/* An fp32 denormal is below 2^-127, so it is usually not a concern here; fp16 is the
main concern. In that case f8 is usually denormal too, but there are exceptions: fp16
denormals have exponent bias 15 while bf8 with FNUZ has exponent bias 16. This means some
fp16 denormals are bf8 (FNUZ) normals -- the smallest bf8 (FNUZ) normal is 2^-15. fp16
numbers where exponent == 0 (actual exponent -14) and the highest mantissa bit is 1 are
bf8 (FNUZ) normals; in that case the fp16 mantissa should be shifted left by 1. */
act_exponent = 1 - bias;
exponent_diff = f8_denormal_act_exponent -
act_exponent; // actual exponent is exponent-bias+1 as it is denormal
}
else
{ // fp32/fp16 is normal with implicit 1
act_exponent = exponent - bias;
if(act_exponent <= f8_denormal_act_exponent)
{
/* This is the case where fp32/fp16 is normal but falls into the f8 denormal range.
For example, in fp8 FNUZ mode the denormal exponent is -7, but an fp32/fp16 value
with actual exponent -7 is larger due to the implicit 1, so it must be adjusted to
-6 with the mantissa shifted right by 1. Thus for fp32/fp16, exponent -8 is the
cutoff for converting to fp8 FNUZ. */
exponent_diff = f8_denormal_act_exponent - act_exponent;
}
else
{ // both fp32/fp16 and f8 are in normal range
exponent_diff =
0; // exponent_diff=0 does not mean there is no difference for this case,
// act_exponent could be larger. Just that it does not need shift mantissa
}
mantissa += (1 << mfmt); // Add the implicit 1 into mantissa
}
// need to know whether the number is right in the middle of two adjacent fp8 numbers. use max
// value of 31 to avoid undefined behaviour
bool midpoint = (mantissa & ((1u << (mfmt - Wm + exponent_diff)) - 1)) ==
(1u << (mfmt - Wm + exponent_diff - 1));
/* This part is a bit tricky. Whether the value is a tie must be judged before we
shift right, since shifting right can strip residual bits and make something that is
not a midpoint look like one. For example, the fp16 number 0x1002 (0 00100 0000000010)
is larger than the midpoint, but after shifting right by 4 bits it would look like one.
*/
if(exponent_diff > 0)
mantissa >>= exponent_diff;
else if(exponent_diff == -1)
mantissa <<= -exponent_diff;
bool implicit_one = mantissa & (1 << mfmt);
// if there is no implict 1, it means the f8 is denormal and need to adjust to denorm exponent
f8_exponent =
(act_exponent + exponent_diff) /*actual f8 exponent*/ + f8_bias - (implicit_one ? 0 : 1);
// Now we have the exponent and mantissa adjusted
uint32_t drop_mask = (1 << (mfmt - Wm)) - 1;
bool odd =
mantissa & (1 << (mfmt - Wm)); // if the least significant bit that is not truncated is 1
/*
This part does the rounding by adding back the mantissa bits that are about to be
dropped: if the dropped part is less than 0.5 ULP the value rounds down; if it is
more than 0.5 ULP it rounds up by carrying into the LSB of the retained mantissa.
At the midpoint the bit pattern is `xy1:10000000` for the odd case and `xy0:10000000`
for the even case, where `:` separates the retained and dropped parts.
Odd case: xy1:10000000 + 000:10000000 carries into the LSB of the retained part,
giving round-to-nearest-even (RNE).
Even case: xy0:10000000 + 000:01111111 rounds down and keeps the number even.
*/
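// Concrete example (assuming Wm == 3, an fp32 input, and exponent_diff == 0):
// mfmt - Wm == 20, so drop_mask == 0xFFFFF and a midpoint has dropped bits 0x80000.
// Odd retained LSB:  adding (mantissa & drop_mask) == 0x80000 carries into bit 20,
//                    rounding up to the even neighbour.
// Even retained LSB: adding ((mantissa - 1) & drop_mask) == 0x7FFFF cannot carry,
//                    so the dropped bits truncate away, again landing on an even value.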
mantissa += (stoch ? rng : (midpoint ? (odd ? mantissa : mantissa - 1) : mantissa)) & drop_mask;
// Now we deal with overflow
if(f8_exponent == 0 and ((1 << mfmt) & mantissa))
{
f8_exponent = 1; // denormal overflow to become normal, promote exponent
}
else if((1 << (mfmt + 1)) & mantissa)
{
mantissa >>= 1;
f8_exponent++;
}
mantissa >>= (mfmt - Wm);
// above range: quantize to maximum possible float of the same sign
// for e5m2 case, max_exp is 14, since exp = 15 is reserved for Infs and Nans
const int max_exp = (1 << We) - ((NegativeZeroNan or Wm == 3) ? 1 : 2);
if(f8_exponent > max_exp)
{
if(Clip)
return signed_max;
else
{
// https://onnx.ai/onnx/technical/float8.html#cast
if(NegativeZeroNan)
return 0x80;
else
return (Wm == 2) ? signed_inf : signed_all_ones;
}
}
if(f8_exponent == 0 and mantissa == 0)
return NegativeZeroNan ? 0 : (sign << 7);
mantissa &= (1 << Wm) - 1;
return (sign << 7) | (f8_exponent << Wm) | mantissa;
}
// NOLINTEND
template <int Wm, int We, typename T, bool NegativeZeroNan>
__device__ constexpr T cast_from_f8(uint8_t x)
{
// half is not supported for now
constexpr bool is_half = false;
constexpr bool is_float = true;
static_assert(is_float or is_half, "Only float are supported");
constexpr int weo = is_half ? 5 : 8;
constexpr int wmo = is_half ? 10 : (is_float ? 23 : 7);
// NOLINTNEXTLINE
T f_inf, f_neg_inf, f_nan, f_neg0;
if constexpr(is_float)
{
const uint32_t if_inf = 0x7F800000;
const uint32_t if_neg_inf = 0xFF800000;
const uint32_t if_nan = 0x7F800001;
const uint32_t if_neg0 = 0x80000000;
f_inf = migraphx::bit_cast<float>(if_inf);
f_neg_inf = migraphx::bit_cast<float>(if_neg_inf);
f_nan = migraphx::bit_cast<float>(if_nan);
f_neg0 = migraphx::bit_cast<float>(if_neg0);
}
if(x == 0)
return 0;
uint32_t sign = x >> 7; // NOLINT
uint32_t mantissa = x & ((1 << Wm) - 1); // NOLINT
int exponent = (x & 0x7F) >> Wm; // NOLINT
if(NegativeZeroNan)
{
if(x == 0x80)
return f_nan;
}
else
{
if(x == 0x80)
return f_neg0;
if(exponent == ((1 << We) - 1) and Wm == 2) // NOLINT
return (mantissa == 0) ? (sign ? f_neg_inf : f_inf) : f_nan;
else if(Wm == 3 and (x == 0x7F or x == 0xFF))
return f_nan;
}
typename migraphx::conditional_t<sizeof(T) == 2, uint16_t, uint32_t> retval;
const int exp_low_cutoff =
(1 << (weo - 1)) - (1 << (We - 1)) + 1 - (NegativeZeroNan ? 1 : 0); // NOLINT
// subnormal input
if(exponent == 0)
{
// guaranteed mantissa!=0 since cases 0x0 and 0x80 are handled above
int sh = 1 + __builtin_clz(mantissa) - (32 - Wm);
mantissa <<= sh; // NOLINT
exponent += 1 - sh;
mantissa &= ((1 << Wm) - 1); // NOLINT
}
exponent += exp_low_cutoff - 1;
mantissa <<= wmo - Wm; // NOLINT
// subnormal output (occurs when T=half, We=5, negative_zero_nan=true)
if(exponent <= 0)
{
mantissa |= 1 << wmo; // NOLINT
mantissa >>= 1 - exponent; // NOLINT
exponent = 0;
}
if(sizeof(T) == 2)
retval = (sign << 15) | (exponent << 10) | mantissa; // NOLINT
else
retval = (sign << 31) | (exponent << 23) | mantissa; // NOLINT
return migraphx::bit_cast<T>(retval);
}
} // namespace impl
} // namespace fp8
} // namespace migraphx
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
#endif // MIGRAPHX_GUARD_KERNELS_FP8_IMPL_HPP
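To make the encodings concrete, a small decode sketch (illustrative only, hand-checked against the bit layout handled above):

__device__ inline void fp8_decode_demo()
{
    // e4m3fnuz (NegativeZeroNan == true, bias 8): bits 0x40 are
    // sign 0, exponent 8, mantissa 0 -> 2^(8-8) * 1.0 == 1.0f
    float one = migraphx::fp8::impl::cast_from_f8<3, 4, float, true>(0x40);
    // bits 0x7F (the e4m3fnuz maximum): 2^(15-8) * (1 + 7/8) == 240.0f
    float big = migraphx::fp8::impl::cast_from_f8<3, 4, float, true>(0x7F);
    (void)one;
    (void)big;
}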
...@@ -52,22 +52,25 @@ __device__ void generic_binary_layernorm(
block::template run<reduce_output>([&](auto, auto r) {
auto input = r.inner([&](auto x1, auto x2) { return op(x1, x2); })(input1, input2);
using value_type = typename Input1::type;
using vec_value_type = vec_type<value_type>;
constexpr auto relements = r.template elements<Input1>();
constexpr auto relements_r = vec_value_type{1.0 / relements};
auto relements_rsqrt = sqrt(relements_r);
auto means = r.reduce(op::sum{},
make_array<vec_value_type>(vec_value_type{0}, vec_value_type{0}),
[&](auto x) {
auto x_out = x * relements_r;
// dividing x by sqrt(relements) before squaring allows computing
// higher values before overflow in low precision
auto x2_sqrt = x * relements_rsqrt;
return make_array(x_out, x2_sqrt * x2_sqrt);
})(input);
auto mean_x = means[0];
auto mean_x2 = means[1];
auto variance = mean_x2 - (mean_x * mean_x);
value_type eps_val = implicit_conversion(eps); // implicit conversion for eps
r.inner([&](auto& y, auto x, auto... xs) {
auto m = x - mean_x;
...
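For reference, the single-pass reduction above relies on the shifted-moments identity, with the 1/sqrt(n) factor applied before squaring so the squared terms stay in range for low-precision accumulation:

\operatorname{Var}(x) = \mathbb{E}[x^2] - \mathbb{E}[x]^2,
\qquad
\mathbb{E}[x^2] = \sum_{i=1}^{n} \left(\frac{x_i}{\sqrt{n}}\right)^{2},
\qquad
\mathbb{E}[x] = \frac{1}{n}\sum_{i=1}^{n} x_i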