"src/quantize_8bits.cpp" did not exist on "c0398dedc045ba70a69b952856b6bce996688588"
Commit 1ce916c4 authored by Umang Yadav

Merge branch 'develop' into quant_gemm_fp8

parents 4315a991 8c73c72e
......@@ -301,6 +301,7 @@ target_link_libraries(migraphx_gpu PUBLIC migraphx MIOpen roc::rocblas)
target_link_libraries(migraphx_gpu PRIVATE migraphx_device migraphx_kernels)
if(MIGRAPHX_USE_COMPOSABLEKERNEL)
target_link_libraries(migraphx_gpu PRIVATE composable_kernel::jit_library)
target_compile_definitions(migraphx_gpu PRIVATE MIGRAPHX_USE_COMPOSABLEKERNEL=1)
endif()
add_subdirectory(driver)
......
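The new target_compile_definitions line makes the CMake option visible to the C++ sources, so composable_kernel-specific code paths can be compiled conditionally. A minimal, hedged sketch of how such a definition is typically consumed (illustrative only, not a specific MIGraphX source file):

#include <cstdio>

int main()
{
// The macro is defined to 1 by the CMake snippet above when
// MIGRAPHX_USE_COMPOSABLEKERNEL is enabled.
#ifdef MIGRAPHX_USE_COMPOSABLEKERNEL
    std::printf("composable_kernel JIT path enabled\n");
#else
    std::printf("composable_kernel JIT path disabled\n");
#endif
    return 0;
}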
......@@ -49,12 +49,8 @@ constexpr unsigned int dpp_row_bcast(unsigned int x)
return y;
}
template <unsigned int DppCtrl,
unsigned int RowMask = 0xf,
unsigned int BankMask = 0xf,
bool BoundCtrl = false,
class T>
__device__ T dpp_mov(T& x)
template <class T, class F>
__device__ T dpp_op(T& x, F f)
{
static const index_int n = sizeof(T) < 4 ? 1 : sizeof(T) / 4;
union type
......@@ -68,10 +64,28 @@ __device__ T dpp_mov(T& x)
input.data = x;
for(index_int i = 0; i < n; i++)
{
output.reg[i] = __hip_move_dpp(input.reg[i], DppCtrl, RowMask, BankMask, BoundCtrl);
output.reg[i] = f(input.reg[i]);
}
return output.data;
}
template <unsigned int DppCtrl,
unsigned int RowMask = 0xf,
unsigned int BankMask = 0xf,
bool BoundCtrl = false,
class T>
__device__ T dpp_mov(T& x)
{
return dpp_op(x,
[](auto i) { return __hip_move_dpp(i, DppCtrl, RowMask, BankMask, BoundCtrl); });
}
template <unsigned int Mask, class T>
__device__ T dpp_swizzle(T& x)
{
return dpp_op(x, [](auto i) { return __hip_ds_swizzle(i, Mask); });
}
#endif // MIGRAPHX_HAS_DPP
} // namespace migraphx
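The refactoring above factors the per-32-bit-register loop out of dpp_mov into a generic dpp_op that takes a per-register functor; dpp_mov and the new dpp_swizzle then become thin wrappers over the __hip_move_dpp and __hip_ds_swizzle intrinsics. A minimal host-side sketch of the same union-based splitting idiom, using a hypothetical name (apply_per_reg), no HIP intrinsics, and the same type-punning-through-a-union behavior the original code relies on:

#include <cstdint>
#include <cstdio>

// View a value as an array of 32-bit registers, apply a per-register
// functor, and reassemble the result (mirrors dpp_op on the host).
template <class T, class F>
T apply_per_reg(const T& x, F f)
{
    constexpr unsigned n = sizeof(T) < 4 ? 1 : sizeof(T) / 4;
    union type
    {
        uint32_t reg[n];
        T data;
    };
    type input;
    type output;
    input.data = x;
    for(unsigned i = 0; i < n; i++)
        output.reg[i] = f(input.reg[i]);
    return output.data;
}

int main()
{
    // With the identity functor the value survives the round trip.
    double d = 1.5;
    double r = apply_per_reg(d, [](uint32_t reg) { return reg; });
    std::printf("%f\n", r);
    return 0;
}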
......
......@@ -45,7 +45,10 @@ __device__ void dpp_reduce(T& in, Op op)
in = op(in, out);
out = dpp_mov<dpp_row_shr(8), 0xf, 0xc>(in);
in = op(in, out);
#if __AMDGCN_WAVEFRONT_SIZE == 64
#if __AMDGCN_WAVEFRONT_SIZE == 32
out = dpp_swizzle<0x1e0>(in);
in = op(in, out);
#else
out = dpp_mov<dpp_row_bcast(15), 0xa>(in);
in = op(in, out);
out = dpp_mov<dpp_row_bcast(31), 0xc>(in);
......@@ -54,9 +57,11 @@ __device__ void dpp_reduce(T& in, Op op)
}
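On wave32 the row_bcast-based steps are not used, so the new code finishes the reduction with a ds_swizzle using pattern 0x1e0. Assuming the bit-mask offset encoding documented for ds_swizzle_b32 (offset[15] = 0: and_mask = offset[4:0], or_mask = offset[9:5], xor_mask = offset[14:10], source lane = ((lane & and_mask) | or_mask) ^ xor_mask within a 32-lane group), here is a small host-side decode of that pattern, purely for illustration:

#include <cstdio>

// Decode a ds_swizzle_b32 bit-mask offset and print the source lane for
// each of the 32 lanes in a group. Illustrative only, not MIGraphX code.
int main()
{
    const unsigned offset   = 0x1e0;
    const unsigned and_mask = offset & 0x1f;         // offset[4:0]
    const unsigned or_mask  = (offset >> 5) & 0x1f;  // offset[9:5]
    const unsigned xor_mask = (offset >> 10) & 0x1f; // offset[14:10]
    for(unsigned lane = 0; lane < 32; lane++)
    {
        const unsigned src = ((lane & and_mask) | or_mask) ^ xor_mask;
        std::printf("lane %2u reads lane %2u\n", lane, src);
    }
    return 0;
}

With 0x1e0 this decodes to and_mask = 0, or_mask = 0xf, xor_mask = 0, i.e. every lane reads lane 15 of its 32-lane group, so the lane holding the partial result for lanes 16-31 can fold in the lanes 0-15 partial produced by the row_shr steps above.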
#if defined(MIGRAPHX_USE_CLANG_TIDY) || defined(CPPCHECK)
// NOLINTNEXTLINE
#define MIGRAPHX_DPP_REDUCE_ASM(x, ins) x = 1
#define MIGRAPHX_DPP_REDUCE_ASM(x, ins, f) \
(void)f; \
x = 1
#elif __AMDGCN_WAVEFRONT_SIZE == 64
#define MIGRAPHX_DPP_REDUCE_ASM(x, ins) \
#define MIGRAPHX_DPP_REDUCE_ASM(x, ins, f) \
__asm__ volatile("s_nop 4\n" #ins " %0 %0 %0 row_shr:1\n" \
"s_nop 1\n" #ins " %0 %0 %0 row_shr:2\n" \
"s_nop 1\n" #ins " %0 %0 %0 row_shr:4 bank_mask:0xe\n" \
......@@ -65,29 +70,42 @@ __device__ void dpp_reduce(T& in, Op op)
"s_nop 1\n" #ins " %0 %0 %0 row_bcast:31 row_mask:0xc\n" \
"s_nop 1\n" \
: "=v"(x) \
: "0"(x))
: "0"(x)); \
(void)f
#else
#define MIGRAPHX_DPP_REDUCE_ASM(x, ins) \
#define MIGRAPHX_DPP_REDUCE_ASM(x, ins, f) \
__asm__ volatile("s_nop 4\n" #ins " %0 %0 %0 row_shr:1\n" \
"s_nop 1\n" #ins " %0 %0 %0 row_shr:2\n" \
"s_nop 1\n" #ins " %0 %0 %0 row_shr:4 bank_mask:0xe\n" \
"s_nop 1\n" #ins " %0 %0 %0 row_shr:8 bank_mask:0xc\n" \
"s_nop 1\n" \
"s_nop 1\n" \
: "=v"(x) \
: "0"(x))
: "0"(x)); \
auto y = dpp_swizzle<0x1e0>(x); \
x = f(x, y)
#endif
// NOLINTNEXTLINE
#define MIGRAPHX_DPP_REDUCE(op, prefix, sign) \
__device__ inline void dpp_reduce(double& x, op) { MIGRAPHX_DPP_REDUCE_ASM(x, prefix##_f64); } \
__device__ inline void dpp_reduce(float& x, op) { MIGRAPHX_DPP_REDUCE_ASM(x, prefix##_f32); } \
__device__ inline void dpp_reduce(half& x, op) { MIGRAPHX_DPP_REDUCE_ASM(x, prefix##_f16); } \
__device__ inline void dpp_reduce(int32_t& x, op) \
{ \
MIGRAPHX_DPP_REDUCE_ASM(x, prefix##sign##32); \
} \
__device__ inline void dpp_reduce(uint32_t& x, op) { MIGRAPHX_DPP_REDUCE_ASM(x, prefix##_u32); }
#define MIGRAPHX_DPP_REDUCE(op, prefix, sign) \
__device__ inline void dpp_reduce(double& x, op f) \
{ \
MIGRAPHX_DPP_REDUCE_ASM(x, prefix##_f64, f); \
} \
__device__ inline void dpp_reduce(float& x, op f) \
{ \
MIGRAPHX_DPP_REDUCE_ASM(x, prefix##_f32, f); \
} \
__device__ inline void dpp_reduce(half& x, op f) \
{ \
MIGRAPHX_DPP_REDUCE_ASM(x, prefix##_f16, f); \
} \
__device__ inline void dpp_reduce(int32_t& x, op f) \
{ \
MIGRAPHX_DPP_REDUCE_ASM(x, prefix##sign##32, f); \
} \
__device__ inline void dpp_reduce(uint32_t& x, op f) \
{ \
MIGRAPHX_DPP_REDUCE_ASM(x, prefix##_u32, f); \
}
// Note: when max and min are in int32_t, signed version of instruction needs to be used.
MIGRAPHX_DPP_REDUCE(op::sum, v_add, _u)
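Each overload generated by MIGRAPHX_DPP_REDUCE now takes the op functor by value as f and forwards it to MIGRAPHX_DPP_REDUCE_ASM, where the wave64 variant discards it and the wave32 variant uses it to combine the swizzled value. Reconstructed by hand from the macros above (not copied from preprocessor output), the float overload of MIGRAPHX_DPP_REDUCE(op::sum, v_add, _u) expands to roughly:

// Approximate expansion of the float overload for (op::sum, v_add, _u).
// MIGRAPHX_DPP_REDUCE_ASM then expands further into the inline asm block,
// followed on wave32 by the dpp_swizzle<0x1e0> + f(x, y) merge step.
__device__ inline void dpp_reduce(float& x, op::sum f)
{
    MIGRAPHX_DPP_REDUCE_ASM(x, v_add_f32, f);
}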
......@@ -99,11 +117,7 @@ template <class Op, class T, class Index, class F>
__device__ auto block_reduce(index idx, Op op, T init, Index n, F f)
{
MIGRAPHX_ASSERT(idx.max_nlocal() == idx.nlocal());
#if __AMDGCN_WAVEFRONT_SIZE == 32
constexpr index_int lanes_per_thread = 16;
#else
constexpr index_int lanes_per_thread = 64;
#endif
constexpr index_int lanes_per_thread = __AMDGCN_WAVEFRONT_SIZE;
using type = decltype(index::invoke_loop(f, 0, _c<0>));
__shared__ type buffer[idx.max_nlocal() / lanes_per_thread];
type x = type(init);
......
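Because dpp_reduce now covers the full wavefront on wave32 (via the swizzle step) instead of only a 16-lane row, block_reduce can use one shared-memory slot per wavefront on both wave sizes. A small hedged arithmetic check, assuming a 256-thread block (max_nlocal == 256) purely for illustration:

#include <cstdio>

int main()
{
    // Illustrative block size; the real value comes from idx.max_nlocal().
    const unsigned max_nlocal = 256;
    // Old wave32 setting: lanes_per_thread = 16 -> 256 / 16 = 16 slots.
    // New setting: lanes_per_thread = __AMDGCN_WAVEFRONT_SIZE.
    const unsigned waves[] = {32, 64};
    for(unsigned wave : waves)
        std::printf("wavefront %2u -> %u shared buffer slots\n", wave, max_nlocal / wave);
    return 0;
}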
......@@ -111,6 +111,16 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
unsupported_fp8_ops.insert("dot");
unsupported_fp8_ops.insert("quant_dot");
}
// add all ops implemented as device kernels
unsupported_fp8_ops.insert("logsoftmax");
unsupported_fp8_ops.insert("nonzero");
unsupported_fp8_ops.insert("prefix_scan_sum");
unsupported_fp8_ops.insert("scatter_none");
unsupported_fp8_ops.insert("topk");
unsupported_fp8_ops.insert("rnn_var_sl_shift_output");
unsupported_fp8_ops.insert("multinomial");
unsupported_fp8_ops.insert("argmax");
unsupported_fp8_ops.insert("argmin");
// clang-format off
return
{
......
......@@ -48,5 +48,5 @@ struct gemm_2args_mm_8 : verify_program<gemm_2args_mm_8<DType>>
};
template struct gemm_2args_mm_8<migraphx::shape::float_type>;
// template struct gemm_2args_mm_8<migraphx::shape::half_type>;
// template struct gemm_2args_mm_8<migraphx::shape::half_type>; // fails with CK, issue #2514
template struct gemm_2args_mm_8<migraphx::shape::fp8e4m3fnuz_type>;
......@@ -51,5 +51,5 @@ struct gemm_add_broadcast2 : verify_program<gemm_add_broadcast2<DType>>
};
template struct gemm_add_broadcast2<migraphx::shape::float_type>;
// template struct gemm_add_broadcast2<migraphx::shape::half_type>;
// template struct gemm_add_broadcast2<migraphx::shape::half_type>; // fails with CK, issue #2514
template struct gemm_add_broadcast2<migraphx::shape::fp8e4m3fnuz_type>;
......@@ -29,16 +29,20 @@
#include <cassert>
struct test_contiguous : verify_program<test_contiguous>
template <migraphx::shape::type_t DType>
struct test_contiguous : verify_program<test_contiguous<DType>>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {4, 4, 4, 3}, {48, 4, 1, 16}};
migraphx::shape s{DType, {4, 4, 4, 3}, {48, 4, 1, 16}};
auto x = mm->add_parameter("x", s);
mm->add_instruction(migraphx::make_op("contiguous"), x);
assert(p.get_output_shapes().back().standard());
return p;
}
};
template struct test_contiguous<migraphx::shape::float_type>;
template struct test_contiguous<migraphx::shape::fp8e4m3fnuz_type>;
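The remaining test changes follow this same pattern: a concrete verify_program struct becomes a class template over migraphx::shape::type_t, the hard-coded float_type inside the shapes is replaced by the DType parameter, and explicit instantiations at the bottom select which data types are compiled (and thereby registered with the verify framework). A stripped-down, self-contained sketch of the idiom with placeholder names (dtype and test_case are hypothetical, not MIGraphX types):

#include <cstdio>

enum class dtype { float_type, half_type, fp8e4m3fnuz_type };

// Templating over the data type lets one test body cover every dtype;
// each explicit instantiation below compiles one concrete variant.
template <dtype DT>
struct test_case
{
    void create_program() const
    {
        // The real tests build a migraphx::shape with DType here.
        std::printf("building program for dtype %d\n", static_cast<int>(DT));
    }
};

template struct test_case<dtype::float_type>;
template struct test_case<dtype::half_type>;
template struct test_case<dtype::fp8e4m3fnuz_type>;

int main()
{
    test_case<dtype::fp8e4m3fnuz_type>{}.create_program();
    return 0;
}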
......@@ -50,3 +50,7 @@ template struct test_logsoftmax<1, migraphx::shape::half_type>;
template struct test_logsoftmax<0, migraphx::shape::half_type>;
template struct test_logsoftmax<2, migraphx::shape::half_type>;
template struct test_logsoftmax<3, migraphx::shape::half_type>;
template struct test_logsoftmax<0, migraphx::shape::fp8e4m3fnuz_type>;
template struct test_logsoftmax<1, migraphx::shape::fp8e4m3fnuz_type>;
template struct test_logsoftmax<2, migraphx::shape::fp8e4m3fnuz_type>;
template struct test_logsoftmax<3, migraphx::shape::fp8e4m3fnuz_type>;
......@@ -27,7 +27,8 @@
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_multinomial : verify_program<test_multinomial>
template <migraphx::shape::type_t DType>
struct test_multinomial : verify_program<test_multinomial<DType>>
{
migraphx::program create_program() const
{
......@@ -40,10 +41,10 @@ struct test_multinomial : verify_program<test_multinomial>
std::uniform_real_distribution<> dis(0.0, 1.0);
std::vector<float> rand_samples(batch_size * sample_size);
std::generate(rand_samples.begin(), rand_samples.end(), [&]() { return dis(gen); });
migraphx::shape rs{migraphx::shape::float_type, {batch_size, sample_size}};
migraphx::shape rs{DType, {batch_size, sample_size}};
auto rs_lit = mm->add_literal(migraphx::literal{rs, rand_samples});
migraphx::shape s{migraphx::shape::float_type, {batch_size, 5}};
migraphx::shape s{DType, {batch_size, 5}};
auto input = mm->add_parameter("input", s);
auto maxes = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {1}}}), input);
......@@ -58,3 +59,8 @@ struct test_multinomial : verify_program<test_multinomial>
return p;
}
};
template struct test_multinomial<migraphx::shape::float_type>;
template struct test_multinomial<migraphx::shape::half_type>;
// This fails; need to figure out why.
// template struct test_multinomial<migraphx::shape::fp8e4m3fnuz_type>;
......@@ -27,13 +27,14 @@
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_nonzero : verify_program<test_nonzero>
template <migraphx::shape::type_t DType>
struct test_nonzero : verify_program<test_nonzero<DType>>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {2, 3, 4, 5}};
migraphx::shape s{DType, {2, 3, 4, 5}};
auto x = mm->add_parameter("data", s);
auto r = mm->add_instruction(migraphx::make_op("nonzero"), x);
mm->add_return({r});
......@@ -41,3 +42,7 @@ struct test_nonzero : verify_program<test_nonzero>
return p;
}
};
template struct test_nonzero<migraphx::shape::float_type>;
template struct test_nonzero<migraphx::shape::half_type>;
template struct test_nonzero<migraphx::shape::fp8e4m3fnuz_type>;
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_nonzero_half : verify_program<test_nonzero_half>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::half_type, {3, 4, 3, 5}};
auto x = mm->add_parameter("data", s);
auto r = mm->add_instruction(migraphx::make_op("nonzero"), x);
mm->add_return({r});
return p;
}
};
......@@ -23,16 +23,18 @@
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/shape.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_prefix_scan_sum_2d_small : verify_program<test_prefix_scan_sum_2d_small>
template <migraphx::shape::type_t DType>
struct test_prefix_scan_sum_2d_small : verify_program<test_prefix_scan_sum_2d_small<DType>>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {1}};
migraphx::shape s{DType, {1}};
auto x = mm->add_parameter("x", s);
auto xb =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {3, 3}}}), x);
......@@ -42,16 +44,25 @@ struct test_prefix_scan_sum_2d_small : verify_program<test_prefix_scan_sum_2d_sm
}
};
struct test_prefix_scan_sum_2d_large : verify_program<test_prefix_scan_sum_2d_large>
template struct test_prefix_scan_sum_2d_small<migraphx::shape::float_type>;
template struct test_prefix_scan_sum_2d_small<migraphx::shape::half_type>;
template struct test_prefix_scan_sum_2d_small<migraphx::shape::fp8e4m3fnuz_type>;
template <migraphx::shape::type_t DType>
struct test_prefix_scan_sum_2d_large : verify_program<test_prefix_scan_sum_2d_large<DType>>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {3, 1000}};
migraphx::shape s{DType, {3, 1000}};
auto x = mm->add_parameter("x", s);
mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", false}}), x);
return p;
}
};
template struct test_prefix_scan_sum_2d_large<migraphx::shape::float_type>;
template struct test_prefix_scan_sum_2d_large<migraphx::shape::half_type>;
template struct test_prefix_scan_sum_2d_large<migraphx::shape::fp8e4m3fnuz_type>;
......@@ -46,11 +46,13 @@ struct test_reduce_op_small : verify_program<test_reduce_op_small<Op, Axis, T>>
};
template struct test_reduce_op_small<migraphx::op::reduce_sum, 1, migraphx::shape::float_type>;
template struct test_reduce_op_small<migraphx::op::reduce_sum, 3, migraphx::shape::float_type>;
template struct test_reduce_op_small<migraphx::op::reduce_sum, 2, migraphx::shape::int32_type>;
template struct test_reduce_op_small<migraphx::op::reduce_mean, 2, migraphx::shape::int32_type>;
template struct test_reduce_op_small<migraphx::op::reduce_max, 2, migraphx::shape::int32_type>;
template struct test_reduce_op_small<migraphx::op::reduce_min, 2, migraphx::shape::int32_type>;
template struct test_reduce_op_small<migraphx::op::reduce_sum, 3, migraphx::shape::half_type>;
template struct test_reduce_op_small<migraphx::op::reduce_sum, 2, migraphx::shape::half_type>;
template struct test_reduce_op_small<migraphx::op::reduce_mean, 2, migraphx::shape::half_type>;
template struct test_reduce_op_small<migraphx::op::reduce_max, 2, migraphx::shape::half_type>;
......@@ -60,6 +62,9 @@ template struct test_reduce_op_small<migraphx::op::reduce_prod, -2, migraphx::sh
template struct test_reduce_op_small<migraphx::op::reduce_sum,
2,
migraphx::shape::fp8e4m3fnuz_type>;
template struct test_reduce_op_small<migraphx::op::reduce_sum,
3,
migraphx::shape::fp8e4m3fnuz_type>;
template struct test_reduce_op_small<migraphx::op::reduce_mean,
2,
migraphx::shape::fp8e4m3fnuz_type>;
......
......@@ -26,16 +26,21 @@
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_reverse : verify_program<test_reverse>
template <migraphx::shape::type_t DType>
struct test_reverse : verify_program<test_reverse<DType>>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {4, 16}};
migraphx::shape s{DType, {4, 16}};
auto a0 = mm->add_parameter("data", s);
std::vector<int64_t> axis = {0};
mm->add_instruction(migraphx::make_op("reverse", {{"axes", axis}}), a0);
return p;
}
};
template struct test_reverse<migraphx::shape::float_type>;
template struct test_reverse<migraphx::shape::half_type>;
template struct test_reverse<migraphx::shape::fp8e4m3fnuz_type>;
......@@ -31,7 +31,8 @@
#include <migraphx/op/common.hpp>
struct test_rnn_sql_1 : verify_program<test_rnn_sql_1>
template <migraphx::shape::type_t DType>
struct test_rnn_sql_1 : verify_program<test_rnn_sql_1<DType>>
{
migraphx::program create_program() const
{
......@@ -44,12 +45,12 @@ struct test_rnn_sql_1 : verify_program<test_rnn_sql_1>
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
migraphx::shape in_shape{DType, {seq_len, batch_size, input_size}};
migraphx::shape w_shape{DType, {num_dirct, hidden_size, input_size}};
migraphx::shape r_shape{DType, {num_dirct, hidden_size, hidden_size}};
migraphx::shape b_shape{DType, {num_dirct, 2 * hidden_size}};
migraphx::shape s_shape{migraphx::shape::int32_type, {batch_size}};
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
migraphx::shape ih_shape{DType, {num_dirct, batch_size, hidden_size}};
auto seq = mm->add_parameter("seq", in_shape);
auto w = mm->add_parameter("w", w_shape);
......@@ -81,3 +82,7 @@ struct test_rnn_sql_1 : verify_program<test_rnn_sql_1>
}
std::string section() const { return "rnn"; }
};
template struct test_rnn_sql_1<migraphx::shape::float_type>;
template struct test_rnn_sql_1<migraphx::shape::half_type>;
template struct test_rnn_sql_1<migraphx::shape::fp8e4m3fnuz_type>;
......@@ -27,16 +27,17 @@
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_scatter0 : verify_program<test_scatter0>
template <migraphx::shape::type_t DType>
struct test_scatter0 : verify_program<test_scatter0<DType>>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sd{migraphx::shape::float_type, {3, 3}};
migraphx::shape sd{DType, {3, 3}};
migraphx::shape si{migraphx::shape::int32_type, {2, 3}};
std::vector<int> vi = {1, 0, 2, 0, 2, 1};
migraphx::shape su{migraphx::shape::float_type, {2, 3}};
migraphx::shape su{DType, {2, 3}};
auto pd = mm->add_parameter("data", sd);
auto li = mm->add_literal(migraphx::literal{si, vi});
......@@ -47,3 +48,7 @@ struct test_scatter0 : verify_program<test_scatter0>
return p;
}
};
template struct test_scatter0<migraphx::shape::float_type>;
template struct test_scatter0<migraphx::shape::half_type>;
template struct test_scatter0<migraphx::shape::fp8e4m3fnuz_type>;
......@@ -27,13 +27,14 @@
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_topk_0 : verify_program<test_topk_0>
template <migraphx::shape::type_t DType>
struct test_topk_0 : verify_program<test_topk_0<DType>>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {3, 5}};
migraphx::shape s{DType, {3, 5}};
auto data = mm->add_parameter("data", s);
auto r = mm->add_instruction(
migraphx::make_op("topk", {{"axis", 1}, {"k", 4}, {"largest", 1}}), data);
......@@ -43,3 +44,7 @@ struct test_topk_0 : verify_program<test_topk_0>
return p;
}
};
template struct test_topk_0<migraphx::shape::float_type>;
template struct test_topk_0<migraphx::shape::half_type>;
template struct test_topk_0<migraphx::shape::fp8e4m3fnuz_type>;