Project: gaoqiong/composable_kernel_ROCM

Commit 8d2f2f8c, authored Dec 05, 2024 by coderfeli
Merge branch 'develop' into ck_tile/gemm_debug_alias

Parents: 99c8123f, 4cb3d7d7
Changes: 91
Showing 20 changed files with 1723 additions and 134 deletions (+1723, −134).
Changed files:

example/ck_tile/17_grouped_gemm/grouped_gemm.hpp                                                (+53, −0)
example/ck_tile/17_grouped_gemm/run_grouped_gemm_example.inc                                    (+191, −0)
example/ck_tile/17_grouped_gemm/utils.hpp                                                       (+38, −0)
example/ck_tile/CMakeLists.txt                                                                  (+1, −0)
include/ck/README.md                                                                            (+19, −0)
include/ck/library/utility/host_tensor.hpp                                                      (+1, −1)
include/ck/library/utility/host_tensor_generator.hpp                                            (+25, −6)
include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_bwd_weight_two_stage_xdl_cshuffle.hpp  (+1, −2)
include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_multiple_d_dl.hpp               (+6, −6)
include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_multiple_d_splitk_xdl_cshuffle_two_stage.hpp  (+5, −5)
include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_multiple_d_xdl_cshuffle_tile_loop.hpp  (+4, −4)
include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_xdl.hpp                         (+6, −6)
include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_xdl_splitk_cshuffle.hpp         (+5, −5)
include/ck/tensor_operation/gpu/grid/gridwise_gemm_xdl_cshuffle_v3_multi_d_ab_scale.hpp         (+1, −2)
include/ck/utility/amd_buffer_addressing.hpp                                                    (+8, −6)
include/ck/utility/amd_ck_fp8.hpp                                                               (+988, −0)
include/ck/utility/amd_xdlops.hpp                                                               (+1, −1)
include/ck/utility/data_type.hpp                                                                (+360, −83)
include/ck/utility/math_v2.hpp                                                                  (+2, −2)
include/ck/utility/random_gen.hpp                                                               (+8, −5)
example/ck_tile/17_grouped_gemm/grouped_gemm.hpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <string>

#include "ck_tile/core.hpp"
#include "ck_tile/host/kernel_launch.hpp"
#include "ck_tile/ops/gemm/kernel/grouped_gemm_kernel.hpp"

template <typename DataType>
struct GemmBasicTypeConfig;

template <>
struct GemmBasicTypeConfig<ck_tile::half_t>
{
    using ADataType   = ck_tile::half_t;
    using BDataType   = ck_tile::half_t;
    using CDataType   = ck_tile::half_t;
    using AccDataType = float;
};

using Types = GemmBasicTypeConfig<ck_tile::half_t>;

// Specific type aliases for easy access
using ADataType   = Types::ADataType;
using BDataType   = Types::BDataType;
using AccDataType = Types::AccDataType;
using CDataType   = Types::CDataType;

using grouped_gemm_kargs = ck_tile::GroupedGemmHostArgs;

auto create_args(int argc, char* argv[])
{
    ck_tile::ArgParser arg_parser;
    arg_parser.insert("a_layout", "R", "A tensor data layout - Row by default")
        .insert("b_layout", "R", "B tensor data layout - Row by default")
        .insert("c_layout", "R", "C tensor data layout - Row by default")
        .insert("validate", "1", "0. No validation, 1. Validation on CPU")
        .insert("warmup", "10", "number of iterations before benchmark the kernel")
        .insert("repeat", "100", "number of iterations to benchmark the kernel")
        .insert("group_count", "16", "group count");

    bool result = arg_parser.parse(argc, argv);
    return std::make_tuple(result, arg_parser);
}

std::size_t GetWorkspaceSize(const std::vector<grouped_gemm_kargs>& gemm_descs);

float grouped_gemm_calc(const std::vector<grouped_gemm_kargs>& gemm_descs,
                        const ck_tile::stream_config& s,
                        void* p_workspace_);
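The option strings registered in create_args() above are the example's whole command-line surface. Below is a minimal, purely illustrative sketch of a driver that consumes this parser; the real driver is in run_grouped_gemm_example.inc further down, and only the flag names and ArgParser calls here are taken from this header.

    // Illustrative sketch only: flag names and defaults come from create_args() above.
    int main(int argc, char* argv[])
    {
        auto [parsed_ok, arg_parser] = create_args(argc, argv);
        if(!parsed_ok)
            return -1;

        const int group_count = arg_parser.get_int("group_count"); // "16" by default
        const int warmup      = arg_parser.get_int("warmup");      // iterations before timing
        const int repeat      = arg_parser.get_int("repeat");      // timed iterations
        // ... build a std::vector<grouped_gemm_kargs> of size group_count and pass it,
        // together with a ck_tile::stream_config and a workspace buffer, to grouped_gemm_calc().
        return 0;
    }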
example/ck_tile/17_grouped_gemm/run_grouped_gemm_example.inc (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

template <typename ALayout, typename BLayout, typename CLayout>
float invoke_gemm(int n_warmup,
                  int n_repeat,
                  int group_count,
                  const std::vector<grouped_gemm_kargs>& args)
{
    ck_tile::DeviceMem gemm_workspace;
    gemm_workspace.Realloc(GetWorkspaceSize(args));

    float ave_time = grouped_gemm<ALayout, BLayout, CLayout>(
        args,
        ck_tile::stream_config{nullptr, true, 1, n_warmup, n_repeat},
        gemm_workspace.GetDeviceBuffer());

    std::string op_name{"Grouped Gemm"};

    std::size_t flop = 0, num_btype = 0;
    for(int j = 0; j < group_count; ++j)
    {
        flop += std::size_t(2) * args[j].M * args[j].N * args[j].K;

        num_btype += sizeof(ADataType) * args[j].M * args[j].K +
                     sizeof(BDataType) * args[j].K * args[j].N +
                     sizeof(CDataType) * args[j].M * args[j].N;
    }

    float tflops     = static_cast<float>(flop) / 1.E9 / ave_time;
    float gb_per_sec = num_btype / 1.E6 / ave_time;

    std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops << " TFlops, "
              << gb_per_sec << " GB/s, " << op_name << std::endl;

    return ave_time;
}

template <typename ALayout, typename BLayout, typename CLayout>
int run_grouped_gemm_example_with_layouts(int argc,
                                          char* argv[],
                                          const ALayout a_layout                  = ALayout{},
                                          const BLayout b_layout                  = BLayout{},
                                          [[maybe_unused]] const CLayout c_layout = CLayout{})
{
    auto [result, arg_parser] = create_args(argc, argv);

    if(!result)
    {
        return -1;
    };

    const int group_count = arg_parser.get_int("group_count");
    const int repeat      = arg_parser.get_int("repeat");
    const int warmup      = arg_parser.get_int("warmup");

    std::vector<ck_tile::index_t> Ms;
    std::vector<ck_tile::index_t> Ns;
    std::vector<ck_tile::index_t> Ks;
    std::vector<ck_tile::index_t> stride_As;
    std::vector<ck_tile::index_t> stride_Bs;
    std::vector<ck_tile::index_t> stride_Cs;

    for(int i = 0; i < group_count; i++)
    {
        Ms.push_back(256 + 256 * i);
        Ns.push_back(128 + 128 * i);
        Ks.push_back(128 + 64 * i);

        stride_As.push_back(Ks[i]);
        stride_Bs.push_back(Ks[i]);
        stride_Cs.push_back(Ns[i]);
    }

    std::vector<ck_tile::HostTensor<ADataType>> a_m_k_tensors;
    std::vector<ck_tile::HostTensor<BDataType>> b_k_n_tensors;
    std::vector<ck_tile::HostTensor<CDataType>> c_m_n_tensors;

    a_m_k_tensors.reserve(group_count);
    b_k_n_tensors.reserve(group_count);
    c_m_n_tensors.reserve(group_count);

    std::vector<std::unique_ptr<ck_tile::DeviceMem>> a_m_k_dev_buf;
    std::vector<std::unique_ptr<ck_tile::DeviceMem>> b_k_n_dev_buf;
    std::vector<std::unique_ptr<ck_tile::DeviceMem>> c_m_n_dev_buf;

    a_m_k_dev_buf.reserve(group_count);
    b_k_n_dev_buf.reserve(group_count);
    c_m_n_dev_buf.reserve(group_count);

    std::vector<grouped_gemm_kargs> gemm_descs;
    gemm_descs.reserve(group_count);

    for(int i = 0; i < group_count; ++i)
    {
        const ck_tile::index_t M = Ms[i];
        const ck_tile::index_t N = Ns[i];
        const ck_tile::index_t K = Ks[i];

        stride_As[i] = f_get_default_stride(M, N, stride_As[i], a_layout);
        stride_Bs[i] = f_get_default_stride(K, N, stride_Bs[i], b_layout);
        stride_Cs[i] = f_get_default_stride(M, N, stride_Cs[i], CLayout{});

        a_m_k_tensors.push_back(ck_tile::HostTensor<ADataType>(
            f_host_tensor_descriptor(M, K, stride_As[i], a_layout)));
        b_k_n_tensors.push_back(ck_tile::HostTensor<BDataType>(
            f_host_tensor_descriptor(K, N, stride_Bs[i], b_layout)));
        c_m_n_tensors.push_back(ck_tile::HostTensor<CDataType>(
            f_host_tensor_descriptor(M, N, stride_Cs[i], CLayout{})));

        std::cout << "gemm[" << i << "]"
                  << " a_m_k: " << a_m_k_tensors[i].mDesc
                  << " b_k_n: " << b_k_n_tensors[i].mDesc
                  << " c_m_n: " << c_m_n_tensors[i].mDesc << std::endl;

        ck_tile::FillUniformDistribution<ADataType>{-5.f, 5.f}(a_m_k_tensors[i]);
        ck_tile::FillUniformDistribution<BDataType>{-5.f, 5.f}(b_k_n_tensors[i]);

        a_m_k_dev_buf.push_back(std::make_unique<ck_tile::DeviceMem>(
            a_m_k_tensors[i].get_element_space_size_in_bytes()));
        b_k_n_dev_buf.push_back(std::make_unique<ck_tile::DeviceMem>(
            b_k_n_tensors[i].get_element_space_size_in_bytes()));
        c_m_n_dev_buf.push_back(std::make_unique<ck_tile::DeviceMem>(
            c_m_n_tensors[i].get_element_space_size_in_bytes()));

        a_m_k_dev_buf[i]->ToDevice(a_m_k_tensors[i].data());
        b_k_n_dev_buf[i]->ToDevice(b_k_n_tensors[i].data());
        c_m_n_dev_buf[i]->SetZero();
        c_m_n_tensors[i].SetZero();

        const void* p_a = a_m_k_dev_buf[i]->GetDeviceBuffer();
        const void* p_b = b_k_n_dev_buf[i]->GetDeviceBuffer();
        void* p_c       = c_m_n_dev_buf[i]->GetDeviceBuffer();

        gemm_descs.push_back(
            {p_a, p_b, p_c, M, N, K, stride_As[i], stride_Bs[i], stride_Cs[i]});
    }

    invoke_gemm<ALayout, BLayout, CLayout>(warmup, repeat, group_count, gemm_descs);

    for(int i = 0; i < group_count; i++)
    {
        c_m_n_dev_buf[i]->FromDevice(c_m_n_tensors[i].data());
    }

    bool pass{true};

    if(arg_parser.get_int("validate"))
    {
        for(int i = 0; i < group_count; ++i)
        {
            ck_tile::HostTensor<CDataType> c_m_n_host_ref(
                f_host_tensor_descriptor(Ms[i], Ns[i], stride_Cs[i], CLayout{}));
            c_m_n_host_ref.SetZero();

            ck_tile::reference_gemm<ADataType, BDataType, AccDataType, CDataType>(
                a_m_k_tensors[i], b_k_n_tensors[i], c_m_n_host_ref);

            pass &= ck_tile::check_err(c_m_n_tensors[i], c_m_n_host_ref);
        }
        std::cout << "The CPU verification result is: " << (pass ? "correct" : "fail")
                  << std::endl;
    }

    return pass;
}

int run_grouped_gemm_example(int argc, char* argv[])
{
    auto [result, arg_parser] = create_args(argc, argv);
    if(!result)
    {
        return -1;
    }

    const std::string a_layout = arg_parser.get_str("a_layout");
    const std::string b_layout = arg_parser.get_str("b_layout");

    using Row = ck_tile::tensor_layout::gemm::RowMajor;
    using Col = ck_tile::tensor_layout::gemm::ColumnMajor;

    if(a_layout == "R" && b_layout == "C")
    {
        return run_grouped_gemm_example_with_layouts(argc, argv, Row{}, Col{}, Row{});
    }
    else if(a_layout == "R" && b_layout == "R")
    {
        return run_grouped_gemm_example_with_layouts(argc, argv, Row{}, Row{}, Row{});
    }
    else
    {
        throw std::runtime_error("Unsupported data layout configuration for A,B and C tensors!");
    }
}
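The Perf line printed by invoke_gemm() follows the usual GEMM accounting, with ave_time in milliseconds: flop sums 2*M*N*K over the groups and num_btype sums the A, B and C footprints. A quick worked check with assumed sizes (one fp16 group with M = N = K = 1024; the numbers are illustrative, not the example defaults):

    flop       = 2 * 1024 * 1024 * 1024           = 2,147,483,648
    num_btype  = 2*(M*K) + 2*(K*N) + 2*(M*N)      = 6,291,456 bytes
    tflops     = flop / 1e9 / ave_time_ms         ≈ 2.15 at ave_time = 1 ms
    gb_per_sec = num_btype / 1e6 / ave_time_ms    ≈ 6.29 at ave_time = 1 ms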
example/ck_tile/17_grouped_gemm/utils.hpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

template <typename TLayout>
constexpr auto
f_host_tensor_descriptor(std::size_t row, std::size_t col, std::size_t stride, TLayout layout)
{
    using namespace ck_tile::literals;

    if constexpr(std::is_same_v<decltype(layout), ck_tile::tensor_layout::gemm::RowMajor>)
    {
        return ck_tile::HostTensorDescriptor({row, col}, {stride, 1_uz});
    }
    else
    {
        return ck_tile::HostTensorDescriptor({row, col}, {1_uz, stride});
    }
}

template <typename TLayout>
constexpr auto
f_get_default_stride(std::size_t row, std::size_t col, std::size_t stride, TLayout layout)
{
    if(stride == 0)
    {
        if constexpr(std::is_same_v<decltype(layout), ck_tile::tensor_layout::gemm::RowMajor>)
        {
            return col;
        }
        else
        {
            return row;
        }
    }
    else
        return stride;
}
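A short sketch of how the two helpers above compose; the sizes below are illustrative, while the helpers and the layout tag are the ones defined and used in this example:

    // With stride == 0, f_get_default_stride falls back to the packed default,
    // i.e. the column count for RowMajor (and the row count for ColumnMajor).
    using Row = ck_tile::tensor_layout::gemm::RowMajor;
    const std::size_t stride = f_get_default_stride(4, 8, 0, Row{}); // -> 8
    const auto desc = f_host_tensor_descriptor(4, 8, stride, Row{}); // lengths {4, 8}, strides {8, 1}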
example/ck_tile/CMakeLists.txt

@@ -16,3 +16,4 @@ add_subdirectory(13_moe_sorting)
 add_subdirectory(14_moe_smoothquant)
 add_subdirectory(15_fused_moe)
 add_subdirectory(16_batched_gemm)
+add_subdirectory(17_grouped_gemm)
include/ck/README.md (new file, mode 100644)

[Back to the main page](../../README.md)

# Composable Kernel supported operations

## Supported device operations

* [Average pooling]()
* [Batched contraction]()
* [Batched gemm]()
* [Batchnorm]()
* [CGEMM]()
* [Contraction]()
* [Convolution]()
* [Image to Column and Column to Image]()
* [Elementwise]()
* [GEMM]()
* [Max pooling]()
* [Reduce]()
* [Normalization]()
* [Permute]()
* [Put]()
* [Softmax]()
include/ck/library/utility/host_tensor.hpp

@@ -326,7 +326,7 @@ struct Tensor
     std::size_t GetElementSpaceSizeInBytes() const { return sizeof(T) * GetElementSpaceSize(); }

-    void SetZero() { ck::ranges::fill<T>(mData, 0); }
+    void SetZero() { ck::ranges::fill<T>(mData, T{0}); }

     template <typename F>
     void ForEach_impl(F&& f, std::vector<size_t>& idx, size_t rank)
include/ck/library/utility/host_tensor_generator.hpp

 // SPDX-License-Identifier: MIT
-// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

 #pragma once

@@ -37,7 +37,7 @@ struct GeneratorTensor_1<ck::half_t>
     float value = 1.0;

     template <typename... Is>
-    ck::bhalf_t operator()(Is...)
+    ck::half_t operator()(Is...)
     {
         return ck::type_convert<ck::half_t>(value);
     }

@@ -62,7 +62,7 @@ struct GeneratorTensor_1<ck::f8_t>
     float value = 1.0;

     template <typename... Is>
-    ck::bhalf_t operator()(Is...)
+    ck::f8_t operator()(Is...)
    {
         return ck::type_convert<ck::f8_t>(value);
     }

@@ -256,14 +256,33 @@ struct GeneratorTensor_Checkboard
     }
 };

-template <ck::index_t Dim>
+/**
+ * @brief Is used to generate sequential values based on the specified dimension.
+ *
+ * @tparam T The type of the tensor values.
+ * @tparam Dim The specific dimension used for generation.
+ *
+ * GeneratorTensor_Sequential<1>{} will generate the following values for a 3x3 tensor:
+ *
+ *  0 1 2
+ *  0 1 2
+ *  0 1 2
+ *
+ * Essentially, the values generated are logical coordinates of the generated element that
+ * correspond to dimension Dim. E.g. for 2-dimensional tensor and Dim=1, the values are the column
+ * indices.
+ */
+template <typename T, ck::index_t Dim>
 struct GeneratorTensor_Sequential
 {
     template <typename... Ts>
-    float operator()(Ts... Xs) const
+    T operator()(Ts... Xs) const
     {
         std::array<ck::index_t, sizeof...(Ts)> dims = {{static_cast<ck::index_t>(Xs)...}};
-        return dims[Dim];
+        float tmp = dims[Dim];
+        return ck::type_convert<T>(tmp);
     }
 };
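A small usage sketch of the generator documented above (not part of the diff; the coordinates and the element type are illustrative):

    // GeneratorTensor_Sequential<T, Dim> is a plain functor over logical coordinates:
    // it returns the coordinate along Dim, converted to T.
    GeneratorTensor_Sequential<float, 1> gen;
    float c = gen(0, 2); // 2.0f: the Dim = 1 (column) coordinate
    float r = gen(2, 0); // 0.0f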
include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_bwd_weight_two_stage_xdl_cshuffle.hpp

@@ -111,8 +111,7 @@ __global__ void
     [[maybe_unused]] const ComputePtrOffsetOfBatch compute_ptr_offset_of_batch,
     [[maybe_unused]] const index_t num_k_per_block)
 {
-#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx908__) || defined(__gfx90a__) || \
-    defined(__gfx940__) || defined(__gfx941__) || defined(__gfx942__))
+#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx9__))
     // offset base pointer for each work-group
     const index_t g_idx = __builtin_amdgcn_readfirstlane(blockIdx.z * NumGroupsToMerge);
     const index_t k_idx = __builtin_amdgcn_readfirstlane(blockIdx.y * num_k_per_block);
include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_multiple_d_dl.hpp

 #pragma once
 // SPDX-License-Identifier: MIT
-// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.
 #pragma once

@@ -603,11 +603,11 @@ struct DeviceGroupedGemmMultipleD_Dl : public DeviceGroupedGemm<ALayout,
         }

-        hipGetErrorString(hipMemcpyWithStream(arg.p_workspace_,
-                                              arg.gemm_desc_kernel_arg_.data(),
-                                              arg.gemm_desc_kernel_arg_.size() * sizeof(GemmKernelArg),
-                                              hipMemcpyHostToDevice,
-                                              stream_config.stream_id_));
+        hipGetErrorString(hipMemcpyAsync(arg.p_workspace_,
+                                         arg.gemm_desc_kernel_arg_.data(),
+                                         arg.gemm_desc_kernel_arg_.size() * sizeof(GemmKernelArg),
+                                         hipMemcpyHostToDevice,
+                                         stream_config.stream_id_));

         auto launch_kernel = [&](auto has_main_k_block_loop, auto has_double_tail_k_block_loop) {
include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_multiple_d_splitk_xdl_cshuffle_two_stage.hpp

@@ -761,11 +761,11 @@ struct DeviceGroupedGemmMultipleDSplitKXdlCShuffleTwoStage
             float time{0.f};

-            hip_check_error(hipMemcpyWithStream(dev_gemm_kargs,
-                                                arg.gemm_kernel_args_.data(),
-                                                arg.gemm_kernel_args_.size() * sizeof(GemmTransKernelArg),
-                                                hipMemcpyHostToDevice,
-                                                stream_config.stream_id_));
+            hip_check_error(hipMemcpyAsync(dev_gemm_kargs,
+                                           arg.gemm_kernel_args_.data(),
+                                           arg.gemm_kernel_args_.size() * sizeof(GemmTransKernelArg),
+                                           hipMemcpyHostToDevice,
+                                           stream_config.stream_id_));

             auto preprocess = [&]() {
                 hip_check_error(hipMemsetAsync(
include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_multiple_d_xdl_cshuffle_tile_loop.hpp

@@ -940,10 +940,10 @@ struct DeviceGroupedGemmMultipleDXdlCShuffleTileLoop
                                   const void* p_host_kernel_args) const
     {
         arg.p_dev_gemm_args_ = p_dev_kernel_args;
-        hip_check_error(hipMemcpy(p_dev_kernel_args,
-                                  p_host_kernel_args,
-                                  GetDeviceKernelArgSize(&arg),
-                                  hipMemcpyHostToDevice));
+        hip_check_error(hipMemcpyAsync(p_dev_kernel_args,
+                                       p_host_kernel_args,
+                                       GetDeviceKernelArgSize(&arg),
+                                       hipMemcpyHostToDevice));
     }

     virtual void SetDeviceKernelArgs(BaseArgument* p_arg,
include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_xdl.hpp

@@ -557,12 +557,12 @@ struct DeviceGroupedGemm_Xdl : public DeviceGroupedGemm<ALayout,
             }
         }

-        hipGetErrorString(hipMemcpyWithStream(arg.p_workspace_,
-                                              arg.gemm_desc_kernel_arg_.data(),
-                                              arg.gemm_desc_kernel_arg_.size() * sizeof(GemmBiasTransKernelArg),
-                                              hipMemcpyHostToDevice,
-                                              stream_config.stream_id_));
+        hipGetErrorString(hipMemcpyAsync(arg.p_workspace_,
+                                         arg.gemm_desc_kernel_arg_.data(),
+                                         arg.gemm_desc_kernel_arg_.size() * sizeof(GemmBiasTransKernelArg),
+                                         hipMemcpyHostToDevice,
+                                         stream_config.stream_id_));

         float ave_time = 0;
include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_xdl_splitk_cshuffle.hpp

@@ -421,11 +421,11 @@ struct DeviceGroupedGemmXdlSplitKCShuffle : public DeviceGroupedGemmSplitK<ALayo
         }

-        hip_check_error(hipMemcpyWithStream(arg.p_workspace_,
-                                            arg.gemm_kernel_args_.data(),
-                                            arg.gemm_kernel_args_.size() * sizeof(GemmTransKernelArg),
-                                            hipMemcpyHostToDevice,
-                                            stream_config.stream_id_));
+        hip_check_error(hipMemcpyAsync(arg.p_workspace_,
+                                       arg.gemm_kernel_args_.data(),
+                                       arg.gemm_kernel_args_.size() * sizeof(GemmTransKernelArg),
+                                       hipMemcpyHostToDevice,
+                                       stream_config.stream_id_));

         float ave_time = 0;
include/ck/tensor_operation/gpu/grid/gridwise_gemm_xdl_cshuffle_v3_multi_d_ab_scale.hpp

@@ -38,8 +38,7 @@ __global__ void
         // __attribute__((amdgpu_waves_per_eu(1, 1)))
        kernel_gemm_xdl_cshuffle_v3(typename GridwiseGemm::Argument karg)
 {
-#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx908__) || defined(__gfx90a__) || \
-    defined(__gfx940__) || defined(__gfx941__) || defined(__gfx942__))
+#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx9__))
     __shared__ char p_shared[GridwiseGemm::GetSharedMemoryNumberOfByte()];

     GridwiseGemm::template Run<HasMainKBlockLoop, CGlobalMemoryDataOperation, TailNum>(
include/ck/utility/amd_buffer_addressing.hpp

@@ -549,8 +549,10 @@ __device__ void amd_buffer_store_impl(const typename vector_type<T, N>::type src
             (is_same<T, half_t>::value && (N == 1 || N == 2 || N == 4 || N == 8 || N == 16)) ||
             (is_same<T, bhalf_t>::value && (N == 1 || N == 2 || N == 4 || N == 8 || N == 16)) ||
             (is_same<T, int32_t>::value && (N == 1 || N == 2 || N == 4 || N == 8 || N == 16)) ||
-            (is_same<T, f8_t>::value && (N == 1 || N == 2 || N == 4 || N == 8 || N == 16)) ||
-            (is_same<T, bf8_t>::value && (N == 1 || N == 2 || N == 4 || N == 8 || N == 16)) ||
+            (is_same<T, f8_fnuz_t>::value && (N == 1 || N == 2 || N == 4 || N == 8 || N == 16)) ||
+            (is_same<T, bf8_fnuz_t>::value && (N == 1 || N == 2 || N == 4 || N == 8 || N == 16)) ||
+            (is_same<T, fp8_storage_t>::value && (N == 1 || N == 2 || N == 4 || N == 8 || N == 16)) ||
            (is_same<T, int8_t>::value && (N == 1 || N == 2 || N == 4 || N == 8 || N == 16)),
        "wrong! not implemented");

@@ -843,8 +845,8 @@ amd_buffer_load_invalid_element_return_zero(const T* p_src_wave,
 #else
-    vector_t tmp = amd_buffer_load_impl<scalar_t, vector_size, coherence>(
-        src_wave_buffer_resource, src_thread_addr_offset, 0);
+    vector_t tmp{amd_buffer_load_impl<scalar_t, vector_size, coherence>(
+        src_wave_buffer_resource, src_thread_addr_offset, 0)};

     return src_thread_element_valid ? tmp : vector_t(0);
 #endif
 }

@@ -873,8 +875,8 @@ amd_buffer_load_invalid_element_return_customized_value(const T* p_src_wave,
     constexpr index_t vector_size = scalar_type<vector_t>::vector_size;

-    vector_t tmp = amd_buffer_load_impl<scalar_t, vector_size, coherence>(
-        src_wave_buffer_resource, src_thread_addr_offset, 0);
+    vector_t tmp{amd_buffer_load_impl<scalar_t, vector_size, coherence>(
+        src_wave_buffer_resource, src_thread_addr_offset, 0)};

     return src_thread_element_valid ? tmp : vector_t(customized_value);
 }
include/ck/utility/amd_ck_fp8.hpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck/utility/random_gen.hpp"
#include "ck/utility/type.hpp"

#ifdef CK_USE_FNUZ_FP8
#define CK_USE_FNUZ_FP8 1
#else
#define CK_USE_FNUZ_FP8 0
#endif

#ifdef CK_USE_OCP_FP8
#define CK_USE_OCP_FP8 1
#else
#define CK_USE_OCP_FP8 0
#endif

namespace ck {

using f8_fnuz_t  = _BitInt(8);
using bf8_fnuz_t = unsigned _BitInt(8);

#if(defined(__gfx940__) || defined(__gfx941__) || defined(__gfx942__) || defined(__gfx1200__) || \
    defined(__gfx1201__)) && \
    __HIP_DEVICE_COMPILE__
#define CK_FP8_CVT_FAST_PATH 1
#else
#define CK_FP8_CVT_FAST_PATH 0
#endif

#if(defined(__gfx1200__) || defined(__gfx1201__)) && __HIP_DEVICE_COMPILE__
#define CK_OCP_FP8_CVT_FAST_PATH 1
#else
#define CK_OCP_FP8_CVT_FAST_PATH 0
#endif

typedef unsigned char fp8_storage_t;

/**
 * \brief Describes FP8 interpretation
 */
enum class ck_fp8_interpretation_t
{
    CK_E4M3_OCP  = 0, // OCP E4M3
    CK_E5M2_OCP  = 1, // OCP E5M2
    CK_E4M3_FNUZ = 2, // FP8
    CK_E5M2_FNUZ = 3, // BF8
};

/**
 * \brief Describes saturation behavior
 */
enum class ck_saturation_t
{
    CK_NOSAT     = 0, // No saturation - replace with NaN or Inf
    CK_SATFINITE = 1, // Saturate to finite
};

namespace fp8_impl {

typedef fp8_storage_t fp8x2_storage_t __attribute__((ext_vector_type(2)));
typedef float float2_t __attribute__((ext_vector_type(2)));

__host__ __device__ static inline constexpr bool fnuz_f8_is_nan(f8_fnuz_t a)
{
    return static_cast<unsigned char>(a) == 0x80;
}
__host__ __device__ static inline constexpr bool fnuz_bf8_is_nan(bf8_fnuz_t a)
{
    return static_cast<unsigned char>(a) == 0x80;
}

__host__ __device__ static inline constexpr bool ocp_f8_is_nan(fp8_storage_t a)
{
    return (a & 0x7f) == 0x7f;
}
__host__ __device__ static inline constexpr bool ocp_bf8_is_nan(fp8_storage_t a)
{
    return (a & 0x7f) > 0x7c;
}

// The conversion function is from rocblas
// https://github.com/ROCm/rocBLAS/blob/9b7f692abe3c54b88d1e77e045a7db7f1f188b69/library/include/internal/rocblas_hip_f8_impl.h#L220
// This has been modified to handle double types as well
template <typename T, int wm, int we, bool is_fnuz, bool clip = false>
__host__ __device__ static inline T cast_from_f8(fp8_storage_t x)
{
    constexpr bool is_half   = __hip_internal::is_same<T, _Float16>::value;
    constexpr bool is_float  = __hip_internal::is_same<T, float>::value;
    constexpr bool is_double = __hip_internal::is_same<T, double>::value;
    static_assert(is_half || is_float || is_double, "only half, float and double are supported");

    constexpr int weo = is_half ? 5 : (is_float ? 8 : 11);
    constexpr int wmo = is_half ? 10 : (is_float ? 23 : 52);

    T fInf, fNegInf, fNaN, fNeg0, fmax, fmin;
    if constexpr(is_half)
    {
        const unsigned short int ihInf    = 0x7C00;
        const unsigned short int ihNegInf = 0xFC00;
        const unsigned short int ihNaN    = 0x7C01;
        const unsigned short int ihNeg0   = 0x8000;
        /* Max number in e5m2 57344*/
        const unsigned short int ifmax = 0x7B00;
        const unsigned short int ifmin = 0xFB00;

        fInf    = bit_cast<_Float16>(ihInf);
        fNegInf = bit_cast<_Float16>(ihNegInf);
        fNaN    = bit_cast<_Float16>(ihNaN);
        fNeg0   = bit_cast<_Float16>(ihNeg0);
        fmax    = bit_cast<_Float16>(ifmax);
        fmin    = bit_cast<_Float16>(ifmin);
    }
    else if constexpr(is_float)
    {
        const unsigned int ifInf    = 0x7F800000;
        const unsigned int ifNegInf = 0xFF800000;
        const unsigned int ifNaN    = 0x7F800001;
        const unsigned int ifNeg0   = 0x80000000;
        /* Max number in e5m2 57344*/
        const unsigned int ifmax = 0x47600000;
        const unsigned int ifmin = 0xC7600000;

        fInf    = bit_cast<float>(ifInf);
        fNegInf = bit_cast<float>(ifNegInf);
        fNaN    = bit_cast<float>(ifNaN);
        fNeg0   = bit_cast<float>(ifNeg0);
        fmax    = bit_cast<float>(ifmax);
        fmin    = bit_cast<float>(ifmin);
    }
    else if constexpr(is_double)
    {
        const unsigned long long ifInf    = 0x7FF0000000000000ull;
        const unsigned long long ifNegInf = 0xFFF0000000000000ull;
        const unsigned long long ifNaN    = 0x7FF0000000000001ull;
        const unsigned long long ifNeg0   = 0x8000000000000000ull;
        /* Max number in e5m2 57344*/
        const unsigned long long ifmax = 0x40EC000000000000ull;
        const unsigned long long ifmin = 0xC0EC000000000000ull;

        fInf    = bit_cast<double>(ifInf);
        fNegInf = bit_cast<double>(ifNegInf);
        fNaN    = bit_cast<double>(ifNaN);
        fNeg0   = bit_cast<double>(ifNeg0);
        fmax    = bit_cast<double>(ifmax);
        fmin    = bit_cast<double>(ifmin);
    }

    if(x == 0)
    {
        return 0;
    }

    unsigned long long sign     = x >> 7;
    unsigned long long mantissa = x & ((1 << wm) - 1);
    int exponent                = (x & 0x7F) >> wm;
    if constexpr(is_fnuz)
    {
        if(x == 0x80)
        {
            return fNaN;
        }
    }
    else
    {
        if(x == 0x80)
        {
            return fNeg0;
        }
        if constexpr(we == 4)
        { // e4m3
            if((x & 0x7F) == 0x7F)
            {
                return fNaN;
            }
        }
        else if((x & 0x7C) == 0x7C)
        { // e5m2
            if((x & 0x3) == 0)
            {
                if constexpr(clip)
                {
                    return sign ? fmin : fmax;
                }
                return sign ? fNegInf : fInf;
            }
            return fNaN;
        }
    }

    typename __hip_internal::conditional<
        sizeof(T) == 2,
        unsigned short int,
        typename __hip_internal::conditional<sizeof(T) == 4, unsigned int, unsigned long long>::
            type>::type retval;

    if constexpr(we == 5 && is_half && !is_fnuz)
    {
        retval = x << 8;
        return bit_cast<T>(retval);
    }

    const int exp_low_cutoff = (1 << (weo - 1)) - (1 << (we - 1)) + 1 - (is_fnuz ? 1 : 0);

    // subnormal input
    if(exponent == 0)
    {
#if defined(__HIP_DEVICE_COMPILE__) && __HIP_DEVICE_COMPILE__
        // guaranteed mantissa!=0 since cases 0x0 and 0x80 are handled above
        int sh = 1 + __clz(mantissa) - (32 - wm);
#else
        int sh = 1 + __builtin_clz(mantissa) - (32 - wm);
#endif
        mantissa <<= sh;
        exponent += 1 - sh;
        mantissa &= ((1ull << wm) - 1);
    }
    exponent += exp_low_cutoff - 1;
    mantissa <<= wmo - wm;

    // subnormal output (occurs when T=half, we=5, negative_zero_nan=true)
    if(exponent <= 0)
    {
        mantissa |= 1 << wmo;
        mantissa >>= 1 - exponent;
        exponent = 0;
    }

    if constexpr(sizeof(T) == 2)
        retval = (sign << 15) | (exponent << 10) | mantissa;
    else if constexpr(sizeof(T) == 4)
        retval = (sign << 31) | (exponent << 23) | mantissa;
    else
        retval = (sign << 63) | (static_cast<unsigned long long>(exponent) << 52) | mantissa;
    return bit_cast<T>(retval);
}

#if CK_FP8_CVT_FAST_PATH
template <ck_fp8_interpretation_t interpret>
static __device__ float cast_to_f32_from_f8(fp8_storage_t v)
{
    union
    {
        unsigned int i32val;
        unsigned char i8val[4];
    } val;
    val.i8val[0] = v;

    static_assert(interpret == ck_fp8_interpretation_t::CK_E4M3_FNUZ ||
                      interpret == ck_fp8_interpretation_t::CK_E4M3_OCP ||
                      interpret == ck_fp8_interpretation_t::CK_E5M2_FNUZ ||
                      interpret == ck_fp8_interpretation_t::CK_E5M2_OCP,
                  "Only FNUZ and OCP interpretations are supported");

    if constexpr((interpret == ck_fp8_interpretation_t::CK_E4M3_FNUZ) ||
                 (interpret == ck_fp8_interpretation_t::CK_E4M3_OCP))
    {
        return __builtin_amdgcn_cvt_f32_fp8(val.i32val, 0);
    }
    else
    {
        return __builtin_amdgcn_cvt_f32_bf8(val.i32val, 0);
    }
}

template <ck_fp8_interpretation_t interpret>
static __device__ float2_t cast_to_f32x2_from_f8x2(fp8x2_storage_t v)
{
    const auto i16val = bit_cast<uint16_t>(v);

    static_assert(interpret == ck_fp8_interpretation_t::CK_E4M3_FNUZ ||
                      interpret == ck_fp8_interpretation_t::CK_E4M3_OCP ||
                      interpret == ck_fp8_interpretation_t::CK_E5M2_FNUZ ||
                      interpret == ck_fp8_interpretation_t::CK_E5M2_OCP,
                  "Only FNUZ and OCP interpretations are supported");

    if constexpr((interpret == ck_fp8_interpretation_t::CK_E4M3_FNUZ) ||
                 (interpret == ck_fp8_interpretation_t::CK_E4M3_OCP))
    {
        return __builtin_amdgcn_cvt_pk_f32_fp8(i16val, false);
    }
    else
    {
        return __builtin_amdgcn_cvt_pk_f32_bf8(i16val, false);
    }
}
#endif

} // namespace fp8_impl

struct f8_ocp_t
{
    using data_type = fp8_storage_t;
    data_type data;

    static constexpr ck_saturation_t default_saturation = ck_saturation_t::CK_SATFINITE;
    static constexpr ck_fp8_interpretation_t default_interpret =
        ck_fp8_interpretation_t::CK_E4M3_OCP;

    static constexpr unsigned int we = 4; // exponent width
    static constexpr unsigned int wm = 3; // mantissa width

    __host__ __device__ constexpr bool operator==(const f8_ocp_t& other) const
    {
        return (data == other.data) && (fp8_impl::ocp_f8_is_nan(data) == false); // NaN != NaN
    }

#if CK_USE_OCP_FP8
    __host__ __device__ explicit operator float() const
#else
    __host__ explicit operator float() const
#endif
    {
#if CK_OCP_FP8_CVT_FAST_PATH
        return fp8_impl::cast_to_f32_from_f8<default_interpret>(this->data);
#else
        return fp8_impl::cast_from_f8<float, wm, we, false>(
            this->data); // XXX: clip==false must be consistent with operator _Float16
#endif
    }

#if CK_USE_OCP_FP8
    __host__ __device__ explicit operator _Float16() const
#else
    __host__ explicit operator _Float16() const
#endif
    {
#if CK_OCP_FP8_CVT_FAST_PATH
        return static_cast<_Float16>(fp8_impl::cast_to_f32_from_f8<default_interpret>(this->data));
#else
        return fp8_impl::cast_from_f8<_Float16, wm, we, false>(
            this->data); // XXX: clip==false must be consistent with operator float
#endif
    }
};

struct bf8_ocp_t
{
    using data_type = fp8_storage_t;
    data_type data;

    static constexpr ck_saturation_t default_saturation = ck_saturation_t::CK_SATFINITE;
    static constexpr ck_fp8_interpretation_t default_interpret =
        ck_fp8_interpretation_t::CK_E5M2_OCP;

    static constexpr unsigned int we = 5; // exponent width
    static constexpr unsigned int wm = 2; // mantissa width

    __host__ __device__ constexpr bool operator==(const bf8_ocp_t& other) const
    {
        return (data == other.data) && (fp8_impl::ocp_bf8_is_nan(data) == false); // NaN != NaN
    }

#if CK_USE_OCP_FP8
    __host__ __device__ explicit operator float() const
#else
    __host__ explicit operator float() const
#endif
    {
#if defined(__gfx1200__) || defined(__gfx1201__)
        return fp8_impl::cast_to_f32_from_f8<default_interpret>(this->data);
#else
        return fp8_impl::cast_from_f8<float, wm, we, false>(
            this->data); // XXX: clip==false must be consistent with operator _Float16
#endif
    }

#if CK_USE_OCP_FP8
    __host__ __device__ explicit operator _Float16() const
#else
    __host__ explicit operator _Float16() const
#endif
    {
#if defined(__gfx1200__) || defined(__gfx1201__)
        return static_cast<_Float16>(fp8_impl::cast_to_f32_from_f8<default_interpret>(this->data));
#else
        return fp8_impl::cast_from_f8<_Float16, wm, we, false>(
            this->data); // XXX: clip==false must be consistent with operator float
#endif
    }
};

template <typename T>
__host__ __device__ static inline constexpr bool fp8_is_nan(T);

template <>
__host__ __device__ inline constexpr bool fp8_is_nan(f8_ocp_t a)
{
    return fp8_impl::ocp_f8_is_nan(a.data);
}
template <>
__host__ __device__ inline constexpr bool fp8_is_nan(bf8_ocp_t a)
{
    return fp8_impl::ocp_bf8_is_nan(a.data);
}
template <>
__host__ __device__ inline constexpr bool fp8_is_nan(f8_fnuz_t a)
{
    return fp8_impl::fnuz_f8_is_nan(a);
}
template <>
__host__ __device__ inline constexpr bool fp8_is_nan(bf8_fnuz_t a)
{
    return fp8_impl::fnuz_bf8_is_nan(a);
}

template <typename T,
          std::enable_if_t<std::is_same_v<T, bf8_ocp_t> || std::is_same_v<T, f8_ocp_t> ||
                               std::is_same_v<T, bf8_fnuz_t> || std::is_same_v<T, f8_fnuz_t>,
                           bool> = true>
__host__ __device__ static inline constexpr bool fp8_is_inf(T)
{
    return false;
}
template <>
__host__ __device__ inline constexpr bool fp8_is_inf(bf8_ocp_t a)
{
    return (a.data & 0x7f) == 0x7c;
}

namespace fp8_impl {

// Assertions to check for supported conversion types
#define __assert_ocp_support(interp)                                                  \
    {                                                                                 \
        if(interp != ck_fp8_interpretation_t::CK_E4M3_OCP &&                          \
           interp != ck_fp8_interpretation_t::CK_E5M2_OCP)                            \
        {                                                                             \
            __hip_assert(false && "type is unsupported by current target device");    \
        }                                                                             \
    }
#define __assert_fnuz_support(interp)                                                 \
    {                                                                                 \
        if(interp != ck_fp8_interpretation_t::CK_E4M3_FNUZ &&                         \
           interp != ck_fp8_interpretation_t::CK_E5M2_FNUZ)                           \
        {                                                                             \
            __hip_assert(false && "type is unsupported by current target device");    \
        }                                                                             \
    }

__host__ __device__ static inline void
__is_interpret_supported([[maybe_unused]] ck_fp8_interpretation_t interp)
{
#if defined(__HIP_DEVICE_COMPILE__) && __HIP_DEVICE_COMPILE__
#if CK_USE_OCP_FP8
    __assert_ocp_support(interp);
#endif
#if CK_USE_FNUZ_FP8
    __assert_fnuz_support(interp);
#endif
#endif
}

#if CK_FP8_CVT_FAST_PATH
// The conversion function is from rocblas
// https://github.com/ROCm/rocBLAS/blob/9b7f692abe3c54b88d1e77e045a7db7f1f188b69/library/include/internal/rocblas_float8.h#L79
template <ck_fp8_interpretation_t interpret, bool saturate, bool stochastic_rounding = false>
static __device__ fp8_storage_t cast_to_f8_from_f32(float v, unsigned int rng = 0)
{
    fp8_storage_t i8data;
    union
    {
        float fval;
        unsigned int i32val;
        unsigned char i8val[4]; // NOTE: not endian independent
    } val;

    unsigned int ival = 0;
    val.fval          = v;

    if constexpr(saturate)
    {
        if constexpr(interpret == ck_fp8_interpretation_t::CK_E4M3_FNUZ)
        {
            if((val.i32val & 0x7F800000) != 0x7F800000)
            { /// propagate NAN/INF, no clipping
                val.fval = __builtin_amdgcn_fmed3f(val.fval, 240.0, -240.0);
            }
        }
        else if constexpr(interpret == ck_fp8_interpretation_t::CK_E4M3_OCP)
        { // OCP type
            if((val.i32val & 0x7F800000) != 0x7F800000)
            { /// propagate NAN/INF, no clipping
                val.fval = __builtin_amdgcn_fmed3f(val.fval, 448.0, -448.0);
            }
        }
        else
        {
            if((val.i32val & 0x7F800000) != 0x7F800000)
            { /// propagate NAN/INF, no clipping
                val.fval = __builtin_amdgcn_fmed3f(val.fval, 57344.0, -57344.0);
            }
        }
    }

    if constexpr(stochastic_rounding)
    {
        ival = (interpret == ck_fp8_interpretation_t::CK_E4M3_FNUZ) ||
                       (interpret == ck_fp8_interpretation_t::CK_E4M3_OCP)
                   ? __builtin_amdgcn_cvt_sr_fp8_f32(val.fval, rng, ival, 0)
                   : __builtin_amdgcn_cvt_sr_bf8_f32(val.fval, rng, ival, 0); // 0 pos
        val.i32val = ival;
        i8data     = val.i8val[0]; // little endian
    }
    else
    { // RNE CVT
        ival = (interpret == ck_fp8_interpretation_t::CK_E4M3_FNUZ) ||
                       (interpret == ck_fp8_interpretation_t::CK_E4M3_OCP)
                   ? __builtin_amdgcn_cvt_pk_fp8_f32(val.fval, val.fval, ival, false)
                   : __builtin_amdgcn_cvt_pk_bf8_f32(val.fval, val.fval, ival, false); // false -> WORD0
        val.i32val = ival;
        i8data     = val.i8val[0];
    }
    return i8data;
}
#endif // CK_FP8_CVT_FAST_PATH

// The conversion function is from rocblas
// https://github.com/ROCm/rocBLAS/blob/9b7f692abe3c54b88d1e77e045a7db7f1f188b69/library/include/internal/rocblas_hip_f8_impl.h#L39
// This has been modified to add double types conversion as well
template <typename T, int wm, int we, bool is_fnuz, bool clip = false, bool stoch = false>
__host__ __device__ static inline fp8_storage_t cast_to_f8(T _x, unsigned int rng = 0)
{
    constexpr bool is_half   = __hip_internal::is_same<T, _Float16>::value;
    constexpr bool is_float  = __hip_internal::is_same<T, float>::value;
    constexpr bool is_double = __hip_internal::is_same<T, double>::value;
    static_assert(is_half || is_float || is_double,
                  "Only half, float and double can be cast to f8");

    constexpr int mfmt = (sizeof(T) == 8) ? 52 : ((sizeof(T) == 4) ? 23 : 10);

    using T_bitwise = typename __hip_internal::conditional<
        sizeof(T) == 2,
        unsigned short int,
        typename __hip_internal::conditional<sizeof(T) == 4, unsigned int, unsigned long long>::
            type>::type;
    T_bitwise x_bitwise = bit_cast<T_bitwise>(_x);

    unsigned long long x{x_bitwise};

    unsigned long long head, mantissa;
    int exponent, bias;
    unsigned int sign;
    unsigned long long fInf, mask;

    if constexpr(sizeof(T) == 8)
    {
        head     = x & 0xFFF0000000000000ull;
        mantissa = x & 0xFFFFFFFFFFFFFull;
        exponent = (head >> 52) & 0x7FF;
        sign     = head >> 63;
        bias     = 1023;
        fInf     = 0x7FF0000000000000ull;
        mask     = 0x7FFFFFFFFFFFFFFFull;
    }
    else if constexpr(sizeof(T) == 4)
    {
        head     = x & 0xFF800000;
        mantissa = x & 0x7FFFFF;
        exponent = (head >> 23) & 0xFF;
        sign     = head >> 31;
        bias     = 127;
        fInf     = 0x7F800000;
        mask     = 0x7FFFFFFF;
    }
    else
    {
        head     = x & 0xFC00;
        mantissa = x & 0x3FF;
        exponent = (head >> 10) & 0x1F;
        sign     = head >> 15;
        bias     = 15;
        fInf     = 0x7C00;
        mask     = 0x7FFF;
    }

    unsigned int signed_inf = 0;
    unsigned int nan        = 0;
    if constexpr(is_fnuz)
    {
        signed_inf = clip ? ((sign << 7) + 0x7f) : 0x80;
        nan        = 0x80;
    }
    else
    {
        if constexpr(we == 4)
        { // e4m3
            signed_inf = (sign << 7) + (clip ? 0x7e : 0x7f);
        }
        else
        { // e5m2
            signed_inf = (sign << 7) + (clip ? 0x7b : 0x7c);
        }
        nan = (sign << 7) + 0x7f;
    }
    // Max values
    unsigned long long ifmax = 0;
    if constexpr(sizeof(T) == 8)
    {
        if constexpr(we == 5)
        { // 57344
            ifmax = 0x40EC000000000000ull;
        }
        else
        {
            if constexpr(is_fnuz)
            { // 240
                ifmax = 0x406E000000000000ull;
            }
            else
            { // 448
                ifmax = 0x407C000000000000ull;
            }
        }
    }
    else if(sizeof(T) == 4)
    {
        if constexpr(we == 5)
        {
            ifmax = 0x47600000;
        }
        else
        {
            if constexpr(is_fnuz)
            {
                ifmax = 0x43700000;
            }
            else
            {
                ifmax = 0x43E00000;
            }
        }
    }
    else
    {
        if constexpr(we == 5)
        {
            ifmax = 0x7B00;
        }
        else
        {
            if constexpr(is_fnuz)
            {
                ifmax = 0x5B80;
            }
            else
            {
                ifmax = 0x5F00;
            }
        }
    }
    // Deal with inf and NaNs
    if((x & fInf) == fInf)
    {
        if constexpr(is_fnuz)
            return signed_inf;

        return mantissa != 0 ? nan : signed_inf;
    }

    if((x & mask) > ifmax)
    {
        return signed_inf;
    }

    if(x == 0)
    {
        return 0;
    }

    // First need to check if it is normal or denorm as there is a difference of
    // implicit 1 Then need to adjust the exponent to align with the F8 exponent,
    // in the meanwhile, shift The mantissa. Then for stochastic rounding, add rng
    // to mantissa and truncate. And for RNE, no need to add rng. Then probably
    // need to check whether there is carry and adjust exponent and mantissa again

    // For IEEE bias mode, the bias is 2^(k-1) -1 where k is the width of exponent
    // bits
    const int f8_bias                  = (1 << (we - 1)) - 1 + (is_fnuz ? 1 : 0);
    const int f8_denormal_act_exponent = 1 - f8_bias; // actual exponent of f8 denormal
    // act_exponent is the actual exponent of fp32/fp16 (after subtracting bias)
    // f8_exponent is the converted f8 exponent with bias encoding
    // exponent_diff is the diff between fp32/fp16 exponent and f8 exponent,
    // the difference needs to be adjusted and mantissa shifted
    int act_exponent, f8_exponent, exponent_diff;

    if(exponent == 0)
    { // fp32/fp16 is in denormal.
        /* fp32 denormal is below 2^-127 so it is usually not a concern here, we
        mostly concern fp16 here. In this case, f8 is usually in denormal. But there
        could be exceptions. fp16 denormal has exponent bias 15 while bf8 with NANOO has
        exponent bias 16. It means that there are some numbers in fp16 denormal but they
        are bf8 (NANOO) normals - smallest bf8 (NANOO) normal is 2^-15. fp16 numbers
        where exponent==0 (actual exponent -14) and highest bit of mantissa is 1 are bf8
        (NANOO) normal. In this case, the fp16 mantissa should be shift left by 1 */
        act_exponent  = exponent - bias + 1;
        exponent_diff = f8_denormal_act_exponent -
                        act_exponent; // actual exponent is exponent-bias+1 as it is denormal
    }
    else
    { // fp32/fp16 is normal with implicit 1
        act_exponent = exponent - bias;
        if(act_exponent <= f8_denormal_act_exponent)
        {
            /* This is the case where fp32/fp16 is normal but it is in f8 denormal
            range. For example fp8 nanoo mode, denormal exponent is -7, but if the fp32/fp16
            actual exponent is -7, it is actually larger due to the implicit 1,
            Therefore it needs to be adjust to -6 and mantissa shift right by 1.
            So for fp32/fp16, exponent -8 is the cut point to convert to fp8 nanoo */
            exponent_diff = f8_denormal_act_exponent - act_exponent;
        }
        else
        { // both fp32/fp16 and f8 are in normal range
            exponent_diff = 0; // exponent_diff=0 does not mean there is no difference
                               // for this case, act_exponent could be larger. Just
                               // that it does not need shift mantissa
        }
        mantissa += (1ull << mfmt); // Add the implicit 1 into mantissa
    }

    bool midpoint = (mantissa & ((1ull << (mfmt - wm + exponent_diff)) - 1)) ==
                    (1ull << (mfmt - wm + exponent_diff - 1));
    /* This part is a bit tricky. The judgment of whether it is a tie needs to be
    done before we shift right as shift right could rip off some residual part and
    make something not midpoint look like midpoint. For example, the fp16 number
    0x1002 (0 00100 0000000010), it is larger than midpoint, but after shift right
    by 4 bits, it would look like midpoint.
    */

    if(exponent_diff > 0)
        mantissa >>= exponent_diff;
    else if(exponent_diff == -1)
        mantissa <<= -exponent_diff;
    bool implicit_one = mantissa & (1ull << mfmt);
    // if there is no implicit 1, it means the f8 is denormal and need to adjust
    // to denorm exponent
    f8_exponent =
        (act_exponent + exponent_diff) /*actual f8 exponent*/ + f8_bias - (implicit_one ? 0 : 1);

    // Now we have the exponent and mantissa adjusted
    unsigned long long drop_mask = (1ull << (mfmt - wm)) - 1;
    bool odd = mantissa & (1ull << (mfmt - wm)); // if the least significant bit that
                                                 // is not truncated is 1
    mantissa += (stoch ? rng : (midpoint ? (odd ? mantissa : mantissa - 1ull) : mantissa)) &
                drop_mask;

    // Now we deal with overflow
    if(f8_exponent == 0)
    {
        if((1ull << mfmt) & mantissa)
        {
            f8_exponent = 1; // denormal overflow to become normal, promote exponent
        }
    }
    else
    {
        if((1ull << (mfmt + 1)) & mantissa)
        {
            mantissa >>= 1;
            f8_exponent++;
        }
    }

    mantissa >>= (mfmt - wm);

    // above range: quantize to maximum possible float of the same sign
    const int max_exp = (1 << we) - 1;
    if(f8_exponent > max_exp)
    {
        if constexpr(clip)
        {
            mantissa    = (1 << wm) - 1;
            f8_exponent = max_exp;
        }
        else
        {
            return signed_inf;
        }
    }

    if(f8_exponent == 0 && mantissa == 0)
        return is_fnuz ? 0 : (sign << 7);
    mantissa &= (1 << wm) - 1;
    return (sign << 7) | (f8_exponent << wm) | mantissa;
}

/**
 * \brief convert float to @p fp8_storage_t
 *
 * \tparam interp interpretation of fp8
 * \tparam sat saturation of fp8
 * \param f float number
 * \return fp8_storage_t
 */
template <ck_fp8_interpretation_t interp,
          ck_saturation_t sat      = ck_saturation_t::CK_SATFINITE,
          bool stochastic_rounding = false>
#if CK_FP8_CVT_FAST_PATH
__host__ __device__ static inline fp8_storage_t cvt_float_to_fp8(const float f)
{
    __is_interpret_supported(interp);
    uint32_t rng = 0;
    if constexpr(stochastic_rounding)
    {
        constexpr int seed = 1254739;
        rng = prand_generator<float, seed>(reinterpret_cast<uintptr_t>(&f), f);
    }
    return cast_to_f8_from_f32<interp, sat == ck_saturation_t::CK_SATFINITE, stochastic_rounding>(
        f, rng);
#else
#if CK_USE_OCP_FP8
__host__ __device__ static inline fp8_storage_t cvt_float_to_fp8(const float f)
{
#else
__host__ static inline fp8_storage_t cvt_float_to_fp8(const float f)
{
#endif
    uint32_t rng = 0;
    if constexpr(stochastic_rounding)
    {
        constexpr int seed = 1254739;
        rng = prand_generator<float, seed>(reinterpret_cast<uintptr_t>(&f), f);
    }

    if constexpr(interp == ck_fp8_interpretation_t::CK_E4M3_FNUZ)
    {
        return cast_to_f8<float,
                          3,
                          4,
                          true,
                          sat == ck_saturation_t::CK_SATFINITE,
                          stochastic_rounding>(f, rng);
    }
    else if constexpr(interp == ck_fp8_interpretation_t::CK_E5M2_FNUZ)
    {
        return cast_to_f8<float,
                          2,
                          5,
                          true,
                          sat == ck_saturation_t::CK_SATFINITE,
                          stochastic_rounding>(f, rng);
    }
    else if constexpr(interp == ck_fp8_interpretation_t::CK_E4M3_OCP)
    {
        return cast_to_f8<float,
                          3,
                          4,
                          false,
                          sat == ck_saturation_t::CK_SATFINITE,
                          stochastic_rounding>(f, rng);
    }
    else if constexpr(interp == ck_fp8_interpretation_t::CK_E5M2_OCP)
    {
        return cast_to_f8<float,
                          2,
                          5,
                          false,
                          sat == ck_saturation_t::CK_SATFINITE,
                          stochastic_rounding>(f, rng);
    }
    else
    {
        __hip_assert(false && "FP8 type is not supported by current target device");
        return 0;
    }
#endif // CK_FP8_CVT_FAST_PATH
}

/**
 * \brief convert _Float16 to @p fp8_storage_t
 *
 * \tparam sat saturation of fp8
 * \tparam interp interpretation of fp8
 * \tparam stochastic_rounding switch between RNE and SR
 * \param x _Float16 value
 * \return fp8_storage_t
 */
template <ck_fp8_interpretation_t interp,
          ck_saturation_t sat      = ck_saturation_t::CK_SATFINITE,
          bool stochastic_rounding = false>
#if CK_FP8_CVT_FAST_PATH || CK_USE_OCP_FP8
__host__ __device__ static inline fp8_storage_t cvt_half_t_to_fp8(const _Float16 x)
#else
__host__ static inline fp8_storage_t cvt_half_t_to_fp8(const _Float16 x)
#endif
{
    return cvt_float_to_fp8<interp, sat, stochastic_rounding>(static_cast<float>(x));
}

} // namespace fp8_impl

// Declare a template function for fp8 conversion using RNE
template <typename Y, typename X>
__host__ __device__ constexpr Y f8_convert_rne(X x);

// convert fp32 to fp8 with rounding to nearest even
template <>
inline __host__ __device__ f8_ocp_t f8_convert_rne<f8_ocp_t, float>(float x)
{
    return f8_ocp_t{
        fp8_impl::cvt_float_to_fp8<f8_ocp_t::default_interpret, f8_ocp_t::default_saturation>(x)};
}

// convert fp32 to bf8 with rounding to nearest even
template <>
inline __host__ __device__ bf8_ocp_t f8_convert_rne<bf8_ocp_t, float>(float x)
{
    return bf8_ocp_t{
        fp8_impl::cvt_float_to_fp8<bf8_ocp_t::default_interpret, bf8_ocp_t::default_saturation>(x)};
}

// convert _Float16 to fp8 with rounding to nearest even
template <>
inline __host__ __device__ f8_ocp_t f8_convert_rne<f8_ocp_t, _Float16>(_Float16 x)
{
    return f8_ocp_t{
        fp8_impl::cvt_half_t_to_fp8<f8_ocp_t::default_interpret, f8_ocp_t::default_saturation>(x)};
}

template <>
inline __host__ __device__ bf8_ocp_t f8_convert_rne<bf8_ocp_t, _Float16>(_Float16 x)
{
    return bf8_ocp_t{
        fp8_impl::cvt_half_t_to_fp8<bf8_ocp_t::default_interpret, bf8_ocp_t::default_saturation>(x)};
}

// Declare a template function for fp8 conversion using stochastic rounding
template <typename Y, typename X>
__host__ __device__ constexpr Y f8_convert_sr(X x);

// convert fp32 to fp8 with stochastic rounding
template <>
inline __host__ __device__ f8_ocp_t f8_convert_sr<f8_ocp_t, float>(float x)
{
    return f8_ocp_t{fp8_impl::cvt_float_to_fp8<f8_ocp_t::default_interpret,
                                               f8_ocp_t::default_saturation,
                                               true>(x)};
}

// convert fp32 to bf8 with stochastic rounding
template <>
inline __host__ __device__ bf8_ocp_t f8_convert_sr<bf8_ocp_t, float>(float x)
{
    return bf8_ocp_t{fp8_impl::cvt_float_to_fp8<bf8_ocp_t::default_interpret,
                                                bf8_ocp_t::default_saturation,
                                                true>(x)};
}

// convert _Float16 to fp8 with stochastic rounding
template <>
inline __host__ __device__ f8_ocp_t f8_convert_sr<f8_ocp_t, _Float16>(_Float16 x)
{
    return f8_ocp_t{fp8_impl::cvt_half_t_to_fp8<f8_ocp_t::default_interpret,
                                                f8_ocp_t::default_saturation,
                                                true>(x)};
}

// convert _Float16 to bf8 with stochastic rounding
template <>
inline __host__ __device__ bf8_ocp_t f8_convert_sr<bf8_ocp_t, _Float16>(_Float16 x)
{
    return bf8_ocp_t{fp8_impl::cvt_half_t_to_fp8<bf8_ocp_t::default_interpret,
                                                 bf8_ocp_t::default_saturation,
                                                 true>(x)};
}

#if CK_USE_OCP_FP8
using f8_t  = f8_ocp_t;
using bf8_t = bf8_ocp_t;
#define CK_FP8_TYPE_FNUZ 0
#define CK_FP8_TYPE_OCP 1
#else
using f8_t  = f8_fnuz_t;
using bf8_t = bf8_fnuz_t;
#define CK_FP8_TYPE_FNUZ 1
#define CK_FP8_TYPE_OCP 0
#endif

} // namespace ck
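A brief usage sketch of the conversion entry points defined above (not part of the commit; the input value is illustrative):

    // Round-trip a float through the OCP e4m3 type. f8_convert_rne rounds to nearest even
    // using f8_ocp_t::default_interpret / default_saturation; the explicit operator float
    // converts back on the host.
    ck::f8_ocp_t q = ck::f8_convert_rne<ck::f8_ocp_t>(1.5f);
    float back     = static_cast<float>(q); // 1.5f is exactly representable in e4m3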
include/ck/utility/amd_xdlops.hpp

@@ -4,7 +4,7 @@
 #pragma once

 namespace ck {
-// Define the common macro for gfx94x models
+// Define the common macro for MI300 models
 #if defined(__gfx940__) || defined(__gfx941__) || defined(__gfx942__)
 #define __gfx94__
 #endif
include/ck/utility/data_type.hpp
View file @
8d2f2f8c
...
...
@@ -3,6 +3,7 @@
#pragma once
#include "ck/utility/amd_ck_fp8.hpp"
#include "ck/utility/statically_indexed_array.hpp"
namespace
ck
{
...
...
@@ -10,8 +11,6 @@ namespace ck {
using
bhalf_t
=
ushort
;
using
half_t
=
_Float16
;
using
int4_t
=
_BitInt
(
4
);
using
f8_t
=
_BitInt
(
8
);
using
bf8_t
=
unsigned
_BitInt
(
8
);
inline
constexpr
auto
next_pow2
(
uint32_t
x
)
{
...
...
@@ -19,14 +18,15 @@ inline constexpr auto next_pow2(uint32_t x)
return
x
>
1u
?
(
1u
<<
(
32u
-
__builtin_clz
(
x
-
1u
)))
:
x
;
}
// native types: double, float, _Float16, ushort, int32_t, int8_t, uint8_t, f8_t, bf8_t, bool
// native types: double, float, _Float16, ushort, int32_t, int8_t, uint8_t, f8_fnuz_t, bf8_fnuz_t,
// native types: bool
template
<
typename
T
>
inline
constexpr
bool
is_native_type
()
{
return
is_same
<
T
,
double
>::
value
||
is_same
<
T
,
float
>::
value
||
is_same
<
T
,
half_t
>::
value
||
is_same
<
T
,
bhalf_t
>::
value
||
is_same
<
T
,
int32_t
>::
value
||
is_same
<
T
,
int8_t
>::
value
||
is_same
<
T
,
uint8_t
>::
value
||
is_same
<
T
,
f8_
t
>::
value
||
is_same
<
T
,
bf8
_t
>::
value
||
is_same
<
T
,
bool
>::
value
;
is_same
<
T
,
uint8_t
>::
value
||
is_same
<
T
,
f8_
fnuz
_t
>::
value
||
is_same
<
T
,
bf8_fnuz_t
>::
value
||
is_same
<
T
,
bool
>::
value
;
}
// vector_type
...
...
@@ -166,16 +166,30 @@ struct scalar_type<int4_t>
#endif
template
<
>
struct
scalar_type
<
f8_t
>
struct
scalar_type
<
f8_
fnuz_
t
>
{
using
type
=
f8_t
;
using
type
=
f8_
fnuz_
t
;
static
constexpr
index_t
vector_size
=
1
;
};
template
<
>
struct
scalar_type
<
bf8_t
>
struct
scalar_type
<
bf8_
fnuz_
t
>
{
using
type
=
bf8_t
;
using
type
=
bf8_fnuz_t
;
static
constexpr
index_t
vector_size
=
1
;
};
template
<
>
struct
scalar_type
<
f8_ocp_t
>
{
using
type
=
f8_ocp_t
::
data_type
;
static
constexpr
index_t
vector_size
=
1
;
};
template
<
>
struct
scalar_type
<
bf8_ocp_t
>
{
using
type
=
bf8_ocp_t
::
data_type
;
static
constexpr
index_t
vector_size
=
1
;
};
...
...
@@ -1010,60 +1024,203 @@ struct vector_type<T, 256, typename std::enable_if_t<is_native_type<T>()>>
}
};
template
<
typename
T
,
index_t
N
,
typename
Enable
=
void
>
struct
non_native_vector_base
;
template
<
typename
T
>
struct
nnvb_data_t_selector
{
using
type
=
unsigned
_BitInt
(
8
*
sizeof
(
T
));
};
template
<
>
struct
nnvb_data_t_selector
<
f8_ocp_t
>
{
using
type
=
f8_ocp_t
::
data_type
;
};
template
<
>
struct
nnvb_data_t_selector
<
bf8_ocp_t
>
{
using
type
=
bf8_ocp_t
::
data_type
;
};
template
<
typename
T
,
index_t
N
>
struct
non_native_vector_base
<
T
,
N
,
std
::
enable_if_t
<
sizeof
(
T
)
==
1
||
sizeof
(
T
)
==
2
||
sizeof
(
T
)
==
4
||
sizeof
(
T
)
==
8
>>
{
using
data_t
=
typename
nnvb_data_t_selector
<
T
>::
type
;
// select data_t based on the size of T
static_assert
(
sizeof
(
T
)
==
sizeof
(
data_t
),
"non_native_vector_base storage size mismatch"
);
using
data_v
=
data_t
__attribute__
((
ext_vector_type
(
N
)));
using
type
=
non_native_vector_base
<
T
,
N
>
;
union
alignas
(
next_pow2
(
N
*
sizeof
(
T
)))
{
data_v
dN
;
// storage vector;
StaticallyIndexedArray
<
data_t
,
N
>
dxN
;
StaticallyIndexedArray
<
T
,
N
>
dTxN
;
StaticallyIndexedArray
<
data_v
,
1
>
dNx1
;
}
data_
;
__host__
__device__
constexpr
non_native_vector_base
(
data_t
a
)
:
data_
{
data_v
(
a
)}
{}
__host__
__device__
constexpr
non_native_vector_base
(
T
f
)
:
non_native_vector_base
(
bit_cast
<
data_t
>
(
f
))
{
}
__host__
__device__
constexpr
non_native_vector_base
()
:
non_native_vector_base
(
T
{}){};
__host__
__device__
constexpr
non_native_vector_base
(
data_v
v
)
:
data_
{
v
}
{}
__host__
__device__
constexpr
operator
data_v
()
const
{
return
data_
.
dN
;
}
__host__
__device__
constexpr
operator
data_t
()
const
{
if
constexpr
(
N
==
1
)
{
return
data_
.
dxN
[
Number
<
0
>
{}];
}
else
{
return
data_
.
dxN
;
// XXX this should cause an error
}
}
__host__
__device__
constexpr
operator
T
()
const
{
if
constexpr
(
N
==
1
)
{
return
data_
.
dTxN
[
Number
<
0
>
{}];
}
else
{
return
data_
.
dTxN
;
// XXX this should cause an error
}
}
template
<
typename
X
>
__host__
__device__
constexpr
const
auto
&
AsType
()
const
{
static_assert
(
is_same_v
<
X
,
data_t
>
||
is_same_v
<
X
,
T
>
||
is_same_v
<
X
,
data_v
>
,
"Something went wrong, please check src and dst types."
);
if
constexpr
(
is_same_v
<
X
,
data_t
>
)
{
return
data_
.
dxN
;
}
else
if
constexpr
(
is_same_v
<
X
,
T
>
)
{
return
data_
.
dTxN
;
}
else
if
constexpr
(
is_same_v
<
X
,
data_v
>
)
{
return
data_
.
dNx1
;
}
else
{
return
err
;
}
}
template
<
typename
X
>
__host__
__device__
constexpr
auto
&
AsType
()
{
static_assert
(
is_same_v
<
X
,
data_t
>
||
is_same_v
<
X
,
T
>
||
is_same_v
<
X
,
data_v
>
,
"Something went wrong, please check src and dst types."
);
if
constexpr
(
is_same_v
<
X
,
data_t
>
)
{
return
data_
.
dxN
;
}
else
if
constexpr
(
is_same_v
<
X
,
T
>
)
{
return
data_
.
dTxN
;
}
else
if
constexpr
(
is_same_v
<
X
,
data_v
>
)
{
return
data_
.
dNx1
;
}
else
{
return
err
;
}
}
};
template
<
typename
T
,
index_t
N
>
struct
non_native_vector_base
struct
scalar_type
<
non_native_vector_base
<
T
,
N
>>
;
template
<
index_t
N
>
struct
scalar_type
<
non_native_vector_base
<
f8_ocp_t
,
N
>>
{
using
type
=
non_native_vector_base
<
T
,
N
>
;
using
type
=
typename
non_native_vector_base
<
f8_ocp_t
,
N
>::
data_t
;
static
constexpr
index_t
vector_size
=
N
;
};
__host__
__device__
non_native_vector_base
()
=
default
;
__host__
__device__
non_native_vector_base
(
const
type
&
)
=
default
;
__host__
__device__
non_native_vector_base
(
type
&&
)
=
default
;
__host__
__device__
~
non_native_vector_base
()
=
defaul
t
;
template
<
index_t
N
>
struct
scalar_type
<
non_native_vector_base
<
bf8_ocp_t
,
N
>>
{
using
type
=
typename
non_native_vector_base
<
bf8_ocp_t
,
N
>::
data_
t
;
T
d
[
N
]
;
static
constexpr
index_t
vector_size
=
N
;
};
// non-native vector_type implementation
template <typename T>
struct vector_type<T, 1, typename std::enable_if_t<!is_native_type<T>()>>
{
-    using d1_t = T;
-    using type = d1_t;
+    using d1_t     = T;
+    using d1_nnv_t = non_native_vector_base<T, 1>;
+    using type     = d1_nnv_t;

    union alignas(next_pow2(1 * sizeof(T)))
    {
        d1_t d1_;
        StaticallyIndexedArray<d1_t, 1> d1x1_;
        d1_nnv_t d1_nnv_;
    } data_;

-    __host__ __device__ constexpr vector_type() : data_{type{}} {}
+    __host__ __device__ constexpr vector_type() : data_{d1_t{}} {}

    __host__ __device__ constexpr vector_type(type v) : data_{v} {}

    template <typename X>
    __host__ __device__ constexpr const auto& AsType() const
    {
-        static_assert(is_same<X, d1_t>::value,
+        static_assert(is_same<X, d1_t>::value || is_same<X, d1_nnv_t>::value,
                      "Something went wrong, please check src and dst types.");

-        return data_.d1x1_;
+        if constexpr(is_same<X, d1_t>::value || is_same<X, d1_nnv_t>::value)
+        {
+            return data_.d1x1_;
+        }
+        else
+        {
+            return err;
+        }
    }

    template <typename X>
    __host__ __device__ constexpr auto& AsType()
    {
-        static_assert(is_same<X, d1_t>::value,
+        static_assert(is_same<X, d1_t>::value || is_same<X, d1_nnv_t>::value,
                      "Something went wrong, please check src and dst types.");

-        return data_.d1x1_;
+        if constexpr(is_same<X, d1_t>::value || is_same<X, d1_nnv_t>::value)
+        {
+            return data_.d1x1_;
+        }
+        else
+        {
+            return err;
+        }
    }
};
template <typename T>
struct vector_type<T, 2, typename std::enable_if_t<!is_native_type<T>()>>
{
-    using d1_t = T;
-    using d2_t = non_native_vector_base<T, 2>;
+    using d1_t     = T;
+    using d1_nnv_t = non_native_vector_base<T, 1>;
+    using d2_t     = non_native_vector_base<T, 2>;

    using type = d2_t;
...
...
@@ -1081,10 +1238,11 @@ struct vector_type<T, 2, typename std::enable_if_t<!is_native_type<T>()>>
    template <typename X>
    __host__ __device__ constexpr const auto& AsType() const
    {
-        static_assert(is_same<X, d1_t>::value || is_same<X, d2_t>::value,
+        static_assert(is_same<X, d1_t>::value || is_same<X, d1_nnv_t>::value ||
+                          is_same<X, d2_t>::value,
                      "Something went wrong, please check src and dst types.");

-        if constexpr(is_same<X, d1_t>::value)
+        if constexpr(is_same<X, d1_t>::value || is_same<X, d1_nnv_t>::value)
        {
            return data_.d1x2_;
        }
...
...
@@ -1101,10 +1259,11 @@ struct vector_type<T, 2, typename std::enable_if_t<!is_native_type<T>()>>
    template <typename X>
    __host__ __device__ constexpr auto& AsType()
    {
-        static_assert(is_same<X, d1_t>::value || is_same<X, d2_t>::value,
+        static_assert(is_same<X, d1_t>::value || is_same<X, d1_nnv_t>::value ||
+                          is_same<X, d2_t>::value,
                      "Something went wrong, please check src and dst types.");

-        if constexpr(is_same<X, d1_t>::value)
+        if constexpr(is_same<X, d1_t>::value || is_same<X, d1_nnv_t>::value)
        {
            return data_.d1x2_;
        }
...
...
@@ -1122,9 +1281,10 @@ struct vector_type<T, 2, typename std::enable_if_t<!is_native_type<T>()>>
template <typename T>
struct vector_type<T, 4, typename std::enable_if_t<!is_native_type<T>()>>
{
-    using d1_t = T;
-    using d2_t = non_native_vector_base<T, 2>;
-    using d4_t = non_native_vector_base<T, 4>;
+    using d1_t     = T;
+    using d1_nnv_t = non_native_vector_base<T, 1>;
+    using d2_t     = non_native_vector_base<T, 2>;
+    using d4_t     = non_native_vector_base<T, 4>;

    using type = d4_t;
...
...
@@ -1143,10 +1303,11 @@ struct vector_type<T, 4, typename std::enable_if_t<!is_native_type<T>()>>
    template <typename X>
    __host__ __device__ constexpr const auto& AsType() const
    {
-        static_assert(is_same<X, d1_t>::value || is_same<X, d2_t>::value ||
-                          is_same<X, d4_t>::value,
+        static_assert(is_same<X, d1_t>::value || is_same<X, d1_nnv_t>::value ||
+                          is_same<X, d2_t>::value || is_same<X, d4_t>::value,
                      "Something went wrong, please check src and dst types.");

-        if constexpr(is_same<X, d1_t>::value)
+        if constexpr(is_same<X, d1_t>::value || is_same<X, d1_nnv_t>::value)
        {
            return data_.d1x4_;
        }
...
...
@@ -1167,10 +1328,11 @@ struct vector_type<T, 4, typename std::enable_if_t<!is_native_type<T>()>>
    template <typename X>
    __host__ __device__ constexpr auto& AsType()
    {
-        static_assert(is_same<X, d1_t>::value || is_same<X, d2_t>::value ||
-                          is_same<X, d4_t>::value,
+        static_assert(is_same<X, d1_t>::value || is_same<X, d1_nnv_t>::value ||
+                          is_same<X, d2_t>::value || is_same<X, d4_t>::value,
                      "Something went wrong, please check src and dst types.");

-        if constexpr(is_same<X, d1_t>::value)
+        if constexpr(is_same<X, d1_t>::value || is_same<X, d1_nnv_t>::value)
        {
            return data_.d1x4_;
        }
...
...
@@ -1192,10 +1354,11 @@ struct vector_type<T, 4, typename std::enable_if_t<!is_native_type<T>()>>
template <typename T>
struct vector_type<T, 8, typename std::enable_if_t<!is_native_type<T>()>>
{
-    using d1_t = T;
-    using d2_t = non_native_vector_base<T, 2>;
-    using d4_t = non_native_vector_base<T, 4>;
-    using d8_t = non_native_vector_base<T, 8>;
+    using d1_t     = T;
+    using d1_nnv_t = non_native_vector_base<T, 1>;
+    using d2_t     = non_native_vector_base<T, 2>;
+    using d4_t     = non_native_vector_base<T, 4>;
+    using d8_t     = non_native_vector_base<T, 8>;

    using type = d8_t;
...
...
@@ -1215,11 +1378,12 @@ struct vector_type<T, 8, typename std::enable_if_t<!is_native_type<T>()>>
    template <typename X>
    __host__ __device__ constexpr const auto& AsType() const
    {
-        static_assert(is_same<X, d1_t>::value || is_same<X, d2_t>::value ||
-                          is_same<X, d4_t>::value || is_same<X, d8_t>::value,
+        static_assert(is_same<X, d1_t>::value || is_same<X, d1_nnv_t>::value ||
+                          is_same<X, d2_t>::value || is_same<X, d4_t>::value ||
+                          is_same<X, d8_t>::value,
                      "Something went wrong, please check src and dst types.");

-        if constexpr(is_same<X, d1_t>::value)
+        if constexpr(is_same<X, d1_t>::value || is_same<X, d1_nnv_t>::value)
        {
            return data_.d1x8_;
        }
...
...
@@ -1244,11 +1408,12 @@ struct vector_type<T, 8, typename std::enable_if_t<!is_native_type<T>()>>
    template <typename X>
    __host__ __device__ constexpr auto& AsType()
    {
-        static_assert(is_same<X, d1_t>::value || is_same<X, d2_t>::value ||
-                          is_same<X, d4_t>::value || is_same<X, d8_t>::value,
+        static_assert(is_same<X, d1_t>::value || is_same<X, d1_nnv_t>::value ||
+                          is_same<X, d2_t>::value || is_same<X, d4_t>::value ||
+                          is_same<X, d8_t>::value,
                      "Something went wrong, please check src and dst types.");

-        if constexpr(is_same<X, d1_t>::value)
+        if constexpr(is_same<X, d1_t>::value || is_same<X, d1_nnv_t>::value)
        {
            return data_.d1x8_;
        }
...
...
@@ -1274,11 +1439,12 @@ struct vector_type<T, 8, typename std::enable_if_t<!is_native_type<T>()>>
template <typename T>
struct vector_type<T, 16, typename std::enable_if_t<!is_native_type<T>()>>
{
-    using d1_t  = T;
-    using d2_t  = non_native_vector_base<T, 2>;
-    using d4_t  = non_native_vector_base<T, 4>;
-    using d8_t  = non_native_vector_base<T, 8>;
-    using d16_t = non_native_vector_base<T, 16>;
+    using d1_t     = T;
+    using d1_nnv_t = non_native_vector_base<T, 1>;
+    using d2_t     = non_native_vector_base<T, 2>;
+    using d4_t     = non_native_vector_base<T, 4>;
+    using d8_t     = non_native_vector_base<T, 8>;
+    using d16_t    = non_native_vector_base<T, 16>;

    using type = d16_t;
...
...
@@ -1299,12 +1465,12 @@ struct vector_type<T, 16, typename std::enable_if_t<!is_native_type<T>()>>
    template <typename X>
    __host__ __device__ constexpr const auto& AsType() const
    {
-        static_assert(is_same<X, d1_t>::value || is_same<X, d2_t>::value ||
-                          is_same<X, d4_t>::value || is_same<X, d8_t>::value ||
-                          is_same<X, d16_t>::value,
+        static_assert(is_same<X, d1_t>::value || is_same<X, d1_nnv_t>::value ||
+                          is_same<X, d2_t>::value || is_same<X, d4_t>::value ||
+                          is_same<X, d8_t>::value || is_same<X, d16_t>::value,
                      "Something went wrong, please check src and dst types.");

-        if constexpr(is_same<X, d1_t>::value)
+        if constexpr(is_same<X, d1_t>::value || is_same<X, d1_nnv_t>::value)
        {
            return data_.d1x16_;
        }
...
...
@@ -1333,12 +1499,12 @@ struct vector_type<T, 16, typename std::enable_if_t<!is_native_type<T>()>>
    template <typename X>
    __host__ __device__ constexpr auto& AsType()
    {
-        static_assert(is_same<X, d1_t>::value || is_same<X, d2_t>::value ||
-                          is_same<X, d4_t>::value || is_same<X, d8_t>::value ||
-                          is_same<X, d16_t>::value,
+        static_assert(is_same<X, d1_t>::value || is_same<X, d1_nnv_t>::value ||
+                          is_same<X, d2_t>::value || is_same<X, d4_t>::value ||
+                          is_same<X, d8_t>::value || is_same<X, d16_t>::value,
                      "Something went wrong, please check src and dst types.");

-        if constexpr(is_same<X, d1_t>::value)
+        if constexpr(is_same<X, d1_t>::value || is_same<X, d1_nnv_t>::value)
        {
            return data_.d1x16_;
        }
...
...
@@ -1632,20 +1798,70 @@ using int8x32_t = typename vector_type<int8_t, 32>::type;
using int8x64_t = typename vector_type<int8_t, 64>::type;

// f8
-using f8x2_t  = typename vector_type<f8_t, 2>::type;
-using f8x4_t  = typename vector_type<f8_t, 4>::type;
-using f8x8_t  = typename vector_type<f8_t, 8>::type;
-using f8x16_t = typename vector_type<f8_t, 16>::type;
-using f8x32_t = typename vector_type<f8_t, 32>::type;
-using f8x64_t = typename vector_type<f8_t, 64>::type;
+using f8x2_fnuz_t  = typename vector_type<f8_fnuz_t, 2>::type;
+using f8x4_fnuz_t  = typename vector_type<f8_fnuz_t, 4>::type;
+using f8x8_fnuz_t  = typename vector_type<f8_fnuz_t, 8>::type;
+using f8x16_fnuz_t = typename vector_type<f8_fnuz_t, 16>::type;
+using f8x32_fnuz_t = typename vector_type<f8_fnuz_t, 32>::type;
+using f8x64_fnuz_t = typename vector_type<f8_fnuz_t, 64>::type;

// bf8
-using bf8x2_t  = typename vector_type<bf8_t, 2>::type;
-using bf8x4_t  = typename vector_type<bf8_t, 4>::type;
-using bf8x8_t  = typename vector_type<bf8_t, 8>::type;
-using bf8x16_t = typename vector_type<bf8_t, 16>::type;
-using bf8x32_t = typename vector_type<bf8_t, 32>::type;
-using bf8x64_t = typename vector_type<bf8_t, 64>::type;
+using bf8x2_fnuz_t  = typename vector_type<bf8_fnuz_t, 2>::type;
+using bf8x4_fnuz_t  = typename vector_type<bf8_fnuz_t, 4>::type;
+using bf8x8_fnuz_t  = typename vector_type<bf8_fnuz_t, 8>::type;
+using bf8x16_fnuz_t = typename vector_type<bf8_fnuz_t, 16>::type;
+using bf8x32_fnuz_t = typename vector_type<bf8_fnuz_t, 32>::type;
+using bf8x64_fnuz_t = typename vector_type<bf8_fnuz_t, 64>::type;

// f8
using f8x2_ocp_t  = typename vector_type<f8_ocp_t, 2>::type;
using f8x4_ocp_t  = typename vector_type<f8_ocp_t, 4>::type;
using f8x8_ocp_t  = typename vector_type<f8_ocp_t, 8>::type;
using f8x16_ocp_t = typename vector_type<f8_ocp_t, 16>::type;
using f8x32_ocp_t = typename vector_type<f8_ocp_t, 32>::type;
using f8x64_ocp_t = typename vector_type<f8_ocp_t, 64>::type;

// bf8
using bf8x2_ocp_t  = typename vector_type<bf8_ocp_t, 2>::type;
using bf8x4_ocp_t  = typename vector_type<bf8_ocp_t, 4>::type;
using bf8x8_ocp_t  = typename vector_type<bf8_ocp_t, 8>::type;
using bf8x16_ocp_t = typename vector_type<bf8_ocp_t, 16>::type;
using bf8x32_ocp_t = typename vector_type<bf8_ocp_t, 32>::type;
using bf8x64_ocp_t = typename vector_type<bf8_ocp_t, 64>::type;

#if CK_FP8_TYPE_OCP
// f8
using f8x2_t  = f8x2_ocp_t;
using f8x4_t  = f8x4_ocp_t;
using f8x8_t  = f8x8_ocp_t;
using f8x16_t = f8x16_ocp_t;
using f8x32_t = f8x32_ocp_t;
using f8x64_t = f8x64_ocp_t;
// bf8
using bf8x2_t  = bf8x2_ocp_t;
using bf8x4_t  = bf8x4_ocp_t;
using bf8x8_t  = bf8x8_ocp_t;
using bf8x16_t = bf8x16_ocp_t;
using bf8x32_t = bf8x32_ocp_t;
using bf8x64_t = bf8x64_ocp_t;
#elif CK_FP8_TYPE_FNUZ
// f8
using f8x2_t  = f8x2_fnuz_t;
using f8x4_t  = f8x4_fnuz_t;
using f8x8_t  = f8x8_fnuz_t;
using f8x16_t = f8x16_fnuz_t;
using f8x32_t = f8x32_fnuz_t;
using f8x64_t = f8x64_fnuz_t;
// bf8
using bf8x2_t  = bf8x2_fnuz_t;
using bf8x4_t  = bf8x4_fnuz_t;
using bf8x8_t  = bf8x8_fnuz_t;
using bf8x16_t = bf8x16_fnuz_t;
using bf8x32_t = bf8x32_fnuz_t;
using bf8x64_t = bf8x64_fnuz_t;
#endif

// u8
using uint8x2_t = typename vector_type<uint8_t, 2>::type;
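An illustrative compile-time check of the alias selection above (an assumption about intent, not code from the diff): the unsuffixed f8x*_t aliases resolve to the OCP or FNUZ family depending on which CK_FP8_TYPE_* macro the build defines.

#include "ck/utility/data_type.hpp"

#if CK_FP8_TYPE_OCP
static_assert(ck::is_same<ck::f8x4_t, ck::f8x4_ocp_t>::value, "f8x4_t binds to OCP");
#elif CK_FP8_TYPE_FNUZ
static_assert(ck::is_same<ck::f8x4_t, ck::f8x4_fnuz_t>::value, "f8x4_t binds to FNUZ");
#endif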
...
...
@@ -1702,7 +1918,7 @@ struct NumericLimits<int4_t>
#endif // CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
template <>
-struct NumericLimits<f8_t>
+struct NumericLimits<f8_fnuz_t>
{
    // negative zero nan mode with exp bias = 8
    static constexpr uint8_t binary_min = 0x08; // 0b00001000
...
...
@@ -1715,17 +1931,17 @@ struct NumericLimits<f8_t>
// static constexpr uint8_t binary_lowest = 0xF7; // 0b11110111
// static constexpr uint8_t binary_qnan = 0x79; // any sign, exp=1111, mant!=0
-    __host__ __device__ static constexpr f8_t Min() { return f8_t(binary_min); }
+    __host__ __device__ static constexpr f8_fnuz_t Min() { return f8_fnuz_t(binary_min); }

-    __host__ __device__ static constexpr f8_t Max() { return f8_t(binary_max); }
+    __host__ __device__ static constexpr f8_fnuz_t Max() { return f8_fnuz_t(binary_max); }

-    __host__ __device__ static constexpr f8_t Lowest() { return f8_t(binary_lowest); }
+    __host__ __device__ static constexpr f8_fnuz_t Lowest() { return f8_fnuz_t(binary_lowest); }

-    __host__ __device__ static constexpr f8_t QuietNaN() { return f8_t(binary_qnan); }
+    __host__ __device__ static constexpr f8_fnuz_t QuietNaN() { return f8_fnuz_t(binary_qnan); }
};

template <>
-struct NumericLimits<bf8_t>
+struct NumericLimits<bf8_fnuz_t>
{
    // negative zero nan mode with exp bias = 16
    static constexpr uint8_t binary_min = 0x04; // 0b00000100
...
...
@@ -1738,13 +1954,59 @@ struct NumericLimits<bf8_t>
// static constexpr uint8_t binary_lowest = 0xFB; // 0b11111011
// static constexpr uint8_t binary_qnan = 0x79; // any sign, exp=1111, mant!=0

-    __host__ __device__ static constexpr bf8_t Min() { return bf8_t(binary_min); }
+    __host__ __device__ static constexpr bf8_fnuz_t Min() { return bf8_fnuz_t(binary_min); }

-    __host__ __device__ static constexpr bf8_t Max() { return bf8_t(binary_max); }
+    __host__ __device__ static constexpr bf8_fnuz_t Max() { return bf8_fnuz_t(binary_max); }

-    __host__ __device__ static constexpr bf8_t Lowest() { return bf8_t(binary_lowest); }
+    __host__ __device__ static constexpr bf8_fnuz_t Lowest() { return bf8_fnuz_t(binary_lowest); }

-    __host__ __device__ static constexpr bf8_t QuietNaN() { return bf8_t(binary_qnan); }
+    __host__ __device__ static constexpr bf8_fnuz_t QuietNaN() { return bf8_fnuz_t(binary_qnan); }
};

template <>
struct NumericLimits<f8_ocp_t>
{
    static constexpr uint8_t binary_min    = 0x08; // 0b00001000 = 2^-6
    static constexpr uint8_t binary_max    = 0x7E; // 0b01111110 = 448
    static constexpr uint8_t binary_lowest = 0xFE; // 0b11111110 = -448
    static constexpr uint8_t binary_qnan   = 0x7F; // 0b01111111

    __host__ __device__ static constexpr f8_ocp_t Min() { return bit_cast<f8_ocp_t>(binary_min); }

    __host__ __device__ static constexpr f8_ocp_t Max() { return bit_cast<f8_ocp_t>(binary_max); }

    __host__ __device__ static constexpr f8_ocp_t Lowest()
    {
        return bit_cast<f8_ocp_t>(binary_lowest);
    }

    __host__ __device__ static constexpr f8_ocp_t QuietNaN()
    {
        return bit_cast<f8_ocp_t>(binary_qnan);
    }
};

template <>
struct NumericLimits<bf8_ocp_t>
{
    static constexpr uint8_t binary_min    = 0x04; // 0b00000100 = 2^-14
    static constexpr uint8_t binary_max    = 0x7B; // 0b01111011 = 57344
    static constexpr uint8_t binary_lowest = 0xFB; // 0b11111011 = -57344
    static constexpr uint8_t binary_qnan   = 0x7D; // 0b01111101

    __host__ __device__ static constexpr bf8_ocp_t Min() { return bit_cast<bf8_ocp_t>(binary_min); }

    __host__ __device__ static constexpr bf8_ocp_t Max() { return bit_cast<bf8_ocp_t>(binary_max); }

    __host__ __device__ static constexpr bf8_ocp_t Lowest()
    {
        return bit_cast<bf8_ocp_t>(binary_lowest);
    }

    __host__ __device__ static constexpr bf8_ocp_t QuietNaN()
    {
        return bit_cast<bf8_ocp_t>(binary_qnan);
    }
};
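A hand-rolled host-side decode of the e4m3 extremes quoted above; this is an illustration of the bit layout only, not the library's conversion path, and the function name is an assumption:

#include <cmath>
#include <cstdint>

static float decode_e4m3(uint8_t bits)
{
    const int sign = (bits >> 7) & 0x1;
    const int exp  = (bits >> 3) & 0xF;
    const int mant = bits & 0x7;
    const int bias = 7;
    // Subnormals (exp == 0) scale the fraction by 2^(1 - bias); normals carry the hidden bit.
    const float mag = (exp == 0) ? (mant / 8.0f) * std::ldexp(1.0f, 1 - bias)
                                 : (1.0f + mant / 8.0f) * std::ldexp(1.0f, exp - bias);
    return sign ? -mag : mag;
}

// decode_e4m3(0x7E) == 448.0f and decode_e4m3(0x08) == 0.015625f (2^-6),
// matching binary_max and binary_min in NumericLimits<f8_ocp_t> above.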
template <typename T>
...
...
@@ -1787,7 +2049,7 @@ struct NumericUtils<half_t>
};
template <>
-struct NumericUtils<f8_t>
+struct NumericUtils<f8_fnuz_t>
{
    static constexpr int exp  = 4;
    static constexpr int mant = 3;
...
...
@@ -1796,13 +2058,28 @@ struct NumericUtils<f8_t>
};
template <>
-struct NumericUtils<bf8_t>
+struct NumericUtils<bf8_fnuz_t>
{
    static constexpr int exp  = 5;
    static constexpr int mant = 2;
    static constexpr int bias = 16; // negative zero nan mode
    // static constexpr int bias = 15; // ieee mode
};

template <>
struct NumericUtils<f8_ocp_t>
{
    static constexpr int exp  = 4;
    static constexpr int mant = 3;
    static constexpr int bias = 7;
};

template <>
struct NumericUtils<bf8_ocp_t>
{
    static constexpr int exp  = 5;
    static constexpr int mant = 2;
    static constexpr int bias = 15;
};
template <>
struct NumericUtils<bhalf_t>
...
...
include/ck/utility/math_v2.hpp
View file @ 8d2f2f8c
...
...
@@ -80,7 +80,7 @@ static inline __host__ bool isnan(half_t x)
    return (xx & 0x7FFF) > 0x7C00;
};

-static inline __host__ bool isnan(f8_t x) { return (x & 0x80); };
+static inline __host__ bool isnan(f8_t x) { return ck::fp8_is_nan(x); };
#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
static inline __host__ bool isnan(int4_t x)
...
...
@@ -531,7 +531,7 @@ static inline __device__ bool isnan(half_t x)
    return (xx & 0x7FFF) > 0x7C00;
};

-static inline __device__ bool isnan(f8_t x) { return (x & 0x80); };
+static inline __device__ bool isnan(f8_t x) { return ck::fp8_is_nan(x); };
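A rough sketch of the two fp8 NaN conventions that the new ck::fp8_is_nan call distinguishes; this is an illustration under stated assumptions, not the body of the library function, and the helper names are hypothetical:

#include <cstdint>

// FNUZ reserves the single bit pattern 0x80 (the "negative zero" slot) as NaN.
static bool fnuz_f8_is_nan(uint8_t bits) { return bits == 0x80; }

// OCP e4m3 has no infinities; NaN is exponent == 0b1111 with mantissa == 0b111.
static bool ocp_e4m3_is_nan(uint8_t bits)
{
    return ((bits >> 3) & 0xF) == 0xF && (bits & 0x7) == 0x7;
}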
static inline __device__ half_t sqrt(half_t x)
{
...
...
include/ck/utility/random_gen.hpp
View file @ 8d2f2f8c
// SPDX-License-Identifier: MIT
-// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.
#pragma once
#include "ck/ck.hpp"
namespace ck {
// Pseudo random number generator
...
...
@@ -23,7 +25,7 @@ __host__ __device__ uint32_t prand_generator(index_t id, T val, uint32_t seed =
}
// version for fp16
-template <typename T, uint32_t seed_t, std::enable_if_t<std::is_same<half_t, T>{}, bool> = false>
+template <typename T, uint32_t seed_t, std::enable_if_t<std::is_same<_Float16, T>{}, bool> = false>
__host__ __device__ uint32_t prand_generator(index_t id, T val, uint32_t seed = seed_t)
{
    uint16_t x = *(reinterpret_cast<uint16_t*>(&val));
...
...
@@ -38,9 +40,10 @@ __host__ __device__ uint32_t prand_generator(index_t id, T val, uint32_t seed =
}
// return 0 if data is not fp16 or fp32
-template <typename T,
-          uint32_t seed_t,
-          std::enable_if_t<!(std::is_same<float, T>{} || std::is_same<half_t, T>{}), bool> = false>
+template <typename T,
+          uint32_t seed_t,
+          std::enable_if_t<!(std::is_same<float, T>{} || std::is_same<_Float16, T>{}), bool> = false>
__host__ __device__ uint32_t prand_generator(int id, T val, uint32_t seed = seed_t)
{
    std::ignore = id;
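A minimal usage sketch of the generator after this change; the seed constant and the wrapper function are assumptions, not part of the diff:

#include "ck/utility/data_type.hpp"
#include "ck/utility/random_gen.hpp"

__host__ __device__ uint32_t rand_bits_for(ck::index_t i, ck::half_t v)
{
    // half_t is an alias of _Float16, so the fp16 overload above is still the
    // one selected after the enable_if constraint switched to _Float16.
    return ck::prand_generator<ck::half_t, 11939u>(i, v);
}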
...
...