gaoqiong/composable_kernel_ROCM
Commit 08e057ee, authored Jan 08, 2025 by Po Yen, Chen

Add dependency headers

parent 5f053ccf
Showing 10 changed files with 2543 additions and 0 deletions (+2543, -0):

example/ck_tile/18_paged_attention/include/attention/attention_dtypes.h (+7)
example/ck_tile/18_paged_attention/include/attention/attention_generic.cuh (+65)
example/ck_tile/18_paged_attention/include/attention/dtype_bfloat16.cuh (+463)
example/ck_tile/18_paged_attention/include/attention/dtype_float16.cuh (+504)
example/ck_tile/18_paged_attention/include/attention/dtype_float32.cuh (+251)
example/ck_tile/18_paged_attention/include/attention/dtype_fp8.cuh (+41)
example/ck_tile/18_paged_attention/include/cuda_compat.h (+49)
example/ck_tile/18_paged_attention/include/quantization/fp8/amd/hip_float8.h (+137)
example/ck_tile/18_paged_attention/include/quantization/fp8/amd/hip_float8_impl.h (+316)
example/ck_tile/18_paged_attention/include/quantization/fp8/amd/quant_utils.cuh (+710)
example/ck_tile/18_paged_attention/include/attention/attention_dtypes.h
0 → 100644
#pragma once
#include "attention_generic.cuh"
#include "dtype_float16.cuh"
#include "dtype_float32.cuh"
#include "dtype_bfloat16.cuh"
#include "dtype_fp8.cuh"
example/ck_tile/18_paged_attention/include/attention/attention_generic.cuh
0 → 100644
/*
* Adapted from
* https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h
* Copyright (c) 2023, The vLLM team.
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <stdint.h>
namespace vllm {

// A vector type to store Q, K, V elements.
template <typename T, int VEC_SIZE>
struct Vec
{
};

// A vector type to store FP32 accumulators.
template <typename T>
struct FloatVec
{
};

// Template vector operations.
template <typename Acc, typename A, typename B>
inline __device__ Acc mul(A a, B b);

template <typename T>
inline __device__ float sum(T v);

template <typename T>
inline __device__ float dot(T a, T b)
{
    return sum(mul<T, T, T>(a, b));
}

template <typename A, typename T>
inline __device__ float dot(T a, T b)
{
    return sum(mul<A, T, T>(a, b));
}

template <typename T>
inline __device__ void zero(T& dst)
{
    constexpr int WORDS = sizeof(T) / 4;
    union
    {
        T raw;
        uint32_t words[WORDS];
    } tmp;

#pragma unroll
    for(int ii = 0; ii < WORDS; ++ii)
    {
        tmp.words[ii] = 0u;
    }
    dst = tmp.raw;
}

} // namespace vllm
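
The two primary templates above (Vec, FloatVec) are specialized per element type by the dtype_*.cuh headers in this commit. A minimal sketch of the intended usage (qk_dot_example and the vector-load pattern are illustrative assumptions, not part of the commit):

    // Load VEC_SIZE elements of T as one hardware-friendly vector and take a
    // dot product; dot() multiplies element-wise via mul<> and reduces via sum().
    template <typename T, int VEC_SIZE>
    inline __device__ float qk_dot_example(const T* q_ptr, const T* k_ptr)
    {
        using V = typename Vec<T, VEC_SIZE>::Type;
        V q = *reinterpret_cast<const V*>(q_ptr);
        V k = *reinterpret_cast<const V*>(k_ptr);
        return dot(q, k);
    }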
example/ck_tile/18_paged_attention/include/attention/dtype_bfloat16.cuh
0 → 100644
/*
* Adapted from
* https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention/decoder_masked_multihead_attention_template.hpp
* and
* https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h
* Copyright (c) 2023, The vLLM team.
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "attention_generic.cuh"
#include "dtype_float32.cuh"
#ifndef USE_ROCM
#include <cuda_bf16.h>
#include <cuda_fp16.h>
#else
#include <hip/hip_bf16.h>
#include <hip/hip_fp16.h>
typedef __hip_bfloat162 __nv_bfloat162;
typedef __hip_bfloat16 __nv_bfloat16;
#endif
#include <stdint.h>
namespace vllm {

// Define custom BF16 vector data types.
struct bf16_4_t
{
    __nv_bfloat162 x;
    __nv_bfloat162 y;
};

struct bf16_8_t
{
    __nv_bfloat162 x;
    __nv_bfloat162 y;
    __nv_bfloat162 z;
    __nv_bfloat162 w;
};

// BF16 vector types for Q, K, V.
template <>
struct Vec<__nv_bfloat16, 1>
{
    using Type = __nv_bfloat16;
};
template <>
struct Vec<__nv_bfloat16, 2>
{
    using Type = __nv_bfloat162;
};
template <>
struct Vec<__nv_bfloat16, 4>
{
    using Type = bf16_4_t;
};
template <>
struct Vec<__nv_bfloat16, 8>
{
    using Type = bf16_8_t;
};

// FP32 accumulator vector types corresponding to Vec.
template <>
struct FloatVec<__nv_bfloat16>
{
    using Type = float;
};
template <>
struct FloatVec<__nv_bfloat162>
{
    using Type = float2;
};
template <>
struct FloatVec<bf16_4_t>
{
    using Type = Float4_;
};
template <>
struct FloatVec<bf16_8_t>
{
    using Type = Float8_;
};

// Utility functions for type conversions.
inline __device__ float2 bf1622float2(const __nv_bfloat162 val)
{
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    assert(false);
#else
    return __bfloat1622float2(val);
#endif
    __builtin_unreachable(); // Suppress missing return statement warning
}

inline __device__ __nv_bfloat162 bf162bf162(const __nv_bfloat16 val)
{
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    assert(false);
#else
    return __bfloat162bfloat162(val);
#endif
    __builtin_unreachable(); // Suppress missing return statement warning
}

// Vector addition.
inline __device__ __nv_bfloat16 add(__nv_bfloat16 a, __nv_bfloat16 b)
{
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    assert(false);
#else
#ifndef USE_ROCM
    return a + b;
#else
    return __hadd(a, b);
#endif
#endif
    __builtin_unreachable(); // Suppress missing return statement warning
}

inline __device__ __nv_bfloat162 add(__nv_bfloat162 a, __nv_bfloat162 b)
{
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    assert(false);
#else
    return __hadd2(a, b);
#endif
    __builtin_unreachable(); // Suppress missing return statement warning
}

inline __device__ bf16_4_t add(bf16_4_t a, bf16_4_t b)
{
    bf16_4_t c;
    c.x = add(a.x, b.x);
    c.y = add(a.y, b.y);
    return c;
}

inline __device__ bf16_8_t add(bf16_8_t a, bf16_8_t b)
{
    bf16_8_t c;
    c.x = add(a.x, b.x);
    c.y = add(a.y, b.y);
    c.z = add(a.z, b.z);
    c.w = add(a.w, b.w);
    return c;
}

inline __device__ float2 add(__nv_bfloat162 a, float2 fb)
{
    float2 fa = bf1622float2(a);
    return add(fa, fb);
}

inline __device__ Float4_ add(bf16_4_t a, Float4_ fb)
{
    Float4_ fc;
    fc.x = add(a.x, fb.x);
    fc.y = add(a.y, fb.y);
    return fc;
}

inline __device__ Float8_ add(bf16_8_t a, Float8_ fb)
{
    Float8_ fc;
    fc.x = add(a.x, fb.x);
    fc.y = add(a.y, fb.y);
    fc.z = add(a.z, fb.z);
    fc.w = add(a.w, fb.w);
    return fc;
}

// Vector multiplication.
template <>
inline __device__ __nv_bfloat16 mul(__nv_bfloat16 a, __nv_bfloat16 b)
{
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    assert(false);
#else
    return __hmul(a, b);
#endif
    __builtin_unreachable(); // Suppress missing return statement warning
}

template <>
inline __device__ __nv_bfloat162 mul(__nv_bfloat162 a, __nv_bfloat162 b)
{
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    assert(false);
#else
    return __hmul2(a, b);
#endif
    __builtin_unreachable(); // Suppress missing return statement warning
}

template <>
inline __device__ __nv_bfloat162 mul(__nv_bfloat16 a, __nv_bfloat162 b)
{
    return mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(bf162bf162(a), b);
}

template <>
inline __device__ bf16_4_t mul(bf16_4_t a, bf16_4_t b)
{
    bf16_4_t c;
    c.x = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(a.x, b.x);
    c.y = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(a.y, b.y);
    return c;
}

template <>
inline __device__ bf16_4_t mul(__nv_bfloat16 a, bf16_4_t b)
{
    __nv_bfloat162 s = bf162bf162(a);
    bf16_4_t c;
    c.x = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(s, b.x);
    c.y = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(s, b.y);
    return c;
}

template <>
inline __device__ bf16_8_t mul(bf16_8_t a, bf16_8_t b)
{
    bf16_8_t c;
    c.x = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(a.x, b.x);
    c.y = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(a.y, b.y);
    c.z = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(a.z, b.z);
    c.w = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(a.w, b.w);
    return c;
}

template <>
inline __device__ bf16_8_t mul(__nv_bfloat16 a, bf16_8_t b)
{
    __nv_bfloat162 s = bf162bf162(a);
    bf16_8_t c;
    c.x = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(s, b.x);
    c.y = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(s, b.y);
    c.z = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(s, b.z);
    c.w = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(s, b.w);
    return c;
}

template <>
inline __device__ float mul(__nv_bfloat16 a, __nv_bfloat16 b)
{
    float fa = __bfloat162float(a);
    float fb = __bfloat162float(b);
    return fa * fb;
}

template <>
inline __device__ float2 mul(__nv_bfloat162 a, __nv_bfloat162 b)
{
    float2 fa = bf1622float2(a);
    float2 fb = bf1622float2(b);
    return mul<float2, float2, float2>(fa, fb);
}

template <>
inline __device__ float2 mul(__nv_bfloat16 a, __nv_bfloat162 b)
{
    return mul<float2, __nv_bfloat162, __nv_bfloat162>(bf162bf162(a), b);
}

template <>
inline __device__ Float4_ mul(bf16_4_t a, bf16_4_t b)
{
    Float4_ fc;
    fc.x = mul<float2, __nv_bfloat162, __nv_bfloat162>(a.x, b.x);
    fc.y = mul<float2, __nv_bfloat162, __nv_bfloat162>(a.y, b.y);
    return fc;
}

template <>
inline __device__ Float4_ mul(__nv_bfloat16 a, bf16_4_t b)
{
    __nv_bfloat162 s = bf162bf162(a);
    Float4_ fc;
    fc.x = mul<float2, __nv_bfloat162, __nv_bfloat162>(s, b.x);
    fc.y = mul<float2, __nv_bfloat162, __nv_bfloat162>(s, b.y);
    return fc;
}

template <>
inline __device__ Float8_ mul(bf16_8_t a, bf16_8_t b)
{
    Float8_ fc;
    fc.x = mul<float2, __nv_bfloat162, __nv_bfloat162>(a.x, b.x);
    fc.y = mul<float2, __nv_bfloat162, __nv_bfloat162>(a.y, b.y);
    fc.z = mul<float2, __nv_bfloat162, __nv_bfloat162>(a.z, b.z);
    fc.w = mul<float2, __nv_bfloat162, __nv_bfloat162>(a.w, b.w);
    return fc;
}

template <>
inline __device__ Float8_ mul(__nv_bfloat16 a, bf16_8_t b)
{
    __nv_bfloat162 s = bf162bf162(a);
    Float8_ fc;
    fc.x = mul<float2, __nv_bfloat162, __nv_bfloat162>(s, b.x);
    fc.y = mul<float2, __nv_bfloat162, __nv_bfloat162>(s, b.y);
    fc.z = mul<float2, __nv_bfloat162, __nv_bfloat162>(s, b.z);
    fc.w = mul<float2, __nv_bfloat162, __nv_bfloat162>(s, b.w);
    return fc;
}

// Vector fused multiply-add.
inline __device__ __nv_bfloat162 fma(__nv_bfloat162 a, __nv_bfloat162 b, __nv_bfloat162 c)
{
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    assert(false);
#else
    return __hfma2(a, b, c);
#endif
    __builtin_unreachable(); // Suppress missing return statement warning
}

inline __device__ __nv_bfloat162 fma(__nv_bfloat16 a, __nv_bfloat162 b, __nv_bfloat162 c)
{
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    assert(false);
#else
    return __hfma2(bf162bf162(a), b, c);
#endif
    __builtin_unreachable(); // Suppress missing return statement warning
}

inline __device__ bf16_4_t fma(bf16_4_t a, bf16_4_t b, bf16_4_t c)
{
    bf16_4_t d;
    d.x = fma(a.x, b.x, c.x);
    d.y = fma(a.y, b.y, c.y);
    return d;
}

inline __device__ bf16_4_t fma(__nv_bfloat16 a, bf16_4_t b, bf16_4_t c)
{
    __nv_bfloat162 s = bf162bf162(a);
    bf16_4_t d;
    d.x = fma(s, b.x, c.x);
    d.y = fma(s, b.y, c.y);
    return d;
}

inline __device__ bf16_8_t fma(bf16_8_t a, bf16_8_t b, bf16_8_t c)
{
    bf16_8_t d;
    d.x = fma(a.x, b.x, c.x);
    d.y = fma(a.y, b.y, c.y);
    d.z = fma(a.z, b.z, c.z);
    d.w = fma(a.w, b.w, c.w);
    return d;
}

inline __device__ bf16_8_t fma(__nv_bfloat16 a, bf16_8_t b, bf16_8_t c)
{
    __nv_bfloat162 s = bf162bf162(a);
    bf16_8_t d;
    d.x = fma(s, b.x, c.x);
    d.y = fma(s, b.y, c.y);
    d.z = fma(s, b.z, c.z);
    d.w = fma(s, b.w, c.w);
    return d;
}

inline __device__ float fma(__nv_bfloat16 a, __nv_bfloat16 b, float fc)
{
    return __bfloat162float(a) * __bfloat162float(b) + fc;
}

inline __device__ float2 fma(__nv_bfloat162 a, __nv_bfloat162 b, float2 fc)
{
    float2 fa = bf1622float2(a);
    float2 fb = bf1622float2(b);
    return fma(fa, fb, fc);
}

inline __device__ float2 fma(__nv_bfloat16 a, __nv_bfloat162 b, float2 fc)
{
    return fma(bf162bf162(a), b, fc);
}

inline __device__ Float4_ fma(bf16_4_t a, bf16_4_t b, Float4_ fc)
{
    Float4_ fd;
    fd.x = fma(a.x, b.x, fc.x);
    fd.y = fma(a.y, b.y, fc.y);
    return fd;
}

inline __device__ Float4_ fma(__nv_bfloat16 a, bf16_4_t b, Float4_ fc)
{
    __nv_bfloat162 s = bf162bf162(a);
    Float4_ fd;
    fd.x = fma(s, b.x, fc.x);
    fd.y = fma(s, b.y, fc.y);
    return fd;
}

inline __device__ Float8_ fma(bf16_8_t a, bf16_8_t b, Float8_ fc)
{
    Float8_ fd;
    fd.x = fma(a.x, b.x, fc.x);
    fd.y = fma(a.y, b.y, fc.y);
    fd.z = fma(a.z, b.z, fc.z);
    fd.w = fma(a.w, b.w, fc.w);
    return fd;
}

inline __device__ Float8_ fma(__nv_bfloat16 a, bf16_8_t b, Float8_ fc)
{
    __nv_bfloat162 s = bf162bf162(a);
    Float8_ fd;
    fd.x = fma(s, b.x, fc.x);
    fd.y = fma(s, b.y, fc.y);
    fd.z = fma(s, b.z, fc.z);
    fd.w = fma(s, b.w, fc.w);
    return fd;
}

// Vector sum.
template <>
inline __device__ float sum(__nv_bfloat16 v)
{
    return __bfloat162float(v);
}

template <>
inline __device__ float sum(__nv_bfloat162 v)
{
    float2 vf = bf1622float2(v);
    return vf.x + vf.y;
}

template <>
inline __device__ float sum(bf16_4_t v)
{
    return sum(v.x) + sum(v.y);
}

template <>
inline __device__ float sum(bf16_8_t v)
{
    return sum(v.x) + sum(v.y) + sum(v.z) + sum(v.w);
}

// From float32 to bfloat16.
inline __device__ void from_float(__nv_bfloat16& dst, float src)
{
    dst = __float2bfloat16(src);
}

inline __device__ void from_float(__nv_bfloat162& dst, float2 src)
{
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    assert(false);
#else
    dst = __float22bfloat162_rn(src);
#endif
}

inline __device__ void from_float(bf16_4_t& dst, Float4_ src)
{
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    assert(false);
#else
    dst.x = __float22bfloat162_rn(src.x);
    dst.y = __float22bfloat162_rn(src.y);
#endif
}

inline __device__ void from_float(bf16_8_t& dst, Float8_ src)
{
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    assert(false);
#else
    dst.x = __float22bfloat162_rn(src.x);
    dst.y = __float22bfloat162_rn(src.y);
    dst.z = __float22bfloat162_rn(src.z);
    dst.w = __float22bfloat162_rn(src.w);
#endif
}

// From bfloat16 to float32.
inline __device__ float to_float(__nv_bfloat16 u)
{
    return __bfloat162float(u);
}

// Zero-out a variable.
inline __device__ void zero(__nv_bfloat16& dst)
{
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    assert(false);
#else
    // Same as CUDART_ZERO_BF16 introduced in CUDA 12.2.
    dst = __ushort_as_bfloat16((unsigned short)0x0000U);
#endif
}

} // namespace vllm
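
A minimal sketch of how these overloads compose (bf16_fma_example is a hypothetical helper; the pattern of keeping accumulators in FP32 follows from the FloatVec mapping above):

    // Multiply two bf16x8 registers and accumulate into an FP32 Float8_;
    // fma(bf16_8_t, bf16_8_t, Float8_) widens through bf1622float2 per lane pair.
    inline __device__ Float8_ bf16_fma_example(bf16_8_t a, bf16_8_t b, Float8_ acc)
    {
        return fma(a, b, acc);
    }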
example/ck_tile/18_paged_attention/include/attention/dtype_float16.cuh
0 → 100644
/*
* Adapted from
* https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention/decoder_masked_multihead_attention_template.hpp
* and
* https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h
* Copyright (c) 2023, The vLLM team.
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "attention_generic.cuh"
#include "dtype_float32.cuh"
#ifdef USE_ROCM
#include <hip/hip_fp16.h>
#endif
#include <stdint.h>
namespace vllm {

// FP16 vector types for Q, K, V.
template <>
struct Vec<uint16_t, 1>
{
    using Type = uint16_t;
};
template <>
struct Vec<uint16_t, 2>
{
    using Type = uint32_t;
};
template <>
struct Vec<uint16_t, 4>
{
    using Type = uint2;
};
template <>
struct Vec<uint16_t, 8>
{
    using Type = uint4;
};

// FP32 accumulator vector types corresponding to Vec.
template <>
struct FloatVec<uint16_t>
{
    using Type = float;
};
template <>
struct FloatVec<uint32_t>
{
    using Type = float2;
};
template <>
struct FloatVec<uint2>
{
    using Type = Float4_;
};
template <>
struct FloatVec<uint4>
{
    using Type = Float8_;
};

// Utility functions for type conversions.
inline __device__ uint32_t h0_h0(uint16_t a)
{
#ifndef USE_ROCM
    uint32_t b;
    asm volatile("mov.b32 %0, {%1, %1};" : "=r"(b) : "h"(a));
    return b;
#else
    union
    {
        uint32_t u32;
        uint16_t u16[2];
    } tmp;
    tmp.u16[0] = a;
    tmp.u16[1] = a;
    return tmp.u32;
#endif
}

inline __device__ float half_to_float(uint16_t h)
{
    float f;
#ifndef USE_ROCM
    asm volatile("cvt.f32.f16 %0, %1;\n" : "=f"(f) : "h"(h));
#else
    asm volatile("v_cvt_f32_f16 %0, %1;" : "=v"(f) : "v"(h));
#endif
    return f;
}

inline __device__ float2 half2_to_float2(uint32_t v)
{
#ifndef USE_ROCM
    uint16_t lo, hi;
    asm volatile("mov.b32 {%0, %1}, %2;\n" : "=h"(lo), "=h"(hi) : "r"(v));
    return make_float2(half_to_float(lo), half_to_float(hi));
#else
    union
    {
        uint32_t u32;
        uint16_t u16[2];
    } tmp;
    tmp.u32 = v;
    float2 ret;
    ret.x = half_to_float(tmp.u16[0]);
    ret.y = half_to_float(tmp.u16[1]);
    return ret;
#endif
}

inline __device__ uint16_t float_to_half(float f)
{
    union
    {
        uint32_t u32;
        uint16_t u16[2];
    } tmp;
#ifndef USE_ROCM
    asm volatile("cvt.rn.f16.f32 %0, %1;\n" : "=h"(tmp.u16[0]) : "f"(f));
#else
    asm volatile("v_cvt_f16_f32 %0, %1;\n" : "=v"(tmp.u32) : "v"(f));
#endif
    return tmp.u16[0];
}

inline __device__ uint32_t float2_to_half2(float2 f)
{
    union
    {
        uint32_t u32;
        uint16_t u16[2];
    } tmp;
#ifndef USE_ROCM
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
    asm volatile("cvt.rn.f16x2.f32 %0, %1, %2;\n" : "=r"(tmp.u32) : "f"(f.y), "f"(f.x));
#else
    asm volatile("cvt.rn.f16.f32 %0, %1;\n" : "=h"(tmp.u16[0]) : "f"(f.x));
    asm volatile("cvt.rn.f16.f32 %0, %1;\n" : "=h"(tmp.u16[1]) : "f"(f.y));
#endif
#else
    tmp.u16[0] = float_to_half(f.x);
    tmp.u16[1] = float_to_half(f.y);
#endif
    return tmp.u32;
}

// Vector addition.
inline __device__ uint16_t add(uint16_t a, uint16_t b)
{
    uint16_t c;
#ifndef USE_ROCM
    asm volatile("add.f16 %0, %1, %2;\n" : "=h"(c) : "h"(a), "h"(b));
#else
    asm volatile("v_add_f16 %0, %1, %2;\n" : "=v"(c) : "v"(a), "v"(b));
#endif
    return c;
}

inline __device__ uint32_t add(uint32_t a, uint32_t b)
{
    uint32_t c;
#ifndef USE_ROCM
    asm volatile("add.f16x2 %0, %1, %2;\n" : "=r"(c) : "r"(a), "r"(b));
#else
    asm volatile("v_pk_add_f16 %0, %1, %2;\n" : "=v"(c) : "v"(a), "v"(b));
#endif
    return c;
}

inline __device__ uint2 add(uint2 a, uint2 b)
{
    uint2 c;
    c.x = add(a.x, b.x);
    c.y = add(a.y, b.y);
    return c;
}

inline __device__ uint4 add(uint4 a, uint4 b)
{
    uint4 c;
    c.x = add(a.x, b.x);
    c.y = add(a.y, b.y);
    c.z = add(a.z, b.z);
    c.w = add(a.w, b.w);
    return c;
}

inline __device__ float2 add(uint32_t a, float2 fb)
{
    float2 fa = half2_to_float2(a);
    return add(fa, fb);
}

inline __device__ Float4_ add(uint2 a, Float4_ fb)
{
    Float4_ fc;
    fc.x = add(a.x, fb.x);
    fc.y = add(a.y, fb.y);
    return fc;
}

inline __device__ Float8_ add(uint4 a, Float8_ fb)
{
    Float8_ fc;
    fc.x = add(a.x, fb.x);
    fc.y = add(a.y, fb.y);
    fc.z = add(a.z, fb.z);
    fc.w = add(a.w, fb.w);
    return fc;
}

// Vector multiplication.
template <>
inline __device__ uint16_t mul(uint16_t a, uint16_t b)
{
    uint16_t c;
#ifndef USE_ROCM
    asm volatile("mul.f16 %0, %1, %2;\n" : "=h"(c) : "h"(a), "h"(b));
#else
    asm volatile("v_mul_f16 %0, %1, %2;\n" : "=v"(c) : "v"(a), "v"(b));
#endif
    return c;
}

template <>
inline __device__ uint32_t mul(uint32_t a, uint32_t b)
{
    uint32_t c;
#ifndef USE_ROCM
    asm volatile("mul.f16x2 %0, %1, %2;\n" : "=r"(c) : "r"(a), "r"(b));
#else
    asm volatile("v_pk_mul_f16 %0, %1, %2;\n" : "=v"(c) : "v"(a), "v"(b));
#endif
    return c;
}

template <>
inline __device__ uint32_t mul(uint16_t a, uint32_t b)
{
    return mul<uint32_t, uint32_t, uint32_t>(h0_h0(a), b);
}

template <>
inline __device__ uint2 mul(uint2 a, uint2 b)
{
    uint2 c;
    c.x = mul<uint32_t, uint32_t, uint32_t>(a.x, b.x);
    c.y = mul<uint32_t, uint32_t, uint32_t>(a.y, b.y);
    return c;
}

template <>
inline __device__ uint2 mul(uint16_t a, uint2 b)
{
    uint32_t s = h0_h0(a);
    uint2 c;
    c.x = mul<uint32_t, uint32_t, uint32_t>(s, b.x);
    c.y = mul<uint32_t, uint32_t, uint32_t>(s, b.y);
    return c;
}

template <>
inline __device__ uint4 mul(uint4 a, uint4 b)
{
    uint4 c;
    c.x = mul<uint32_t, uint32_t, uint32_t>(a.x, b.x);
    c.y = mul<uint32_t, uint32_t, uint32_t>(a.y, b.y);
    c.z = mul<uint32_t, uint32_t, uint32_t>(a.z, b.z);
    c.w = mul<uint32_t, uint32_t, uint32_t>(a.w, b.w);
    return c;
}

template <>
inline __device__ uint4 mul(uint16_t a, uint4 b)
{
    uint32_t s = h0_h0(a);
    uint4 c;
    c.x = mul<uint32_t, uint32_t, uint32_t>(s, b.x);
    c.y = mul<uint32_t, uint32_t, uint32_t>(s, b.y);
    c.z = mul<uint32_t, uint32_t, uint32_t>(s, b.z);
    c.w = mul<uint32_t, uint32_t, uint32_t>(s, b.w);
    return c;
}

template <>
inline __device__ float mul(uint16_t a, uint16_t b)
{
    float fa = half_to_float(a);
    float fb = half_to_float(b);
    return fa * fb;
}

template <>
inline __device__ float2 mul(uint32_t a, uint32_t b)
{
    float2 fa = half2_to_float2(a);
    float2 fb = half2_to_float2(b);
    return mul<float2, float2, float2>(fa, fb);
}

template <>
inline __device__ float2 mul(uint16_t a, uint32_t b)
{
    return mul<float2, uint32_t, uint32_t>(h0_h0(a), b);
}

template <>
inline __device__ Float4_ mul(uint2 a, uint2 b)
{
    Float4_ fc;
    fc.x = mul<float2, uint32_t, uint32_t>(a.x, b.x);
    fc.y = mul<float2, uint32_t, uint32_t>(a.y, b.y);
    return fc;
}

template <>
inline __device__ Float4_ mul(uint16_t a, uint2 b)
{
    uint32_t s = h0_h0(a);
    Float4_ fc;
    fc.x = mul<float2, uint32_t, uint32_t>(s, b.x);
    fc.y = mul<float2, uint32_t, uint32_t>(s, b.y);
    return fc;
}

template <>
inline __device__ Float8_ mul(uint4 a, uint4 b)
{
    Float8_ fc;
    fc.x = mul<float2, uint32_t, uint32_t>(a.x, b.x);
    fc.y = mul<float2, uint32_t, uint32_t>(a.y, b.y);
    fc.z = mul<float2, uint32_t, uint32_t>(a.z, b.z);
    fc.w = mul<float2, uint32_t, uint32_t>(a.w, b.w);
    return fc;
}

template <>
inline __device__ Float8_ mul(uint16_t a, uint4 b)
{
    uint32_t s = h0_h0(a);
    Float8_ fc;
    fc.x = mul<float2, uint32_t, uint32_t>(s, b.x);
    fc.y = mul<float2, uint32_t, uint32_t>(s, b.y);
    fc.z = mul<float2, uint32_t, uint32_t>(s, b.z);
    fc.w = mul<float2, uint32_t, uint32_t>(s, b.w);
    return fc;
}

// Vector fused multiply-add.
inline __device__ uint32_t fma(uint32_t a, uint32_t b, uint32_t c)
{
    uint32_t d;
#ifndef USE_ROCM
    asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(d) : "r"(a), "r"(b), "r"(c));
#else
    asm volatile("v_pk_fma_f16 %0, %1, %2, %3;\n" : "=v"(d) : "v"(a), "v"(b), "v"(c));
#endif
    return d;
}

inline __device__ uint32_t fma(uint16_t a, uint32_t b, uint32_t c)
{
    return fma(h0_h0(a), b, c);
}

inline __device__ uint2 fma(uint2 a, uint2 b, uint2 c)
{
    uint2 d;
    d.x = fma(a.x, b.x, c.x);
    d.y = fma(a.y, b.y, c.y);
    return d;
}

inline __device__ uint2 fma(uint16_t a, uint2 b, uint2 c)
{
    uint32_t s = h0_h0(a);
    uint2 d;
    d.x = fma(s, b.x, c.x);
    d.y = fma(s, b.y, c.y);
    return d;
}

inline __device__ uint4 fma(uint4 a, uint4 b, uint4 c)
{
    uint4 d;
    d.x = fma(a.x, b.x, c.x);
    d.y = fma(a.y, b.y, c.y);
    d.z = fma(a.z, b.z, c.z);
    d.w = fma(a.w, b.w, c.w);
    return d;
}

inline __device__ uint4 fma(uint16_t a, uint4 b, uint4 c)
{
    uint32_t s = h0_h0(a);
    uint4 d;
    d.x = fma(s, b.x, c.x);
    d.y = fma(s, b.y, c.y);
    d.z = fma(s, b.z, c.z);
    d.w = fma(s, b.w, c.w);
    return d;
}

inline __device__ float fma(uint16_t a, uint16_t b, float fc)
{
    float fa = half_to_float(a);
    float fb = half_to_float(b);
    return fa * fb + fc;
}

inline __device__ float2 fma(uint32_t a, uint32_t b, float2 fc)
{
    float2 fa = half2_to_float2(a);
    float2 fb = half2_to_float2(b);
    return fma(fa, fb, fc);
}

inline __device__ float2 fma(uint16_t a, uint32_t b, float2 fc)
{
    return fma(h0_h0(a), b, fc);
}

inline __device__ Float4_ fma(uint2 a, uint2 b, Float4_ fc)
{
    Float4_ fd;
    fd.x = fma(a.x, b.x, fc.x);
    fd.y = fma(a.y, b.y, fc.y);
    return fd;
}

inline __device__ Float4_ fma(uint16_t a, uint2 b, Float4_ fc)
{
    uint32_t s = h0_h0(a);
    Float4_ fd;
    fd.x = fma(s, b.x, fc.x);
    fd.y = fma(s, b.y, fc.y);
    return fd;
}

inline __device__ Float8_ fma(uint4 a, uint4 b, Float8_ fc)
{
    Float8_ fd;
    fd.x = fma(a.x, b.x, fc.x);
    fd.y = fma(a.y, b.y, fc.y);
    fd.z = fma(a.z, b.z, fc.z);
    fd.w = fma(a.w, b.w, fc.w);
    return fd;
}

inline __device__ Float8_ fma(uint16_t a, uint4 b, Float8_ fc)
{
    uint32_t s = h0_h0(a);
    Float8_ fd;
    fd.x = fma(s, b.x, fc.x);
    fd.y = fma(s, b.y, fc.y);
    fd.z = fma(s, b.z, fc.z);
    fd.w = fma(s, b.w, fc.w);
    return fd;
}

// Vector sum.
template <>
inline __device__ float sum(uint16_t v)
{
    return half_to_float(v);
}

template <>
inline __device__ float sum(uint32_t v)
{
    float2 tmp = half2_to_float2(v);
    return tmp.x + tmp.y;
}

template <>
inline __device__ float sum(uint2 v)
{
    uint32_t c = add(v.x, v.y);
    return sum(c);
}

template <>
inline __device__ float sum(uint4 v)
{
    uint32_t c = add(v.x, v.y);
    c = add(c, v.z);
    c = add(c, v.w);
    return sum(c);
}

// From float32 to float16.
inline __device__ void from_float(uint16_t& dst, float src)
{
    dst = float_to_half(src);
}

inline __device__ void from_float(uint32_t& dst, float2 src)
{
    dst = float2_to_half2(src);
}

inline __device__ void from_float(uint2& dst, Float4_ src)
{
    dst.x = float2_to_half2(src.x);
    dst.y = float2_to_half2(src.y);
}

inline __device__ void from_float(uint4& dst, Float8_ src)
{
    dst.x = float2_to_half2(src.x);
    dst.y = float2_to_half2(src.y);
    dst.z = float2_to_half2(src.z);
    dst.w = float2_to_half2(src.w);
}

// From float16 to float32.
inline __device__ float to_float(uint16_t u)
{
    return half_to_float(u);
}

inline __device__ float2 to_float(uint32_t u)
{
    return half2_to_float2(u);
}

inline __device__ Float4_ to_float(uint2 u)
{
    Float4_ tmp;
    tmp.x = half2_to_float2(u.x);
    tmp.y = half2_to_float2(u.y);
    return tmp;
}

inline __device__ Float8_ to_float(uint4 u)
{
    Float8_ tmp;
    tmp.x = half2_to_float2(u.x);
    tmp.y = half2_to_float2(u.y);
    tmp.z = half2_to_float2(u.z);
    tmp.w = half2_to_float2(u.w);
    return tmp;
}

// Zero-out a variable.
inline __device__ void zero(uint16_t& dst)
{
    dst = uint16_t(0);
}

} // namespace vllm
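
FP16 values travel through this header as raw uint16_t/uint32_t/uint2/uint4 words rather than __half types. A small sketch of the broadcast-and-multiply pattern used throughout (scale_half2_example is hypothetical):

    // Broadcast one half into both lanes of a packed half2 word with h0_h0(),
    // then multiply in FP32: mul<float2, uint32_t, uint32_t> converts both
    // operands through half2_to_float2() before multiplying.
    inline __device__ float2 scale_half2_example(uint16_t scale, uint32_t packed)
    {
        return mul<float2, uint32_t, uint32_t>(h0_h0(scale), packed);
    }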
example/ck_tile/18_paged_attention/include/attention/dtype_float32.cuh
0 → 100644
/*
* Adapted from
* https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention/decoder_masked_multihead_attention_template.hpp
* and
* https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h
* Copyright (c) 2023, The vLLM team.
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "attention_generic.cuh"
#include <stdint.h>
namespace vllm {

// Define custom FP32 vector data types.
struct Float4_
{
    float2 x;
    float2 y;
};

struct Float8_
{
    float2 x;
    float2 y;
    float2 z;
    float2 w;
};

// FP32 vector types for Q, K, V.
template <>
struct Vec<float, 1>
{
    using Type = float;
};
template <>
struct Vec<float, 2>
{
    using Type = float2;
};
template <>
struct Vec<float, 4>
{
    using Type = float4;
};

// FP32 accumulator vector types corresponding to Vec.
template <>
struct FloatVec<float>
{
    using Type = float;
};
template <>
struct FloatVec<float2>
{
    using Type = float2;
};
template <>
struct FloatVec<float4>
{
    using Type = float4;
};

// Vector addition.
inline __device__ float add(float a, float b)
{
    return a + b;
}

inline __device__ float2 add(float2 a, float2 b)
{
    float2 c;
    c.x = add(a.x, b.x);
    c.y = add(a.y, b.y);
    return c;
}

inline __device__ float4 add(float4 a, float4 b)
{
    float4 c;
    c.x = add(a.x, b.x);
    c.y = add(a.y, b.y);
    c.z = add(a.z, b.z);
    c.w = add(a.w, b.w);
    return c;
}

// Vector multiplication.
template <>
inline __device__ float mul<float, float>(float a, float b)
{
    return a * b;
}

template <>
inline __device__ float2 mul(float2 a, float2 b)
{
    float2 c;
    c.x = a.x * b.x;
    c.y = a.y * b.y;
    return c;
}

template <>
inline __device__ float2 mul(float a, float2 b)
{
    float2 c;
    c.x = a * b.x;
    c.y = a * b.y;
    return c;
}

template <>
inline __device__ float4 mul(float4 a, float4 b)
{
    float4 c;
    c.x = a.x * b.x;
    c.y = a.y * b.y;
    c.z = a.z * b.z;
    c.w = a.w * b.w;
    return c;
}

template <>
inline __device__ float4 mul(float a, float4 b)
{
    float4 c;
    c.x = a * b.x;
    c.y = a * b.y;
    c.z = a * b.z;
    c.w = a * b.w;
    return c;
}

// Vector fused multiply-add.
inline __device__ float fma(float a, float b, float c)
{
    return a * b + c;
}

inline __device__ float2 fma(float2 a, float2 b, float2 c)
{
    float2 d;
    d.x = fma(a.x, b.x, c.x);
    d.y = fma(a.y, b.y, c.y);
    return d;
}

inline __device__ float2 fma(float a, float2 b, float2 c)
{
    float2 d;
    d.x = fma(a, b.x, c.x);
    d.y = fma(a, b.y, c.y);
    return d;
}

inline __device__ float4 fma(float4 a, float4 b, float4 c)
{
    float4 d;
    d.x = fma(a.x, b.x, c.x);
    d.y = fma(a.y, b.y, c.y);
    d.z = fma(a.z, b.z, c.z);
    d.w = fma(a.w, b.w, c.w);
    return d;
}

inline __device__ float4 fma(float a, float4 b, float4 c)
{
    float4 d;
    d.x = fma(a, b.x, c.x);
    d.y = fma(a, b.y, c.y);
    d.z = fma(a, b.z, c.z);
    d.w = fma(a, b.w, c.w);
    return d;
}

inline __device__ Float4_ fma(float a, Float4_ b, Float4_ c)
{
    Float4_ d;
    d.x = fma(a, b.x, c.x);
    d.y = fma(a, b.y, c.y);
    return d;
}

inline __device__ Float8_ fma(float a, Float8_ b, Float8_ c)
{
    Float8_ d;
    d.x = fma(a, b.x, c.x);
    d.y = fma(a, b.y, c.y);
    d.z = fma(a, b.z, c.z);
    d.w = fma(a, b.w, c.w);
    return d;
}

// Vector sum.
template <>
inline __device__ float sum(float v)
{
    return v;
}

template <>
inline __device__ float sum(float2 v)
{
    return v.x + v.y;
}

template <>
inline __device__ float sum(float4 v)
{
    return v.x + v.y + v.z + v.w;
}

template <>
inline __device__ float sum(Float4_ v)
{
    return v.x.x + v.x.y + v.y.x + v.y.y;
}

template <>
inline __device__ float sum(Float8_ v)
{
    return v.x.x + v.x.y + v.y.x + v.y.y + v.z.x + v.z.y + v.w.x + v.w.y;
}

// Vector dot product.
inline __device__ float dot(float a, float b)
{
    return a * b;
}

inline __device__ float dot(float2 a, float2 b)
{
    float2 c = mul<float2, float2, float2>(a, b);
    return c.x + c.y;
}

inline __device__ float dot(Float4_ a, Float4_ b)
{
    float2 acc = mul<float2, float2, float2>(a.x, b.x);
    acc = fma(a.y, b.y, acc);
    return acc.x + acc.y;
}

inline __device__ float dot(Float8_ a, Float8_ b)
{
    float2 acc = mul<float2, float2, float2>(a.x, b.x);
    acc = fma(a.y, b.y, acc);
    acc = fma(a.z, b.z, acc);
    acc = fma(a.w, b.w, acc);
    return acc.x + acc.y;
}

// From float to float.
inline __device__ void from_float(float& dst, float src)
{
    dst = src;
}

inline __device__ void from_float(float2& dst, float2 src)
{
    dst = src;
}

inline __device__ void from_float(float4& dst, float4 src)
{
    dst = src;
}

// From float to float.
inline __device__ float to_float(float u)
{
    return u;
}

inline __device__ float2 to_float(float2 u)
{
    return u;
}

inline __device__ float4 to_float(float4 u)
{
    return u;
}

inline __device__ Float4_ to_float(Float4_ u)
{
    return u;
}

inline __device__ Float8_ to_float(Float8_ u)
{
    return u;
}

// Zero-out a variable.
inline __device__ void zero(float& dst)
{
    dst = 0.f;
}

} // namespace vllm
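
A minimal sketch of the accumulator lifecycle these helpers support (accum_example is hypothetical; zero() is the generic byte-clearing template from attention_generic.cuh):

    // Zero-init, scale-and-accumulate with the scalar-times-vector fma,
    // then reduce the four lanes with sum().
    inline __device__ float accum_example(const float4* a, const float* w, int n)
    {
        float4 acc;
        zero(acc);
        for(int i = 0; i < n; ++i)
        {
            acc = fma(w[i], a[i], acc);
        }
        return sum(acc);
    }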
example/ck_tile/18_paged_attention/include/attention/dtype_fp8.cuh
0 → 100644
#pragma once
#include "attention_generic.cuh"
#include <stdint.h>
#ifdef ENABLE_FP8
#ifndef USE_ROCM
#include <cuda_fp8.h>
#endif // USE_ROCM
#endif // ENABLE_FP8
namespace vllm {

enum class Fp8KVCacheDataType
{
    kAuto    = 0,
    kFp8E4M3 = 1,
    kFp8E5M2 = 2,
};

// fp8 vector types for quantization of kv cache
template <>
struct Vec<uint8_t, 1>
{
    using Type = uint8_t;
};
template <>
struct Vec<uint8_t, 2>
{
    using Type = uint16_t;
};
template <>
struct Vec<uint8_t, 4>
{
    using Type = uint32_t;
};
template <>
struct Vec<uint8_t, 8>
{
    using Type = uint2;
};

} // namespace vllm
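
These specializations let N packed fp8 bytes travel as one integer word; quant_utils.cuh below supplies the conversions out of the packed form. For example (a sketch, not from the commit):

    // Eight fp8 values occupy a single uint2 (8 bytes).
    using Fp8x8 = vllm::Vec<uint8_t, 8>::Type; // = uint2
    static_assert(sizeof(Fp8x8) == 8, "eight fp8 bytes per vector");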
example/ck_tile/18_paged_attention/include/cuda_compat.h
0 → 100644
#pragma once
#ifdef USE_ROCM
#include <hip/hip_runtime.h>
#endif
#ifndef USE_ROCM
#define WARP_SIZE 32
#else
#define WARP_SIZE warpSize
#endif
#ifndef USE_ROCM
#define VLLM_LDG(arg) __ldg(arg)
#else
#define VLLM_LDG(arg) *(arg)
#endif
#ifndef USE_ROCM
#define VLLM_SHFL_XOR_SYNC(var, lane_mask) \
__shfl_xor_sync(uint32_t(-1), var, lane_mask)
#define VLLM_SHFL_XOR_SYNC_WIDTH(var, lane_mask, width) \
__shfl_xor_sync(uint32_t(-1), var, lane_mask, width)
#else
#define VLLM_SHFL_XOR_SYNC(var, lane_mask) __shfl_xor(var, lane_mask)
#define VLLM_SHFL_XOR_SYNC_WIDTH(var, lane_mask, width) \
__shfl_xor(var, lane_mask, width)
#endif
#ifndef USE_ROCM
#define VLLM_SHFL_SYNC(var, src_lane) __shfl_sync(uint32_t(-1), var, src_lane)
#else
#define VLLM_SHFL_SYNC(var, src_lane) __shfl(var, src_lane)
#endif
#ifndef USE_ROCM
#define VLLM_SHFL_DOWN_SYNC(var, lane_delta) \
__shfl_down_sync(uint32_t(-1), var, lane_delta)
#else
#define VLLM_SHFL_DOWN_SYNC(var, lane_delta) __shfl_down(var, lane_delta)
#endif
#ifndef USE_ROCM
#define VLLM_DevFuncAttribute_SET_MaxDynamicSharedMemorySize(FUNC, VAL) \
cudaFuncSetAttribute(FUNC, cudaFuncAttributeMaxDynamicSharedMemorySize, VAL)
#else
#define VLLM_DevFuncAttribute_SET_MaxDynamicSharedMemorySize(FUNC, VAL) \
hipFuncSetAttribute(FUNC, hipFuncAttributeMaxDynamicSharedMemorySize, VAL)
#endif
example/ck_tile/18_paged_attention/include/quantization/fp8/amd/hip_float8.h
0 → 100644
#pragma once
#ifdef __HIPCC__
#include <hip/hip_runtime.h>
#else
#include <type_traits>
#include <stdint.h>
#include <math.h>
#include <iostream>
#endif
#include "hip_float8_impl.h"
struct alignas(1) hip_fp8
{
    struct from_bits_t
    {
    };
    HIP_FP8_HOST_DEVICE static constexpr from_bits_t from_bits() { return from_bits_t(); }

    uint8_t data;

    hip_fp8() = default;
    HIP_FP8_HOST_DEVICE constexpr hip_fp8(const hip_fp8&) = default;
    HIP_FP8_HOST_DEVICE constexpr hip_fp8(uint8_t v)      = delete;
    explicit HIP_FP8_HOST_DEVICE constexpr hip_fp8(uint8_t v, from_bits_t) : data(v) {}

#ifdef __HIP__MI300__
    // NOTE: ON-DEVICE... always optimal bias
    explicit HIP_FP8_DEVICE hip_fp8(float v) : data(hip_fp8_impl::to_fp8_from_fp32(v)) {}

    explicit HIP_FP8_DEVICE hip_fp8(_Float16 v) : hip_fp8(static_cast<float>(v)) {}

    // Host only implementation using s/w simulation
    explicit HIP_FP8_HOST
#else  // __HIP__MI300__
    // both Host and DEVICE for non-MI300 using s/w simulation
    explicit HIP_FP8_HOST_DEVICE
#endif // __HIP__MI300__
    hip_fp8(float v)
    {
        data =
            hip_fp8_impl::to_float8<4, 3, float, true /*negative_zero_nan*/, true /*clip*/>(v);
    }

    explicit HIP_FP8_HOST_DEVICE hip_fp8(double v) : hip_fp8(static_cast<float>(v)) {}

#ifdef __HIP__MI300__
    // upcast using device specific intrinsic
    explicit inline HIP_FP8_DEVICE operator float() const
    {
        float fval;
        uint32_t i32val = static_cast<uint32_t>(data);

        // upcast
        asm volatile("v_cvt_f32_fp8 %0, %1 src0_sel:BYTE_0" : "=v"(fval) : "v"(i32val));

        return fval;
    }

    explicit inline HIP_FP8_HOST operator float() const
#else  // __HIP__MI300__
    explicit inline HIP_FP8_HOST_DEVICE operator float() const
#endif // __HIP__MI300__
    {
        return hip_fp8_impl::from_float8<4, 3, float, true /*negative_zero_nan*/>(data);
    }
};

namespace std {
inline hip_fp8 sin(hip_fp8 a) { return hip_fp8(sinf(float(a))); }
inline hip_fp8 cos(hip_fp8 a) { return hip_fp8(cosf(float(a))); }
HIP_FP8_HOST_DEVICE constexpr hip_fp8 real(const hip_fp8& a) { return a; }
} // namespace std

// Special operator overloading
inline std::ostream& operator<<(std::ostream& os, const hip_fp8& f8) { return os << float(f8); }

// all + operator overloading with mixed types
// mixed types, always converts to f32, does computation in f32, and returns
// float
inline HIP_FP8_HOST_DEVICE float operator+(const float fa, hip_fp8 b) { return (fa + float(b)); }

inline HIP_FP8_HOST_DEVICE float operator+(hip_fp8 a, const float fb) { return (float(a) + fb); }

inline HIP_FP8_HOST_DEVICE hip_fp8 operator+(hip_fp8 a, hip_fp8 b)
{
    return hip_fp8(float(a) + float(b));
}

inline HIP_FP8_HOST_DEVICE hip_fp8& operator+=(hip_fp8& a, hip_fp8 b)
{
    return a = hip_fp8(float(a) + float(b));
}

// overloading multiplication, always returns float,
inline HIP_FP8_HOST_DEVICE float operator*(hip_fp8 a, hip_fp8 b) { return float(a) * float(b); }

inline HIP_FP8_HOST_DEVICE float operator*(float a, hip_fp8 b) { return (a * float(b)); }

inline HIP_FP8_HOST_DEVICE float operator*(hip_fp8 a, float b) { return (float(a) * b); }

inline HIP_FP8_HOST_DEVICE float operator*(int32_t a, hip_fp8 b) { return ((float)a * float(b)); }

inline HIP_FP8_HOST_DEVICE float operator*(double a, hip_fp8 b) { return ((float)a * float(b)); }

// overloading for compare
inline HIP_FP8_HOST_DEVICE bool operator==(hip_fp8 a, hip_fp8 b) { return (a.data == b.data); }

inline HIP_FP8_HOST_DEVICE bool operator!=(hip_fp8 a, hip_fp8 b) { return (a.data != b.data); }

inline HIP_FP8_HOST_DEVICE bool operator>=(hip_fp8 a, hip_fp8 b)
{
    return static_cast<float>(a) >= static_cast<float>(b);
}

inline HIP_FP8_HOST_DEVICE bool operator>(hip_fp8 a, hip_fp8 b)
{
    return static_cast<float>(a) > static_cast<float>(b);
}
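
A minimal host-side sketch of the round trip this type provides (fp8_roundtrip_example is hypothetical; the e4m3 parameters and saturation behaviour come from the float constructor above):

    inline float fp8_roundtrip_example(float v)
    {
        hip_fp8 q(v);                               // quantize to e4m3 (NANOO, clipped)
        hip_fp8 same{q.data, hip_fp8::from_bits()}; // rebuild from the raw byte
        return float(same);                         // dequantize
    }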
example/ck_tile/18_paged_attention/include/quantization/fp8/amd/hip_float8_impl.h
0 → 100644
#pragma once
#if defined(__HIPCC__) && \
(defined(__gfx940__) || defined(__gfx941__) || defined(__gfx942__))
#define __HIP__MI300__
#endif
#ifdef __HIPCC__
#define HIP_FP8_HOST_DEVICE __host__ __device__
#define HIP_FP8_HOST __host__
#define HIP_FP8_DEVICE __device__
#else
#define HIP_FP8_HOST_DEVICE
#define HIP_FP8_HOST
#define HIP_FP8_DEVICE
#endif
namespace hip_fp8_impl {

#ifdef __HIP__MI300__
HIP_FP8_DEVICE uint8_t to_fp8_from_fp32(float v)
{
    uint8_t i8data;
    union
    {
        float fval;
        uint32_t i32val;
        uint8_t i8val[4]; // NOTE: not endian independent
    } val;

    uint32_t ival = 0;
    val.fval      = v;

    if((val.i32val & 0x7F800000) != 0x7F800000)
    { /// propagate NAN/INF, no clipping
        val.fval = __builtin_amdgcn_fmed3f(val.fval, 240.0, -240.0);
    }

    ival = __builtin_amdgcn_cvt_pk_fp8_f32(val.fval, val.fval, ival, false); // false -> WORD0
    val.i32val = ival;
    i8data     = val.i8val[0];

    return i8data;
}
#endif // __HIP__MI300__

HIP_FP8_HOST inline int clz(uint32_t x) { return __builtin_clz(x); }
#if defined(__HIPCC__) || defined(__CUDA_ARCH__)
HIP_FP8_DEVICE inline int clz(uint32_t x) { return __clz(x); }
#endif

template <int we, int wm, typename T, bool negative_zero_nan, bool clip>
HIP_FP8_HOST_DEVICE uint8_t to_float8(T _x, bool stoch = false, uint32_t rng = 0)
{
#ifdef __HIPCC__
    constexpr bool is_half = std::is_same<T, _Float16>::value;
#else
    constexpr bool is_half = false;
#endif
    constexpr bool is_float = std::is_same<T, float>::value;
    static_assert(wm + we == 7, "wm+we==7");
    static_assert(is_half || is_float, "Only half and float can be cast to f8");

    const int mfmt = (sizeof(T) == 4) ? 23 : 10;
    uint32_t x;
    if(sizeof(T) == 4)
    {
        x = reinterpret_cast<uint32_t&>(_x);
    }
    else
    {
        x = reinterpret_cast<uint16_t&>(_x);
    }

    uint32_t head, mantissa;
    int exponent, bias;
    uint32_t sign;

    if(sizeof(T) == 4)
    {
        head     = x & 0xFF800000;
        mantissa = x & 0x7FFFFF;
        exponent = (head >> 23) & 0xFF;
        sign     = head >> 31;
        bias     = 127;
    }
    else
    {
        head     = x & 0xFC00;
        mantissa = x & 0x3FF;
        exponent = (head >> 10) & 0x1F;
        sign     = head >> 15;
        bias     = 15;
    }

    uint32_t signed_inf = (sign << 7) + (((1 << we) - 1) << wm);

    // Deal with inf and NaNs
    if(negative_zero_nan)
    {
        if(sizeof(T) == 4)
        {
            if((x & 0x7F800000) == 0x7F800000)
            {
                return 0x80;
            }
        }
        else
        {
            // if(__hisinf(x) || __hisnan(x))
            if((x & 0x7C00) == 0x7C00)
            {
                return 0x80;
            }
        }
    }
    else
    {
        if(sizeof(T) == 4)
        {
            if((x & 0x7F800000) == 0x7F800000)
            {
                return signed_inf + (mantissa != 0 ? 1 : 0);
            }
        }
        else
        {
            if((x & 0x7C00) == 0x7C00)
            {
                return signed_inf + (mantissa != 0 ? 1 : 0);
            }
        }
    }
    if(x == 0)
    {
        return 0;
    }

    // First need to check if it is normal or denorm as there is a difference of
    // implicit 1 Then need to adjust the exponent to align with the F8 exponent,
    // in the meanwhile, shift The mantissa. Then for stochastic rounding, add rng
    // to mantissa and truncate. And for RNE, no need to add rng. Then probably
    // need to check whether there is carry and adjust exponent and mantissa again

    // For IEEE bias mode, the bias is 2^(k-1) -1 where k is the width of exponent
    // bits
    const int f8_bias                  = (1 << (we - 1)) - 1 + (negative_zero_nan ? 1 : 0);
    const int f8_denormal_act_exponent = 1 - f8_bias; // actual exponent of f8 denormal
    // act_exponent is the actual exponent of fp32/fp16 (after subtracting bias)
    // f8_exponent is the converted f8 exponent with bias encoding
    // exponent_diff is the diff between fp32/fp16 exponent and f8 exponent,
    // the difference needs to be adjusted and mantissa shifted
    int act_exponent, f8_exponent, exponent_diff;

    if(exponent == 0)
    { // fp32/fp16 is in denormal.
        /* fp32 denormal is below 2^-127 so it is usually not a concern here, we
        mostly concern fp16 here. In this case, f8 is usually in denormal. But there
        could be exceptions. fp16 denormal has exponent bias 15 while bf8 with NANOO has
        exponent bias 16. It means that there are some numbers in fp16 denormal but they
        are bf8 (NANOO) normals - smallest bf8 (NANOO) normal is 2^-15. fp16 numbers
        where exponent==0 (actual exponent -14) and highest bit of mantissa is 1 are bf8
        (NANOO) normal. In this case, the fp16 mantissa should be shift left by 1 */
        act_exponent  = exponent - bias + 1;
        exponent_diff = f8_denormal_act_exponent -
                        act_exponent; // actual exponent is exponent-bias+1 as it is denormal
    }
    else
    { // fp32/fp16 is normal with implicit 1
        act_exponent = exponent - bias;
        if(act_exponent <= f8_denormal_act_exponent)
        {
            /* This is the case where fp32/fp16 is normal but it is in f8 denormal
            range. For example fp8 nanoo mode, denormal exponent is -7, but if the
            fp32/fp16 actual exponent is -7, it is actually larger due to the implicit 1,
            Therefore it needs to be adjust to -6 and mantissa shift right by 1.
            So for fp32/fp16, exponent -8 is the cut point to convert to fp8 nanoo */
            exponent_diff = f8_denormal_act_exponent - act_exponent;
        }
        else
        { // both fp32/fp16 and f8 are in normal range
            exponent_diff = 0; // exponent_diff=0 does not mean there is no
                               // difference for this case, act_exponent could be
                               // larger. Just that it does not need shift mantissa
        }
        mantissa += (1 << mfmt); // Add the implicit 1 into mantissa
    }

    bool midpoint = (mantissa & ((1 << (mfmt - wm + exponent_diff)) - 1)) ==
                    static_cast<uint32_t>(1 << (mfmt - wm + exponent_diff - 1));
    /* This part is a bit tricky. The judgment of whether it is a tie needs to be
    done before we shift right as shift right could rip off some residual part
    and make something not midpoint look like midpoint. For example, the fp16
    number 0x1002 (0 00100 0000000010), it is larger than midpoint, but after
    shift right by 4 bits, it would look like midpoint.
    */

    if(exponent_diff > 0)
    {
        mantissa >>= exponent_diff;
    }
    else if(exponent_diff == -1)
    {
        mantissa <<= -exponent_diff;
    }
    bool implicit_one = mantissa & (1 << mfmt);
    // if there is no implicit 1, it means the f8 is denormal and need to adjust
    // to denorm exponent
    f8_exponent =
        (act_exponent + exponent_diff) /*actual f8 exponent*/ + f8_bias - (implicit_one ? 0 : 1);

    // Now we have the exponent and mantissa adjusted
    uint32_t drop_mask = (1 << (mfmt - wm)) - 1;
    bool odd = mantissa & (1 << (mfmt - wm)); // if the least significant bit
                                              // that is not truncated is 1
    mantissa +=
        (stoch ? rng : (midpoint ? (odd ? mantissa : mantissa - 1) : mantissa)) & drop_mask;

    // Now we deal with overflow
    if(f8_exponent == 0)
    {
        if((1 << mfmt) & mantissa)
        {
            f8_exponent = 1; // denormal overflow to become normal, promote exponent
        }
    }
    else
    {
        if((1 << (mfmt + 1)) & mantissa)
        {
            mantissa >>= 1;
            f8_exponent++;
        }
    }

    mantissa >>= (mfmt - wm);

    // above range: quantize to maximum possible float of the same sign
    const int max_exp = (1 << we) - (negative_zero_nan ? 1 : 2);
    if(f8_exponent > max_exp)
    {
        if(clip)
        {
            mantissa    = (1 << wm) - 1;
            f8_exponent = max_exp;
        }
        else
        {
            return signed_inf;
        }
    }

    if(f8_exponent == 0 && mantissa == 0)
    {
        return negative_zero_nan ? 0 : (sign << 7);
    }
    mantissa &= (1 << wm) - 1;
    return (sign << 7) | (f8_exponent << wm) | mantissa;
}

template <int we, int wm, typename T = float, bool negative_zero_nan = true>
inline HIP_FP8_HOST_DEVICE T from_float8(uint8_t x)
{
#ifdef __HIPCC__
    constexpr bool is_half = std::is_same<T, _Float16>::value;
#else
    constexpr bool is_half = false;
#endif
    constexpr bool is_float = std::is_same<T, float>::value;
    static_assert(is_half || is_float, "only half and float are supported");

    constexpr int weo = is_half ? 5 : 8;
    constexpr int wmo = is_half ? 10 : (is_float ? 23 : 7);

    T fInf, fNegInf, fNaN, fNeg0;

#ifdef __HIPCC__
    if(is_half)
    {
        const uint16_t ihInf    = 0x7C00;
        const uint16_t ihNegInf = 0xFC00;
        const uint16_t ihNaN    = 0x7C01;
        const uint16_t ihNeg0   = 0x8000;
        fInf    = reinterpret_cast<const _Float16&>(ihInf);
        fNegInf = reinterpret_cast<const _Float16&>(ihNegInf);
        fNaN    = reinterpret_cast<const _Float16&>(ihNaN);
        fNeg0   = reinterpret_cast<const _Float16&>(ihNeg0);
    }
    else
#endif
        if(is_float)
    {
        const uint32_t ifInf    = 0x7F800000;
        const uint32_t ifNegInf = 0xFF800000;
        const uint32_t ifNaN    = 0x7F800001;
        const uint32_t ifNeg0   = 0x80000000;
        fInf    = reinterpret_cast<const float&>(ifInf);
        fNegInf = reinterpret_cast<const float&>(ifNegInf);
        fNaN    = reinterpret_cast<const float&>(ifNaN);
        fNeg0   = reinterpret_cast<const float&>(ifNeg0);
    }

    if(x == 0)
    {
        return 0;
    }

    uint32_t sign     = x >> 7;
    uint32_t mantissa = x & ((1 << wm) - 1);
    int exponent      = (x & 0x7F) >> wm;
    if(negative_zero_nan)
    {
        if(x == 0x80)
        {
            return fNaN;
        }
    }
    else
    {
        if(x == 0x80)
        {
            return fNeg0;
        }
        if(exponent == ((1 << we) - 1))
        {
            return (mantissa == 0) ? (sign ? fNegInf : fInf) : fNaN;
        }
    }
    typename std::conditional<sizeof(T) == 2, uint16_t, uint32_t>::type retval;
    if(we == 5 && is_half && !negative_zero_nan)
    {
        retval = x << 8;
        return reinterpret_cast<const T&>(retval);
    }

    const int exp_low_cutoff =
        (1 << (weo - 1)) - (1 << (we - 1)) + 1 - (negative_zero_nan ? 1 : 0);

    // subnormal input
    if(exponent == 0)
    {
        // guaranteed mantissa!=0 since cases 0x0 and 0x80 are handled above
        int sh = 1 + clz(mantissa) - (32 - wm);
        mantissa <<= sh;
        exponent += 1 - sh;
        mantissa &= ((1 << wm) - 1);
    }
    exponent += exp_low_cutoff - 1;
    mantissa <<= wmo - wm;

    // subnormal output (occurs when T=half, we=5, negative_zero_nan=true)
    if(exponent <= 0)
    {
        mantissa |= 1 << wmo;
        mantissa >>= 1 - exponent;
        exponent = 0;
    }

    if(sizeof(T) == 2)
    {
        retval = (sign << 15) | (exponent << 10) | mantissa;
    }
    else
    {
        retval = (sign << 31) | (exponent << 23) | mantissa;
    }
    return reinterpret_cast<const T&>(retval);
}

} // namespace hip_fp8_impl
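
Because to_float8/from_float8 are pure integer bit manipulation, they can be exercised on the host. A hypothetical check, assuming the e4m3/NANOO/clip configuration hip_fp8 uses:

    inline float fp8_e4m3_roundtrip_host(float v)
    {
        uint8_t bits = hip_fp8_impl::to_float8<4, 3, float, true, true>(v);
        return hip_fp8_impl::from_float8<4, 3, float, true>(bits);
    }
    // e.g. fp8_e4m3_roundtrip_host(1000.0f) should clip to 240.0f, the largest
    // finite value of this format.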
example/ck_tile/18_paged_attention/include/quantization/fp8/amd/quant_utils.cuh
0 → 100644
#pragma once
#include "hip_float8.h"
#include <hip/hip_fp16.h>
#include <hip/hip_bf16.h>
#include <hip/hip_bfloat16.h>
#include "../../../attention/attention_dtypes.h"
namespace
vllm
{
#ifdef USE_ROCM
namespace
fp8
{
#ifdef ENABLE_FP8
template
<
typename
Tout
,
typename
Tin
>
__inline__
__device__
Tout
vec_conversion
(
const
Tin
&
x
)
{
return
x
;
}
template
<
typename
Tout
,
typename
Tin
>
__inline__
__device__
Tout
scaled_vec_conversion
(
const
Tin
&
x
,
const
float
scale
)
{
return
x
;
}
// fp8 -> half
template
<
>
__inline__
__device__
uint16_t
vec_conversion
<
uint16_t
,
uint8_t
>
(
const
uint8_t
&
a
)
{
hip_fp8
f8
{
a
,
hip_fp8
::
from_bits
()};
__half_raw
res
;
res
.
data
=
static_cast
<
float
>
(
f8
);
return
res
.
x
;
}
// fp8x2 -> half2
template
<
>
__inline__
__device__
uint32_t
vec_conversion
<
uint32_t
,
uint16_t
>
(
const
uint16_t
&
a
)
{
#if defined(__HIP__MI300__)
const
auto
&
f2
=
__builtin_amdgcn_cvt_pk_f32_fp8
(
a
,
0
);
union
{
__half2_raw
h2r
;
uint32_t
ui32
;
}
tmp
;
tmp
.
h2r
.
x
.
data
=
f2
[
0
];
tmp
.
h2r
.
y
.
data
=
f2
[
1
];
return
tmp
.
ui32
;
#else
union
{
uint16_t
u16
[
2
];
uint32_t
u32
;
}
tmp
;
tmp
.
u16
[
0
]
=
vec_conversion
<
uint16_t
,
uint8_t
>
(
static_cast
<
uint8_t
>
(
a
));
tmp
.
u16
[
1
]
=
vec_conversion
<
uint16_t
,
uint8_t
>
(
static_cast
<
uint8_t
>
(
a
>>
8U
));
return
tmp
.
u32
;
#endif
}
// fp8x4 -> half2x2
template
<
>
__inline__
__device__
uint2
vec_conversion
<
uint2
,
uint32_t
>
(
const
uint32_t
&
a
)
{
union
{
uint2
u32x2
;
uint32_t
u32
[
2
];
}
tmp
;
tmp
.
u32
[
0
]
=
vec_conversion
<
uint32_t
,
uint16_t
>
((
uint16_t
)
a
);
tmp
.
u32
[
1
]
=
vec_conversion
<
uint32_t
,
uint16_t
>
((
uint16_t
)(
a
>>
16U
));
return
tmp
.
u32x2
;
}
// fp8x8 -> half2x4
template
<
>
__inline__
__device__
uint4
vec_conversion
<
uint4
,
uint2
>
(
const
uint2
&
a
)
{
union
{
uint4
u64x2
;
uint2
u64
[
2
];
}
tmp
;
tmp
.
u64
[
0
]
=
vec_conversion
<
uint2
,
uint32_t
>
(
a
.
x
);
tmp
.
u64
[
1
]
=
vec_conversion
<
uint2
,
uint32_t
>
(
a
.
y
);
return
tmp
.
u64x2
;
}
using
__nv_bfloat16
=
__hip_bfloat16
;
// fp8 -> __nv_bfloat16
template
<
>
__inline__
__device__
__nv_bfloat16
vec_conversion
<
__nv_bfloat16
,
uint8_t
>
(
const
uint8_t
&
a
)
{
hip_fp8
f8
{
a
,
hip_fp8
::
from_bits
()};
float
f
{
f8
};
return
__float2bfloat16
(
f
);
}
using
__nv_bfloat162
=
__hip_bfloat162
;
// fp8x2 -> __nv_bfloat162
template
<
>
__inline__
__device__
__nv_bfloat162
vec_conversion
<
__nv_bfloat162
,
uint16_t
>
(
const
uint16_t
&
a
)
{
__nv_bfloat162
res
;
res
.
x
=
vec_conversion
<
__nv_bfloat16
,
uint8_t
>
((
uint8_t
)
a
);
res
.
y
=
vec_conversion
<
__nv_bfloat16
,
uint8_t
>
((
uint8_t
)(
a
>>
8U
));
return
res
;
}
// fp8x4 -> bf16_4_t
template
<
>
__inline__
__device__
bf16_4_t
vec_conversion
<
bf16_4_t
,
uint32_t
>
(
const
uint32_t
&
a
)
{
bf16_4_t
res
;
res
.
x
=
vec_conversion
<
__nv_bfloat162
,
uint16_t
>
((
uint16_t
)
a
);
res
.
y
=
vec_conversion
<
__nv_bfloat162
,
uint16_t
>
((
uint16_t
)(
a
>>
16U
));
return
res
;
}
// fp8x8 -> bf16_8_t
template
<
>
__inline__
__device__
bf16_8_t
vec_conversion
<
bf16_8_t
,
uint2
>
(
const
uint2
&
a
)
{
bf16_4_t
tmp1
,
tmp2
;
tmp1
=
vec_conversion
<
bf16_4_t
,
uint32_t
>
(
a
.
x
);
tmp2
=
vec_conversion
<
bf16_4_t
,
uint32_t
>
(
a
.
y
);
bf16_8_t
res
;
res
.
x
=
tmp1
.
x
;
res
.
y
=
tmp1
.
y
;
res
.
z
=
tmp2
.
x
;
res
.
w
=
tmp2
.
y
;
return
res
;
}
// fp8 -> float
template
<
>
__inline__
__device__
float
vec_conversion
<
float
,
uint8_t
>
(
const
uint8_t
&
a
)
{
hip_fp8
fp8
{
a
,
hip_fp8
::
from_bits
()};
return
static_cast
<
float
>
(
fp8
);
}
// fp8x2 -> float2
template <>
__inline__ __device__ float2 vec_conversion<float2, uint16_t>(const uint16_t& a)
{
#if defined(__HIP__MI300__)
    float2 res;
    const auto& f2 = __builtin_amdgcn_cvt_pk_f32_fp8(a, 0);
    res.x = f2[0];
    res.y = f2[1];
    return res;
#else
    float2 res;
    res.x = vec_conversion<float, uint8_t>(static_cast<uint8_t>(a));
    res.y = vec_conversion<float, uint8_t>(static_cast<uint8_t>(a >> 8U));
    return res;
#endif
}
// fp8x4 -> float4
template <>
__inline__ __device__ Float4_ vec_conversion<Float4_, uint32_t>(const uint32_t& a)
{
    Float4_ res;
    res.x = vec_conversion<float2, uint16_t>((uint16_t)a);
    res.y = vec_conversion<float2, uint16_t>((uint16_t)(a >> 16U));
    return res;
}
// fp8x8 -> float8
template <>
__inline__ __device__ Float8_ vec_conversion<Float8_, uint2>(const uint2& a)
{
    Float4_ tmp1, tmp2;
    tmp1 = vec_conversion<Float4_, uint32_t>(a.x);
    tmp2 = vec_conversion<Float4_, uint32_t>(a.y);
    Float8_ res;
    res.x = tmp1.x;
    res.y = tmp1.y;
    res.z = tmp2.x;
    res.w = tmp2.y;
    return res;
}
// half -> fp8
template <>
__inline__ __device__ uint8_t vec_conversion<uint8_t, uint16_t>(const uint16_t& a)
{
    __half_raw tmp;
    tmp.x = a;
    hip_fp8 f8{static_cast<float>(tmp.data)};
    return f8.data;
}
// bf16 -> fp8
template <>
__inline__ __device__ uint8_t vec_conversion<uint8_t, __nv_bfloat16>(const __nv_bfloat16& a)
{
    hip_fp8 res{__bfloat162float(a)};
    return res.data;
}
// float -> fp8
template <>
__inline__ __device__ uint8_t vec_conversion<uint8_t, float>(const float& a)
{
    hip_fp8 f8(a);
    return f8.data;
}
// fp8x4 -> float4
template <>
__inline__ __device__ float4 vec_conversion<float4, uint32_t>(const uint32_t& a)
{
    Float4_ tmp = vec_conversion<Float4_, uint32_t>(a);
    float4 res  = make_float4(tmp.x.x, tmp.x.y, tmp.y.x, tmp.y.y);
    return res;
}
// float2 -> half2
template <>
__inline__ __device__ uint32_t vec_conversion<uint32_t, float2>(const float2& a)
{
    union
    {
        half2 float16;
        uint32_t uint32;
    };
    float16 = __float22half2_rn(a);
    return uint32;
}
// Float4 -> half2x2
template <>
__inline__ __device__ uint2 vec_conversion<uint2, Float4_>(const Float4_& a)
{
    uint2 b;
    float2 val;
    val.x = a.x.x;
    val.y = a.x.y;
    b.x   = vec_conversion<uint32_t, float2>(val);

    val.x = a.y.x;
    val.y = a.y.y;
    b.y   = vec_conversion<uint32_t, float2>(val);
    return b;
}
// Float4 -> float4
template <>
__inline__ __device__ float4 vec_conversion<float4, Float4_>(const Float4_& a)
{
    float4 b;
    b.x = a.x.x;
    b.y = a.x.y;
    b.z = a.y.x;
    b.w = a.y.y;
    return b;
}
// Float8 -> half2x4
template <>
__inline__ __device__ uint4 vec_conversion<uint4, Float8_>(const Float8_& a)
{
    uint4 b;
    b.x = vec_conversion<uint32_t, float2>(a.x);
    b.y = vec_conversion<uint32_t, float2>(a.y);
    b.z = vec_conversion<uint32_t, float2>(a.z);
    b.w = vec_conversion<uint32_t, float2>(a.w);
    return b;
}
// float2 -> bfloat162
template <>
__inline__ __device__ __nv_bfloat162 vec_conversion<__nv_bfloat162, float2>(const float2& a)
{
    __nv_bfloat162 b = __float22bfloat162_rn(a);
    return b;
}
// Float4 -> bfloat162x2
template <>
__inline__ __device__ bf16_4_t vec_conversion<bf16_4_t, Float4_>(const Float4_& a)
{
    bf16_4_t b;
    b.x = __float22bfloat162_rn(a.x);
    b.y = __float22bfloat162_rn(a.y);
    return b;
}
// Float8 -> bfloat162x4
template <>
__inline__ __device__ bf16_8_t vec_conversion<bf16_8_t, Float8_>(const Float8_& a)
{
    bf16_8_t b;
    b.x = __float22bfloat162_rn(a.x);
    b.y = __float22bfloat162_rn(a.y);
    b.z = __float22bfloat162_rn(a.z);
    b.w = __float22bfloat162_rn(a.w);
    return b;
}
/* Scaled and vectorized conversions, for data exchange between high- and
   low-precision domains.

   Convention for the scale in this API:
       FP8_data = Quantize(High_Precision_data / scale)
   i.e. Quantize(HP / scale) => FP8, and Dequantize(FP8) * scale => HP.
*/
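// Illustrative round trip under this convention (a sketch, not part of the
// original API surface; `scale` is an assumed per-tensor quantization scale
// chosen by the caller):
//
//     float hp    = 3.5f;
//     float scale = 2.0f;
//     uint8_t q   = scaled_vec_conversion<uint8_t, float>(hp, scale); // quantize: fp8(hp / scale)
//     float back  = scaled_vec_conversion<float, uint8_t>(q, scale);  // dequantize: float(q) * scale
//     // back ~= hp, up to fp8 rounding error.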
// fp8 -> half
template <>
__inline__ __device__ uint16_t scaled_vec_conversion<uint16_t, uint8_t>(const uint8_t& a,
                                                                        float scale)
{
    hip_fp8 f8{a, hip_fp8::from_bits()};
    __half_raw res;
    res.data = static_cast<float>(f8) * scale;
    return res.x;
}
// fp8x2 -> half2
template <>
__inline__ __device__ uint32_t scaled_vec_conversion<uint32_t, uint16_t>(const uint16_t& a,
                                                                         float scale)
{
#if defined(__HIP__MI300__)
    const auto& f2 = __builtin_amdgcn_cvt_pk_f32_fp8(a, 0);
    union
    {
        __half2_raw h2r;
        uint32_t ui32;
    } tmp;
    tmp.h2r.x.data = f2[0] * scale;
    tmp.h2r.y.data = f2[1] * scale;
    return tmp.ui32;
#else
    union
    {
        uint16_t u16[2];
        uint32_t u32;
    } tmp;
    tmp.u16[0] = scaled_vec_conversion<uint16_t, uint8_t>(static_cast<uint8_t>(a), scale);
    tmp.u16[1] = scaled_vec_conversion<uint16_t, uint8_t>(static_cast<uint8_t>(a >> 8U), scale);
    return tmp.u32;
#endif
}
// fp8x4 -> half2x2
template <>
__inline__ __device__ uint2 scaled_vec_conversion<uint2, uint32_t>(const uint32_t& a, float scale)
{
    union
    {
        uint2 u32x2;
        uint32_t u32[2];
    } tmp;
    tmp.u32[0] = scaled_vec_conversion<uint32_t, uint16_t>((uint16_t)a, scale);
    tmp.u32[1] = scaled_vec_conversion<uint32_t, uint16_t>((uint16_t)(a >> 16U), scale);
    return tmp.u32x2;
}
// fp8x8 -> half2x4
template <>
__inline__ __device__ uint4 scaled_vec_conversion<uint4, uint2>(const uint2& a, float scale)
{
    union
    {
        uint4 u64x2;
        uint2 u64[2];
    } tmp;
    tmp.u64[0] = scaled_vec_conversion<uint2, uint32_t>(a.x, scale);
    tmp.u64[1] = scaled_vec_conversion<uint2, uint32_t>(a.y, scale);
    return tmp.u64x2;
}
using __nv_bfloat16 = __hip_bfloat16;

// fp8 -> __nv_bfloat16
template <>
__inline__ __device__ __nv_bfloat16 scaled_vec_conversion<__nv_bfloat16, uint8_t>(const uint8_t& a,
                                                                                  float scale)
{
    hip_fp8 f8{a, hip_fp8::from_bits()};
    float f{f8};
    return __float2bfloat16(f * scale);
}
// fp8x2 -> __nv_bfloat162
template <>
__inline__ __device__ __nv_bfloat162
scaled_vec_conversion<__nv_bfloat162, uint16_t>(const uint16_t& a, float scale)
{
    __nv_bfloat162 res;
    res.x = scaled_vec_conversion<__nv_bfloat16, uint8_t>((uint8_t)a, scale);
    res.y = scaled_vec_conversion<__nv_bfloat16, uint8_t>((uint8_t)(a >> 8U), scale);
    return res;
}
// fp8x4 -> bf16_4_t
template <>
__inline__ __device__ bf16_4_t scaled_vec_conversion<bf16_4_t, uint32_t>(const uint32_t& a,
                                                                         float scale)
{
    bf16_4_t res;
    res.x = scaled_vec_conversion<__nv_bfloat162, uint16_t>((uint16_t)a, scale);
    res.y = scaled_vec_conversion<__nv_bfloat162, uint16_t>((uint16_t)(a >> 16U), scale);
    return res;
}
// fp8x8 -> bf16_8_t
template <>
__inline__ __device__ bf16_8_t scaled_vec_conversion<bf16_8_t, uint2>(const uint2& a, float scale)
{
    bf16_4_t tmp1, tmp2;
    tmp1 = scaled_vec_conversion<bf16_4_t, uint32_t>(a.x, scale);
    tmp2 = scaled_vec_conversion<bf16_4_t, uint32_t>(a.y, scale);
    bf16_8_t res;
    res.x = tmp1.x;
    res.y = tmp1.y;
    res.z = tmp2.x;
    res.w = tmp2.y;
    return res;
}
// fp8 -> float
template <>
__inline__ __device__ float scaled_vec_conversion<float, uint8_t>(const uint8_t& a, float scale)
{
    hip_fp8 fp8{a, hip_fp8::from_bits()};
    return static_cast<float>(fp8) * scale;
}
// fp8x2 -> float2
template <>
__inline__ __device__ float2 scaled_vec_conversion<float2, uint16_t>(const uint16_t& a,
                                                                     float scale)
{
#if defined(__HIP__MI300__)
    float2 res;
    const auto& f2 = __builtin_amdgcn_cvt_pk_f32_fp8(a, 0);
    res.x = f2[0] * scale;
    res.y = f2[1] * scale;
    return res;
#else
    float2 res;
    res.x = scaled_vec_conversion<float, uint8_t>(static_cast<uint8_t>(a), scale);
    res.y = scaled_vec_conversion<float, uint8_t>(static_cast<uint8_t>(a >> 8U), scale);
    return res;
#endif
}
// fp8x4 -> Float4_
template <>
__inline__ __device__ Float4_ scaled_vec_conversion<Float4_, uint32_t>(const uint32_t& a,
                                                                       const float scale)
{
    Float4_ res;
    res.x = scaled_vec_conversion<float2, uint16_t>((uint16_t)a, scale);
    res.y = scaled_vec_conversion<float2, uint16_t>((uint16_t)(a >> 16U), scale);
    return res;
}
// fp8x4 -> float4
template <>
__inline__ __device__ float4 scaled_vec_conversion<float4, uint32_t>(const uint32_t& a,
                                                                     float scale)
{
    Float4_ res = scaled_vec_conversion<Float4_, uint32_t>(a, scale);
    return {res.x.x, res.x.y, res.y.x, res.y.y};
}
// fp8x8 -> float8
template <>
__inline__ __device__ Float8_ scaled_vec_conversion<Float8_, uint2>(const uint2& a, float scale)
{
    Float4_ tmp1, tmp2;
    tmp1 = scaled_vec_conversion<Float4_, uint32_t>(a.x, scale);
    tmp2 = scaled_vec_conversion<Float4_, uint32_t>(a.y, scale);
    Float8_ res;
    res.x = tmp1.x;
    res.y = tmp1.y;
    res.z = tmp2.x;
    res.w = tmp2.y;
    return res;
}
// half -> fp8
template <>
__inline__ __device__ uint8_t scaled_vec_conversion<uint8_t, uint16_t>(const uint16_t& a,
                                                                       float scale)
{
    __half_raw tmp;
    tmp.x = a;
    hip_fp8 f8{static_cast<float>(tmp.data / scale)};
    return f8.data;
}
// halfx2 -> fp8x2
template <>
__inline__ __device__ uint16_t scaled_vec_conversion<uint16_t, uint32_t>(const uint32_t& a,
                                                                         float scale)
{
#ifdef __HIP__MI300__
    union
    {
        uint32_t ui32;
        __half2_raw h2r;
    } tmp;
    tmp.ui32 = a;

    union
    {
        uint32_t ui32;
        float f;
    } f1, f2;
    f1.f = tmp.h2r.x.data / scale;
    f2.f = tmp.h2r.y.data / scale;
    // Clamp finite values to [-240, 240], the representable range of fp8
    // e4m3fnuz; the exponent-bit test skips NaN/Inf so they are not clamped.
    if((f1.ui32 & 0x7F800000) != 0x7F800000)
    {
        f1.f = __builtin_amdgcn_fmed3f(f1.f, 240.0, -240.0);
    }
    if((f2.ui32 & 0x7F800000) != 0x7F800000)
    {
        f2.f = __builtin_amdgcn_fmed3f(f2.f, 240.0, -240.0);
    }
    return __builtin_amdgcn_cvt_pk_fp8_f32(f1.f, f2.f, 0, 0);
#else
    union
    {
        uint32_t ui32;
        __half2_raw h2r;
    } tmp;
    tmp.ui32 = a;

    union
    {
        uint8_t ui8[2];
        uint16_t ui16;
    } res;
    res.ui8[0] = scaled_vec_conversion<uint8_t, uint16_t>(tmp.h2r.x.x, scale);
    res.ui8[1] = scaled_vec_conversion<uint8_t, uint16_t>(tmp.h2r.y.x, scale);
    return res.ui16;
#endif
}
// half2x2 -> fp8x4
template <>
__inline__ __device__ uint32_t scaled_vec_conversion<uint32_t, uint2>(const uint2& a, float scale)
{
    union
    {
        uint16_t ui16[2];
        uint32_t ui32;
    } tmp;
    tmp.ui16[0] = scaled_vec_conversion<uint16_t, uint32_t>(a.x, scale);
    tmp.ui16[1] = scaled_vec_conversion<uint16_t, uint32_t>(a.y, scale);
    return tmp.ui32;
}
// half2x4 -> fp8x8
template <>
__inline__ __device__ uint2 scaled_vec_conversion<uint2, uint4>(const uint4& a, float scale)
{
    union
    {
        uint2 ui2[2];
        uint4 ui4;
    } tmp;
    tmp.ui4 = a;
    uint2 res;
    res.x = scaled_vec_conversion<uint32_t, uint2>(tmp.ui2[0], scale);
    res.y = scaled_vec_conversion<uint32_t, uint2>(tmp.ui2[1], scale);
    return res;
}
// bf16 -> fp8
template <>
__inline__ __device__ uint8_t scaled_vec_conversion<uint8_t, __nv_bfloat16>(const __nv_bfloat16& a,
                                                                            float scale)
{
    hip_fp8 res{__bfloat162float(a) / scale};
    return res.data;
}
// bf16x2 -> fp8x2
template <>
__inline__ __device__ uint16_t
scaled_vec_conversion<uint16_t, __nv_bfloat162>(const __nv_bfloat162& a, float scale)
{
    union
    {
        uint8_t ui8[2];
        uint16_t ui16;
    } tmp;
    tmp.ui8[0] = scaled_vec_conversion<uint8_t, __nv_bfloat16>(a.x, scale);
    tmp.ui8[1] = scaled_vec_conversion<uint8_t, __nv_bfloat16>(a.y, scale);
    return tmp.ui16;
}
// bf16x4 -> fp8x4
template <>
__inline__ __device__ uint32_t scaled_vec_conversion<uint32_t, bf16_4_t>(const bf16_4_t& a,
                                                                         float scale)
{
    union
    {
        uint16_t ui16[2];
        uint32_t ui32;
    } tmp;
    tmp.ui16[0] = scaled_vec_conversion<uint16_t, __nv_bfloat162>(a.x, scale);
    tmp.ui16[1] = scaled_vec_conversion<uint16_t, __nv_bfloat162>(a.y, scale);
    return tmp.ui32;
}
// bf16x8 -> fp8x8
template <>
__inline__ __device__ uint2 scaled_vec_conversion<uint2, bf16_8_t>(const bf16_8_t& a, float scale)
{
    uint2 res;
    res.x = scaled_vec_conversion<uint32_t, bf16_4_t>({a.x, a.y}, scale);
    res.y = scaled_vec_conversion<uint32_t, bf16_4_t>({a.z, a.w}, scale);
    return res;
}
// float -> fp8
template <>
__inline__ __device__ uint8_t scaled_vec_conversion<uint8_t, float>(const float& a, float scale)
{
    // Divide by scale before quantizing, per the scale convention above.
    hip_fp8 f8(a / scale);
    return f8.data;
}
// floatx2 -> fp8x2
template <>
__inline__ __device__ uint16_t scaled_vec_conversion<uint16_t, float2>(const float2& a,
                                                                       float scale)
{
#ifdef __HIP__MI300__
    union
    {
        uint32_t ui32;
        float f;
    } f1, f2;
    f1.f = a.x / scale;
    f2.f = a.y / scale;
    // Clamp finite values into the fp8 e4m3fnuz range [-240, 240]; skip NaN/Inf.
    if((f1.ui32 & 0x7F800000) != 0x7F800000)
    {
        f1.f = __builtin_amdgcn_fmed3f(f1.f, 240.0, -240.0);
    }
    if((f2.ui32 & 0x7F800000) != 0x7F800000)
    {
        f2.f = __builtin_amdgcn_fmed3f(f2.f, 240.0, -240.0);
    }
    return __builtin_amdgcn_cvt_pk_fp8_f32(f1.f, f2.f, 0, 0);
#else
    union
    {
        uint8_t ui8[2];
        uint16_t ui16;
    } tmp;
    tmp.ui8[0] = scaled_vec_conversion<uint8_t, float>(a.x, scale);
    tmp.ui8[1] = scaled_vec_conversion<uint8_t, float>(a.y, scale);
    return tmp.ui16;
#endif
}
// floatx4 -> fp8x4
template <>
__inline__ __device__ uint32_t scaled_vec_conversion<uint32_t, float4>(const float4& a,
                                                                       float scale)
{
    union
    {
        uint16_t ui16[2];
        uint32_t ui32;
    } tmp;
    tmp.ui16[0] = scaled_vec_conversion<uint16_t, float2>({a.x, a.y}, scale);
    tmp.ui16[1] = scaled_vec_conversion<uint16_t, float2>({a.z, a.w}, scale);
    return tmp.ui32;
}
#endif // ENABLE_FP8

template <typename Tout, typename Tin, Fp8KVCacheDataType kv_dt>
__inline__ __device__ Tout convert(const Tin& x)
{
#ifdef ENABLE_FP8
    if constexpr(kv_dt == Fp8KVCacheDataType::kFp8E4M3)
    {
        return vec_conversion<Tout, Tin>(x);
    }
#endif
    assert(false);
    return {}; // Squash missing return statement warning
}
template <typename Tout, typename Tin, Fp8KVCacheDataType kv_dt>
__inline__ __device__ Tout scaled_convert(const Tin& x, const float scale)
{
#ifdef ENABLE_FP8
    if constexpr(kv_dt == Fp8KVCacheDataType::kFp8E4M3)
    {
        return scaled_vec_conversion<Tout, Tin>(x, scale);
    }
#endif
    assert(false);
    return {}; // Squash missing return statement warning
}
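// Example use (a sketch; assumes an fp8 KV-cache entry `q` and a per-tensor
// dequantization scale `kv_scale` supplied by the caller):
//
//     uint8_t q = ...; // one fp8 value read from the cache
//     float v = scaled_convert<float, uint8_t, Fp8KVCacheDataType::kFp8E4M3>(q, kv_scale);
//
// For kv_dt values other than kFp8E4M3, both helpers assert in device code
// and return a zero-initialized Tout.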
// The following macro dispatches the conversion function based on the data
// type of the key and value cache. FN is a macro that invokes a function
// templated as template <typename scalar_t, typename cache_t,
// Fp8KVCacheDataType kv_dt>.
#define DISPATCH_BY_KV_CACHE_DTYPE(SRC_DTYPE, KV_DTYPE, FN) \
if (KV_DTYPE == "auto") { \
if (SRC_DTYPE == at::ScalarType::Float) { \
FN(float, float, vllm::Fp8KVCacheDataType::kAuto); \
} else if (SRC_DTYPE == at::ScalarType::Half) { \
FN(uint16_t, uint16_t, vllm::Fp8KVCacheDataType::kAuto); \
} else if (SRC_DTYPE == at::ScalarType::BFloat16) { \
FN(__nv_bfloat16, __nv_bfloat16, vllm::Fp8KVCacheDataType::kAuto); \
} else { \
TORCH_CHECK(false, "Unsupported input type of kv cache: ", SRC_DTYPE); \
} \
} else { \
if (KV_DTYPE == "fp8" || KV_DTYPE == "fp8_e4m3") { \
if (SRC_DTYPE == at::ScalarType::Float) { \
FN(float, uint8_t, vllm::Fp8KVCacheDataType::kFp8E4M3); \
} else if (SRC_DTYPE == at::ScalarType::Half) { \
FN(uint16_t, uint8_t, vllm::Fp8KVCacheDataType::kFp8E4M3); \
} else if (SRC_DTYPE == at::ScalarType::BFloat16) { \
FN(__nv_bfloat16, uint8_t, vllm::Fp8KVCacheDataType::kFp8E4M3); \
} else { \
TORCH_CHECK(false, \
"Unsupported input type of kv cache: ", SRC_DTYPE); \
} \
} else { \
TORCH_CHECK(false, "Unsupported data type of kv cache: ", KV_DTYPE); \
} \
}
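// Illustrative expansion (a sketch; CALL_RESHAPE is a hypothetical FN macro,
// and src_dtype / kv_dtype stand for the caller's runtime dtype arguments):
//
//     #define CALL_RESHAPE(scalar_t, cache_t, kv_dt) \
//         reshape_kernel<scalar_t, cache_t, kv_dt><<<grid, block>>>(/* ... */);
//
//     DISPATCH_BY_KV_CACHE_DTYPE(src_dtype, kv_dtype, CALL_RESHAPE)
//
// With kv_dtype == "fp8" and src_dtype == at::ScalarType::Half, this selects
// FN(uint16_t, uint8_t, vllm::Fp8KVCacheDataType::kFp8E4M3).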
} // namespace fp8
#endif // USE_ROCM
} // namespace vllm