[ROCm] Logic to decide whether to use the manually unrolled kernel. (#3306)

Commit 7ab84948 (unverified), authored Feb 04, 2025 by Wen-Heng (Jack) Chung; committed via GitHub on Feb 04, 2025. Parent: 4885b908.
Changes: 2 changed files, with 17 additions and 3 deletions.

python/sglang/srt/layers/quantization/fp8_kernel.py  (+10, -3)
python/sglang/srt/utils.py  (+7, -0)
python/sglang/srt/layers/quantization/fp8_kernel.py

@@ -22,7 +22,7 @@ import torch
 import triton
 import triton.language as tl
 
-from sglang.srt.utils import get_device_name, is_hip
+from sglang.srt.utils import get_device_core_count, get_device_name, is_hip
 
 is_hip_ = is_hip()
 fp8_type_ = torch.float8_e4m3fnuz if is_hip_ else torch.float8_e4m3fn

@@ -450,9 +450,16 @@ def w8a8_block_fp8_matmul(
         triton.cdiv(M, META["BLOCK_SIZE_M"]) * triton.cdiv(N, META["BLOCK_SIZE_N"]),
     )
 
-    # Use manually unrolledx4 kernel on AMD GPU.
+    # Use manually unrolledx4 kernel on AMD GPU when the grid size is small.
+    # Empirical testing shows the sweet spot lies when it's less than the # of
+    # compute units available on the device.
+    num_workgroups = triton.cdiv(M, config["BLOCK_SIZE_M"]) * triton.cdiv(
+        N, config["BLOCK_SIZE_N"]
+    )
     kernel = (
         _w8a8_block_fp8_matmul_unrolledx4
-        if is_hip_ == True
+        if (is_hip_ == True and num_workgroups <= get_device_core_count())
         else _w8a8_block_fp8_matmul
     )
 
     kernel[grid](
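To make the heuristic above concrete, the sketch below mirrors the selection logic in standalone form. The block sizes and the 304-core device figure (the compute-unit count of an AMD MI300X) are illustrative assumptions, not values taken from this commit; the real code reads the block sizes from the tuned Triton config and the core count from get_device_core_count().

import math

# Illustrative assumption: an AMD MI300X exposes 304 compute units.
DEVICE_CORE_COUNT = 304

def pick_kernel(M: int, N: int, block_m: int, block_n: int, is_hip: bool) -> str:
    # One workgroup is launched per (block_m x block_n) tile of the output.
    num_workgroups = math.ceil(M / block_m) * math.ceil(N / block_n)
    # Prefer the unrolled kernel on AMD only when the grid is small enough
    # that every compute unit runs at most one workgroup.
    if is_hip and num_workgroups <= DEVICE_CORE_COUNT:
        return "_w8a8_block_fp8_matmul_unrolledx4"
    return "_w8a8_block_fp8_matmul"

print(pick_kernel(256, 4096, 64, 128, is_hip=True))   # 4 * 32 = 128 <= 304 -> unrolled
print(pick_kernel(4096, 4096, 64, 128, is_hip=True))  # 64 * 32 = 2048 > 304 -> standard

A small grid is exactly the case where the launch cannot saturate the device, so a kernel that does more work per program via manual unrolling has room to pay off; per the comment in the hunk, the crossover was found empirically at the device's compute-unit count.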
python/sglang/srt/utils.py

@@ -1046,6 +1046,13 @@ def get_device_name(device_id: int = 0) -> str:
         return torch.hpu.get_device_name(device_id)
 
 
+def get_device_core_count(device_id: int = 0) -> int:
+    if hasattr(torch, "cuda") and torch.cuda.is_available():
+        return torch.cuda.get_device_properties(device_id).multi_processor_count
+
+    return 0
+
+
 def get_device_capability(device_id: int = 0) -> Tuple[int, int]:
     major, minor = None, None
     if hasattr(torch, "cuda") and torch.cuda.is_available():
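As a quick usage sketch for the new helper (assuming a GPU-enabled PyTorch install), the snippet below prints the same value get_device_core_count() would return. On ROCm builds of PyTorch the torch.cuda namespace is backed by HIP, so multi_processor_count reports AMD compute units; on CUDA builds it reports the streaming-multiprocessor count.

import torch

if hasattr(torch, "cuda") and torch.cuda.is_available():
    props = torch.cuda.get_device_properties(0)
    # On ROCm this is the number of AMD compute units (e.g. 304 on MI300X);
    # on NVIDIA hardware it is the SM count.
    print(f"{props.name}: {props.multi_processor_count} cores")
else:
    # Mirrors the helper's fallback: no visible GPU -> 0.
    print("no GPU visible; get_device_core_count() would return 0")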