xuwx1 / LightX2V / Commits

Commit 22484a22 (Unverified)
Authored Dec 09, 2025 by fuheaven; committed by GitHub on Dec 09, 2025
Parent: 1f7bad54

    Dcu: format code (#588)

Changes: 3 changed files with 9 additions and 18 deletions (+9 / -18)
    lightx2v_platform/base/dcu.py                 +6 / -6
    lightx2v_platform/ops/attn/dcu/__init__.py    +0 / -1
    lightx2v_platform/ops/attn/dcu/flash_attn.py  +3 / -11
lightx2v_platform/base/dcu.py  (view file @ 22484a22)

@@ -8,17 +8,18 @@ from lightx2v_platform.registry_factory import PLATFORM_DEVICE_REGISTER

class DcuDevice:
    """
    DCU (AMD GPU) Device implementation for LightX2V.
    DCU uses ROCm which provides CUDA-compatible APIs through HIP.
    Most PyTorch operations work transparently through the ROCm backend.
    """

    name = "dcu"

    @staticmethod
    def is_available() -> bool:
        """
        Check if DCU is available.
        DCU uses the standard CUDA API through ROCm's HIP compatibility layer.

        Returns:
            bool: True if DCU/CUDA is available
        """
        ...
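The body of the check is collapsed in this diff view. On ROCm builds of PyTorch the HIP backend is surfaced through the torch.cuda namespace, so a check of this kind typically reduces to the standard CUDA availability call; a minimal sketch under that assumption, not the project's actual elided code:

    import torch

    def is_available() -> bool:
        # ROCm/HIP devices report through the torch.cuda API,
        # so the standard availability check also covers DCU.
        return torch.cuda.is_available()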
@@ -32,10 +33,10 @@ class DcuDevice:

    def get_device() -> str:
        """
        Get the device type string.
        Returns "cuda" because DCU uses CUDA-compatible APIs through ROCm.
        This allows seamless integration with existing PyTorch code.

        Returns:
            str: "cuda" for ROCm compatibility
        """
        ...
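Because the returned device string is plain "cuda", existing PyTorch code paths need no special casing. A short illustration, assuming get_device is exposed as a static method like is_available (the variable names are placeholders):

    import torch
    from lightx2v_platform.base.dcu import DcuDevice

    device = torch.device(DcuDevice.get_device())  # torch.device("cuda") on ROCm
    x = torch.randn(4, 4, device=device)           # tensor allocated on the DCU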
@@ -45,11 +46,10 @@ class DcuDevice:

    def init_parallel_env():
        """
        Initialize distributed parallel environment for DCU.
        Uses RCCL (ROCm Collective Communications Library) which is
        compatible with NCCL APIs for multi-GPU communication.
        """
        # RCCL is compatible with NCCL backend
        dist.init_process_group(backend="nccl")
        torch.cuda.set_device(dist.get_rank())
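An initializer like this is normally driven by a per-GPU launcher that sets the rendezvous environment variables. A hedged usage sketch for a single-node run (the launch command and script name are assumptions, and it presumes init_parallel_env is callable as a static method like the other helpers):

    # Assumed single-node launch, one process per DCU:
    #   torchrun --nproc_per_node=4 train.py
    # torchrun sets RANK/WORLD_SIZE/MASTER_ADDR; on a single node the global
    # rank equals the local GPU index, so set_device(dist.get_rank()) is valid.
    import torch.distributed as dist
    from lightx2v_platform.base.dcu import DcuDevice

    DcuDevice.init_parallel_env()  # backend="nccl" resolves to RCCL on ROCm
    print(f"rank {dist.get_rank()} of {dist.get_world_size()}")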
lightx2v_platform/ops/attn/dcu/__init__.py  (view file @ 22484a22)

from .flash_attn import *
lightx2v_platform/ops/attn/dcu/flash_attn.py  (view file @ 22484a22)

@@ -20,15 +20,14 @@ except ImportError:

class FlashAttnDcu(AttnWeightTemplate):
    """
    DCU Flash Attention implementation.
    Uses AMD ROCm version of Flash Attention 2.6.1 when available.
    Falls back to PyTorch SDPA (Scaled Dot Product Attention) if Flash Attention is not installed.

    Tested Environment:
        - PyTorch: 2.7.1
        - Python: 3.10
        - Flash Attention: 2.6.1 (ROCm)

    Reference: https://developer.sourcefind.cn/codes/modelzoo/wan2.1_pytorch/-/blob/master/wan/modules/attention.py
    """
@@ -56,7 +55,6 @@ class FlashAttnDcu(AttnWeightTemplate):

    ):
        """
        Execute Flash Attention computation.

        Args:
            q: [B, Lq, Nq, C1] Query tensor
            k: [B, Lk, Nk, C1] Key tensor
            ...

@@ -68,7 +66,6 @@ class FlashAttnDcu(AttnWeightTemplate):

            causal: Whether to apply causal mask
            window_size: Sliding window size tuple (left, right)
            deterministic: Whether to use deterministic algorithm

        Returns:
            Output tensor: [B, Lq, Nq, C2]
        """
@@ -129,14 +126,12 @@ class FlashAttnDcu(AttnWeightTemplate):

    def _sdpa_fallback(self, q, k, v, causal=False, dropout_p=0.0):
        """
        Fallback to PyTorch Scaled Dot Product Attention.

        Args:
            q: [B, Lq, Nq, C] Query tensor
            k: [B, Lk, Nk, C] Key tensor
            v: [B, Lk, Nk, C] Value tensor
            causal: Whether to apply causal mask
            dropout_p: Dropout probability

        Returns:
            Output tensor: [B, Lq, Nq, C]
        """
@@ -145,10 +140,7 @@ class FlashAttnDcu(AttnWeightTemplate):

        k = k.transpose(1, 2)
        v = v.transpose(1, 2)
        out = torch.nn.functional.scaled_dot_product_attention(
            q, k, v, attn_mask=None, is_causal=causal, dropout_p=dropout_p
        )
        # Transpose back to [B, Lq, Nq, C]
        return out.transpose(1, 2).contiguous()
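The transposes exist because scaled_dot_product_attention takes tensors as [batch, heads, seq, dim], while the rest of this module keeps them as [batch, seq, heads, dim]. A self-contained check of that round trip, with arbitrarily chosen shapes:

    import torch
    import torch.nn.functional as F

    B, L, N, C = 2, 16, 4, 32
    q = torch.randn(B, L, N, C)
    k = torch.randn(B, L, N, C)
    v = torch.randn(B, L, N, C)

    # [B, L, N, C] -> [B, N, L, C] for SDPA, then back again
    out = F.scaled_dot_product_attention(
        q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), is_causal=False
    )
    out = out.transpose(1, 2).contiguous()
    assert out.shape == (B, L, N, C)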