Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
gaoqiong
flash-attention
Commits
b8ccd200
Commit
b8ccd200
authored
Nov 22, 2022
by
Tri Dao
Browse files
[Triton] Fix variable name from qkv to kv (h/t FrankZijlstra)
parent
05481617
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
3 additions
and
3 deletions
+3
-3
flash_attn/flash_attn_triton.py
flash_attn/flash_attn_triton.py
+3
-3
No files found.
flash_attn/flash_attn_triton.py
View file @
b8ccd200
...
@@ -212,8 +212,8 @@ def _fwd_kernel(
     lse_ptrs = Lse + off_hb * seqlen_q_rounded + offs_m
     tl.store(lse_ptrs, lse_i)
     # initialize pointers to output
-    offs_n = tl.arange(0, BLOCK_HEADDIM)
-    out_ptrs = Out + off_b * stride_ob + off_h * stride_oh + (offs_m[:, None] * stride_om + offs_n[None, :])
+    offs_d = tl.arange(0, BLOCK_HEADDIM)
+    out_ptrs = Out + off_b * stride_ob + off_h * stride_oh + (offs_m[:, None] * stride_om + offs_d[None, :])
     if EVEN_M:
         if EVEN_HEADDIM:
             tl.store(out_ptrs, acc_o)
...
@@ -789,7 +789,7 @@ class FlashAttnKVPackedFunc(torch.autograd.Function):
         with torch.inference_mode():
             dq = torch.empty_like(q)
             dkv = torch.empty_like(kv)
-            _flash_attn_backward(do, q, qkv[:, :, 0], qkv[:, :, 1], o, lse,
+            _flash_attn_backward(do, q, kv[:, :, 0], kv[:, :, 1], o, lse,
                                  dq, dkv[:, :, 0], dkv[:, :, 1],
                                  bias=bias, causal=ctx.causal, softmax_scale=ctx.softmax_scale)
         return dq, dkv, None, None, None
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment