gaoqiong / flash-attention / Commits
"official/projects/simclr/multitask_train.py" did not exist on "3e8f117843c38914560501bf0a623b1f97298a40"
Commit d562aa63 (unverified)
Authored Jul 31, 2024 by Woosuk Kwon; committed by GitHub on Jul 31, 2024

Sync with FA v2.6.0 to support soft capping (#13)

Parent: 12375706
Changes: showing 20 of 81 changed files, with 80 additions and 10 deletions (+80, -10).
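For background on the commit message: soft capping, added upstream in FlashAttention v2.6.0 and used by models such as Gemma 2, bounds the raw attention logits with a tanh before the softmax. The snippet below is a minimal illustrative sketch of that formula only; the function name is not part of this commit, which regenerates kernel instantiation files to track the upstream change.

#include <cmath>

// Illustrative sketch (not code from this commit): tanh-based soft capping of a
// raw attention logit. With softcap > 0 the result is bounded to
// (-softcap, +softcap) before the softmax is applied.
inline float apply_softcap(float score, float softcap) {
    return softcap * std::tanh(score / softcap);
}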
Changed files (this page):

csrc/flash_attn/src/flash_fwd_split_hdim128_bf16_sm80.cu          +1 -1
csrc/flash_attn/src/flash_fwd_split_hdim128_fp16_causal_sm80.cu   +7 -0
csrc/flash_attn/src/flash_fwd_split_hdim128_fp16_sm80.cu          +1 -1
csrc/flash_attn/src/flash_fwd_split_hdim160_bf16_causal_sm80.cu   +7 -0
csrc/flash_attn/src/flash_fwd_split_hdim160_bf16_sm80.cu          +1 -1
csrc/flash_attn/src/flash_fwd_split_hdim160_fp16_causal_sm80.cu   +7 -0
csrc/flash_attn/src/flash_fwd_split_hdim160_fp16_sm80.cu          +1 -1
csrc/flash_attn/src/flash_fwd_split_hdim192_bf16_causal_sm80.cu   +7 -0
csrc/flash_attn/src/flash_fwd_split_hdim192_bf16_sm80.cu          +1 -1
csrc/flash_attn/src/flash_fwd_split_hdim192_fp16_causal_sm80.cu   +7 -0
csrc/flash_attn/src/flash_fwd_split_hdim192_fp16_sm80.cu          +1 -1
csrc/flash_attn/src/flash_fwd_split_hdim224_bf16_causal_sm80.cu   +7 -0
csrc/flash_attn/src/flash_fwd_split_hdim224_bf16_sm80.cu          +1 -1
csrc/flash_attn/src/flash_fwd_split_hdim224_fp16_causal_sm80.cu   +7 -0
csrc/flash_attn/src/flash_fwd_split_hdim224_fp16_sm80.cu          +1 -1
csrc/flash_attn/src/flash_fwd_split_hdim256_bf16_causal_sm80.cu   +7 -0
csrc/flash_attn/src/flash_fwd_split_hdim256_bf16_sm80.cu          +1 -1
csrc/flash_attn/src/flash_fwd_split_hdim256_fp16_causal_sm80.cu   +7 -0
csrc/flash_attn/src/flash_fwd_split_hdim256_fp16_sm80.cu          +1 -1
csrc/flash_attn/src/flash_fwd_split_hdim32_bf16_causal_sm80.cu    +7 -0
csrc/flash_attn/src/flash_fwd_split_hdim128_bf16_sm80.cu

@@ -4,4 +4,4 @@
 #include "flash_fwd_launch_template.h"

-template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 128>(Flash_fwd_params &params, cudaStream_t stream);
+template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 128, false>(Flash_fwd_params &params, cudaStream_t stream);
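The remaining non-causal files below carry the same one-line change: each explicit template instantiation gains a third argument (false here, true in the new *_causal_sm80.cu files), so the causal and non-causal variants are compiled as separate translation units. As a hedged sketch of how a caller could pick between the two instantiations at runtime (the wrapper function is hypothetical, and params.is_causal as a field of Flash_fwd_params is an assumption, not taken from this diff):

#include "flash_fwd_launch_template.h"

// Hypothetical wrapper, for illustration only: select the explicit Is_causal
// instantiation based on a runtime flag carried in the params struct.
void run_splitkv_hdim128_bf16(Flash_fwd_params &params, cudaStream_t stream) {
    if (params.is_causal) {
        run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 128, true>(params, stream);
    } else {
        run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 128, false>(params, stream);
    }
}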
csrc/flash_attn/src/flash_fwd_split_hdim128_fp16_causal_sm80.cu (new file, mode 100644)

// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_fwd_launch_template.h"

template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 128, true>(Flash_fwd_params &params, cudaStream_t stream);
csrc/flash_attn/src/flash_fwd_split_hdim128_fp16_sm80.cu

@@ -4,4 +4,4 @@
 #include "flash_fwd_launch_template.h"

-template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 128>(Flash_fwd_params &params, cudaStream_t stream);
+template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 128, false>(Flash_fwd_params &params, cudaStream_t stream);
csrc/flash_attn/src/flash_fwd_split_hdim160_bf16_causal_sm80.cu (new file, mode 100644)

// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_fwd_launch_template.h"

template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 160, true>(Flash_fwd_params &params, cudaStream_t stream);
csrc/flash_attn/src/flash_fwd_split_hdim160_bf16_sm80.cu

@@ -4,4 +4,4 @@
 #include "flash_fwd_launch_template.h"

-template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 160>(Flash_fwd_params &params, cudaStream_t stream);
+template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 160, false>(Flash_fwd_params &params, cudaStream_t stream);
csrc/flash_attn/src/flash_fwd_split_hdim160_fp16_causal_sm80.cu (new file, mode 100644)

// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_fwd_launch_template.h"

template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 160, true>(Flash_fwd_params &params, cudaStream_t stream);
csrc/flash_attn/src/flash_fwd_split_hdim160_fp16_sm80.cu

@@ -4,4 +4,4 @@
 #include "flash_fwd_launch_template.h"

-template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 160>(Flash_fwd_params &params, cudaStream_t stream);
+template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 160, false>(Flash_fwd_params &params, cudaStream_t stream);
csrc/flash_attn/src/flash_fwd_split_hdim192_bf16_causal_sm80.cu (new file, mode 100644)

// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_fwd_launch_template.h"

template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 192, true>(Flash_fwd_params &params, cudaStream_t stream);
csrc/flash_attn/src/flash_fwd_split_hdim192_bf16_sm80.cu

@@ -4,4 +4,4 @@
 #include "flash_fwd_launch_template.h"

-template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 192>(Flash_fwd_params &params, cudaStream_t stream);
+template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 192, false>(Flash_fwd_params &params, cudaStream_t stream);
csrc/flash_attn/src/flash_fwd_split_hdim192_fp16_causal_sm80.cu (new file, mode 100644)

// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_fwd_launch_template.h"

template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 192, true>(Flash_fwd_params &params, cudaStream_t stream);
csrc/flash_attn/src/flash_fwd_split_hdim192_fp16_sm80.cu

@@ -4,4 +4,4 @@
 #include "flash_fwd_launch_template.h"

-template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 192>(Flash_fwd_params &params, cudaStream_t stream);
+template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 192, false>(Flash_fwd_params &params, cudaStream_t stream);
csrc/flash_attn/src/flash_fwd_split_hdim224_bf16_causal_sm80.cu (new file, mode 100644)

// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_fwd_launch_template.h"

template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 224, true>(Flash_fwd_params &params, cudaStream_t stream);
csrc/flash_attn/src/flash_fwd_split_hdim224_bf16_sm80.cu

@@ -4,4 +4,4 @@
 #include "flash_fwd_launch_template.h"

-template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 224>(Flash_fwd_params &params, cudaStream_t stream);
+template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 224, false>(Flash_fwd_params &params, cudaStream_t stream);
csrc/flash_attn/src/flash_fwd_split_hdim224_fp16_causal_sm80.cu (new file, mode 100644)

// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_fwd_launch_template.h"

template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 224, true>(Flash_fwd_params &params, cudaStream_t stream);
csrc/flash_attn/src/flash_fwd_split_hdim224_fp16_sm80.cu

@@ -4,4 +4,4 @@
 #include "flash_fwd_launch_template.h"

-template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 224>(Flash_fwd_params &params, cudaStream_t stream);
+template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 224, false>(Flash_fwd_params &params, cudaStream_t stream);
csrc/flash_attn/src/flash_fwd_split_hdim256_bf16_causal_sm80.cu (new file, mode 100644)

// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_fwd_launch_template.h"

template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 256, true>(Flash_fwd_params &params, cudaStream_t stream);
csrc/flash_attn/src/flash_fwd_split_hdim256_bf16_sm80.cu

@@ -4,4 +4,4 @@
 #include "flash_fwd_launch_template.h"

-template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 256>(Flash_fwd_params &params, cudaStream_t stream);
+template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 256, false>(Flash_fwd_params &params, cudaStream_t stream);
csrc/flash_attn/src/flash_fwd_split_hdim256_fp16_causal_sm80.cu (new file, mode 100644)

// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_fwd_launch_template.h"

template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 256, true>(Flash_fwd_params &params, cudaStream_t stream);
csrc/flash_attn/src/flash_fwd_split_hdim256_fp16_sm80.cu

@@ -4,4 +4,4 @@
 #include "flash_fwd_launch_template.h"

-template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 256>(Flash_fwd_params &params, cudaStream_t stream);
+template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 256, false>(Flash_fwd_params &params, cudaStream_t stream);
csrc/flash_attn/src/flash_fwd_split_hdim32_bf16_causal_sm80.cu (new file, mode 100644)

// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_fwd_launch_template.h"

template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 32, true>(Flash_fwd_params &params, cudaStream_t stream);