Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
gaoqiong
flash-attention
Commits
26f4b5fb
"ppstructure/MANIFEST.in" did not exist on "3927a96063a2a5136adb09d70308ac89686967c9"
Commit
26f4b5fb
authored
Jul 31, 2024
by
Woosuk Kwon
Browse files
Merge branch 'main' into Dao-AILab/main
parents
5018ac6a
12375706
Pipeline
#2015
failed with stages
in 0 seconds
Changes
95
Pipelines
1
Expand all
Show whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
0 additions
and
1028 deletions
+0
-1028
csrc/flash_attn/src/flash_bwd_hdim192_fp16_sm80.cu
csrc/flash_attn/src/flash_bwd_hdim192_fp16_sm80.cu
+0
-10
csrc/flash_attn/src/flash_bwd_hdim224_bf16_sm80.cu
csrc/flash_attn/src/flash_bwd_hdim224_bf16_sm80.cu
+0
-10
csrc/flash_attn/src/flash_bwd_hdim224_fp16_sm80.cu
csrc/flash_attn/src/flash_bwd_hdim224_fp16_sm80.cu
+0
-10
csrc/flash_attn/src/flash_bwd_hdim256_bf16_causal_sm80.cu
csrc/flash_attn/src/flash_bwd_hdim256_bf16_causal_sm80.cu
+0
-10
csrc/flash_attn/src/flash_bwd_hdim256_bf16_sm80.cu
csrc/flash_attn/src/flash_bwd_hdim256_bf16_sm80.cu
+0
-10
csrc/flash_attn/src/flash_bwd_hdim256_fp16_causal_sm80.cu
csrc/flash_attn/src/flash_bwd_hdim256_fp16_causal_sm80.cu
+0
-10
csrc/flash_attn/src/flash_bwd_hdim256_fp16_sm80.cu
csrc/flash_attn/src/flash_bwd_hdim256_fp16_sm80.cu
+0
-10
csrc/flash_attn/src/flash_bwd_hdim32_bf16_causal_sm80.cu
csrc/flash_attn/src/flash_bwd_hdim32_bf16_causal_sm80.cu
+0
-10
csrc/flash_attn/src/flash_bwd_hdim32_bf16_sm80.cu
csrc/flash_attn/src/flash_bwd_hdim32_bf16_sm80.cu
+0
-10
csrc/flash_attn/src/flash_bwd_hdim32_fp16_causal_sm80.cu
csrc/flash_attn/src/flash_bwd_hdim32_fp16_causal_sm80.cu
+0
-10
csrc/flash_attn/src/flash_bwd_hdim32_fp16_sm80.cu
csrc/flash_attn/src/flash_bwd_hdim32_fp16_sm80.cu
+0
-10
csrc/flash_attn/src/flash_bwd_hdim64_bf16_causal_sm80.cu
csrc/flash_attn/src/flash_bwd_hdim64_bf16_causal_sm80.cu
+0
-10
csrc/flash_attn/src/flash_bwd_hdim64_bf16_sm80.cu
csrc/flash_attn/src/flash_bwd_hdim64_bf16_sm80.cu
+0
-10
csrc/flash_attn/src/flash_bwd_hdim64_fp16_causal_sm80.cu
csrc/flash_attn/src/flash_bwd_hdim64_fp16_causal_sm80.cu
+0
-10
csrc/flash_attn/src/flash_bwd_hdim64_fp16_sm80.cu
csrc/flash_attn/src/flash_bwd_hdim64_fp16_sm80.cu
+0
-10
csrc/flash_attn/src/flash_bwd_hdim96_bf16_causal_sm80.cu
csrc/flash_attn/src/flash_bwd_hdim96_bf16_causal_sm80.cu
+0
-10
csrc/flash_attn/src/flash_bwd_hdim96_bf16_sm80.cu
csrc/flash_attn/src/flash_bwd_hdim96_bf16_sm80.cu
+0
-10
csrc/flash_attn/src/flash_bwd_hdim96_fp16_causal_sm80.cu
csrc/flash_attn/src/flash_bwd_hdim96_fp16_causal_sm80.cu
+0
-10
csrc/flash_attn/src/flash_bwd_hdim96_fp16_sm80.cu
csrc/flash_attn/src/flash_bwd_hdim96_fp16_sm80.cu
+0
-10
csrc/flash_attn/src/flash_bwd_kernel.h
csrc/flash_attn/src/flash_bwd_kernel.h
+0
-838
No files found.
csrc/flash_attn/src/flash_bwd_hdim192_fp16_sm80.cu
deleted
100644 → 0
View file @
5018ac6a
// Copyright (c) 2024, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_bwd_launch_template.h"

// Explicit specialization: fp16 (cutlass::half_t), head dim 192, non-causal.
// Forwards to the shared launch template in flash_bwd_launch_template.h.
template<>
void run_mha_bwd_<cutlass::half_t, 192, false>(Flash_bwd_params &params, cudaStream_t stream) {
    run_mha_bwd_hdim192<cutlass::half_t, false>(params, stream);
}
csrc/flash_attn/src/flash_bwd_hdim224_bf16_sm80.cu
deleted
100644 → 0
View file @
5018ac6a
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_bwd_launch_template.h"

// Explicit specialization: bf16 (cutlass::bfloat16_t), head dim 224.
// NOTE(review): unlike the 2024-dated sibling files, this instantiation takes no
// Is_causal template argument — presumably generated against an older primary
// template; confirm against the run_mha_bwd_ declaration before changing.
template<>
void run_mha_bwd_<cutlass::bfloat16_t, 224>(Flash_bwd_params &params, cudaStream_t stream) {
    run_mha_bwd_hdim224<cutlass::bfloat16_t>(params, stream);
}
csrc/flash_attn/src/flash_bwd_hdim224_fp16_sm80.cu
deleted
100644 → 0
View file @
5018ac6a
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_bwd_launch_template.h"

// Explicit specialization: fp16 (cutlass::half_t), head dim 224.
// NOTE(review): no Is_causal template argument here, unlike the 2024-dated
// sibling files — verify this matches the primary template's signature.
template<>
void run_mha_bwd_<cutlass::half_t, 224>(Flash_bwd_params &params, cudaStream_t stream) {
    run_mha_bwd_hdim224<cutlass::half_t>(params, stream);
}
csrc/flash_attn/src/flash_bwd_hdim256_bf16_causal_sm80.cu
deleted
100644 → 0
View file @
5018ac6a
// Copyright (c) 2024, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_bwd_launch_template.h"

// Explicit specialization: bf16 (cutlass::bfloat16_t), head dim 256, causal.
template<>
void run_mha_bwd_<cutlass::bfloat16_t, 256, true>(Flash_bwd_params &params, cudaStream_t stream) {
    run_mha_bwd_hdim256<cutlass::bfloat16_t, true>(params, stream);
}
csrc/flash_attn/src/flash_bwd_hdim256_bf16_sm80.cu
deleted
100644 → 0
View file @
5018ac6a
// Copyright (c) 2024, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_bwd_launch_template.h"

// Explicit specialization: bf16 (cutlass::bfloat16_t), head dim 256, non-causal.
template<>
void run_mha_bwd_<cutlass::bfloat16_t, 256, false>(Flash_bwd_params &params, cudaStream_t stream) {
    run_mha_bwd_hdim256<cutlass::bfloat16_t, false>(params, stream);
}
csrc/flash_attn/src/flash_bwd_hdim256_fp16_causal_sm80.cu
deleted
100644 → 0
View file @
5018ac6a
// Copyright (c) 2024, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_bwd_launch_template.h"

// Explicit specialization: fp16 (cutlass::half_t), head dim 256, causal.
template<>
void run_mha_bwd_<cutlass::half_t, 256, true>(Flash_bwd_params &params, cudaStream_t stream) {
    run_mha_bwd_hdim256<cutlass::half_t, true>(params, stream);
}
csrc/flash_attn/src/flash_bwd_hdim256_fp16_sm80.cu
deleted
100644 → 0
View file @
5018ac6a
// Copyright (c) 2024, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_bwd_launch_template.h"

// Explicit specialization: fp16 (cutlass::half_t), head dim 256, non-causal.
template<>
void run_mha_bwd_<cutlass::half_t, 256, false>(Flash_bwd_params &params, cudaStream_t stream) {
    run_mha_bwd_hdim256<cutlass::half_t, false>(params, stream);
}
csrc/flash_attn/src/flash_bwd_hdim32_bf16_causal_sm80.cu
deleted
100644 → 0
View file @
5018ac6a
// Copyright (c) 2024, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_bwd_launch_template.h"

// Explicit specialization: bf16 (cutlass::bfloat16_t), head dim 32, causal.
template<>
void run_mha_bwd_<cutlass::bfloat16_t, 32, true>(Flash_bwd_params &params, cudaStream_t stream) {
    run_mha_bwd_hdim32<cutlass::bfloat16_t, true>(params, stream);
}
csrc/flash_attn/src/flash_bwd_hdim32_bf16_sm80.cu
deleted
100644 → 0
View file @
5018ac6a
// Copyright (c) 2024, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_bwd_launch_template.h"

// Explicit specialization: bf16 (cutlass::bfloat16_t), head dim 32, non-causal.
template<>
void run_mha_bwd_<cutlass::bfloat16_t, 32, false>(Flash_bwd_params &params, cudaStream_t stream) {
    run_mha_bwd_hdim32<cutlass::bfloat16_t, false>(params, stream);
}
csrc/flash_attn/src/flash_bwd_hdim32_fp16_causal_sm80.cu
deleted
100644 → 0
View file @
5018ac6a
// Copyright (c) 2024, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_bwd_launch_template.h"

// Explicit specialization: fp16 (cutlass::half_t), head dim 32, causal.
template<>
void run_mha_bwd_<cutlass::half_t, 32, true>(Flash_bwd_params &params, cudaStream_t stream) {
    run_mha_bwd_hdim32<cutlass::half_t, true>(params, stream);
}
csrc/flash_attn/src/flash_bwd_hdim32_fp16_sm80.cu
deleted
100644 → 0
View file @
5018ac6a
// Copyright (c) 2024, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_bwd_launch_template.h"

// Explicit specialization: fp16 (cutlass::half_t), head dim 32, non-causal.
template<>
void run_mha_bwd_<cutlass::half_t, 32, false>(Flash_bwd_params &params, cudaStream_t stream) {
    run_mha_bwd_hdim32<cutlass::half_t, false>(params, stream);
}
csrc/flash_attn/src/flash_bwd_hdim64_bf16_causal_sm80.cu
deleted
100644 → 0
View file @
5018ac6a
// Copyright (c) 2024, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_bwd_launch_template.h"

// Explicit specialization: bf16 (cutlass::bfloat16_t), head dim 64, causal.
template<>
void run_mha_bwd_<cutlass::bfloat16_t, 64, true>(Flash_bwd_params &params, cudaStream_t stream) {
    run_mha_bwd_hdim64<cutlass::bfloat16_t, true>(params, stream);
}
csrc/flash_attn/src/flash_bwd_hdim64_bf16_sm80.cu
deleted
100644 → 0
View file @
5018ac6a
// Copyright (c) 2024, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_bwd_launch_template.h"

// Explicit specialization: bf16 (cutlass::bfloat16_t), head dim 64, non-causal.
template<>
void run_mha_bwd_<cutlass::bfloat16_t, 64, false>(Flash_bwd_params &params, cudaStream_t stream) {
    run_mha_bwd_hdim64<cutlass::bfloat16_t, false>(params, stream);
}
csrc/flash_attn/src/flash_bwd_hdim64_fp16_causal_sm80.cu
deleted
100644 → 0
View file @
5018ac6a
// Copyright (c) 2024, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_bwd_launch_template.h"

// Explicit specialization: fp16 (cutlass::half_t), head dim 64, causal.
template<>
void run_mha_bwd_<cutlass::half_t, 64, true>(Flash_bwd_params &params, cudaStream_t stream) {
    run_mha_bwd_hdim64<cutlass::half_t, true>(params, stream);
}
csrc/flash_attn/src/flash_bwd_hdim64_fp16_sm80.cu
deleted
100644 → 0
View file @
5018ac6a
// Copyright (c) 2024, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_bwd_launch_template.h"

// Explicit specialization: fp16 (cutlass::half_t), head dim 64, non-causal.
template<>
void run_mha_bwd_<cutlass::half_t, 64, false>(Flash_bwd_params &params, cudaStream_t stream) {
    run_mha_bwd_hdim64<cutlass::half_t, false>(params, stream);
}
csrc/flash_attn/src/flash_bwd_hdim96_bf16_causal_sm80.cu
deleted
100644 → 0
View file @
5018ac6a
// Copyright (c) 2024, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_bwd_launch_template.h"

// Explicit specialization: bf16 (cutlass::bfloat16_t), head dim 96, causal.
template<>
void run_mha_bwd_<cutlass::bfloat16_t, 96, true>(Flash_bwd_params &params, cudaStream_t stream) {
    run_mha_bwd_hdim96<cutlass::bfloat16_t, true>(params, stream);
}
csrc/flash_attn/src/flash_bwd_hdim96_bf16_sm80.cu
deleted
100644 → 0
View file @
5018ac6a
// Copyright (c) 2024, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_bwd_launch_template.h"

// Explicit specialization: bf16 (cutlass::bfloat16_t), head dim 96, non-causal.
template<>
void run_mha_bwd_<cutlass::bfloat16_t, 96, false>(Flash_bwd_params &params, cudaStream_t stream) {
    run_mha_bwd_hdim96<cutlass::bfloat16_t, false>(params, stream);
}
csrc/flash_attn/src/flash_bwd_hdim96_fp16_causal_sm80.cu
deleted
100644 → 0
View file @
5018ac6a
// Copyright (c) 2024, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_bwd_launch_template.h"

// Explicit specialization: fp16 (cutlass::half_t), head dim 96, causal.
template<>
void run_mha_bwd_<cutlass::half_t, 96, true>(Flash_bwd_params &params, cudaStream_t stream) {
    run_mha_bwd_hdim96<cutlass::half_t, true>(params, stream);
}
csrc/flash_attn/src/flash_bwd_hdim96_fp16_sm80.cu
deleted
100644 → 0
View file @
5018ac6a
// Copyright (c) 2024, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_bwd_launch_template.h"

// Explicit specialization: fp16 (cutlass::half_t), head dim 96, non-causal.
template<>
void run_mha_bwd_<cutlass::half_t, 96, false>(Flash_bwd_params &params, cudaStream_t stream) {
    run_mha_bwd_hdim96<cutlass::half_t, false>(params, stream);
}
csrc/flash_attn/src/flash_bwd_kernel.h
deleted
100644 → 0
View file @
5018ac6a
This diff is collapsed.
Click to expand it.
Prev
1
2
3
4
5
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment