gaoqiong / flash-attention · Commits

Commit 7a983df7
Authored Aug 28, 2023 by Tri Dao

Use generate_kernels.py script from Driss Guessous

Parent: c3f2a632
Changes: 33. Showing 20 changed files with 40 additions and 186 deletions (+40 -186).
csrc/flash_attn/src/flash_bwd_hdim128_bf16_sm80.cu  +2 -12
csrc/flash_attn/src/flash_bwd_hdim128_fp16_sm80.cu  +2 -18
csrc/flash_attn/src/flash_bwd_hdim160_bf16_sm80.cu  +2 -2
csrc/flash_attn/src/flash_bwd_hdim160_fp16_sm80.cu  +2 -2
csrc/flash_attn/src/flash_bwd_hdim192_bf16_sm80.cu  +2 -2
csrc/flash_attn/src/flash_bwd_hdim192_fp16_sm80.cu  +2 -2
csrc/flash_attn/src/flash_bwd_hdim224_bf16_sm80.cu  +2 -2
csrc/flash_attn/src/flash_bwd_hdim224_fp16_sm80.cu  +2 -2
csrc/flash_attn/src/flash_bwd_hdim256_bf16_sm80.cu  +2 -2
csrc/flash_attn/src/flash_bwd_hdim256_fp16_sm80.cu  +2 -2
csrc/flash_attn/src/flash_bwd_hdim32_bf16_sm80.cu   +2 -8
csrc/flash_attn/src/flash_bwd_hdim32_fp16_sm80.cu   +2 -8
csrc/flash_attn/src/flash_bwd_hdim64_bf16_sm80.cu   +2 -8
csrc/flash_attn/src/flash_bwd_hdim64_fp16_sm80.cu   +2 -27
csrc/flash_attn/src/flash_bwd_hdim96_bf16_sm80.cu   +2 -12
csrc/flash_attn/src/flash_bwd_hdim96_fp16_sm80.cu   +2 -14
csrc/flash_attn/src/flash_fwd_hdim128_bf16_sm80.cu  +2 -11
csrc/flash_attn/src/flash_fwd_hdim128_fp16_sm80.cu  +2 -24
csrc/flash_attn/src/flash_fwd_hdim160_bf16_sm80.cu  +2 -9
csrc/flash_attn/src/flash_fwd_hdim160_fp16_sm80.cu  +2 -19
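Every file in this commit carries the header "This file is auto-generated. See generate_kernels.py". As a rough illustration of the approach, here is a minimal, hypothetical Python sketch of what such a generator could look like; the function name, template text, and output layout are assumptions inferred from the generated files below, not the actual script contributed by Driss Guessous.

# Hypothetical generator sketch (not the actual generate_kernels.py from this commit).
# It emits one translation unit per (head dim, dtype) pair for the backward pass,
# mirroring the auto-generated files shown in the diff below.
from pathlib import Path

HEAD_DIMS = [32, 64, 96, 128, 160, 192, 224, 256]
DTYPES = {"fp16": "cutlass::half_t", "bf16": "cutlass::bfloat16_t"}

BWD_TEMPLATE = """\
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_bwd_launch_template.h"

template<>
void run_mha_bwd_<{ctype}, {hdim}>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {{
    run_mha_bwd_hdim{hdim}<{ctype}>(params, stream, configure);
}}
"""

def write_bwd_kernels(out_dir: str = "csrc/flash_attn/src") -> None:
    # One .cu file per (head dim, dtype) keeps each compilation unit small.
    out = Path(out_dir)
    out.mkdir(parents=True, exist_ok=True)
    for hdim in HEAD_DIMS:
        for dtype_name, ctype in DTYPES.items():
            path = out / f"flash_bwd_hdim{hdim}_{dtype_name}_sm80.cu"
            path.write_text(BWD_TEMPLATE.format(ctype=ctype, hdim=hdim))

if __name__ == "__main__":
    write_bwd_kernels()

The forward-pass files in the diff follow the same pattern, with Flash_fwd_params and no configure argument, so a real generator would presumably emit them from a second template in the same loop.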
csrc/flash_attn/src/flash_bwd_hdim128_bf16_sm80.cu
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_bwd_launch_template.h"
// template<>
// void run_mha_bwd_<cutlass::bfloat16_t, 128>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
// using elem_type = cutlass::bfloat16_t;
// if (params.h == params.h_k) {
// run_flash_bwd<Flash_bwd_kernel_traits<128, 64, 128, 8, 2, 4, 2, false, false, elem_type>>(params, stream, configure);
// } else {
// run_flash_bwd_seqq_parallel<Flash_bwd_kernel_traits<128, 128, 64, 8, 4, 4, 4, false, false, elem_type>>(params, stream, configure);
// }
// }
template<>
void run_mha_bwd_<cutlass::bfloat16_t, 128>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
    run_mha_bwd_hdim128<cutlass::bfloat16_t>(params, stream, configure);
}
csrc/flash_attn/src/flash_bwd_hdim128_fp16_sm80.cu
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_bwd_launch_template.h"
// template<>
// void run_mha_bwd_<cutlass::half_t, 128>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
// using elem_type = cutlass::half_t;
// if (params.h == params.h_k) {
// // run_flash_bwd<Flash_bwd_kernel_traits<128, 32, 128, 8, 2, 2, 2, false, false, elem_type>>(params, stream, configure);
// // This is faster, in the case of sequence-parallel bwd (where we need fewer registers).
// // Out of these three, the 2nd one is slightly faster (2% faster than the first). Idk why.
// // run_flash_bwd<Flash_bwd_kernel_traits<128, 64, 128, 8, 2, 2, 2, false, false, elem_type>>(params, stream, configure);
// run_flash_bwd<Flash_bwd_kernel_traits<128, 64, 128, 8, 2, 4, 2, false, false, elem_type>>(params, stream, configure);
// // run_flash_bwd<Flash_bwd_kernel_traits<128, 64, 128, 8, 2, 4, 4, false, false, elem_type>>(params, stream, configure);
// // run_flash_bwd<Flash_bwd_kernel_traits<128, 128, 64, 8, 4, 4, 4, false, false, elem_type>>(params, stream, configure);
// } else {
// run_flash_bwd_seqq_parallel<Flash_bwd_kernel_traits<128, 128, 64, 8, 4, 4, 4, false, false, elem_type>>(params, stream, configure);
// }
// }
template<>
void run_mha_bwd_<cutlass::half_t, 128>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
    run_mha_bwd_hdim128<cutlass::half_t>(params, stream, configure);
}
csrc/flash_attn/src/flash_bwd_hdim160_bf16_sm80.cu
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_bwd_launch_template.h"
template<>
void run_mha_bwd_<cutlass::bfloat16_t, 160>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
    run_mha_bwd_hdim160<cutlass::bfloat16_t>(params, stream, configure);
}
csrc/flash_attn/src/flash_bwd_hdim160_fp16_sm80.cu
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_bwd_launch_template.h"
template<>
void run_mha_bwd_<cutlass::half_t, 160>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
    run_mha_bwd_hdim160<cutlass::half_t>(params, stream, configure);
}
csrc/flash_attn/src/flash_bwd_hdim192_bf16_sm80.cu
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_bwd_launch_template.h"
template<>
void run_mha_bwd_<cutlass::bfloat16_t, 192>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
    run_mha_bwd_hdim192<cutlass::bfloat16_t>(params, stream, configure);
}
csrc/flash_attn/src/flash_bwd_hdim192_fp16_sm80.cu
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_bwd_launch_template.h"
template<>
void run_mha_bwd_<cutlass::half_t, 192>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
    run_mha_bwd_hdim192<cutlass::half_t>(params, stream, configure);
}
csrc/flash_attn/src/flash_bwd_hdim224_bf16_sm80.cu
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_bwd_launch_template.h"
template<>
void run_mha_bwd_<cutlass::bfloat16_t, 224>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
    run_mha_bwd_hdim224<cutlass::bfloat16_t>(params, stream, configure);
}
csrc/flash_attn/src/flash_bwd_hdim224_fp16_sm80.cu
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_bwd_launch_template.h"
template<>
void run_mha_bwd_<cutlass::half_t, 224>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
    run_mha_bwd_hdim224<cutlass::half_t>(params, stream, configure);
}
csrc/flash_attn/src/flash_bwd_hdim256_bf16_sm80.cu
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_bwd_launch_template.h"
template<>
void run_mha_bwd_<cutlass::bfloat16_t, 256>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
    run_mha_bwd_hdim256<cutlass::bfloat16_t>(params, stream, configure);
}
csrc/flash_attn/src/flash_bwd_hdim256_fp16_sm80.cu
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_bwd_launch_template.h"
template<>
void run_mha_bwd_<cutlass::half_t, 256>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
    run_mha_bwd_hdim256<cutlass::half_t>(params, stream, configure);
}
csrc/flash_attn/src/flash_bwd_hdim32_bf16_sm80.cu
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_bwd_launch_template.h"
// template<>
// void run_mha_bwd_<cutlass::bfloat16_t, 32>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
// using elem_type = cutlass::bfloat16_t;
// run_flash_bwd<Flash_bwd_kernel_traits<32, 128, 128, 8, 4, 4, 4, false, false, elem_type>>(params, stream, configure);
// }
template<>
void run_mha_bwd_<cutlass::bfloat16_t, 32>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
    run_mha_bwd_hdim32<cutlass::bfloat16_t>(params, stream, configure);
}
csrc/flash_attn/src/flash_bwd_hdim32_fp16_sm80.cu
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_bwd_launch_template.h"
// template<>
// void run_mha_bwd_<cutlass::half_t, 32>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
// using elem_type = cutlass::half_t;
// run_flash_bwd<Flash_bwd_kernel_traits<32, 128, 128, 8, 4, 4, 4, false, false, elem_type>>(params, stream, configure);
// }
template<>
void run_mha_bwd_<cutlass::half_t, 32>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
    run_mha_bwd_hdim32<cutlass::half_t>(params, stream, configure);
}
csrc/flash_attn/src/flash_bwd_hdim64_bf16_sm80.cu
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_bwd_launch_template.h"
// template<>
// void run_mha_bwd_<cutlass::bfloat16_t, 64>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
// using elem_type = cutlass::bfloat16_t;
// run_flash_bwd<Flash_bwd_kernel_traits<64, 128, 128, 8, 4, 4, 4, false, false, elem_type>>(params, stream, configure);
// }
template<>
void run_mha_bwd_<cutlass::bfloat16_t, 64>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
    run_mha_bwd_hdim64<cutlass::bfloat16_t>(params, stream, configure);
}
csrc/flash_attn/src/flash_bwd_hdim64_fp16_sm80.cu
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_bwd_launch_template.h"
// template<>
// void run_mha_bwd_<cutlass::half_t, 64>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
// using elem_type = cutlass::half_t;
// // Changing AtomLayoutMdQ from 2 to 4 takes the same time
// // run_flash_bwd<Flash_bwd_kernel_traits<64, 64, 128, 8, 2, 4, 2, false, false, elem_type>>(params, stream, configure);
// // run_flash_bwd<Flash_bwd_kernel_traits<64, 64, 128, 8, 2, 4, 2, true, false, elem_type>>(params, stream, configure);
// // run_flash_bwd<Flash_bwd_kernel_traits<64, 128, 128, 8, 2, 4, 4, false, false, elem_type>>(params, stream, configure);
// // This is slightly faster. We want to split M more so we need fewer registers to store LSE.
// run_flash_bwd<Flash_bwd_kernel_traits<64, 128, 128, 8, 4, 4, 4, false, false, elem_type>>(params, stream, configure);
// // run_flash_bwd<Flash_bwd_kernel_traits<64, 128, 64, 8, 4, 2, 4, true, false, elem_type>>(params, stream, configure);
// // run_flash_bwd<Flash_bwd_kernel_traits<64, 64, 64, 4, 2, 2, 2, true, false, elem_type>>(params, stream, configure);
// // run_flash_bwd<Flash_bwd_kernel_traits<64, 32, 128, 4, 1, 4, 1, false, false, elem_type>>(params, stream, configure);
// // run_flash_bwd<Flash_bwd_kernel_traits<64, 16, 128, 4, 1, 4, 1, false, false, elem_type>>(params, stream, configure);
// // M=128, N=64 is quite slow, I think because we need to read/write dQaccum twice as many times
// // run_flash_bwd<Flash_bwd_kernel_traits<64, 128, 64, 8, 2, 2, 2, false, elem_type>>(params, stream, configure);
// // run_flash_bwd<Flash_bwd_kernel_traits<64, 128, 64, 8, false, elem_type>>(params, stream, configure);
// // run_flash_bwd<Flash_bwd_kernel_traits<64, 16, 256, 8, false, elem_type>>(params, stream, configure);
// // run_flash_bwd<Flash_bwd_kernel_traits<64, 128, 128, 8, false, elem_type>>(params, stream, configure);
// // run_flash_bwd<Flash_bwd_kernel_traits<64, 64, 64, 4, false, elem_type>>(params, stream, configure);
// // run_flash_bwd<Flash_bwd_kernel_traits<64, 256, 64, 8, 8, 4, 8, false, false, elem_type>>(params, stream, configure);
// // run_flash_bwd<Flash_bwd_kernel_traits<64, 256, 64, 8, 8, 4, 4, false, false, elem_type>>(params, stream, configure);
// // run_flash_bwd<Flash_bwd_kernel_traits<64, 128, 64, 4, 4, 2, 4, false, false, elem_type>>(params, stream, configure);
// }
template<>
void run_mha_bwd_<cutlass::half_t, 64>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
    run_mha_bwd_hdim64<cutlass::half_t>(params, stream, configure);
}
csrc/flash_attn/src/flash_bwd_hdim96_bf16_sm80.cu
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_bwd_launch_template.h"
// template<>
// void run_mha_bwd_<cutlass::bfloat16_t, 96>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
// using elem_type = cutlass::bfloat16_t;
// if (params.h == params.h_k) {
// run_flash_bwd<Flash_bwd_kernel_traits<96, 64, 128, 8, 2, 4, 4, false, false, elem_type>>(params, stream, configure);
// } else {
// run_flash_bwd_seqq_parallel<Flash_bwd_kernel_traits<96, 128, 64, 8, 4, 4, 4, false, false, elem_type>>(params, stream, configure);
// }
// }
template<>
void run_mha_bwd_<cutlass::bfloat16_t, 96>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
    run_mha_bwd_hdim96<cutlass::bfloat16_t>(params, stream, configure);
}
csrc/flash_attn/src/flash_bwd_hdim96_fp16_sm80.cu
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_bwd_launch_template.h"
// template<>
// void run_mha_bwd_<cutlass::half_t, 96>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
// using elem_type = cutlass::half_t;
// if (params.h == params.h_k) {
// // run_flash_bwd<Flash_bwd_kernel_traits<96, 64, 128, 8, 2, 4, 4, true, false, elem_type>>(params, stream, configure);
// // This is very slightly faster
// run_flash_bwd<Flash_bwd_kernel_traits<96, 64, 128, 8, 2, 4, 4, false, false, elem_type>>(params, stream, configure);
// } else {
// run_flash_bwd_seqq_parallel<Flash_bwd_kernel_traits<96, 128, 64, 8, 4, 4, 4, false, false, elem_type>>(params, stream, configure);
// }
// }
template<>
void run_mha_bwd_<cutlass::half_t, 96>(Flash_bwd_params &params, cudaStream_t stream, const bool configure) {
    run_mha_bwd_hdim96<cutlass::half_t>(params, stream, configure);
}
csrc/flash_attn/src/flash_fwd_hdim128_bf16_sm80.cu
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_fwd_launch_template.h"
// template<>
// void run_mha_fwd_<cutlass::bfloat16_t, 128>(Flash_fwd_params &params, cudaStream_t stream) {
// using elem_type = cutlass::bfloat16_t;
// if (params.p_dropout == 1.f) {
// run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 64, 4, false, false, elem_type>, false>(params, stream);
// } else {
// run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 32, 4, false, false, elem_type>, true>(params, stream);
// }
// }
template<>
void run_mha_fwd_<cutlass::bfloat16_t, 128>(Flash_fwd_params &params, cudaStream_t stream) {
    run_mha_fwd_hdim128<cutlass::bfloat16_t>(params, stream);
}
csrc/flash_attn/src/flash_fwd_hdim128_fp16_sm80.cu
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_fwd_launch_template.h"
// template<>
// void run_mha_fwd_<cutlass::half_t, 128>(Flash_fwd_params &params, cudaStream_t stream) {
// using elem_type = cutlass::half_t;
// if (params.p_dropout == 1.f) {
// // Using 8 warps (128 x 128 and 256 x 64) is 28% slower for seqlen=2k
// run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 64, 4, false, false, elem_type>, false>(params, stream);
// // run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 64, 4, true, false, elem_type>, false>(params, stream);
// // run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 64, 4, false, true, elem_type>, false>(params, stream);
// // run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 64, 4, true, true, elem_type>, false>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 32, 4, false, false, elem_type>, false>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<128, 64, 64, 4, false, false, elem_type>, false>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<128, 64, 128, 4, false, false, elem_type>, false>(params, stream);
// // 1st ones are good for H100, A100
// // 2nd one is good for A6000 bc we get slightly better occupancy
// } else {
// run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 32, 4, false, false, elem_type>, true>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 32, 4, true, false, elem_type>, true>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 32, 4, true, true, elem_type>, true>(params, stream);
// // 1st one is good for H100, A100, A6000
// }
// }
template<>
void run_mha_fwd_<cutlass::half_t, 128>(Flash_fwd_params &params, cudaStream_t stream) {
    run_mha_fwd_hdim128<cutlass::half_t>(params, stream);
}
csrc/flash_attn/src/flash_fwd_hdim160_bf16_sm80.cu
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_fwd_launch_template.h"
// template<>
// void run_mha_fwd_<cutlass::bfloat16_t, 160>(Flash_fwd_params &params, cudaStream_t stream) {
// using elem_type = cutlass::bfloat16_t;
// BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
// run_flash_fwd<Flash_fwd_kernel_traits<160, 128, 32, 4, false, false, elem_type>, Is_dropout>(params, stream);
// });
// }
template<>
void run_mha_fwd_<cutlass::bfloat16_t, 160>(Flash_fwd_params &params, cudaStream_t stream) {
    run_mha_fwd_hdim160<cutlass::bfloat16_t>(params, stream);
}
csrc/flash_attn/src/flash_fwd_hdim160_fp16_sm80.cu
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_fwd_launch_template.h"
// template<>
// void run_mha_fwd_<cutlass::half_t, 160>(Flash_fwd_params &params, cudaStream_t stream) {
// using elem_type = cutlass::half_t;
// BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
// run_flash_fwd<Flash_fwd_kernel_traits<160, 128, 32, 4, false, false, elem_type>, Is_dropout>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<160, 128, 32, 4, false, true, elem_type>, Is_dropout>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<160, 128, 64, 4, false, false, elem_type>, Is_dropout>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<160, 64, 64, 4, false, false, elem_type>, Is_dropout>(params, stream);
// // run_flash_fwd<Flash_fwd_kernel_traits<160, 128, 64, 4, false, elem_type>>(params, stream);
// // run_flash_fwd<Flash_fwd_kernel_traits<160, 64, 128, 4, false, elem_type>>(params, stream);
// // run_flash_fwd<Flash_fwd_kernel_traits<160, 64, 64, 4, false, elem_type>>(params, stream);
// // run_flash_fwd<Flash_fwd_kernel_traits<160, 128, 64, 8, false, elem_type>>(params, stream);
// // run_flash_fwd<Flash_fwd_kernel_traits<160, 128, 128, 8, false, elem_type>>(params, stream);
// // For A6000, no-causal, 1st is fastest. causal, 4th is fastest.
// // For A100, H100, 1st is fastest.
// });
// }
template<>
void run_mha_fwd_<cutlass::half_t, 160>(Flash_fwd_params &params, cudaStream_t stream) {
    run_mha_fwd_hdim160<cutlass::half_t>(params, stream);
}