OpenDAS / ColossalAI · Commit 8ca2a856

Authored May 13, 2022 by Wangbo Zhao(黑色枷锁); committed by binmakeswell on May 17, 2022.
[NFC] polish colossalai/kernel/cuda_native/scaled_softmax.py code style (#955)
Parent: f6970ef8

Changes: 1 changed file, with 13 additions and 23 deletions (+13 -23).

colossalai/kernel/cuda_native/scaled_softmax.py @ 8ca2a856

Every hunk in this commit is a whitespace-only reflow of an existing statement (the commit is tagged NFC, non-functional change), so each hunk below is shown in its post-change form.
```
@@ -28,9 +28,7 @@ class ScaledUpperTriangMaskedSoftmax(torch.autograd.Function):
            raise RuntimeError('ScaledUpperTriangMaskedSoftmax requires cuda extensions')

        scale_t = torch.tensor([scale])
        softmax_results = colossal_scaled_upper_triang_masked_softmax.forward(inputs, scale_t[0])
        ctx.save_for_backward(softmax_results, scale_t)
        return softmax_results
```
```
@@ -43,9 +41,7 @@ class ScaledUpperTriangMaskedSoftmax(torch.autograd.Function):
            raise RuntimeError('ScaledUpperTriangMaskedSoftmax requires cuda extensions')

        softmax_results, scale_t = ctx.saved_tensors
        input_grads = colossal_scaled_upper_triang_masked_softmax.backward(output_grads, softmax_results, scale_t[0])
        return input_grads, None
```
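For context on the pattern reformatted in the two hunks above: `forward` calls the fused CUDA kernel, stashes the softmax output together with the scale via `ctx.save_for_backward`, and `backward` retrieves them to compute the input gradient. A minimal pure-PyTorch sketch of the same pattern, with the `colossal_scaled_upper_triang_masked_softmax` kernel replaced by ordinary tensor ops purely for illustration (not the library's implementation):

```python
import torch


class ScaledUpperTriangMaskedSoftmaxSketch(torch.autograd.Function):
    """Illustrative stand-in for the CUDA-backed Function above (assumed input shape [..., sq, sk])."""

    @staticmethod
    def forward(ctx, inputs, scale):
        scale_t = torch.tensor([scale])
        sq, sk = inputs.shape[-2:]
        # Upper-triangular (causal) mask: position i may not attend to any j > i.
        causal_mask = torch.triu(torch.ones(sq, sk, dtype=torch.bool, device=inputs.device), diagonal=1)
        scores = (inputs * scale_t[0]).masked_fill(causal_mask, float('-inf'))
        softmax_results = torch.softmax(scores, dim=-1)
        ctx.save_for_backward(softmax_results, scale_t)
        return softmax_results

    @staticmethod
    def backward(ctx, output_grads):
        softmax_results, scale_t = ctx.saved_tensors
        # Softmax Jacobian-vector product, then undo the input scaling.
        dot = (output_grads * softmax_results).sum(dim=-1, keepdim=True)
        input_grads = (output_grads - dot) * softmax_results * scale_t[0]
        # One gradient per forward input: d/d(inputs), and None for the scale argument.
        return input_grads, None
```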
```
@@ -81,9 +77,7 @@ class ScaledMaskedSoftmax(torch.autograd.Function):
        softmax_results, scale_t = ctx.saved_tensors
        input_grads = colossal_scaled_masked_softmax.backward(output_grads, softmax_results, scale_t[0])
        return input_grads, None, None
```
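As a usage note that is not part of the diff: `torch.autograd.Function` subclasses like the two above are invoked through `.apply`, which routes through the `forward`/`backward` pair. A hypothetical call (the shapes, scale value, and import path are assumptions; the fused path also requires the compiled CUDA extension and a half-precision CUDA tensor):

```python
import torch

# Assumed import path, mirroring the file location in this commit.
from colossalai.kernel.cuda_native.scaled_softmax import ScaledUpperTriangMaskedSoftmax

logits = torch.randn(8, 128, 128, dtype=torch.float16, device='cuda', requires_grad=True)
probs = ScaledUpperTriangMaskedSoftmax.apply(logits, 0.125)  # forward(ctx, inputs, scale)
probs.sum().backward()                                       # gradients flow through backward() above
```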
```
@@ -114,9 +108,8 @@ class FusedScaleMaskSoftmax(nn.Module):
        super(FusedScaleMaskSoftmax, self).__init__()
        self.input_in_fp16 = input_in_fp16
        self.input_in_bf16 = input_in_bf16
        assert not (self.input_in_fp16 and self.input_in_bf16), "both fp16 and bf16 flags cannot be active at the same time."
        self.input_in_float16 = self.input_in_fp16 or self.input_in_bf16
        self.attn_mask_type = attn_mask_type
        self.scaled_masked_softmax_fusion = scaled_masked_softmax_fusion
```
```
@@ -124,9 +117,7 @@ class FusedScaleMaskSoftmax(nn.Module):
        self.softmax_in_fp32 = softmax_in_fp32
        self.scale = scale
        assert (self.scale is None or softmax_in_fp32), "softmax should be in fp32 when scaled"

    def forward(self, input, mask):
        # [b, np, sq, sk]
```
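The two asserts reformatted in the hunks above encode the module's precision invariants: the fp16 and bf16 input flags are mutually exclusive, and a user-supplied scale is only allowed when the softmax itself runs in fp32. A minimal sketch covering only those invariants (a stand-in with a reduced argument list, not the library's full constructor):

```python
import torch.nn as nn


class FusedScaleMaskSoftmaxSketch(nn.Module):
    """Stand-in illustrating only the constructor invariants visible in the diff."""

    def __init__(self, input_in_fp16, input_in_bf16, softmax_in_fp32, scale=None):
        super().__init__()
        assert not (input_in_fp16 and input_in_bf16), \
            "both fp16 and bf16 flags cannot be active at the same time."
        assert scale is None or softmax_in_fp32, "softmax should be in fp32 when scaled"
        self.input_in_fp16 = input_in_fp16
        self.input_in_bf16 = input_in_bf16
        self.input_in_float16 = input_in_fp16 or input_in_bf16  # either half-precision flag
        self.softmax_in_fp32 = softmax_in_fp32
        self.scale = scale
```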
```
@@ -140,8 +131,7 @@ class FusedScaleMaskSoftmax(nn.Module):
    def is_kernel_available(self, mask, b, np, sq, sk):
        attn_batches = b * np

        if (self.scaled_masked_softmax_fusion    # user want to fuse
                and self.input_in_float16    # input must be fp16
                and mask is not None    # mask tensor must not be None
                and 16 < sk <= 2048    # sk must be 16 ~ 2048
                # remaining conditions of this if-statement are collapsed in the diff view
```
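The final hunk reflows the opening of the condition in `is_kernel_available`, which gates whether the fused CUDA kernel is used for a given call. A simplified stand-in for the visible part of that gate (the real method also applies size and divisibility checks that are collapsed in this diff):

```python
def fused_kernel_usable(fusion_enabled, input_in_float16, mask, b, np, sq, sk):
    """Simplified stand-in for FusedScaleMaskSoftmax.is_kernel_available (visible conditions only)."""
    attn_batches = b * np  # the full method applies further checks to attn_batches/sq/sk
    return (
        fusion_enabled            # user wants to fuse
        and input_in_float16      # input must be fp16/bf16
        and mask is not None      # mask tensor must not be None
        and 16 < sk <= 2048       # sk must be 16 ~ 2048
    )
```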