OpenDAS / ColossalAI · Commits

Commit 69d9180c
Authored Jan 07, 2023 by jiaruifang
Parent 4e960396

    [hotfix] issue #2388

Showing 2 changed files with 16 additions and 13 deletions:

    colossalai/kernel/cuda_native/layer_norm.py        +8  -7
    colossalai/kernel/cuda_native/scaled_softmax.py    +8  -6
colossalai/kernel/cuda_native/layer_norm.py
@@ -16,17 +16,17 @@ class FusedLayerNormAffineFunction(torch.autograd.Function):
     @custom_fwd(cast_inputs=torch.float32)
     def forward(ctx, input, weight, bias, normalized_shape, eps):
         try:
-            import colossalai._C.layer_norm
+            from colossalai._C import layer_norm
         except ImportError:
-            raise RuntimeError('FusedLayerNormAffineFunction requires cuda extensions')
+            from colossalai.kernel.op_builder.layernorm import LayerNormBuilder
+            layer_norm = LayerNormBuilder().load()
 
         ctx.normalized_shape = normalized_shape
         ctx.eps = eps
         input_ = input.contiguous()
         weight_ = weight.contiguous()
         bias_ = bias.contiguous()
-        output, mean, invvar = colossalai._C.layer_norm.forward_affine(input_, ctx.normalized_shape, weight_, bias_,
-                                                                       ctx.eps)
+        output, mean, invvar = layer_norm.forward_affine(input_, ctx.normalized_shape, weight_, bias_, ctx.eps)
         ctx.save_for_backward(input_, weight_, bias_, mean, invvar)
         return output
@@ -35,14 +35,15 @@ class FusedLayerNormAffineFunction(torch.autograd.Function):
     @custom_bwd
     def backward(ctx, grad_output):
         try:
-            import colossalai._C.layer_norm
+            from colossalai._C import layer_norm
         except ImportError:
-            raise RuntimeError('FusedLayerNormAffineFunction requires cuda extensions')
+            from colossalai.kernel.op_builder.layernorm import LayerNormBuilder
+            layer_norm = LayerNormBuilder().load()
 
         input_, weight_, bias_, mean, invvar = ctx.saved_tensors
         grad_input = grad_weight = grad_bias = None
         grad_input, grad_weight, grad_bias \
-            = colossalai._C.layer_norm.backward_affine(
+            = layer_norm.backward_affine(
                 grad_output.contiguous(), mean, invvar, input_, ctx.normalized_shape, weight_, bias_, ctx.eps)
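
For context, a minimal usage sketch (not part of this commit): after the change, a missing prebuilt colossalai._C extension no longer raises RuntimeError; the first call instead JIT-builds the kernel through LayerNormBuilder. The tensor shapes and the eps value below are illustrative assumptions, and a CUDA device plus a working CUDA toolchain are required.

import torch

from colossalai.kernel.cuda_native.layer_norm import FusedLayerNormAffineFunction

hidden = 1024
x = torch.randn(8, hidden, device='cuda', requires_grad=True)
weight = torch.ones(hidden, device='cuda', requires_grad=True)
bias = torch.zeros(hidden, device='cuda', requires_grad=True)

# forward() saves input_, weight_, bias_, mean and invvar for the backward
# pass; with this commit, the layer_norm kernel is built on first use if the
# prebuilt colossalai._C module is absent.
out = FusedLayerNormAffineFunction.apply(x, weight, bias, (hidden,), 1e-5)
out.sum().backward()    # backward() routes through layer_norm.backward_affine
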
colossalai/kernel/cuda_native/scaled_softmax.py
@@ -53,26 +53,28 @@ class ScaledMaskedSoftmax(torch.autograd.Function):
     @staticmethod
     def forward(ctx, inputs, mask, scale):
         try:
-            import colossalai._C.scaled_masked_softmax
+            from colossalai._C import scaled_masked_softmax
         except ImportError:
-            raise RuntimeError('ScaledMaskedSoftmax requires cuda extensions')
+            from colossalai.kernel.op_builder.scaled_masked_softmax import ScaledMaskedSoftmaxBuilder
+            scaled_masked_softmax = ScaledMaskedSoftmaxBuilder().load()
 
         scale_t = torch.tensor([scale])
-        softmax_results = colossalai._C.scaled_masked_softmax.forward(inputs, mask, scale_t[0])
+        softmax_results = scaled_masked_softmax.forward(inputs, mask, scale_t[0])
         ctx.save_for_backward(softmax_results, scale_t)
         return softmax_results
 
     @staticmethod
     def backward(ctx, output_grads):
         try:
-            import colossalai._C.scaled_masked_softmax
+            from colossalai._C import scaled_masked_softmax
         except ImportError:
-            raise RuntimeError('ScaledMaskedSoftmax requires cuda extensions')
+            from colossalai.kernel.op_builder.scaled_masked_softmax import ScaledMaskedSoftmaxBuilder
+            scaled_masked_softmax = ScaledMaskedSoftmaxBuilder().load()
 
         softmax_results, scale_t = ctx.saved_tensors
-        input_grads = colossalai._C.scaled_masked_softmax.backward(output_grads, softmax_results, scale_t[0])
+        input_grads = scaled_masked_softmax.backward(output_grads, softmax_results, scale_t[0])
         return input_grads, None, None
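
Both files apply the same fix shape: try the prebuilt colossalai._C kernel first, and fall back to a JIT build via the matching op builder instead of raising RuntimeError. A distilled sketch of that pattern, using the names from the diff above (the wrapper function itself is hypothetical, not part of the commit):

def load_scaled_masked_softmax():
    # Prefer the prebuilt extension shipped with the installed package...
    try:
        from colossalai._C import scaled_masked_softmax
    except ImportError:
        # ...otherwise JIT-compile the kernel on first use.
        from colossalai.kernel.op_builder.scaled_masked_softmax import ScaledMaskedSoftmaxBuilder
        scaled_masked_softmax = ScaledMaskedSoftmaxBuilder().load()
    return scaled_masked_softmax

One trade-off of writing the fallback inline: it runs on every forward() and backward() call, so the builder's load() is presumably expected to cache the compiled module rather than recompile each time.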