OpenDAS / ColossalAI · Commits

Commit 5a4a3b77
Authored Mar 10, 2022 by Jiang Zhuo; committed by Frank Lee, Mar 11, 2022

fix format (#376)

Parent: ce886a90

Showing 2 changed files with 28 additions and 25 deletions
colossalai/nn/layer/parallel_1d/_operation.py   +24 -25
colossalai/nn/layer/parallel_1d/_utils.py        +4 -0
colossalai/nn/layer/parallel_1d/_operation.py  (view file @ 5a4a3b77)

@@ -7,7 +7,7 @@ except:


class FusedLayerNormAffineFunction1D(torch.autograd.Function):
    r"""
    Layernorm

    :param input: input matrix
@@ -20,27 +20,26 @@ class FusedLayerNormAffineFunction1D(torch.autograd.Function):
    :param eps: a value added to the denominator for numerical stability
    """

    @staticmethod
    def forward(ctx, input, weight, bias, normalized_shape, eps):
        ctx.normalized_shape = normalized_shape
        ctx.eps = eps
        input_ = input.contiguous()
        weight_ = weight.contiguous()
        bias_ = bias.contiguous()
        output, mean, invvar = fused_mix_prec_layer_norm_cuda.forward_affine(
            input_, ctx.normalized_shape, weight_, bias_, ctx.eps)
        ctx.save_for_backward(input_, weight_, bias_, mean, invvar)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input_, weight_, bias_, mean, invvar = ctx.saved_tensors
        grad_input = grad_weight = grad_bias = None
        grad_input, grad_weight, grad_bias \
            = fused_mix_prec_layer_norm_cuda.backward_affine(
                grad_output.contiguous(), mean, invvar, input_,
                ctx.normalized_shape, weight_, bias_, ctx.eps)
        return grad_input, grad_weight, grad_bias, None, None

(The removed and re-added copies of forward() and backward() are identical
token for token: this hunk only re-indents the methods and adds the trailing
newline flagged by "\ No newline at end of file" on the old side.)
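For context on how a class like this is exercised: subclasses of
torch.autograd.Function are invoked through .apply rather than called
directly. The sketch below is illustrative only; it assumes the
fused_mix_prec_layer_norm_cuda extension is built and a CUDA device is
available, and the shapes and eps value are made up, not taken from this
commit.

import torch
import torch.nn.functional as F

# Layer norm computes y = (x - mean) / sqrt(var + eps) * weight + bias,
# with mean and var taken over the normalized dimensions; eps is the
# stability term described in the docstring above.
x = torch.randn(4, 256, device='cuda', requires_grad=True)
weight = torch.ones(256, device='cuda', requires_grad=True)
bias = torch.zeros(256, device='cuda', requires_grad=True)

# .apply runs forward() and registers backward() with the autograd graph;
# the two trailing None grads returned by backward() correspond to the
# non-tensor arguments normalized_shape and eps.
out = FusedLayerNormAffineFunction1D.apply(x, weight, bias, (256,), 1e-5)
out.sum().backward()

# Reference from the stock op, for comparison against the fused kernel.
ref = F.layer_norm(x, (256,), weight, bias, 1e-5)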
colossalai/nn/layer/parallel_1d/_utils.py  (view file @ 5a4a3b77)

@@ -81,6 +81,7 @@ class _ReduceGrad(torch.autograd.Function):
    :param input_: input matrix
    :param parallel_mode: parallel mode
    """

    @staticmethod
    def symbolic(graph, input_):
        return input_

@@ -102,6 +103,7 @@ class _ReduceInput(torch.autograd.Function):
    :param input_: input matrix
    :param parallel_mode: parallel mode
    """

    @staticmethod
    def symbolic(graph, input_):
        return _reduce(input_)

@@ -123,6 +125,7 @@ class _SplitForwardGatherBackward(torch.autograd.Function):
    :param parallel_mode: parallel mode
    :param dim: dimension
    """

    @staticmethod
    def symbolic(graph, input_):
        return _split(input_)

@@ -146,6 +149,7 @@ class _GatherForwardSplitBackward(torch.autograd.Function):
    :param parallel_mode: parallel mode
    :param dim: dimension
    """

    @staticmethod
    def symbolic(graph, input_):
        return _gather(input_)

(Each hunk adds a single line, accounting for the +4 -0 in the stats,
without removing anything.)
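The four classes touched in this file each pair a symbolic() method,
consulted when the graph is traced or exported, with matching forward()
and backward() passes built on the _reduce/_split/_gather communication
primitives. A minimal sketch of the gather-forward/split-backward pattern
follows, assuming plain torch.distributed collectives; the real
implementations also take parallel_mode and dim arguments and route
through ColossalAI's process groups, which are simplified away here.

import torch
import torch.distributed as dist

def _gather_sketch(input_):
    # All-gather every rank's shard and concatenate along the last dim.
    world = dist.get_world_size()
    if world == 1:
        return input_
    parts = [torch.empty_like(input_) for _ in range(world)]
    dist.all_gather(parts, input_.contiguous())
    return torch.cat(parts, dim=-1)

class _GatherForwardSplitBackwardSketch(torch.autograd.Function):
    """Gather in the forward pass, split the gradient in the backward pass."""

    @staticmethod
    def symbolic(graph, input_):
        # Mirrors forward() so a traced or exported graph sees the
        # gathered shape, just as _GatherForwardSplitBackward does above.
        return _gather_sketch(input_)

    @staticmethod
    def forward(ctx, input_):
        return _gather_sketch(input_)

    @staticmethod
    def backward(ctx, grad_output):
        # Keep only the gradient slice that matches this rank's local shard.
        rank = dist.get_rank()
        world = dist.get_world_size()
        return grad_output.chunk(world, dim=-1)[rank].contiguous()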