OpenDAS / apex · commit 3c53cf81

Merge branch 'master' into prepare_fused

Authored Apr 11, 2019 by Michael Carilli
Parents: b7f10ad0, 4dc711bc

Showing 4 changed files with 50 additions and 4 deletions (+50, -4)
apex/amp/lists/torch_overrides.py                        +4   -3
apex/normalization/fused_layer_norm.py                   +4   -0
tests/L0/run_fused_layer_norm/test_fused_layer_norm.py   +41  -0
tests/L0/run_test.py                                     +1   -1
apex/amp/lists/torch_overrides.py  (view file @ 3c53cf81)

@@ -5,8 +5,9 @@ from .. import utils
 MODULE = torch

 FP16_FUNCS = [
-    # Math
-    # TODO: why are these in top-level torch namespace?
+    # Low level functions wrapped by torch.nn layers.
+    # The wrapper layers contain the weights which are then passed in as a parameter
+    # to these functions.
     'conv1d',
     'conv2d',
     'conv3d',
@@ -14,6 +15,7 @@ FP16_FUNCS = [
     'conv_transpose2d',
     'conv_transpose3d',
     'conv_tbc',
+    'prelu',

     # BLAS
     'addmm',
@@ -76,7 +78,6 @@ CASTS = [
     'addcmul',
     'atan2',
     'cross',
-    'prelu',

     # Element-wise _or_ tensor-wise math
     'add',
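The new comment describes why these low-level functions live in the top-level torch namespace: the torch.nn wrapper layers own the learnable weights and pass them into these functional calls, which are what amp patches. A minimal illustration (my own sketch, not part of this commit), using PReLU since this change moves 'prelu' from the CASTS list into FP16_FUNCS:

import torch

# The nn wrapper layer owns the learnable weight.
m = torch.nn.PReLU()
x = torch.randn(4, 8)

y_layer = m(x)                     # the layer forwards its weight to the low-level op
y_func = torch.prelu(x, m.weight)  # the top-level function that amp wraps for FP16 casting

assert torch.allclose(y_layer, y_func)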
apex/normalization/fused_layer_norm.py  (view file @ 3c53cf81)

@@ -3,6 +3,7 @@ import torch
 import numbers
 from torch.nn.parameter import Parameter
 from torch.nn import init
+from torch.nn import functional as F
 import importlib

 class FusedLayerNormAffineFunction(torch.autograd.Function):
@@ -144,6 +145,9 @@ class FusedLayerNorm(torch.nn.Module):
             init.zeros_(self.bias)

     def forward(self, input):
+        if not input.is_cuda:
+            return F.layer_norm(
+                input, self.normalized_shape, self.weight, self.bias, self.eps)
         if self.elementwise_affine:
             return FusedLayerNormAffineFunction(self.normalized_shape, self.eps)(
                 input, self.weight, self.bias)
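The added lines give FusedLayerNorm a CPU fallback: input that is not a CUDA tensor is routed to torch.nn.functional.layer_norm, while CUDA input still takes the fused-kernel path. A short usage sketch (constructor arguments borrowed from the test added below; the second call assumes a CUDA device is available):

import torch
import apex

ln = apex.normalization.FusedLayerNorm(
    normalized_shape=[32, 64], elementwise_affine=True)
x = torch.randn(16, 32, 64)

y_cpu = ln(x)                # CPU input -> falls back to F.layer_norm
y_gpu = ln.cuda()(x.cuda())  # CUDA input -> fused CUDA kernel (requires a GPU)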
tests/L0/run_fused_layer_norm/test_fused_layer_norm.py  (new file, mode 100644, view file @ 3c53cf81)

import unittest
import os
import random

import torch
import apex

class TestFusedLayerNorm(unittest.TestCase):
    def setUp(self):
        self.module = apex.normalization.FusedLayerNorm(
            normalized_shape=[32, 64], elementwise_affine=False)
        self.input_ = torch.randn(16, 32, 64)
        torch.cuda.manual_seed(42)

    def forward_cpu(self, input_):
        self.module.cpu()
        return self.module(input_.cpu())

    def forward_cuda(self, input_):
        self.module.cuda()
        return self.module(input_.cuda())

    def test_forward_cuda(self):
        out_ = self.forward_cuda(self.input_)
        assert out_.is_cuda == True

    def test_forward_cpu(self):
        out_ = self.forward_cpu(self.input_)
        assert out_.is_cuda == False

    def test_same_output(self):
        out_cpu = self.forward_cpu(self.input_)
        out_cuda = self.forward_cuda(self.input_)
        torch.testing.assert_allclose(out_cpu, out_cuda.cpu())

class TestFusedLayerNormElemWise(TestFusedLayerNorm):
    def setUp(self):
        self.module = apex.normalization.FusedLayerNorm(
            normalized_shape=[32, 64], elementwise_affine=True)
        self.input_ = torch.randn(16, 32, 64)
        torch.cuda.manual_seed(42)
\ No newline at end of file
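For reference, the new test module can also be run on its own with the standard library loader, before it is wired into the L0 runner in the next file. This is a sketch: the directory path is taken from the diff header, and a CUDA device plus a source build of apex are assumed.

import unittest

# Discover and run only the new fused layer norm tests.
suite = unittest.TestLoader().discover("tests/L0/run_fused_layer_norm")
unittest.TextTestRunner(verbosity=2).run(suite)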
tests/L0/run_test.py  (view file @ 3c53cf81)

 import unittest
 import sys

-test_dirs = ["run_amp", "run_fp16util", "run_mixed_adam"]
+test_dirs = ["run_amp", "run_fp16util", "run_mixed_adam", "run_fused_layer_norm"]

 runner = unittest.TextTestRunner(verbosity=2)
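Only the test_dirs line changes here; the discovery loop itself sits outside the visible hunk. A sketch of how a runner like this typically iterates over the listed directories (the actual loop in run_test.py is not shown in this diff):

import sys
import unittest

test_dirs = ["run_amp", "run_fp16util", "run_mixed_adam", "run_fused_layer_norm"]
runner = unittest.TextTestRunner(verbosity=2)

errcode = 0
for test_dir in test_dirs:
    # Discover the unittest suite in each listed directory and run it.
    suite = unittest.TestLoader().discover(test_dir)
    if not runner.run(suite).wasSuccessful():
        errcode = 1
sys.exit(errcode)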