OpenDAS / deepspeed

Commit bbd8cd7d authored May 29, 2020 by Jeff Rasley

update tests

parent e04e4016
Showing 1 changed file with 35 additions and 34 deletions (+35, -34)
tests/unit/test_cuda_backward.py (+35, -34)
@@ -285,38 +285,39 @@ def test_backward(batch_size,

    run_backward(ds_config, atol=atol)


@pytest.mark.parametrize('batch_size, hidden_size, seq_len, heads, num_layers, is_preln, use_fp16, atol',
                         [
                             (3,1024,128,16,24,True,False, 0.07),
                             (3,1024,128,16,24,True,True, 0.05),
                             (3,1024,128,16,24,False,False, 0.1),
                             (3,1024,128,16,24,False,True, 0.2),
                         ]) # yapf: disable
def test_backward_stochastic(batch_size,
                             hidden_size,
                             seq_len,
                             heads,
                             num_layers,
                             is_preln,
                             use_fp16,
                             atol):
    # Only run fp16 test cases on devices with 7+ capability.
    major, _ = torch.cuda.get_device_capability()
    if major < 7 and (use_fp16 is True or is_preln is False):
        return

    ds_config = DeepSpeedTransformerConfig()
    ds_config.layer_id = None
    ds_config.batch_size = batch_size
    ds_config.hidden_size = hidden_size
    ds_config.max_seq_length = seq_len
    ds_config.heads = heads
    ds_config.attn_dropout_ratio = 0.0
    ds_config.hidden_dropout_ratio = 0.0
    ds_config.num_hidden_layers = num_layers
    ds_config.pre_layer_norm = is_preln
    ds_config.initializer_range = 0.02
    ds_config.fp16 = use_fp16
    ds_config.stochastic_mode = True

    run_backward(ds_config, atol=atol)
#@pytest.mark.parametrize('batch_size, hidden_size, seq_len, heads, num_layers, is_preln, use_fp16, atol',
# [
# (3,1024,128,16,24,True,False, 0.07),
# (3,1024,128,16,24,True,True, 0.05),
# (3,1024,128,16,24,False,False, 0.1),
# (3,1024,128,16,24,False,True, 0.2),
# ]) # yapf: disable
#def test_backward_stochastic(batch_size,
# hidden_size,
# seq_len,
# heads,
# num_layers,
# is_preln,
# use_fp16,
# atol):
# # Only run fp16 test cases on devices with 7+ capability.
# major, _ = torch.cuda.get_device_capability()
# if major < 7 and (use_fp16 is True or is_preln is False):
# return
#
# ds_config = DeepSpeedTransformerConfig()
# ds_config.layer_id = None
# ds_config.batch_size = batch_size
# ds_config.hidden_size = hidden_size
# ds_config.max_seq_length = seq_len
# ds_config.heads = heads
# ds_config.attn_dropout_ratio = 0.0
# ds_config.hidden_dropout_ratio = 0.0
# ds_config.num_hidden_layers = num_layers
# ds_config.pre_layer_norm = is_preln
# ds_config.initializer_range = 0.02
# ds_config.fp16 = use_fp16
# ds_config.stochastic_mode = True
#
# run_backward(ds_config, atol=atol)
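run_backward is defined earlier in tests/unit/test_cuda_backward.py and is not part of this hunk. For orientation only, here is a minimal, self-contained sketch of the kind of check such a helper performs: the same forward/backward pass is run through a reference fp32 path and a reduced-precision path, and the activations and input gradients must agree within atol (the last value in each parametrize tuple). The function names, shapes, and tolerances below are illustrative assumptions, not the file's actual code.

# Hedged sketch only -- not the run_backward defined in this test file.
# The real helper builds a DeepSpeed transformer stack plus a reference
# implementation from ds_config; this toy reproduces just the pattern of
# comparing two implementations of the same computation within a tolerance.
import torch


def reference_forward(x, w):
    # Reference path, kept in fp32 throughout.
    return torch.tanh(x @ w)


def candidate_forward(x, w):
    # Stand-in for the optimized path under test: same math, but computed
    # in fp16 and cast back, which is where the tolerance comes from.
    return torch.tanh(x.half() @ w.half()).float()


def run_backward_sketch(batch, hidden, atol):
    # Mirror the test's guard: this comparison only makes sense on CUDA.
    if not torch.cuda.is_available():
        return

    torch.manual_seed(0)
    x = 0.1 * torch.randn(batch, hidden, device='cuda')
    w = 0.1 * torch.randn(hidden, hidden, device='cuda')
    x.requires_grad_()

    ref_out = reference_forward(x, w)
    ref_out.sum().backward()
    ref_grad, x.grad = x.grad.clone(), None

    test_out = candidate_forward(x, w)
    test_out.sum().backward()

    # The checks run_backward performs, conceptually: forward activations and
    # input gradients must match the reference within atol.
    assert torch.allclose(ref_out, test_out, atol=atol)
    assert torch.allclose(ref_grad, x.grad, atol=atol)


run_backward_sketch(batch=3, hidden=64, atol=0.05)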