OpenDAS / Megatron-LM · Commits

Commit 1271fd73, authored Oct 28, 2020 by Deepak Narayanan

Remove unused parameter sharing logic

parent 9b558566
Showing 2 changed files with 2 additions and 44 deletions:

    megatron/arguments.py           +0  -18
    megatron/model/transformer.py   +2  -26
megatron/arguments.py

@@ -107,14 +107,6 @@ def parse_args(extra_args_provider=None, defaults={},
     assert args.min_lr <= args.lr
     if args.save is not None:
         assert args.save_interval is not None
-    # Parameters sharing does not work with torch DDP.
-    if (args.num_unique_layers is not None) and (args.num_layers is not None):
-        assert args.num_unique_layers <= args.num_layers
-        assert args.num_layers % args.num_unique_layers == 0, \
-            'num-layers should be divisible by num-unique-layers.'
-        if args.num_unique_layers < args.num_layers:
-            assert args.DDP_impl == 'local', \
-                'torch-DDP does not work with parameters sharing.'
     # Mixed precision checks.
     if args.fp16_lm_cross_entropy:
         assert args.fp16, 'lm cross entropy in fp16 only support in fp16 mode.'
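For reference, the removed checks enforced two constraints and one compatibility rule. A minimal standalone sketch of the same logic (the helper name validate_layer_sharing is hypothetical, not Megatron-LM API):

    def validate_layer_sharing(num_layers, num_unique_layers, ddp_impl):
        # num-unique-layers may not exceed num-layers, and must divide it evenly.
        assert num_unique_layers <= num_layers
        assert num_layers % num_unique_layers == 0, \
            'num-layers should be divisible by num-unique-layers.'
        # Sharing was only supported with the local DDP implementation.
        if num_unique_layers < num_layers:
            assert ddp_impl == 'local', \
                'torch-DDP does not work with parameters sharing.'

    validate_layer_sharing(4, 2, 'local')  # passes: 2 <= 4 and 4 % 2 == 0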
@@ -158,16 +150,6 @@ def _add_network_size_args(parser):
     group.add_argument('--num-layers', type=int, default=None,
                        help='Number of transformer layers.')
-    group.add_argument('--num-unique-layers', type=int, default=None,
-                       help='Number of unique transformer layers. '
-                       '`num-layers` should be divisible by this value.')
-    group.add_argument('--param-sharing-style', default='grouped',
-                       choices=['grouped', 'spaced'],
-                       help='Ordering of the shared parameters. For example, '
-                       'for a `num-layers`=4 and `--num-unique-layers`=2, '
-                       'we will have the following ordering for two unique '
-                       'layers 1 and 2: '
-                       '    grouped: [1, 2, 1, 2] and spaced: [1, 1, 2, 2].')
     group.add_argument('--hidden-size', type=int, default=None,
                        help='Tansformer hidden size.')
     group.add_argument('--num-attention-heads', type=int, default=None,
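The ordering example in the removed help text can be reproduced from the two index formulas that _get_layer_index (removed from megatron/model/transformer.py below) implemented; a quick sketch using the help text's values:

    num_layers, num_unique_layers = 4, 2

    # 'grouped' cycles through the unique layers; 'spaced' blocks them together.
    grouped = [i % num_unique_layers + 1 for i in range(num_layers)]
    spaced = [i // (num_layers // num_unique_layers) + 1 for i in range(num_layers)]

    print(grouped)  # [1, 2, 1, 2]
    print(spaced)   # [1, 1, 2, 2]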
megatron/model/transformer.py

@@ -506,14 +506,6 @@ class ParallelTransformer(MegatronModule):
         # Number of layers.
         self.num_layers = args.num_layers // args.pipeline_model_parallel_size
-        # TODO: Need to do something different in case self.num_layers != self.num_unique_layers?
-        if args.num_unique_layers is None:
-            self.num_unique_layers = self.num_layers
-        else:
-            self.num_unique_layers = \
-                args.num_unique_layers // args.pipeline_model_parallel_size
-            assert self.num_layers == self.num_unique_layers, \
-                'number of layers should be equal to the number of unique layers'
-        self.param_sharing_style = args.param_sharing_style
         # Transformer layers.
         def build_layer(layer_number):
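One likely reason this logic counted as unused: both counts are divided by the pipeline-model-parallel size, and the else branch then asserts the per-stage counts are equal, so any configuration with actual sharing (num_unique_layers < num_layers) would trip the assert. Illustrative arithmetic with assumed values:

    pipeline_model_parallel_size = 4

    num_layers = 24 // pipeline_model_parallel_size         # 6 layers per stage
    num_unique_layers = 24 // pipeline_model_parallel_size  # 6: assert passes, but no sharing
    num_unique_layers = 12 // pipeline_model_parallel_size  # 3 != 6: assert fires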
@@ -522,16 +514,7 @@ class ParallelTransformer(MegatronModule):
                 output_layer_init_method,
                 layer_number)
         offset = mpu.get_pipeline_model_parallel_rank() * self.num_layers
         self.layers = torch.nn.ModuleList(
-            [build_layer(i + 1 + offset) for i in range(self.num_unique_layers)])
-
-        # Print layer ordering.
-        if self.num_layers != self.num_unique_layers:
-            if torch.distributed.get_rank() == 0:
-                print('> will be using the following layer ordering:')
-                for i in range(self.num_layers):
-                    print('   layer id: {:3d} --> unique layer id: '
-                          '{:3d}'.format(i, self._get_layer_index(i)),
-                          flush=True)
+            [build_layer(i + 1 + offset) for i in range(self.num_layers)])

         if mpu.is_pipeline_last_stage():
             # Final layer norm before output.
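The surviving offset line gives each pipeline stage its global layer numbers; for example, with assumed values:

    # 6 layers per stage: pipeline rank 1 builds global layers 7 through 12.
    pipeline_rank, num_layers = 1, 6
    offset = pipeline_rank * num_layers
    print([i + 1 + offset for i in range(num_layers)])  # [7, 8, 9, 10, 11, 12]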
@@ -539,15 +522,8 @@ class ParallelTransformer(MegatronModule):
                 args.hidden_size,
                 eps=args.layernorm_epsilon)

-    def _get_layer_index(self, layer_number):
-        if self.param_sharing_style == 'grouped':
-            return layer_number % self.num_unique_layers
-        if self.param_sharing_style == 'spaced':
-            return layer_number // (self.num_layers // self.num_unique_layers)
-        assert False, 'should not be here'
-
     def _get_layer(self, layer_number):
-        return self.layers[self._get_layer_index(layer_number)]
+        return self.layers[layer_number]

     def _checkpointed_forward(self, hidden_states, attention_mask):
         """Forward method with activation checkpointing."""
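To see what the deleted indirection provided, here is a self-contained PyTorch sketch (TinyLayer and get_layer are hypothetical stand-ins, not Megatron-LM code) that allocates only num_unique_layers modules and reuses them across num_layers positions via the 'grouped' mapping; shared positions resolve to the same module object, so their parameters are tied:

    import torch

    num_layers, num_unique_layers = 4, 2

    class TinyLayer(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(8, 8)

        def forward(self, x):
            return self.linear(x)

    # Allocate only the unique layers, as the removed ModuleList did.
    layers = torch.nn.ModuleList(TinyLayer() for _ in range(num_unique_layers))

    def get_layer(layer_number):
        # 'grouped' mapping from the removed _get_layer_index.
        return layers[layer_number % num_unique_layers]

    # Positions 0 and 2 map to the same module: parameters are shared.
    assert get_layer(0) is get_layer(2)
    assert get_layer(0) is not get_layer(1)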