OpenDAS / ColossalAI · commit 0803a614

[shardformer] add linearconv1d test (#4067)

* add linearconv1d test
* add linearconv1d test

Authored Jun 22, 2023 by FoolPlayer; committed by Frank Lee on Jul 04, 2023.
Parent: 8eb09a4c

Showing 4 changed files with 122 additions and 34 deletions:

  colossalai/shardformer/layer/linear_conv.py               +13  -23
  colossalai/shardformer/policies/gpt2.py                    +2   -8
  tests/test_shardformer/test_layer/test_linearconv_1d.py  +107   -0
  tests/test_shardformer/test_model/test_shard_gpt2.py       +0   -3
colossalai/shardformer/layer/linear_conv.py

```diff
@@ -103,10 +103,15 @@ class LinearConv1D_Col(ParallelModule):
         self.reset_parameters(weight_initializer, bias_initializer)
 
     @staticmethod
-    def from_native_module(module: nn.Linear, process_group: Union[ProcessGroup, List[ProcessGroup]], n_cast: int,
+    def from_native_module(module: nn.Linear, process_group: Union[ProcessGroup, List[ProcessGroup]], n_fused: int,
                            *args, **kwargs) -> ParallelModule:
         r"""
         Convert a huggingface layer `Conv1D` in gpt2 to a parallelized linear layer.
+
+        Args:
+            module (`nn.Linear`): The module to be converted.
+            process_group (`Union[ProcessGroup, List[ProcessGroup]]`): The process group to be used for weight sharding and communication.
+            n_fused (int): The number of layers to be fused. In GPT2, Q,K,V are fused in one weight.
         """
         # get the attributes
         in_features = module.weight.shape[0]
@@ -135,20 +140,20 @@ class LinearConv1D_Col(ParallelModule):
         # first rearange the order of weight and bias
         world_size = dist.get_world_size(group=process_group)
-        order = torch.arange(world_size * n_cast)
+        order = torch.arange(world_size * n_fused)
         new_order = []
         for i in range(world_size):
             new_order.append(order[i::world_size])
         new_order = torch.cat(new_order)
 
-        weight_chunks = torch.chunk(module.weight.data, world_size * n_cast, dim=1)
+        weight_chunks = torch.chunk(module.weight.data, world_size * n_fused, dim=1)
         rearanged_weight_chunks = [weight_chunks[i] for i in new_order]
         rearanged_weight = torch.cat(rearanged_weight_chunks, dim=1)
         sharded_weight = shard_colwise(rearanged_weight, process_group)
         linear_1d.weight.data.copy_(sharded_weight.T.contiguous())
 
         if bias:
-            bias_chunks = torch.chunk(module.bias.data, world_size * n_cast, dim=0)
+            bias_chunks = torch.chunk(module.bias.data, world_size * n_fused, dim=0)
             rearanged_bias_chunks = [bias_chunks[i] for i in new_order]
             rearanged_bias = torch.cat(rearanged_bias_chunks, dim=0)
             sharded_bias = shard_colwise(rearanged_bias, process_group)
@@ -260,8 +265,8 @@ class LinearConv1D_Row(ParallelModule):
         self.reset_parameters(weight_initializer, bias_initializer)
 
     @staticmethod
-    def from_native_module(module: nn.Linear, process_group: Union[ProcessGroup, List[ProcessGroup]], n_cast: int,
+    def from_native_module(module: nn.Linear, process_group: Union[ProcessGroup, List[ProcessGroup]],
                            *args, **kwargs) -> ParallelModule:
         r"""
         Convert a native PyTorch linear layer to a parallelized linear layer.
         """
@@ -289,26 +294,11 @@ class LinearConv1D_Row(ParallelModule):
         with torch.no_grad():
             # the weigh to the linear layer is a transpose
             # thus shard on col is equal to shard on row
-            # first rearange the order of weight and bias
-            world_size = dist.get_world_size(group=process_group)
-            order = torch.arange(world_size * n_cast)
-            new_order = []
-            for i in range(world_size):
-                new_order.append(order[i::world_size])
-            new_order = torch.cat(new_order)
-
-            weight_chunks = torch.chunk(module.weight.data, world_size * n_cast, dim=0)
-            rearanged_weight_chunks = [weight_chunks[i] for i in new_order]
-            rearanged_weight = torch.cat(rearanged_weight_chunks, dim=0)
-            sharded_weight = shard_rowwise(rearanged_weight, process_group)
+            sharded_weight = shard_rowwise(module.weight.data, process_group)
             linear_1d.weight.data.copy_(sharded_weight.T.contiguous())
 
             if bias:
-                bias_chunks = torch.chunk(module.bias.data, world_size * n_cast, dim=0)
-                rearanged_bias_chunks = [bias_chunks[i] for i in new_order]
-                rearanged_bias = torch.cat(rearanged_bias_chunks, dim=0)
-                linear_1d.bias.copy_(rearanged_bias.contiguous())
+                linear_1d.bias.copy_(module.bias.data)
 
         return linear_1d
```
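For readers unfamiliar with the interleaving above, here is a minimal, CPU-only sketch (not part of the commit; the toy sizes, world_size=2 and n_fused=3 are assumptions) of why LinearConv1D_Col rearranges the fused QKV weight before column sharding: a naive column shard would hand rank 0 only a slice of Q, while the reordering gives every rank a contiguous block containing its own slice of Q, K and V. The row-parallel layer shards the input dimension instead, which fusion does not interleave, which is presumably why this commit can drop the n_cast parameter and the rearrangement from LinearConv1D_Row.

```python
import torch

world_size, n_fused = 2, 3        # two tensor-parallel ranks, fused Q/K/V
nx, nf = 4, 12                    # toy Conv1D weight: (in, 3 projections of width 4)

weight = torch.arange(nx * nf).reshape(nx, nf)   # columns 0-3 = Q, 4-7 = K, 8-11 = V

# Same reordering as from_native_module: interleave chunks so each rank's
# column shard contains its own slice of Q, K and V.
order = torch.arange(world_size * n_fused)                        # [0, 1, 2, 3, 4, 5]
new_order = torch.cat([order[i::world_size] for i in range(world_size)])
# new_order == [0, 2, 4, 1, 3, 5]

chunks = torch.chunk(weight, world_size * n_fused, dim=1)         # 6 chunks of 2 columns
rearranged = torch.cat([chunks[i] for i in new_order], dim=1)

rank0, rank1 = torch.chunk(rearranged, world_size, dim=1)         # the column shards
print(rank0[0])   # tensor([0, 1, 4, 5, 8, 9])   -> Q, K and V columns for rank 0
print(rank1[0])   # tensor([2, 3, 6, 7, 10, 11]) -> Q, K and V columns for rank 1
```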
colossalai/shardformer/policies/gpt2.py

```diff
@@ -44,29 +44,23 @@ class GPT2Policy(Policy):
                     suffix="attn.c_attn",
                     target_module=col_nn.LinearConv1D_Col,
                     kwargs={
-                        "n_cast": 3,
+                        "n_fused": 3,
                     },
                 ),
                 SubModuleReplacementDescription(
                     suffix="attn.c_proj",
                     target_module=col_nn.LinearConv1D_Row,
-                    kwargs={
-                        "n_cast": 1,
-                    },
                 ),
                 SubModuleReplacementDescription(
                     suffix="mlp.c_fc",
                     target_module=col_nn.LinearConv1D_Col,
                     kwargs={
-                        "n_cast": 1,
+                        "n_fused": 1,
                     },
                 ),
                 SubModuleReplacementDescription(
                     suffix="mlp.c_proj",
                     target_module=col_nn.LinearConv1D_Row,
-                    kwargs={
-                        "n_cast": 1,
-                    },
                 ),
                 SubModuleReplacementDescription(
                     suffix="attn.attn_dropout",
```
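The policy passes n_fused=3 only for attn.c_attn because HuggingFace GPT-2 fuses the query, key and value projections into that single Conv1D, while mlp.c_fc is a single un-fused projection; the c_proj replacements lose their kwargs entirely because LinearConv1D_Row no longer takes the parameter. A quick way to see the fused layout (an illustration, not part of the commit; assumes the transformers package and a download of the gpt2 weights):

```python
from transformers import GPT2Model

model = GPT2Model.from_pretrained('gpt2')     # hidden_size = 768
block = model.h[0]
print(block.attn.c_attn.weight.shape)         # torch.Size([768, 2304]): 3 fused projections
print(block.mlp.c_fc.weight.shape)            # torch.Size([768, 3072]): 1 projection
```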
tests/test_shardformer/test_layer/test_linearconv_1d.py (new file, mode 100644)

```python
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.testing import assert_close

import colossalai
from colossalai.shardformer.layer import LinearConv1D_Col, LinearConv1D_Row
from colossalai.testing import rerun_if_address_is_in_use, spawn


# This code is copied from https://github.com/huggingface/transformers
class Conv1D(nn.Module):
    """
    1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).

    Basically works like a linear layer but the weights are transposed.

    Args:
        nf (`int`): The number of output features.
        nx (`int`): The number of input features.
    """

    def __init__(self, nf, nx):
        super().__init__()
        self.nf = nf
        self.weight = nn.Parameter(torch.empty(nx, nf))
        self.bias = nn.Parameter(torch.zeros(nf))
        nn.init.normal_(self.weight, std=0.02)

    def forward(self, x):
        size_out = x.size()[:-1] + (self.nf,)
        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
        x = x.view(size_out)
        return x


def rearrange(tensor: torch.Tensor, dim: int):
    tensor = tensor.clone()
    world_size = 2
    order = torch.arange(world_size * 3)
    new_order = []
    for i in range(world_size):
        new_order.append(order[i::world_size])
    new_order = torch.cat(new_order)

    tensor_chunks = torch.chunk(tensor, world_size * 3, dim=dim)
    rearanged_tensor_chunks = [tensor_chunks[i] for i in new_order]
    rearanged_tensor = torch.cat(rearanged_tensor_chunks, dim=dim)
    return rearanged_tensor


def check_linear_conv_1d_col():
    linear = Conv1D(192, 48).cuda()
    linear_conv_col = LinearConv1D_Col.from_native_module(linear, process_group=None, gather_output=True, n_fused=3)

    assert linear_conv_col.weight.shape == torch.Size([96, 48])
    assert linear_conv_col.bias.shape == torch.Size([96])

    # check computation correctness
    x = torch.rand(4, 48).cuda()
    out = linear(x)
    gather_out = linear_conv_col(x)
    assert_close(rearrange(out, 1), gather_out)

    # check backward correctness
    out.sum().backward()
    gather_out.sum().backward()

    rank = dist.get_rank()
    target_grad = torch.chunk(linear.weight.grad, 2, dim=1)[rank]
    assert_close(target_grad.transpose(0, 1).contiguous(), linear_conv_col.weight.grad)


def check_linear_1d_row():
    linear = Conv1D(192, 48).cuda()
    linear_row = LinearConv1D_Row.from_native_module(linear, process_group=None, parallel_input=False)

    assert linear_row.weight.shape == torch.Size([192, 24])
    assert linear_row.bias.shape == torch.Size([192])

    # check computation correctness
    x = torch.rand(4, 48).cuda()
    out = linear(x)
    gather_out = linear_row(x)
    assert_close(out, gather_out)

    # check backward correctness
    out.sum().backward()
    gather_out.sum().backward()

    rank = dist.get_rank()
    target_grad = torch.chunk(linear.weight.grad, 2, dim=1)[rank]
    assert_close(target_grad, linear_row.weight.grad)


def run_dist(rank, world_size, port):
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')

    check_linear_conv_1d_col()


@rerun_if_address_is_in_use()
def test_linearconv():
    spawn(run_dist, nprocs=2)


if __name__ == '__main__':
    test_linearconv()
```
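Note that run_dist only exercises check_linear_conv_1d_col; check_linear_1d_row is defined but not yet called. The rearrange helper mirrors the chunk reordering that LinearConv1D_Col.from_native_module applies to the fused weight, which is why the reference Conv1D output is passed through rearrange(out, 1) before being compared with the gathered parallel output, and why the expected weight gradient is transposed: Conv1D stores its weight as (in, out) while the parallel layer stores (out, in). Running the file directly (python tests/test_shardformer/test_layer/test_linearconv_1d.py) spawns a two-process NCCL group, so two CUDA devices are assumed.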
tests/test_shardformer/test_model/test_shard_gpt2.py

```diff
@@ -42,9 +42,6 @@ def check_gpt2(rank, world_size, port):
     sub_model_zoo = model_zoo.get_sub_registry('transformers_gpt')
 
     for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items():
-        print(name)
-        # if name == 'transformers_gpt':
-        #     continue
         org_model, sharded_model = build_model(world_size, model_fn)
         check_forward_backward(org_model, sharded_model, data_gen_fn, output_transform_fn, loss_fn)
```