Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
ColossalAI
Commits
1b657f9c
Unverified
Commit
1b657f9c
authored
Jun 27, 2022
by
Jiarui Fang
Committed by
GitHub
Jun 27, 2022
Browse files
[tensor] revert local view back (#1178)
parent
0dd4e2bb
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
10 additions
and
20 deletions
+10
-20
colossalai/nn/_ops/embedding.py
colossalai/nn/_ops/embedding.py
+1
-1
colossalai/tensor/colo_parameter.py
colossalai/tensor/colo_parameter.py
+0
-10
colossalai/tensor/colo_tensor.py
colossalai/tensor/colo_tensor.py
+4
-4
tests/test_tensor/test_tensor.py
tests/test_tensor/test_tensor.py
+5
-5
No files found.
colossalai/nn/_ops/embedding.py
View file @
1b657f9c
...
...
@@ -52,7 +52,7 @@ def colo_embedding_1Drow(input_tensor: ColoTensor,
     input_tensor = input_tensor.convert_to_dist_spec(distspec.replicate(weight.tensor_spec.get_process_group()))
     tensor_parallel_rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)
-    num_embeddings_per_partition = weight.size_base(0)
+    num_embeddings_per_partition = weight.size_local(0)
     vocab_start_index = tensor_parallel_rank * num_embeddings_per_partition
     vocab_end_index = vocab_start_index + num_embeddings_per_partition
...
...
colossalai/tensor/colo_parameter.py
View file @
1b657f9c
...
...
@@ -101,13 +101,3 @@ class ColoParameter(ColoTensor, torch.nn.Parameter):
         # TODO(jzy) we don't support object reflection now.
         # distspec cannot be pickled or rebuilt because it's tightly connected to runtime attribute `process_group`.
         raise NotImplementedError
-
-    #### the ColoParameter should use the torch.Tensor's builtin methodes ###
-    def view(self, *args) -> 'ColoTensor':
-        return super().view_base(*args)
-
-    def size(self, *args, **kwargs) -> torch.Size:
-        # import inspect
-        # print(*['{:40}| {}:{}\n'.format(x.function, x.filename, x.lineno) for x in inspect.stack()])
-        return super().size_base(*args, **kwargs)
colossalai/tensor/colo_tensor.py
View file @
1b657f9c
...
...
@@ -147,13 +147,13 @@ class ColoTensor(torch.Tensor):
     ##### override builtin functions which must use tensor in replicate placement ####
-    def view_base(self, *args) -> 'ColoTensor':
+    def view_local(self, *args) -> 'ColoTensor':
         return super().view(*args)

-    def size_base(self, *args, **kwargs) -> torch.Size:
+    def size_local(self, *args, **kwargs) -> torch.Size:
         return super().size(*args, **kwargs)

-    def view(self, *args) -> 'ColoTensor':
+    def view_global(self, *args) -> 'ColoTensor':
         """override the torch buildin view()
         the args passed in must be in a replicate placement.
         Returns:
...
...
@@ -167,7 +167,7 @@ class ColoTensor(torch.Tensor):
         self._tensor_spec.dist_spec = distspec.replicate()
         return super().view(*args)

-    def size(self, args: Optional[int] = None):
+    def size_global(self, args: Optional[int] = None):
         """override the torch buildin size()
         the shape passed in must be in a replicate placement.
         Returns:
...
...
tests/test_tensor/test_tensor.py
View file @
1b657f9c
...
...
@@ -67,14 +67,14 @@ def _run_view(world_size):
                    TensorSpec(
                        distspec.shard(process_group=gpc.get_group(ParallelMode.DATA), dims=[0], num_partitions=[world_size])))
-    assert t.size()[0] == 4 * world_size
-    assert t.size(1) == 5
-    assert t.size() == torch.Size([4 * world_size, 5])
+    assert t.size_global()[0] == 4 * world_size
+    assert t.size_global(1) == 5
+    assert t.size_global() == torch.Size([4 * world_size, 5])

-    t.view_base(4 * 5)
+    t.view_local(4 * 5)
     assert t.tensor_spec.dist_spec.placement.value == 's'
-    t = t.view(4 * 5 * world_size)
+    t = t.view_global(4 * 5 * world_size)
     assert t.tensor_spec.dist_spec.placement.value == 'r'
     assert t.shape == torch.Size([4 * 5 * world_size])
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment