OpenDAS / ColossalAI
"...git@developer.sourcefind.cn:OpenDAS/colossalai.git" did not exist on "613efebc5c2254abc09c87feaeea514857577d3e"
Commit 0c4c9aa6, authored Sep 08, 2022 by BigOneLiXiaoMing, committed by Frank Lee on Sep 08, 2022

[NFC] polish colossalai/nn/_ops/embedding.py code style (#1561)

Parent: 08815f0e
Showing 1 changed file with 10 additions and 11 deletions.

colossalai/nn/_ops/embedding.py (+10 −11)
@@ -111,18 +111,17 @@ def colo_embedding(input_tensor: GeneralTensor,

```python
    assert isinstance(weight, ColoTensor)
    input_tensor = convert_to_colo_tensor(input_tensor, weight.get_process_group())
    if not weight.has_compute_spec():    # No Model Parallel Applied
        assert weight.is_replicate(), 'Invalid weight spec for native embedding op'
        return ColoTensor.from_torch_tensor(
            tensor=F.embedding(input_tensor,
                               weight,
                               padding_idx=padding_idx,
                               max_norm=max_norm,
                               norm_type=norm_type,
                               scale_grad_by_freq=scale_grad_by_freq,
                               sparse=sparse),
            spec=ColoTensorSpec(weight.get_process_group()))
    elif weight.has_compute_pattern(ComputePattern.TP1D):    # Single Model Parallel Applied
        if weight.is_shard_1drow():
            mode = 'row'
        elif weight.is_shard_1dcol():
            ...
```
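When the weight carries no compute spec, the function falls straight through to PyTorch's native lookup. A minimal single-process sketch of that path, using plain `F.embedding` (the shapes and ids here are illustrative, not from the commit):

```python
# Sketch of the native path taken when the weight is fully replicated
# (no model parallelism). Plain PyTorch, no ColoTensor wrapping here.
import torch
import torch.nn.functional as F

weight = torch.randn(10, 4)                       # vocab of 10 ids, embedding dim 4
input_ids = torch.tensor([[1, 2, 4], [4, 3, 9]])  # batch of 2 sequences, length 3

# Same call and defaults as in colo_embedding's replicated branch above.
out = F.embedding(input_ids, weight,
                  padding_idx=None, max_norm=None, norm_type=2.0,
                  scale_grad_by_freq=False, sparse=False)
print(out.shape)                                  # torch.Size([2, 3, 4])
```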
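The TP1D branch dispatches on the sharding axis: 'row' mode splits the weight along the vocabulary dimension, 'col' mode along the embedding dimension. As a hedged illustration of the idea behind the 'row' mode, here is the standard vocab-parallel pattern on a single process; it is not ColossalAI's actual implementation, and every name in it is hypothetical:

```python
# Illustrative vocab-parallel ('row'-sharded) embedding lookup, single process.
# Each rank would hold a contiguous slice of the vocabulary; ids owned by other
# ranks are masked to zero, and the partial results are summed across ranks.
import torch
import torch.nn.functional as F

vocab_size, dim, world_size, rank = 12, 4, 2, 0   # hypothetical sharding setup
shard = vocab_size // world_size                  # vocabulary rows per rank
local_weight = torch.randn(shard, dim)            # this rank's slice of the weight

input_ids = torch.tensor([[1, 7, 3]])
lo, hi = rank * shard, (rank + 1) * shard
remote = (input_ids < lo) | (input_ids >= hi)     # ids held by other ranks
local_ids = (input_ids - lo).clamp(0, shard - 1)  # map into the local slice

partial = F.embedding(local_ids, local_weight)
partial[remote] = 0.0                             # zero rows looked up off-shard
# In the distributed version, an all-reduce would sum the partials across ranks.
```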