OpenDAS / ColossalAI · Commits

Commit 30b4dd17 (unverified)
Authored Aug 11, 2022 by Jiarui Fang; committed by GitHub on Aug 11, 2022
Parent: 9056677b

[FAW] export FAW in _ops (#1438)
Showing 9 changed files with 13 additions and 14 deletions (+13 −14):
```
colossalai/nn/_ops/__init__.py                                                   +1 −1
colossalai/nn/parallel/layers/__init__.py                                        +5 −8
colossalai/nn/parallel/layers/cache_embedding/__init__.py                        +0 −0
colossalai/nn/parallel/layers/cache_embedding/base_embedding.py                  +0 −0
colossalai/nn/parallel/layers/cache_embedding/cache_mgr.py                       +0 −0
colossalai/nn/parallel/layers/cache_embedding/copyer.py                          +0 −0
colossalai/nn/parallel/layers/cache_embedding/freq_aware_embedding.py            +0 −0
colossalai/nn/parallel/layers/cache_embedding/parallel_freq_aware_embedding.py   +2 −2
tests/test_layers/test_cache_embedding.py                                        +5 −3
```
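The practical effect for downstream code is an import-path change: the frequency-aware-embedding (FAW) classes leave the private `colossalai.nn._ops.cache_embedding` module and become part of the public `colossalai.nn.parallel.layers` namespace. A minimal before/after sketch of the migration, mirroring the test-file change shown below:

```python
# Old, pre-commit import path (private _ops module):
# from colossalai.nn._ops.cache_embedding import (
#     CachedParamMgr, FreqAwareEmbeddingBag, ParallelFreqAwareEmbeddingBag)

# New, post-commit import path (public parallel-layers namespace):
from colossalai.nn.parallel.layers import (
    CachedParamMgr, FreqAwareEmbeddingBag, ParallelFreqAwareEmbeddingBag)
```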
colossalai/nn/_ops/__init__.py  (+1 −1)

```diff
@@ -5,4 +5,4 @@ from .loss import colo_cross_entropy
 from .embedding import colo_embedding
 from .addmm import colo_addmm
 from .embedding_bag import colo_embedding_bag
-from .view import colo_view
+from .view import colo_view
\ No newline at end of file
```
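For orientation, this file only re-exports the functional tensor ops so they can be imported from the package directly; a quick usage sketch built from the names in the hunk above:

```python
# The package-level re-exports make these importable without naming the
# defining submodules (.loss, .embedding, .addmm, .embedding_bag, .view):
from colossalai.nn._ops import (colo_cross_entropy, colo_embedding, colo_addmm,
                                colo_embedding_bag, colo_view)
```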
colossalai/nn/parallel/layers/__init__.py  (+5 −8)

```diff
@@ -3,13 +3,10 @@ from .linear import ColoLinear
 from .embedding import ColoEmbedding
 from .module_utils import register_colo_module, is_colo_module, get_colo_module, init_colo_module, check_colo_module
 
+from .cache_embedding import FreqAwareEmbeddingBag, ParallelFreqAwareEmbeddingBag, CachedParamMgr, LimitBuffIndexCopyer
+
 __all__ = [
-    'ColoModule',
-    'register_colo_module',
-    'is_colo_module',
-    'get_colo_module',
-    'init_colo_module',
-    'check_colo_module',
-    'ColoLinear',
-    'ColoEmbedding',
+    'ColoModule', 'register_colo_module', 'is_colo_module', 'get_colo_module', 'init_colo_module', 'check_colo_module',
+    'ColoLinear', 'ColoEmbedding', 'FreqAwareEmbeddingBag', 'ParallelFreqAwareEmbeddingBag', 'CachedParamMgr',
+    'LimitBuffIndexCopyer'
 ]
```
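Listing the four cache-embedding names in `__all__` is what makes them part of the module's advertised public surface: `from colossalai.nn.parallel.layers import *` binds exactly the names in `__all__`. A small sanity-check sketch of that semantics (the assertions are illustrative, not part of the commit):

```python
import colossalai.nn.parallel.layers as layers

# __all__ governs wildcard imports and signals the supported public API.
for name in ('FreqAwareEmbeddingBag', 'ParallelFreqAwareEmbeddingBag',
             'CachedParamMgr', 'LimitBuffIndexCopyer'):
    assert name in layers.__all__
    assert hasattr(layers, name)
```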
```
colossalai/nn/_ops/cache_embedding/__init__.py → colossalai/nn/parallel/layers/cache_embedding/__init__.py                  (file moved)
colossalai/nn/_ops/cache_embedding/base_embedding.py → colossalai/nn/parallel/layers/cache_embedding/base_embedding.py      (file moved)
colossalai/nn/_ops/cache_embedding/cache_mgr.py → colossalai/nn/parallel/layers/cache_embedding/cache_mgr.py                (file moved)
colossalai/nn/_ops/cache_embedding/copyer.py → colossalai/nn/parallel/layers/cache_embedding/copyer.py                      (file moved)
colossalai/nn/_ops/cache_embedding/freq_aware_embedding.py → colossalai/nn/parallel/layers/cache_embedding/freq_aware_embedding.py  (file moved)
```
colossalai/nn/_ops/cache_embedding/parallel_freq_aware_embedding.py → colossalai/nn/parallel/layers/cache_embedding/parallel_freq_aware_embedding.py  (file moved, +2 −2)

```diff
@@ -5,9 +5,9 @@ from typing import List, Optional, Iterator, Tuple
 from .base_embedding import BaseEmbeddingBag
 from .cache_mgr import CachedParamMgr
 from torch.nn.parameter import Parameter
-from .._utils import dual_all_to_all
-from colossalai.tensor import ColoParameter, ShardSpec, ComputeSpec, ComputePattern, ProcessGroup, ColoTensorSpec
+from colossalai.nn._ops._utils import dual_all_to_all
+from colossalai.tensor import ColoParameter, ShardSpec, ComputePattern, ProcessGroup, ColoTensorSpec
 
 
 def get_partition(embedding_dim, rank, world_size) -> Tuple[int, int, bool]:
```
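Both edits in this hunk are forced by the file move. A relative import such as `from .._utils import dual_all_to_all` resolves against the module's parent package, which changed from `colossalai.nn._ops` to `colossalai.nn.parallel.layers`; since the latter has no `_utils`, the import must be pinned to the absolute path. The other changed line drops `ComputeSpec` from the `colossalai.tensor` import, presumably because it is no longer used here. A small sketch of the resolution rule, using only the standard library:

```python
from importlib.util import resolve_name

old_pkg = 'colossalai.nn._ops.cache_embedding'
new_pkg = 'colossalai.nn.parallel.layers.cache_embedding'

# Where `from .._utils import ...` pointed before and after the move:
print(resolve_name('.._utils', old_pkg))  # colossalai.nn._ops._utils (exists)
print(resolve_name('.._utils', new_pkg))  # colossalai.nn.parallel.layers._utils (does not exist)
```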
tests/test_ops/test_cache_embedding.py → tests/test_layers/test_cache_embedding.py  (file moved, +5 −3)

```diff
 import pytest
 from functools import partial
+import torch
+import torch.multiprocessing as mp
 import numpy as np
 import random
-import torch
-import torch.multiprocessing as mp
 
 import colossalai
 from colossalai.utils import free_port
 from colossalai.testing import rerun_if_address_is_in_use
 from colossalai.tensor import ColoParameter, ProcessGroup, ShardSpec, ComputePattern, ComputeSpec
-from colossalai.nn._ops.cache_embedding import CachedParamMgr, FreqAwareEmbeddingBag, ParallelFreqAwareEmbeddingBag
+from colossalai.nn.parallel.layers import CachedParamMgr, FreqAwareEmbeddingBag, ParallelFreqAwareEmbeddingBag
 
 NUM_EMBED, EMBED_DIM = 10, 8
 BATCH_SIZE = 8
```
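The renamed test keeps ColossalAI's usual multi-process launch pattern, visible in its imports (`mp`, `partial`, `free_port`, `rerun_if_address_is_in_use`). A condensed, hypothetical sketch of that harness, assuming the 2022-era `colossalai.launch` signature; the worker body and test name are placeholders, not the actual test:

```python
import pytest
import torch.multiprocessing as mp
from functools import partial

import colossalai
from colossalai.utils import free_port
from colossalai.testing import rerun_if_address_is_in_use


def run_dist(rank, world_size, port):
    # Hypothetical worker: initialize the distributed context, then exercise
    # FreqAwareEmbeddingBag / ParallelFreqAwareEmbeddingBag as the real test does.
    colossalai.launch(config={}, rank=rank, world_size=world_size,
                      host='localhost', port=port, backend='nccl')
    # ... test body elided ...


@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_cache_embedding_harness():  # hypothetical name
    world_size = 4
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)
```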