OpenDAS / ColossalAI · Commits

Commit bf5066fb (unverified)
Authored Jul 19, 2022 by HELSON; committed by GitHub on Jul 19, 2022

[refactor] refactor ColoTensor's unit tests (#1340)
Parent: f92c100d

16 changed files with 11 additions and 13 deletions (+11 −13):
- tests/test_tensor/_utils/__init__.py (+0 −1)
- tests/test_tensor/common_utils/__init__.py (+1 −0)
- tests/test_tensor/common_utils/_utils.py (+0 −0)
- tests/test_tensor/core/test_dist_spec_mgr.py (+0 −0)
- tests/test_tensor/core/test_tensor.py (+0 −0)
- tests/test_tensor/model/test_gpt2.py (+1 −1)
- tests/test_tensor/model/test_model.py (+2 −4)
- tests/test_tensor/model/test_module_spec.py (+1 −1)
- tests/test_tensor/ops/test_addmm_tp.py (+1 −1)
- tests/test_tensor/ops/test_embedding_bag_tp.py (+1 −1)
- tests/test_tensor/ops/test_embedding_tp.py (+1 −1)
- tests/test_tensor/ops/test_linear_tp.py (+1 −1)
- tests/test_tensor/ops/test_loss_func.py (+0 −0)
- tests/test_tensor/ops/test_op.py (+0 −0)
- tests/test_tensor/test_parameter.py (+1 −1)
- tests/test_tensor/test_zero_optim.py (+1 −1)
tests/test_tensor/_utils/__init__.py (deleted; file mode 100644 → 0)

```diff
-from ._util import *
\ No newline at end of file
```
tests/test_tensor/common_utils/__init__.py (new file; file mode 0 → 100644)

```diff
+from ._utils import *
```
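The single line above re-exports every public name from `_utils` through a wildcard import, so tests address the package (`common_utils`) instead of the underscore-prefixed module. If `_utils.py` defines `__all__`, the wildcard exposes exactly that list; whether it does is not visible in this diff, so the following is a hypothetical sketch built from the names the tests actually import:

```python
# Hypothetical __all__ for tests/test_tensor/common_utils/_utils.py.
# These names are the ones imported across this commit's hunks; the
# real module may omit __all__ and rely on the default behaviour.
__all__ = [
    'tensor_equal',
    'tensor_shard_equal',
    'check_equal',
    'set_seed',
    'split_param_row_tp1d',
    'split_param_col_tp1d',
]
```

Without `__all__`, `from ._utils import *` simply binds every module-level name that does not start with an underscore.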
tests/test_tensor/_utils/_util.py → tests/test_tensor/common_utils/_utils.py (file moved)
tests/test_tensor/test_dist_spec_mgr.py → tests/test_tensor/core/test_dist_spec_mgr.py (file moved)
tests/test_tensor/test_tensor.py → tests/test_tensor/core/test_tensor.py (file moved)
tests/test_tensor/test_gpt2.py → tests/test_tensor/model/test_gpt2.py

```diff
 import pytest
 from functools import partial
-from _utils import tensor_equal, tensor_shard_equal, set_seed
+from tests.test_tensor.common_utils import tensor_equal, tensor_shard_equal, set_seed
 import torch
 from torch.nn.parallel import DistributedDataParallel as DDP
...
```
tests/test_tensor/test_model.py → tests/test_tensor/model/test_model.py

```diff
 import pytest
 from functools import partial
-from _utils import tensor_shard_equal, set_seed
 import torch
 import torch.multiprocessing as mp
...
@@ -15,7 +13,8 @@ from colossalai.tensor import ColoTensor, ProcessGroup
 from colossalai.nn.optimizer import ColossalaiOptimizer
 from tests.components_to_test.registry import non_distributed_component_funcs
-from _utils import split_param_row_tp1d, split_param_col_tp1d
+from tests.test_tensor.common_utils import tensor_shard_equal, check_equal, set_seed, \
+    split_param_row_tp1d, split_param_col_tp1d


 def run_1d_hybrid_tp(model_name):
...
@@ -264,7 +263,6 @@ def run_1d_row_tp(model_name: str):
 def _run_pretrain_load():
-    from _utils import check_equal
     from transformers import BertForMaskedLM
     set_seed(1)
     model_pretrained = BertForMaskedLM.from_pretrained('bert-base-uncased')
...
```
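The consolidated import added in the middle hunk uses a backslash line continuation. A parenthesized import is an equivalent alternative (shown only for comparison; the commit itself keeps the backslash form):

```python
# Equivalent to the two-line backslash-continued import in the hunk above.
from tests.test_tensor.common_utils import (tensor_shard_equal, check_equal,
                                            set_seed, split_param_row_tp1d,
                                            split_param_col_tp1d)
```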
tests/test_tensor/test_module_spec.py → tests/test_tensor/model/test_module_spec.py

```diff
@@ -7,7 +7,7 @@ import torch.multiprocessing as mp
 from colossalai.tensor import ColoTensor, ComputePattern, ComputeSpec, ShardSpec, ColoTensorSpec
 from colossalai.nn.parallel.layers import init_colo_module, check_colo_module
-from _utils import tensor_equal, tensor_shard_equal, set_seed
+from tests.test_tensor.common_utils import tensor_equal, tensor_shard_equal, set_seed
 import colossalai
 from colossalai.utils.cuda import get_current_device
...
```
tests/test_tensor/test_addmm_tp.py → tests/test_tensor/ops/test_addmm_tp.py

```diff
@@ -8,7 +8,7 @@ from colossalai.tensor import ColoTensorSpec
 from colossalai.testing import rerun_if_address_is_in_use
 from colossalai.utils import free_port
 from functools import partial
-from _utils import tensor_shard_equal, tensor_equal, split_param_row_tp1d, split_param_col_tp1d
+from tests.test_tensor.common_utils import tensor_shard_equal, tensor_equal, split_param_row_tp1d, split_param_col_tp1d


 class Conv1D(nn.Module):
...
```
tests/test_tensor/test_embedding_bag_tp.py → tests/test_tensor/ops/test_embedding_bag_tp.py

```diff
@@ -8,7 +8,7 @@ import torch.multiprocessing as mp
 from colossalai.testing import rerun_if_address_is_in_use
 from colossalai.utils import free_port
 from colossalai.tensor import ColoParameter, ColoTensorSpec, ProcessGroup
-from _utils import tensor_equal, tensor_shard_equal, split_param_col_tp1d
+from tests.test_tensor.common_utils import tensor_equal, tensor_shard_equal, split_param_col_tp1d


 def run_with_spec(spec_init_func):
...
```
tests/test_tensor/test_embedding_tp.py → tests/test_tensor/ops/test_embedding_tp.py

```diff
@@ -8,7 +8,7 @@ import torch.multiprocessing as mp
 from colossalai.testing import rerun_if_address_is_in_use
 from colossalai.utils import free_port
 from colossalai.tensor import ColoTensorSpec, ProcessGroup, ColoTensor
-from _utils import tensor_equal, tensor_shard_equal, split_param_col_tp1d, split_param_row_tp1d
+from tests.test_tensor.common_utils import tensor_equal, tensor_shard_equal, split_param_col_tp1d, split_param_row_tp1d


 def run_with_spec(spec_init_func, pg: ProcessGroup):
...
```
tests/test_tensor/test_linear_tp.py → tests/test_tensor/ops/test_linear_tp.py

```diff
@@ -8,7 +8,7 @@ import torch.nn.functional as F
 from colossalai.testing import rerun_if_address_is_in_use
 from colossalai.utils import free_port
 from colossalai.tensor import ColoTensorSpec, ProcessGroup, ColoTensor
-from _utils import tensor_equal, tensor_shard_equal, split_param_col_tp1d, split_param_row_tp1d
+from tests.test_tensor.common_utils import tensor_equal, tensor_shard_equal, split_param_col_tp1d, split_param_row_tp1d


 def run_with_spec(spec_init_func, split_bias):
...
```
tests/test_tensor/test_loss_func.py → tests/test_tensor/ops/test_loss_func.py (file moved)
tests/test_tensor/test_op.py → tests/test_tensor/ops/test_op.py (file moved)
tests/test_tensor/test_parameter.py

```diff
 from colossalai.tensor import ColoParameter, ColoTensor, ColoTensorSpec, ProcessGroup
 import torch
 import pytest
-from _utils import tensor_equal
+from common_utils import tensor_equal
 import colossalai
 from colossalai.utils import free_port
...
```
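Unlike the relocated tests, test_parameter.py stays directly in tests/test_tensor/, and the new import names the helper package by its bare directory name. That form presumably works because pytest puts the test file's own directory on `sys.path` (an assumption about the project's pytest configuration); the fully qualified spelling used elsewhere in the commit would resolve to the same module:

```python
# Both lines bind the same helper when run from the repository root; the
# bare form additionally assumes tests/test_tensor is on sys.path.
from common_utils import tensor_equal                    # as committed
from tests.test_tensor.common_utils import tensor_equal  # fully qualified
```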
tests/test_tensor/test_zero_optim.py

```diff
@@ -8,7 +8,7 @@ from colossalai.utils import free_port
 from colossalai.utils.model.colo_init_context import ColoInitContext
 from colossalai.gemini import ChunkManager
 from functools import partial
-from _utils import tensor_equal, set_seed, tensor_shard_equal
+from tests.test_tensor.common_utils import tensor_equal, set_seed, tensor_shard_equal
 from tests.components_to_test.registry import non_distributed_component_funcs
 from torch.nn.parallel import DistributedDataParallel as DDP
 from colossalai.nn.parallel import ZeroDDP
...
```