Unverified commit b5f9e37c, authored by Hongxin Liu, committed by GitHub

[legacy] clean up legacy code (#4743)

* [legacy] remove outdated codes of pipeline (#4692)

* [legacy] remove cli of benchmark and update optim (#4690)

* [legacy] remove cli of benchmark and update optim

* [doc] fix cli doc test

* [legacy] fix engine clip grad norm

* [legacy] remove outdated colo tensor (#4694)

* [legacy] remove outdated colo tensor

* [test] fix test import

* [legacy] move outdated zero to legacy (#4696)

* [legacy] clean up utils (#4700)

* [legacy] clean up utils

* [example] update examples

* [legacy] clean up amp

* [legacy] fix amp module

* [legacy] clean up gpc (#4742)

* [legacy] clean up context

* [legacy] clean core, constants and global vars

* [legacy] refactor initialize

* [example] fix examples ci

* [example] fix examples ci

* [legacy] fix tests

* [example] fix gpt example

* [example] fix examples ci

* [devops] fix ci installation

* [example] fix examples ci
parent 32e7f994
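
The diff below repeats one pattern throughout: modules that previously lived at the top level of the colossalai package now resolve under colossalai.legacy. A minimal before/after sketch of the migration, using names taken from the hunks below (the call sites shown in comments are illustrative, not from any one file):

# Before this commit:
#     from colossalai.core import global_context as gpc
#     from colossalai.context import ParallelMode
# After this commit, the same objects live under the legacy namespace:
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.context import ParallelMode

# Call sites are unchanged; only the import path moves, e.g.
# gpc.get_global_rank(), gpc.destroy(), ParallelMode.GLOBAL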
@@ -5,9 +5,9 @@ import torch.fx
 import colossalai
 from colossalai.autochunk.autochunk_codegen import AUTOCHUNK_AVAILABLE
-from colossalai.core import global_context as gpc
 from colossalai.fx.graph_module import ColoGraphModule
 from colossalai.fx.passes.meta_info_prop import MetaInfoProp
+from colossalai.legacy.core import global_context as gpc

 if AUTOCHUNK_AVAILABLE:
     from colossalai.autochunk.autochunk_codegen import AutoChunkCodeGen
@@ -7,8 +7,8 @@ from colossalai.testing import spawn

 def check_process_group_mesh_with_gpc():
-    from colossalai.context import ParallelMode
-    from colossalai.core import global_context as gpc
+    from colossalai.legacy.context import ParallelMode
+    from colossalai.legacy.core import global_context as gpc

     DP_DIM, PP_DIM, TP_DIM = 0, 1, 2
     pg_mesh = ProcessGroupMesh(1, 2, 2)
@@ -138,7 +138,7 @@ def run_dist(rank, world_size, port):
            port=port,
            host='localhost')
     # TODO(ver217): this function should be removed when gpc is removed
-    check_process_group_mesh_with_gpc()
+    # check_process_group_mesh_with_gpc()
     check_process_group_mesh_with_cases()
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
parallel = dict(
    pipeline=dict(size=2),
    tensor=dict(
        size=8,
        depth=2,
        mode='2.5d'
    )
)
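
Legacy test configs like the one above feed colossalai.legacy.launch, whose call shape appears verbatim in later hunks. A minimal sketch of launching with this 2.5D config (rank, world_size, and port are hypothetical harness values; real tests receive them from the spawn() helper):

import colossalai

# Hypothetical values; the real tests get these from the test harness.
rank, world_size, port = 0, 16, 29500

# 2-stage pipeline times 8-way 2.5D tensor parallelism (depth 2) = 16 ranks,
# mirroring the config above. The launch signature matches the calls in this diff.
colossalai.legacy.launch(config=dict(parallel=dict(pipeline=dict(size=2),
                                                   tensor=dict(size=8, depth=2, mode='2.5d'))),
                         rank=rank,
                         world_size=world_size,
                         host='localhost',
                         port=port)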
@@ -3,7 +3,6 @@ import torch
 import torch.distributed as dist
 from torch.distributed import ReduceOp

-from colossalai.core import global_context as gpc
 from colossalai.device.device_mesh import DeviceMesh
 from colossalai.initialize import launch
 from colossalai.testing import rerun_if_address_is_in_use, spawn
@@ -13,7 +12,7 @@ def check_layer(rank, world_size, port):
     launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')

     physical_mesh_id = torch.arange(0, 4)
-    assert rank == gpc.get_global_rank()
+    assert rank == dist.get_rank()
     tensor_to_check = torch.tensor([2, 2, 2, 2]).cuda()
     mesh_shape = (2, 2)
@@ -27,8 +26,6 @@ def check_layer(rank, world_size, port):
     dist.all_reduce(tensor, op=ReduceOp.SUM, group=pg)
     assert tensor.equal(tensor_to_check)
-    gpc.destroy()
-

 @pytest.mark.dist
 @rerun_if_address_is_in_use()
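
The hunk above shows the second cleanup pattern: tests that only need a rank or a collective drop gpc entirely and use torch.distributed directly. A gloo-backed, single-process sketch of the replacement idiom (the port is arbitrary), runnable without a launcher:

import torch
import torch.distributed as dist
from torch.distributed import ReduceOp

# One-process group so the sketch runs standalone.
dist.init_process_group(backend='gloo', init_method='tcp://localhost:29501', rank=0, world_size=1)

# What used to be gpc.get_global_rank() is now just dist.get_rank().
assert dist.get_rank() == 0

# Collectives are untouched by the migration; only the bookkeeping moved.
tensor = torch.tensor([2, 2, 2, 2])
dist.all_reduce(tensor, op=ReduceOp.SUM)
assert tensor.equal(torch.tensor([2, 2, 2, 2]))

# gpc.destroy() disappears; teardown is the standard process-group cleanup.
dist.destroy_process_group()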
@@ -4,9 +4,9 @@ import torch.nn.functional as F
 from torch.utils.checkpoint import checkpoint

 import colossalai
-from colossalai.core import global_context as gpc
 from colossalai.fx import ColoTracer
 from colossalai.fx.graph_module import ColoGraphModule
+from colossalai.legacy.core import global_context as gpc
 from colossalai.testing import rerun_if_address_is_in_use, spawn

 try:
@@ -2,9 +2,9 @@ import pytest
 import torch

 import colossalai
-from colossalai.core import global_context as gpc
 from colossalai.fx import ColoTracer
 from colossalai.fx.graph_module import ColoGraphModule
+from colossalai.legacy.core import global_context as gpc
 from colossalai.testing import rerun_if_address_is_in_use, spawn

 try:
@@ -5,9 +5,9 @@ import torch
 from torch.fx import GraphModule

 import colossalai
-from colossalai.core import global_context as gpc
 from colossalai.fx import ColoTracer
 from colossalai.fx.graph_module import ColoGraphModule
+from colossalai.legacy.core import global_context as gpc
 from colossalai.testing import rerun_if_address_is_in_use, spawn

 try:
@@ -5,9 +5,9 @@ import pytest
 import torch
 from torch.fx import symbolic_trace

-from colossalai.core import global_context as gpc
 from colossalai.fx.passes import column_shard_linear_pass
 from colossalai.initialize import launch
+from colossalai.legacy.core import global_context as gpc
 from colossalai.logging import disable_existing_loggers
 from colossalai.testing import clear_cache_before_run, rerun_if_address_is_in_use, spawn
+import random
+
+import numpy as np
 import torch
 from torch.fx import GraphModule
-from colossalai.fx.passes.adding_split_node_pass import split_with_split_nodes_pass, balanced_split_pass
 from colossalai.fx import ColoTracer
-from colossalai.pipeline.middleware import Partition, PartitionInputVal, PartitionOutputVal, Topo
-from colossalai.pipeline.middleware.adaptor import get_fx_topology
-import random
-import numpy as np
+from colossalai.fx.passes.adding_split_node_pass import balanced_split_pass, split_with_split_nodes_pass
+from colossalai.legacy.pipeline.middleware import Partition, PartitionInputVal, PartitionOutputVal, Topo
+from colossalai.legacy.pipeline.middleware.adaptor import get_fx_topology

 MANUAL_SEED = 0
 random.seed(MANUAL_SEED)
 np.random.seed(MANUAL_SEED)
 torch.manual_seed(MANUAL_SEED)


 class MLP(torch.nn.Module):

     def __init__(self, config={}):
         super().__init__()
         dim = config['dim']
@@ -27,6 +31,7 @@ class MLP(torch.nn.Module):
         x = layer(x)
         return x

+
 def split_model_and_get_DAG(model, data_gen):
     model.eval()
@@ -54,6 +59,7 @@ def split_model_and_get_DAG(model, data_gen):
     return top_module, split_submodules[0]._topo

+
 def check_input(top_module, input_partition: Partition):
     partition_output = input_partition.get_output_vals()
     arg_pos = 0
@@ -66,6 +72,7 @@ def check_input(top_module, input_partition: Partition):
     assert arg_pos == len(partition_output)

+
 def check_submod(top_module, part_id, mid_partition: Partition):
     partition_input = mid_partition.get_input_vals()
     partition_output = mid_partition.get_output_vals()
@@ -82,6 +89,7 @@ def check_submod(top_module, part_id, mid_partition: Partition):
     assert len(partition_input) == len(cur_node.args)
     assert len(partition_output) == len(cur_node.users)

+
 def check_topo(top_module, topo: Topo):
     input_partition = topo.get_input_partition()
     mid_partitions = topo.get_mid_partitions()
@@ -89,4 +97,3 @@ def check_topo(top_module, topo: Topo):
     check_input(top_module, input_partition)
     for part_id, submod in mid_partitions.items():
-        check_submod(top_module, part_id, submod)
\ No newline at end of file
+        check_submod(top_module, part_id, submod)
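
For context on what these checks consume: the partitions come from splitting a torch.fx graph of the MLP above. Independent of the ColossalAI split passes (whose call sites are collapsed in this diff), a minimal plain torch.fx sketch of the tracing step they build on:

import torch
from torch.fx import symbolic_trace


class TinyMLP(torch.nn.Module):

    def __init__(self, dim: int = 16):
        super().__init__()
        self.layers = torch.nn.ModuleList(torch.nn.Linear(dim, dim) for _ in range(4))

    def forward(self, x):
        for layer in self.layers:
            x = layer(x)
        return x


# symbolic_trace produces the GraphModule whose nodes the split passes
# (and the Topo/Partition checks above) walk over.
gm = symbolic_trace(TinyMLP())
for node in gm.graph.nodes:
    print(node.op, node.target)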
@@ -4,7 +4,7 @@ import pytest
 import torch

 import colossalai
-from colossalai.amp import convert_to_apex_amp, convert_to_naive_amp
+from colossalai.legacy.amp import convert_to_apex_amp, convert_to_naive_amp
 from colossalai.testing import assert_close_loose, clear_cache_before_run, rerun_if_address_is_in_use, spawn
 from tests.components_to_test.registry import non_distributed_component_funcs
@@ -78,7 +78,7 @@ def run_naive_amp():

 def run_dist(rank, world_size, port):
-    colossalai.launch(config=dict(), rank=rank, world_size=world_size, port=port, host='localhost')
+    colossalai.legacy.launch(config=dict(), rank=rank, world_size=world_size, port=port, host='localhost')
     run_naive_amp()
@@ -4,7 +4,7 @@ import pytest
 import torch

 import colossalai
-from colossalai.amp import convert_to_apex_amp, convert_to_torch_amp
+from colossalai.legacy.amp import convert_to_apex_amp, convert_to_torch_amp
 from colossalai.testing import assert_close_loose, clear_cache_before_run, rerun_if_address_is_in_use, spawn
 from tests.components_to_test.registry import non_distributed_component_funcs
@@ -78,7 +78,7 @@ def run_torch_amp():

 def run_dist(rank, world_size, port):
-    colossalai.launch(config=dict(), rank=rank, world_size=world_size, port=port, host='localhost')
+    colossalai.legacy.launch(config=dict(), rank=rank, world_size=world_size, port=port, host='localhost')
     run_torch_amp()
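
convert_to_torch_amp, now imported from colossalai.legacy.amp, wraps PyTorch's native mixed precision. For reference, a minimal sketch of the native autocast/GradScaler idiom the torch-amp mode builds on (requires a CUDA device; the model and optimizer here are placeholders):

import torch

model = torch.nn.Linear(8, 8).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = torch.cuda.amp.GradScaler()      # dynamic loss scaling

data = torch.randn(4, 8, device='cuda')
with torch.cuda.amp.autocast():           # fp16 autocast region
    loss = model(data).sum()

scaler.scale(loss).backward()             # scale the loss, then backprop
scaler.step(optimizer)                    # unscale gradients and step
scaler.update()                           # adjust the scale factor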
 import pytest
 import torch

-from colossalai.context import ParallelMode
-from colossalai.core import global_context as gpc
-from colossalai.initialize import launch
 from colossalai.legacy.communication.p2p_v2 import _recv_object, _send_object
+from colossalai.legacy.context import ParallelMode
+from colossalai.legacy.core import global_context as gpc
+from colossalai.legacy.initialize import launch
 from colossalai.logging import disable_existing_loggers
 from colossalai.testing import rerun_if_address_is_in_use, spawn
@@ -2,10 +2,10 @@ import pytest
 import torch
 import torch.distributed as dist

-from colossalai.context import ParallelMode
-from colossalai.core import global_context as gpc
-from colossalai.initialize import launch
 from colossalai.legacy.communication import all_gather, all_reduce, reduce_scatter
+from colossalai.legacy.context import ParallelMode
+from colossalai.legacy.core import global_context as gpc
+from colossalai.legacy.initialize import launch
 from colossalai.testing import rerun_if_address_is_in_use, spawn
 from colossalai.utils import get_current_device
 import pytest
 import torch

-from colossalai.context import ParallelMode
-from colossalai.core import global_context as gpc
-from colossalai.initialize import launch
 from colossalai.legacy.communication.p2p import (
     recv_backward,
     recv_forward,
@@ -12,6 +9,9 @@ from colossalai.legacy.communication.p2p import (
     send_forward,
     send_forward_recv_backward,
 )
+from colossalai.legacy.context import ParallelMode
+from colossalai.legacy.core import global_context as gpc
+from colossalai.legacy.initialize import launch
 from colossalai.testing import rerun_if_address_is_in_use, spawn

 CONFIG = dict(parallel=dict(pipeline=2))
 import pytest
 import torch

-from colossalai.context import ParallelMode
-from colossalai.core import global_context as gpc
-from colossalai.initialize import launch
 from colossalai.legacy.communication.p2p_v2 import recv_backward, recv_forward, send_backward, send_forward
+from colossalai.legacy.context import ParallelMode
+from colossalai.legacy.core import global_context as gpc
+from colossalai.legacy.initialize import launch
 from colossalai.logging import disable_existing_loggers
 from colossalai.testing import rerun_if_address_is_in_use, spawn
-#!/usr/bin/env python
-# -*- encoding: utf-8 -*-
-
-from colossalai.context.parallel_context import global_context
-
-__all__ = ['global_context']
\ No newline at end of file
parallel = dict(pipeline=dict(size=2), tensor=dict(size=4, mode='2d'))
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
parallel = dict(pipeline=dict(size=2), tensor=dict(size=8, depth=2, mode='2.5d'))
 #!/usr/bin/env python
 # -*- encoding: utf-8 -*-

-parallel = dict(
-    pipeline=dict(size=2),
-    tensor=dict(
-        size=8,
-        mode='3d'
-    )
-)
+parallel = dict(pipeline=dict(size=2), tensor=dict(size=8, mode='3d'))
@@ -6,11 +6,11 @@ from pathlib import Path
 import pytest
 import torch

-from colossalai import launch
-from colossalai.context import reset_seeds
-from colossalai.context.parallel_mode import ParallelMode
-from colossalai.core import global_context as gpc
-from colossalai.global_variables import tensor_parallel_env as tp_env
+from colossalai.legacy import launch
+from colossalai.legacy.context import reset_seeds
+from colossalai.legacy.context.parallel_mode import ParallelMode
+from colossalai.legacy.core import global_context as gpc
+from colossalai.legacy.global_variables import tensor_parallel_env as tp_env
 from colossalai.testing import free_port, rerun_if_address_is_in_use, spawn

 CONFIG_PATH_LIST = list(Path(__file__).parent.glob('configs/*.py'))
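
Taken together, the migrated tests keep the same launch/teardown lifecycle, just under the legacy namespace. A minimal sketch assembled from calls that appear verbatim in this diff, assuming colossalai.legacy.launch keeps the signature shown in the hunks above (the single-rank values are hypothetical; real tests drive this through spawn):

from colossalai.legacy import launch
from colossalai.legacy.core import global_context as gpc

rank, world_size, port = 0, 1, 29502    # hypothetical harness values

launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')

assert rank == gpc.get_global_rank()    # the global context mirrors the launcher
gpc.destroy()                           # explicit teardown, as in the legacy tests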