Unverified Commit b5f9e37c authored by Hongxin Liu, committed by GitHub

[legacy] clean up legacy code (#4743)

* [legacy] remove outdated codes of pipeline (#4692)

* [legacy] remove cli of benchmark and update optim (#4690)

* [legacy] remove cli of benchmark and update optim

* [doc] fix cli doc test

* [legacy] fix engine clip grad norm

* [legacy] remove outdated colo tensor (#4694)

* [legacy] remove outdated colo tensor

* [test] fix test import

* [legacy] move outdated zero to legacy (#4696)

* [legacy] clean up utils (#4700)

* [legacy] clean up utils

* [example] update examples

* [legacy] clean up amp

* [legacy] fix amp module

* [legacy] clean up gpc (#4742)

* [legacy] clean up context

* [legacy] clean core, constants and global vars

* [legacy] refactor initialize

* [example] fix examples ci

* [example] fix examples ci

* [legacy] fix tests

* [example] fix gpt example

* [example] fix examples ci

* [devops] fix ci installation

* [example] fix examples ci
parent 32e7f994
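
The changes below follow one mechanical pattern: modules that previously lived at the top level of the colossalai package (colossalai.core, colossalai.context, colossalai.amp, colossalai.pipeline, and parts of colossalai.zero, colossalai.tensor, and colossalai.utils) now resolve under colossalai.legacy. A minimal sketch of how downstream code that must run against releases on both sides of this commit could cope; only the two import paths come from the diff, the try/except shim itself is an illustration and not part of the commit:

# Hypothetical compatibility shim; not part of this commit.
try:
    # After this commit: legacy modules live under colossalai.legacy.
    from colossalai.legacy.context import ParallelMode
    from colossalai.legacy.core import global_context as gpc
except ImportError:
    # Before this commit: the same symbols at the package top level.
    from colossalai.context import ParallelMode
    from colossalai.core import global_context as gpc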
@@ -23,8 +23,8 @@ from transformers import AutoTokenizer, PretrainedConfig
 import colossalai
 from colossalai.booster import Booster
 from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin
-from colossalai.context.parallel_mode import ParallelMode
-from colossalai.core import global_context as gpc
+from colossalai.legacy.context.parallel_mode import ParallelMode
+from colossalai.legacy.core import global_context as gpc
 from colossalai.logging import disable_existing_loggers, get_dist_logger
 from colossalai.nn.optimizer import HybridAdam
 from colossalai.utils import get_current_device
......
@@ -7,8 +7,8 @@ import transformers
 from gpt_modules import GPT2LMHeadModel, GPTLMLoss
 from colossalai.auto_parallel.tensor_shard.initialize import autoparallelize
-from colossalai.core import global_context as gpc
 from colossalai.initialize import launch_from_torch
+from colossalai.legacy.core import global_context as gpc
 from colossalai.logging import disable_existing_loggers, get_dist_logger
 BATCH_SIZE = 16
......
@@ -3,7 +3,6 @@ import time
 from functools import partial
 import torch
-from model_zoo import model_builder
 from torch import nn
 from tqdm import tqdm
@@ -14,11 +13,12 @@ from colossalai.fx.passes.adding_split_node_pass import (
     split_with_split_nodes_pass,
 )
 from colossalai.fx.passes.meta_info_prop import MetaInfoProp
+from colossalai.legacy.pipeline.middleware.adaptor import get_fx_topology
+from colossalai.legacy.pipeline.rpc._pipeline_schedule import FillDrainPipelineEngine, OneFOneBPipelineEngine
+from colossalai.legacy.pipeline.rpc.utils import rpc_run
 from colossalai.logging import disable_existing_loggers, get_dist_logger
 from colossalai.nn.optimizer import HybridAdam
-from colossalai.pipeline.middleware.adaptor import get_fx_topology
-from colossalai.pipeline.rpc._pipeline_schedule import FillDrainPipelineEngine, OneFOneBPipelineEngine
-from colossalai.pipeline.rpc.utils import rpc_run
+from model_zoo import model_builder
 def parse_args():
......
@@ -9,11 +9,6 @@ export MODEL_TYPE=${MODEL_TYPE:-"gpt2_medium"}
 export TRAIN_STEP=${TRAIN_STEP:-10}
 # export PYTHONPATH=$PWD:$PYTHONPATH
-if [ ${USE_SHARD_INIT} = "True" ]; then
-  USE_SHARD_INIT="--shardinit"
-else
-  USE_SHARD_INIT=""
-fi
 mkdir -p gemini_logs
@@ -22,4 +17,4 @@ torchrun --standalone --nproc_per_node=${GPUNUM} ./train_gpt_demo.py \
 --batch_size=${BATCH_SIZE} \
 --distplan=${DISTPLAN} \
 --train_step=${TRAIN_STEP} \
-2>&1 | tee ./gemini_logs/${MODEL_TYPE}_${DISTPLAN}_gpu_${GPUNUM}_bs_${BATCH_SIZE}_tp_${TPDEGREE}_${PLACEMENT}.log
+2>&1 | tee ./gemini_logs/${MODEL_TYPE}_${DISTPLAN}_gpu_${GPUNUM}_bs_${BATCH_SIZE}.log
+import argparse
 import os
 from contextlib import nullcontext
 from functools import partial
@@ -9,7 +10,6 @@ import torch.nn as nn
 from commons.model_zoo import model_builder
 from commons.utils import get_data, get_profile_context, get_tflops, get_time_stamp
 from packaging import version
-from torch.nn.parallel import DistributedDataParallel as DDP
 import colossalai
 from colossalai.booster import Booster
@@ -23,7 +23,7 @@ CAI_VERSION = colossalai.__version__
 def parse_args():
-    parser = colossalai.get_default_parser()
+    parser = argparse.ArgumentParser()
     parser.add_argument(
         "--distplan",
         type=str,
......
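
The hunk above swaps ColossalAI's CLI helper for the standard library: colossalai.get_default_parser() is replaced by a plain argparse.ArgumentParser(), which is why import argparse is added at the top of the same file. A minimal sketch of the resulting setup; only --distplan and its type appear in the hunk, the default and help text are assumptions:

import argparse

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--distplan",
        type=str,
        default="CAI_Gemini",  # assumed default, not shown in the hunk
        help="distributed training plan",  # assumed help text
    )
    return parser.parse_args()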
@@ -2,4 +2,4 @@ set -x
 pip install -r requirements.txt
 cd gemini && bash test_ci.sh
-cd ../hybridparallelism && bash run.sh
+# cd ../hybridparallelism && bash run.sh
@@ -6,8 +6,8 @@ from torch import nn as nn
 from torch.nn import functional as F
 from torch.nn.parameter import Parameter
-from colossalai.context import ParallelMode, seed
-from colossalai.core import global_context as gpc
+from colossalai.legacy.context import ParallelMode, seed
+from colossalai.legacy.core import global_context as gpc
 from colossalai.legacy.nn.layer.base_layer import ParallelLayer
 from colossalai.legacy.nn.layer.parallel_1d._utils import gather_forward_split_backward, reduce_grad, reduce_input
 from colossalai.legacy.nn.layer.parallel_1d.layers import Linear1D_Row
......
@@ -9,13 +9,13 @@ from torch import nn as nn
 from colossalai import kernel
 from colossalai import nn as col_nn
-from colossalai.core import global_context as gpc
 from colossalai.kernel.cuda_native.scaled_softmax import AttnMaskType
+from colossalai.legacy.core import global_context as gpc
 from colossalai.legacy.nn.layer import Linear1D_Col, Linear1D_Row
 from colossalai.legacy.nn.layer.base_layer import ParallelLayer
 from colossalai.legacy.nn.layer.utils import ACT2FN, divide
+from colossalai.legacy.utils.activation_checkpoint import checkpoint
-from colossalai.utils import checkpoint
-from colossalai.utils.activation_checkpoint import checkpoint
 __all__ = [
     'GPTMLP1D', 'GPTSelfAttention1D', 'GPTTransformerLayer1D', 'FusedGPTSelfAttention1D', 'FusedGPTTransformerLayer1D'
......
@@ -7,11 +7,11 @@ import torch.nn as nn
 from colossalai import kernel
 from colossalai import nn as col_nn
-from colossalai.context.parallel_mode import ParallelMode
-from colossalai.core import global_context as gpc
+from colossalai.legacy.context.parallel_mode import ParallelMode
+from colossalai.legacy.core import global_context as gpc
 from colossalai.legacy.nn.layer.wrapper import PipelineSharedModuleWrapper
+from colossalai.legacy.pipeline.utils import partition_uniform
 from colossalai.logging import get_dist_logger
-from colossalai.pipeline.utils import partition_uniform
 from .embed import HiddenParallelEmbedding, HiddenParallelGPTLMHead1D, VocabParallelEmbedding, VocabParallelGPTLMHead1D
 from .gpt1d import FusedGPTTransformerLayer1D, GPTTransformerLayer1D
......
@@ -8,14 +8,14 @@ from titans.model.gpt import GPTLMLoss
 import colossalai
 import colossalai.utils as utils
-from colossalai.context.parallel_mode import ParallelMode
-from colossalai.core import global_context as gpc
+from colossalai.legacy.context.parallel_mode import ParallelMode
+from colossalai.legacy.core import global_context as gpc
 from colossalai.legacy.trainer import Trainer, hooks
+from colossalai.legacy.zero.init_ctx import ZeroInitContext
 from colossalai.logging import disable_existing_loggers, get_dist_logger
 from colossalai.nn import LinearWarmupLR
 from colossalai.utils import colo_set_process_memory_fraction, is_using_pp
 from colossalai.utils.timer import MultiTimer
-from colossalai.zero.legacy.init_ctx import ZeroInitContext
 def calc_local_model_size(model: torch.nn.Module):
......
@@ -4,8 +4,8 @@ from tqdm import tqdm
 import colossalai
 from colossalai.auto_parallel.tensor_shard.initialize import initialize_model
-from colossalai.core import global_context as gpc
 from colossalai.device.device_mesh import DeviceMesh
+from colossalai.legacy.core import global_context as gpc
 from colossalai.logging import get_dist_logger
 from colossalai.nn.lr_scheduler import CosineAnnealingLR
......
 #!/bin/bash
 set -euxo pipefail
-pip install -r requirements.txt
-conda install -c conda-forge coin-or-cbc
-colossalai run --nproc_per_node 4 auto_parallel_with_resnet.py
+echo "this test is outdated"
+# pip install -r requirements.txt
+# conda install -c conda-forge coin-or-cbc
+# colossalai run --nproc_per_node 4 auto_parallel_with_resnet.py
-from colossalai.amp import AMP_TYPE
+from colossalai.legacy.amp import AMP_TYPE
 # hyperparameters
 # BATCH_SIZE is as per GPU
......
@@ -5,12 +5,12 @@ from titans.model.vit.vit import _create_vit_model
 from tqdm import tqdm
 import colossalai
-from colossalai.context import ParallelMode
-from colossalai.core import global_context as gpc
+from colossalai.legacy.context import ParallelMode
+from colossalai.legacy.core import global_context as gpc
 from colossalai.legacy.nn import CrossEntropyLoss
+from colossalai.legacy.pipeline.pipelinable import PipelinableContext
 from colossalai.logging import get_dist_logger
 from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR
-from colossalai.pipeline.pipelinable import PipelinableContext
 from colossalai.utils import is_using_pp
......
-from colossalai.amp import AMP_TYPE
+from colossalai.legacy.amp import AMP_TYPE
 # hyperparameters
 # BATCH_SIZE is as per GPU
......
 #!/bin/bash
 set -euxo pipefail
+echo "this test is outdated"
-pip install -r requirements.txt
+# pip install -r requirements.txt
 # run test
-colossalai run --nproc_per_node 4 --master_port 29500 train.py --config config.py --optimizer lars
-colossalai run --nproc_per_node 4 --master_port 29501 train.py --config config.py --optimizer lamb
+# colossalai run --nproc_per_node 4 --master_port 29500 train.py --config config.py --optimizer lars
+# colossalai run --nproc_per_node 4 --master_port 29501 train.py --config config.py --optimizer lamb
@@ -4,7 +4,7 @@ from torchvision.models import resnet18
 from tqdm import tqdm
 import colossalai
-from colossalai.core import global_context as gpc
+from colossalai.legacy.core import global_context as gpc
 from colossalai.logging import get_dist_logger
 from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR
 from colossalai.nn.optimizer import Lamb, Lars
......
@@ -2,7 +2,7 @@ try:
     from colossalai.zero.shard_utils import TensorShardStrategy
 except ImportError:
     # colossalai > 0.2.8
-    from colossalai.zero.legacy import TensorShardStrategy
+    from colossalai.legacy.zero import TensorShardStrategy
 zero = dict(model_config=dict(shard_strategy=TensorShardStrategy(),
                               tensor_placement_policy="auto",
......
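
The config above already guards the TensorShardStrategy import across the pre/post-0.2.8 split, and this commit moves the symbol a third time, from colossalai.zero.legacy to colossalai.legacy.zero. A sketch of a three-way fallback covering every location the class has lived in; the three paths come from the hunk, the nested try/except is an illustration, not part of the commit:

try:
    # colossalai <= 0.2.8
    from colossalai.zero.shard_utils import TensorShardStrategy
except ImportError:
    try:
        # colossalai > 0.2.8, before this commit
        from colossalai.zero.legacy import TensorShardStrategy
    except ImportError:
        # after this commit
        from colossalai.legacy.zero import TensorShardStrategy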
 import torch.distributed as dist
-from colossalai.context import ParallelMode
-from colossalai.core import global_context as gpc
+from colossalai.legacy.context import ParallelMode
+from colossalai.legacy.core import global_context as gpc
 class barrier_context():
......
@@ -51,12 +51,13 @@ from transformers import (
 from transformers.utils.versions import require_version
 import colossalai
-from colossalai.context import ParallelMode
-from colossalai.core import global_context as gpc
+from colossalai.legacy.context import ParallelMode
+from colossalai.legacy.core import global_context as gpc
+from colossalai.legacy.tensor import ProcessGroup
+from colossalai.legacy.utils import get_dataloader
 from colossalai.logging import disable_existing_loggers, get_dist_logger
 from colossalai.nn.optimizer import HybridAdam
-from colossalai.tensor import ProcessGroup
-from colossalai.utils import get_current_device, get_dataloader
+from colossalai.utils import get_current_device
 from colossalai.zero import GeminiOptimizer
 require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
......