import argparse
import math
import os

import loralib as lora
import torch
import torch.distributed as dist
from coati.dataset import DataCollatorForSupervisedDataset, SFTDataset, SupervisedDataset
from coati.models import convert_to_lora_module
from coati.trainer import SFTTrainer
from coati.trainer.strategies import DDPStrategy, GeminiStrategy, LowLevelZeroStrategy
from datasets import load_dataset
from torch.optim import Adam
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from transformers import AutoTokenizer, BloomConfig, BloomForCausalLM, BloomTokenizerFast, LlamaConfig, LlamaForCausalLM
from transformers.models.gpt2.configuration_gpt2 import GPT2Config
from transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.models.opt.configuration_opt import OPTConfig
from transformers.models.opt.modeling_opt import OPTForCausalLM
from transformers.trainer import get_scheduler

from colossalai.logging import get_dist_logger
from colossalai.nn.optimizer import HybridAdam
from colossalai.tensor import ColoParameter


def train(args):
    # configure strategy
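    # - ddp: vanilla PyTorch DistributedDataParallel, one full model replica per GPU
    # - colossalai_zero2 / _cpu: ZeRO stage-2, sharding gradients and optimizer states
    #   across ranks; the _cpu variant additionally offloads them to host memory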
    if args.strategy == 'ddp':
        strategy = DDPStrategy()
    elif args.strategy == 'colossalai_gemini':
        raise NotImplementedError(
            'Gemini does not support .from_pretrained() yet. We will update this after checkpoint I/O is ready.')
        strategy = GeminiStrategy(placement_policy='cuda')    # unreachable until then
    elif args.strategy == 'colossalai_zero2':
        strategy = LowLevelZeroStrategy(stage=2, placement_policy='cuda')
    elif args.strategy == 'colossalai_zero2_cpu':
        strategy = LowLevelZeroStrategy(stage=2, placement_policy='cpu')
    else:
        raise ValueError(f'Unsupported strategy "{args.strategy}"')

    # configure model
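    # model_init_context() lets the strategy decide where the weights materialize;
    # convert_to_lora_module injects LoRA adapters when lora_rank > 0 (the model is
    # left unchanged at rank 0), and .half() casts to fp16 before moving to the GPU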
    with strategy.model_init_context():
        if args.model == 'bloom':
            model = convert_to_lora_module(BloomForCausalLM.from_pretrained(args.pretrain),
                                           args.lora_rank).half().cuda()
        elif args.model == 'opt':
            model = convert_to_lora_module(OPTForCausalLM.from_pretrained(args.pretrain), args.lora_rank).half().cuda()
        elif args.model == 'gpt2':
            model = convert_to_lora_module(GPT2LMHeadModel.from_pretrained(args.pretrain), args.lora_rank).half().cuda()
        elif args.model == 'llama':
            model = convert_to_lora_module(LlamaForCausalLM.from_pretrained(args.pretrain),
                                           args.lora_rank).half().cuda()
        else:
            raise ValueError(f'Unsupported model "{args.model}"')
    if args.grad_checkpoint:
        model.gradient_checkpointing_enable()
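        # checkpointing drops intermediate activations and recomputes them during the
        # backward pass, trading extra compute for a much smaller memory footprint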

    # configure tokenizer
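    # these causal-LM tokenizers ship without a pad token, so padding reuses the eos
    # token; the collator is expected to mask padded positions out of the loss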
    if args.model == 'gpt2':
        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        tokenizer.pad_token = tokenizer.eos_token
    elif args.model == 'bloom':
        tokenizer = BloomTokenizerFast.from_pretrained('bigscience/bloom-560m')
        tokenizer.pad_token = tokenizer.eos_token
    elif args.model == 'opt':
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
        tokenizer.pad_token = tokenizer.eos_token
    elif args.model == 'llama':
        tokenizer = AutoTokenizer.from_pretrained(
            args.pretrain,
            padding_side="right",
            use_fast=False,
        )
        tokenizer.eos_token = '</s>'
        tokenizer.pad_token = tokenizer.eos_token
    else:
        raise ValueError(f'Unsupported model "{args.model}"')

    if args.model == 'llama' and args.strategy == 'colossalai_gemini':
        # hack to handle the resized embedding: wrap every remaining plain parameter
        # as a ColoParameter, which Colossal-AI Gemini requires for all model parameters
        # (currently unreachable, since the 'colossalai_gemini' strategy raises above)
        for name, param in model.named_parameters():
            if not isinstance(param, ColoParameter):
                sub_module_name = '.'.join(name.split('.')[:-1])
                weight_name = name.split('.')[-1]
                sub_module = model.get_submodule(sub_module_name)
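                # re-assign via setattr so the ColoParameter replaces the plain parameter in place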
                setattr(sub_module, weight_name, ColoParameter(param))

    # configure optimizer
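    # HybridAdam is Colossal-AI's fused Adam that can update parameters living on GPU
    # or CPU, as the offloading strategies require; plain torch Adam suffices for DDP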
    if args.strategy.startswith('colossalai'):
        optim = HybridAdam(model.parameters(), lr=args.lr, clipping_norm=1.0)
    else:
        optim = Adam(model.parameters(), lr=args.lr)

    logger = get_dist_logger()

    # configure dataset
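    # two data paths: stream 'yizhongw/self_instruct' from the HuggingFace hub, or load
    # a local instruction-tuning file (e.g. Alpaca-style JSON) via SupervisedDataset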
    if args.dataset == 'yizhongw/self_instruct':
        train_data = load_dataset(args.dataset, 'super_natural_instructions', split='train')
        eval_data = load_dataset(args.dataset, 'super_natural_instructions', split='test')

        train_dataset = SFTDataset(train_data, tokenizer, args.max_len)
        eval_dataset = SFTDataset(eval_data, tokenizer, args.max_len)

    else:
        train_dataset = SupervisedDataset(tokenizer=tokenizer,
                                          data_path=args.dataset,
                                          max_datasets_size=args.max_datasets_size,
                                          max_length=args.max_len)
        eval_dataset = None
    data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)

    if dist.is_initialized() and dist.get_world_size() > 1:
        train_sampler = DistributedSampler(train_dataset,
                                           shuffle=True,
                                           seed=42,
                                           drop_last=True,
                                           rank=dist.get_rank(),
                                           num_replicas=dist.get_world_size())
        if eval_dataset is not None:
            eval_sampler = DistributedSampler(eval_dataset,
                                              shuffle=False,
                                              seed=42,
                                              drop_last=False,
                                              rank=dist.get_rank(),
                                              num_replicas=dist.get_world_size())
        else:
            eval_sampler = None    # keep the name bound even without an eval set
    else:
        train_sampler = None
        eval_sampler = None

    train_dataloader = DataLoader(train_dataset,
                                  shuffle=(train_sampler is None),
                                  sampler=train_sampler,
                                  batch_size=args.batch_size,
                                  collate_fn=data_collator,
                                  pin_memory=True)
    if eval_dataset is not None:
        eval_dataloader = DataLoader(eval_dataset,
                                     shuffle=(eval_sampler is None),
                                     sampler=eval_sampler,
                                     batch_size=args.batch_size,
                                     collate_fn=data_collator,
                                     pin_memory=True)
    else:
        eval_dataloader = None
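    # DataLoader forbids shuffle=True together with an explicit sampler, hence
    # shuffle=(sampler is None); DistributedSampler does its own per-epoch shuffling
    # (assuming the trainer calls set_epoch each epoch)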

    num_update_steps_per_epoch = len(train_dataloader) // args.accumulation_steps
    max_steps = math.ceil(args.max_epochs * num_update_steps_per_epoch)
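    # cosine decay over all optimizer updates, with the first ~3% as linear warmup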
    lr_scheduler = get_scheduler("cosine",
                                 optim,
                                 num_warmup_steps=math.ceil(max_steps * 0.03),
                                 num_training_steps=max_steps)
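
    # prepare() hands model/optimizer/scheduler to the strategy, which wraps them with
    # the matching distributed machinery (DDP wrapping, ZeRO sharding, offload hooks)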
    strategy_dict = strategy.prepare(dict(model=model, optimizer=optim, lr_scheduler=lr_scheduler))
    model = strategy_dict['model']
    optim = strategy_dict['optimizer']
    lr_scheduler = strategy_dict['lr_scheduler']
    trainer = SFTTrainer(model=model,
                         strategy=strategy,
                         optim=optim,
                         lr_scheduler=lr_scheduler,
                         max_epochs=args.max_epochs,
                         accumulation_steps=args.accumulation_steps)
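    # gradients are accumulated over accumulation_steps micro-batches, so the effective
    # global batch size is batch_size * accumulation_steps * world_size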

    trainer.fit(train_dataloader=train_dataloader,
                eval_dataloader=eval_dataloader,
                logger=logger,
                use_wandb=args.use_wandb)

    # save the model checkpoint after fitting; only rank 0 writes the files
    strategy.save_pretrained(model, path=args.save_path, only_rank0=True, tokenizer=tokenizer)
    # save optimizer checkpoint on all ranks
    if args.need_optim_ckpt:
        strategy.save_optimizer(trainer.optimizer,
                                'sft_optim_checkpoint_%d.pt' % (torch.cuda.current_device()),
                                only_rank0=False)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--strategy',
                        choices=['ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_zero2_cpu'],
                        default='colossalai_zero2')
    parser.add_argument('--model', choices=['gpt2', 'bloom', 'opt', 'llama'], default='bloom')
    parser.add_argument('--pretrain', type=str, default=None)
    parser.add_argument('--dataset', type=str, default=None)
    parser.add_argument('--max_datasets_size', type=int, default=None)
    parser.add_argument('--save_path', type=str, default='output')
    parser.add_argument('--need_optim_ckpt', default=False, action='store_true')
    parser.add_argument('--max_epochs', type=int, default=3)
    parser.add_argument('--batch_size', type=int, default=4)
    parser.add_argument('--max_len', type=int, default=512)
    parser.add_argument('--lora_rank', type=int, default=0, help="rank of the LoRA matrices; 0 disables LoRA")
    parser.add_argument('--log_interval', type=int, default=100, help="log every N steps")
    parser.add_argument('--lr', type=float, default=5e-6)
    parser.add_argument('--accumulation_steps', type=int, default=8)
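    # with the defaults above (batch_size=4, accumulation_steps=8), each rank applies
    # an optimizer step once per 32 samples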
    parser.add_argument('--use_wandb', default=False, action='store_true')
200
    parser.add_argument('--grad_checkpoint', default=False, action='store_true')
    args = parser.parse_args()
    train(args)