"applications/Colossal-LLaMA/README.md" did not exist on "e094933da1d0a574eda105ab6ec0f171d8ddaebb"
train_sft.py 9.87 KB
Newer Older
Fazzie-Maqianli's avatar
Fazzie-Maqianli committed
1
import argparse
import math
import os

import loralib as lora
import torch
import torch.distributed as dist
from coati.dataset import DataCollatorForSupervisedDataset, SFTDataset, SupervisedDataset
from coati.models import convert_to_lora_module
from coati.trainer import SFTTrainer
from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy
from coati.utils import prepare_llama_tokenizer_and_embedding
from datasets import load_dataset
from torch.optim import Adam
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from transformers import AutoTokenizer, BloomConfig, BloomForCausalLM, BloomTokenizerFast, LlamaConfig, LlamaForCausalLM
from transformers.models.gpt2.configuration_gpt2 import GPT2Config
from transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.models.opt.configuration_opt import OPTConfig
from transformers.models.opt.modeling_opt import OPTForCausalLM
from transformers.trainer import get_scheduler

from colossalai.logging import get_dist_logger
from colossalai.nn.optimizer import HybridAdam
from colossalai.tensor import ColoParameter
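
# Example launch (an illustrative sketch; GPU count and all paths are placeholders):
#   torchrun --standalone --nproc_per_node=4 train_sft.py \
#       --model llama --pretrain /path/to/llama-7b \
#       --dataset /path/to/data.json --strategy colossalai_zero2 \
#       --save_path /path/to/output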


def train(args):
    # configure strategy
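    # naive: single process; ddp: PyTorch DDP; colossalai_*: ZeRO stage 2/3 via ColossalAI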
    if args.strategy == 'naive':
        strategy = NaiveStrategy()
    elif args.strategy == 'ddp':
        strategy = DDPStrategy()
    elif args.strategy == 'colossalai_gemini':
        raise NotImplementedError(
            'Gemini does not support .from_pretrained() yet. We will update this after checkpoint io is ready.')
        strategy = ColossalAIStrategy(stage=3, placement_policy='cuda')
    elif args.strategy == 'colossalai_zero2':
        strategy = ColossalAIStrategy(stage=2, placement_policy='cuda')
    elif args.strategy == 'colossalai_zero2_cpu':
        strategy = ColossalAIStrategy(stage=2, placement_policy='cpu')
    else:
        raise ValueError(f'Unsupported strategy "{args.strategy}"')

    # configure model
    with strategy.model_init_context():
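        # load the pretrained checkpoint, inject LoRA adapters, and cast to fp16 on the current GPU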
        if args.model == 'bloom':
            model = convert_to_lora_module(BloomForCausalLM.from_pretrained(args.pretrain),
                                           args.lora_rank).half().cuda()
        elif args.model == 'opt':
            model = convert_to_lora_module(OPTForCausalLM.from_pretrained(args.pretrain), args.lora_rank).half().cuda()
        elif args.model == 'gpt2':
            model = convert_to_lora_module(GPT2LMHeadModel.from_pretrained(args.pretrain), args.lora_rank).half().cuda()
        elif args.model == 'llama':
            model = convert_to_lora_module(LlamaForCausalLM.from_pretrained(args.pretrain),
                                           args.lora_rank).half().cuda()
        else:
            raise ValueError(f'Unsupported model "{args.model}"')
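
    # gradient checkpointing recomputes activations in backward, trading compute for memory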
    if args.grad_checkpoint:
        model.gradient_checkpointing_enable()

    # configure tokenizer
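    # models without a dedicated pad token reuse eos as padding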
    if args.model == 'gpt2':
        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        tokenizer.pad_token = tokenizer.eos_token
    elif args.model == 'bloom':
        tokenizer = BloomTokenizerFast.from_pretrained(args.pretrain)
        tokenizer.pad_token = tokenizer.eos_token
    elif args.model == 'opt':
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
    elif args.model == 'llama':
        tokenizer = AutoTokenizer.from_pretrained(
            args.pretrain,
            padding_side="right",
            use_fast=False,
        )
        tokenizer.eos_token = '</s>'
    else:
        raise ValueError(f'Unsupported model "{args.model}"')
    tokenizer.pad_token = tokenizer.eos_token
    max_len = args.max_len
    if args.model == 'llama':
        tokenizer = prepare_llama_tokenizer_and_embedding(tokenizer, model)

        if args.strategy == 'colossalai_gemini':
            # this is a hack to deal with the resized embedding
            # to make sure all parameters are ColoParameter for Colossal-AI Gemini compatibility
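            # replace any remaining plain nn.Parameter (e.g. the resized embeddings) with ColoParameter in place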
            for name, param in model.named_parameters():
                if not isinstance(param, ColoParameter):
                    sub_module_name = '.'.join(name.split('.')[:-1])
                    weight_name = name.split('.')[-1]
                    sub_module = model.get_submodule(sub_module_name)
                    setattr(sub_module, weight_name, ColoParameter(param))
    else:
        tokenizer.pad_token = tokenizer.eos_token

    # configure optimizer
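    # HybridAdam is ColossalAI's fused Adam that supports parameters placed on CPU or GPU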
    if args.strategy.startswith('colossalai'):
        optim = HybridAdam(model.parameters(), lr=args.lr, clipping_norm=1.0)
    else:
        optim = Adam(model.parameters(), lr=args.lr)

    logger = get_dist_logger()

    # configure dataset
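    # either pull yizhongw/self_instruct from the Hugging Face Hub or read a local supervised dataset file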
    if args.dataset == 'yizhongw/self_instruct':
        train_data = load_dataset(args.dataset, 'super_natural_instructions', split='train')
        eval_data = load_dataset(args.dataset, 'super_natural_instructions', split='test')

        train_dataset = SFTDataset(train_data, tokenizer, max_len)
        eval_dataset = SFTDataset(eval_data, tokenizer, max_len)

    else:
        train_dataset = SupervisedDataset(tokenizer=tokenizer,
                                          data_path=args.dataset,
                                          max_datasets_size=args.max_datasets_size,
                                          max_length=max_len)
        eval_dataset = None
    data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
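
    # in distributed runs, shard the data with DistributedSampler so each rank sees a disjoint subset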
    if dist.is_initialized() and dist.get_world_size() > 1:
        train_sampler = DistributedSampler(train_dataset,
                                           shuffle=True,
                                           seed=42,
                                           drop_last=True,
                                           rank=dist.get_rank(),
                                           num_replicas=dist.get_world_size())
        if eval_dataset is not None:
            eval_sampler = DistributedSampler(eval_dataset,
                                              shuffle=False,
                                              seed=42,
                                              drop_last=False,
                                              rank=dist.get_rank(),
                                              num_replicas=dist.get_world_size())
    else:
        train_sampler = None
        eval_sampler = None

    train_dataloader = DataLoader(train_dataset,
                                  shuffle=(train_sampler is None),
                                  sampler=train_sampler,
                                  batch_size=args.batch_size,
                                  collate_fn=data_collator,
                                  pin_memory=True)
    if eval_dataset is not None:
        eval_dataloader = DataLoader(eval_dataset,
                                     shuffle=(eval_sampler is None),
                                     sampler=eval_sampler,
                                     batch_size=args.batch_size,
                                     collate_fn=data_collator,
                                     pin_memory=True)
    else:
        eval_dataloader = None

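    # one optimizer update happens every accumulation_steps batches; warm up the cosine schedule for ~3% of all updates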
    num_update_steps_per_epoch = len(train_dataloader) // args.accumulation_steps
    max_steps = math.ceil(args.max_epochs * num_update_steps_per_epoch)
    lr_scheduler = get_scheduler("cosine",
                                 optim,
                                 num_warmup_steps=math.ceil(max_steps * 0.03),
                                 num_training_steps=max_steps)
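    # let the strategy wrap model, optimizer and scheduler (e.g. ZeRO sharding for the colossalai strategies)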
    strategy_dict = strategy.prepare(
        dict(model=model, optimizer=optim, lr_scheduler=lr_scheduler)
    )
    model = strategy_dict['model']
    optim = strategy_dict['optimizer']
    lr_scheduler = strategy_dict['lr_scheduler']
    trainer = SFTTrainer(model=model,
                         strategy=strategy,
                         optim=optim,
                         lr_scheduler=lr_scheduler,
                         max_epochs=args.max_epochs,
                         accumulation_steps=args.accumulation_steps)

    trainer.fit(train_dataloader=train_dataloader,
                eval_dataloader=eval_dataloader,
                logger=logger,
                use_wandb=args.use_wandb)

    # save the model checkpoint after fitting (rank 0 only)
    strategy.save_pretrained(model, path=args.save_path, only_rank0=True, tokenizer=tokenizer)
    # save optimizer checkpoint on all ranks
    if args.need_optim_ckpt:
        strategy.save_optimizer(trainer.optimizer,
                                'sft_optim_checkpoint_%d.pt' % (torch.cuda.current_device()),
                                only_rank0=False)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--strategy',
                        choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_zero2_cpu'],
                        default='colossalai_zero2')
    parser.add_argument('--model', choices=['gpt2', 'bloom', 'opt', 'llama'], default='bloom')
    parser.add_argument('--pretrain', type=str, default=None)
    parser.add_argument('--dataset', type=str, default=None)
    parser.add_argument('--max_datasets_size', type=int, default=None)
    parser.add_argument('--save_path', type=str, default='output')
    parser.add_argument('--need_optim_ckpt', default=False, action='store_true')
    parser.add_argument('--max_epochs', type=int, default=3)
    parser.add_argument('--batch_size', type=int, default=4)
    parser.add_argument('--max_len', type=int, default=512)
    parser.add_argument('--lora_rank', type=int, default=0, help="rank of the LoRA matrices; 0 disables LoRA")
    parser.add_argument('--log_interval', type=int, default=100, help="log every log_interval steps")
    parser.add_argument('--lr', type=float, default=5e-6)
    parser.add_argument('--accumulation_steps', type=int, default=8)
    parser.add_argument('--use_wandb', default=False, action='store_true')
    parser.add_argument('--grad_checkpoint', default=False, action='store_true')
    args = parser.parse_args()
    train(args)