import argparse
from contextlib import nullcontext
from typing import Callable, List, Union

import evaluate
import torch
import torch.distributed as dist
import torch.nn as nn
from data import GLUEDataBuilder
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
    AlbertForSequenceClassification,
    AutoConfig,
    BertForSequenceClassification,
    get_linear_schedule_with_warmup,
)

import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin, HybridParallelPlugin, LowLevelZeroPlugin, TorchDDPPlugin
from colossalai.cluster import DistCoordinator
from colossalai.lazy import LazyInitContext
from colossalai.nn.optimizer import HybridAdam
from colossalai.utils import get_current_device

# ==============================
# Prepare Hyperparameters
# ==============================
NUM_EPOCHS = 3
BATCH_SIZE = 32
LEARNING_RATE = 2.4e-5
WEIGHT_DECAY = 0.01
WARMUP_FRACTION = 0.1
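# NOTE: BATCH_SIZE is handed to GLUEDataBuilder as the per-dataloader batch
# size (presumably per process), and LEARNING_RATE is scaled linearly by the
# world size in main() (the linear scaling rule).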

# Helpers for building the booster criterion: transformers models return a
# ModelOutput whose .loss field holds the scalar loss.
output_transform_fn = lambda x: x
criterion = lambda x: x.loss


def move_to_cuda(batch):
    return {k: v.cuda() for k, v in batch.items()}


@torch.no_grad()
def evaluate_model(
    model: nn.Module,
    optimizer: Optimizer,
    criterion: Callable,
    test_dataloader: Union[DataLoader, List[DataLoader]],
    num_labels: int,
    task_name: str,
    eval_splits: List[str],
    booster: Booster,
    coordinator: DistCoordinator,
):
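    """Evaluate the model on one or more GLUE validation splits.

    Works with and without pipeline parallelism: under a pipeline plugin only
    the last stage computes loss and logits, and it broadcasts them to the
    other ranks in its pipeline group so every rank can update the GLUE metric.
    """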
    metric = evaluate.load("glue", task_name, process_id=coordinator.rank, num_process=coordinator.world_size)
    model.eval()

    def evaluate_subset(dataloader: DataLoader):
        accum_loss = torch.zeros(1, device=get_current_device())
        for batch in dataloader:
            batch = move_to_cuda(batch)
            labels = batch["labels"]
            batch_size = batch["input_ids"].shape[0]
            if hasattr(booster.plugin, "stage_manager") and booster.plugin.stage_manager is not None:
                pg_mesh = booster.plugin.pg_mesh
                pp_group = booster.plugin.pp_group
                current_pp_group_ranks = pg_mesh.get_ranks_in_group(pp_group)
                current_rank = dist.get_rank()
                # TODO: pass the dataloader to execute_pipeline directly
                batch = iter([batch])
                outputs = booster.execute_pipeline(batch,
                                                   model,
                                                   criterion,
                                                   optimizer,
                                                   return_loss=True,
                                                   return_outputs=True)

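                # Only the last pipeline stage computes the loss and logits, so
                # it broadcasts them to the other ranks in its pipeline group;
                # every rank can then update the metric and the accumulated loss.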
                if booster.plugin.stage_manager.is_last_stage():
                    val_loss = outputs["loss"]

                    logits = outputs["outputs"]["logits"]

                    accum_loss.add_(val_loss)

                    if num_labels > 1:
                        preds = torch.argmax(logits, dim=1)
                    elif num_labels == 1:
                        preds = logits.squeeze()

                    dist.broadcast(preds, src=current_rank, group=pp_group)
                    dist.broadcast(val_loss, src=current_rank, group=pp_group)

                    metric.add_batch(predictions=preds, references=labels)
                elif current_rank in current_pp_group_ranks:
                    val_loss = torch.empty((1,), device=get_current_device())
                    preds = torch.empty((batch_size,), dtype=torch.int64, device=get_current_device())

                    dist.broadcast(preds, src=current_pp_group_ranks[-1], group=pp_group)
                    dist.broadcast(val_loss, src=current_pp_group_ranks[-1], group=pp_group)

                    accum_loss.add_(val_loss)
                    metric.add_batch(predictions=preds, references=labels)

            else:
                # batch was already moved to CUDA at the top of the loop
                outputs = model(**batch)
                val_loss, logits = outputs[:2]
                accum_loss.add_(val_loss)

                if num_labels > 1:
                    preds = torch.argmax(logits, dim=1)
                elif num_labels == 1:
                    preds = logits.squeeze()

                metric.add_batch(predictions=preds, references=labels)

        results = metric.compute()
        dist.all_reduce(accum_loss.div_(len(dataloader)))
        if coordinator.is_master() and results is not None:
            results['loss'] = accum_loss.item() / coordinator.world_size

        return results

    if isinstance(test_dataloader, DataLoader):
        return evaluate_subset(test_dataloader)
    else:
        assert len(test_dataloader) == len(eval_splits)
        final_results = {}
        for split, sub_loader in zip(eval_splits, test_dataloader):
            results = evaluate_subset(sub_loader)
            final_results.update({f'{k}_{split}': v for k, v in results.items()})
        return final_results


def train_epoch(epoch: int, model: nn.Module, optimizer: Optimizer, _criterion: Callable, lr_scheduler: LRScheduler,
                train_dataloader: DataLoader, booster: Booster, coordinator: DistCoordinator):

    model.train()
    is_pp_last_stage = (hasattr(booster.plugin, "stage_manager") and booster.plugin.stage_manager is not None
                        and booster.plugin.stage_manager.is_last_stage())
    with tqdm(train_dataloader,
              desc=f'Epoch [{epoch + 1}/{NUM_EPOCHS}]',
              disable=not (coordinator.is_master() or is_pp_last_stage)) as pbar:
        for batch in pbar:
            # Forward pass
            batch = move_to_cuda(batch)
            if hasattr(booster.plugin, "stage_manager") and booster.plugin.stage_manager is not None:
                # TODO: pass train_dataloader to execute_pipeline directly
                batch = iter([batch])
                outputs = booster.execute_pipeline(batch,
                                                   model,
                                                   _criterion,
                                                   optimizer,
                                                   return_loss=True,
                                                   return_outputs=True)
                # Backward already ran inside execute_pipeline; only the last
                # stage holds a scalar loss to report on the progress bar.
                if booster.plugin.stage_manager.is_last_stage():
                    loss = outputs['loss']
                    pbar.set_postfix({'loss': loss.item()})
            else:
                outputs = model(**batch)
                loss = _criterion(outputs, None)
                # Backward
                booster.backward(loss, optimizer)
                pbar.set_postfix({'loss': loss.item()})

            # Step on every rank; gradients are already in place from
            # execute_pipeline or booster.backward above.
            optimizer.step()
            optimizer.zero_grad()
            lr_scheduler.step()


def main():
    # ==============================
    # Parse Arguments
    # ==============================
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--task', default='mrpc', help="GLUE task to run")
    parser.add_argument('-p',
                        '--plugin',
                        type=str,
                        default='torch_ddp',
                        choices=['torch_ddp', 'torch_ddp_fp16', 'gemini', 'low_level_zero', 'hybrid_parallel'],
                        help="plugin to use")
    parser.add_argument(
        "--model_type",
        type=str,
        default="bert",
        help="bert or albert",
    )
    parser.add_argument('--target_f1', type=float, default=None, help="target F1 score; raise an exception if it is not reached")
    # action='store_true' avoids the argparse type=bool pitfall, where any
    # non-empty string (including "False") parses as True
    parser.add_argument('--use_lazy_init', action='store_true', help="whether to build the model under a lazy-init context")
    args = parser.parse_args()

    if args.model_type == 'bert':
        model_name = "bert-base-uncased"
    elif args.model_type == 'albert':
        model_name = "albert-xxlarge-v2"
    else:
        raise RuntimeError(f"Unsupported model type: {args.model_type}")
    # ==============================
    # Launch Distributed Environment
    # ==============================
    colossalai.launch_from_torch(config={}, seed=42)
    coordinator = DistCoordinator()

    # local_batch_size = BATCH_SIZE // coordinator.world_size
    lr = LEARNING_RATE * coordinator.world_size

    # ==============================
    # Instantiate Plugin and Booster
    # ==============================
    booster_kwargs = {}
    if args.plugin == 'torch_ddp_fp16':
        booster_kwargs['mixed_precision'] = 'fp16'
    if args.plugin.startswith('torch_ddp'):
        plugin = TorchDDPPlugin()
    elif args.plugin == 'gemini':
        plugin = GeminiPlugin(initial_scale=2**5)
    elif args.plugin == 'low_level_zero':
        plugin = LowLevelZeroPlugin(initial_scale=2**5)
    elif args.plugin == 'hybrid_parallel':

        # modify the param accordingly for finetuning test cases
        plugin = HybridParallelPlugin(tp_size=1,
                                      pp_size=2,
                                      num_microbatches=None,
                                      microbatch_size=1,
                                      enable_all_optimization=True,
                                      zero_stage=1,
                                      precision='fp16',
                                      initial_scale=1)
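        # Other layouts are possible here, e.g. tp_size=2, pp_size=1 for pure
        # tensor parallelism; with num_microbatches=None the pipeline schedule
        # is derived from microbatch_size instead.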

    booster = Booster(plugin=plugin, **booster_kwargs)

    # ==============================
    # Prepare Dataloader
    # ==============================
    data_builder = GLUEDataBuilder(model_name,
                                   plugin,
                                   args.task,
                                   train_batch_size=BATCH_SIZE,
                                   eval_batch_size=BATCH_SIZE)
    train_dataloader = data_builder.train_dataloader()
    test_dataloader = data_builder.test_dataloader()
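    # Most GLUE tasks yield a single test DataLoader; tasks with several
    # validation splits (e.g. MNLI matched/mismatched) presumably yield a list,
    # which evaluate_model pairs with data_builder.eval_splits.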

    # ====================================
    # Prepare model, optimizer
    # ====================================
    # pretrained model (BERT or ALBERT) with a classification head sized by num_labels

    cfg = AutoConfig.from_pretrained(model_name, num_labels=data_builder.num_labels)

    # Assumed wiring for the otherwise-unused --use_lazy_init flag: build the
    # model under LazyInitContext so that the booster can materialize and shard
    # the weights lazily; nullcontext() keeps the default eager path.
    init_ctx = LazyInitContext() if args.use_lazy_init else nullcontext()
    with init_ctx:
        if model_name == "bert-base-uncased":
            model = BertForSequenceClassification.from_pretrained(model_name, config=cfg).cuda()
        elif model_name == "albert-xxlarge-v2":
            model = AlbertForSequenceClassification.from_pretrained(model_name, config=cfg)
        else:
            raise RuntimeError(f"Unsupported model name: {model_name}")

    # Optimizer: standard BERT fine-tuning recipe; bias and LayerNorm weights
    # are excluded from weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": WEIGHT_DECAY,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]

    optimizer = HybridAdam(optimizer_grouped_parameters, lr=lr, eps=1e-8)

    # LR scheduler: linear warmup over the first WARMUP_FRACTION of the total
    # optimizer steps (one per batch), then linear decay to zero.
    total_steps = len(train_dataloader) * NUM_EPOCHS
    num_warmup_steps = int(WARMUP_FRACTION * total_steps)
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=total_steps,
    )

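    # booster.boost and booster.execute_pipeline expect a criterion with the
    # signature criterion(outputs, inputs); inputs is ignored here because the
    # HF model already computes the loss from the labels in the batch.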
    def _criterion(outputs, inputs):
        outputs = output_transform_fn(outputs)
        loss = criterion(outputs)
        return loss

    # ==============================
    # Boost with ColossalAI
    # ==============================
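    # booster.boost wraps the model, optimizer, criterion and scheduler
    # according to the chosen plugin (e.g. DDP wrapping, ZeRO sharding, or
    # pipeline-stage partitioning for HybridParallelPlugin).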
    model, optimizer, _criterion, _, lr_scheduler = booster.boost(model,
                                                                  optimizer,
                                                                  criterion=_criterion,
                                                                  lr_scheduler=lr_scheduler)

    # ==============================
    # Train model
    # ==============================
    for epoch in range(NUM_EPOCHS):
        train_epoch(epoch, model, optimizer, _criterion, lr_scheduler, train_dataloader, booster, coordinator)

    results = evaluate_model(model, optimizer, _criterion, test_dataloader, data_builder.num_labels, args.task,
                             data_builder.eval_splits, booster, coordinator)

    if coordinator.is_master():
        print(results)
        if args.target_f1 is not None and 'f1' in results:
            assert results['f1'] >= args.target_f1, f'f1 score {results["f1"]} is lower than target {args.target_f1}'


if __name__ == '__main__':
    main()
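
# Example launch (assumption: one node with 4 GPUs; colossalai.launch_from_torch
# reads the environment variables set by torchrun):
#   torchrun --standalone --nproc_per_node 4 finetune.py --task mrpc --plugin hybrid_parallel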