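"""Tests for the flash-attn implementation of Baichuan against the HuggingFace reference.

The single-GPU tests run under plain pytest. The tensor-parallel tests are launched with
torchrun, e.g.:

    torchrun --no_python --nproc_per_node=2 pytest -q -s tests/models/test_baichuan.py \
        -k "test_baichuan_parallel_forward"
"""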
import os
import time
from pathlib import Path

current_dir = Path(__file__).parent.absolute()

import torch
import pytest

from einops import rearrange

from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM

from flash_attn.models.gpt import (
    GPTLMHeadModel,
    combine_state_dicts_tp,
    shard_state_dict_tp,
)
from flash_attn.models.baichuan import (
    remap_state_dict_hf_baichuan,
    baichuan_config_to_gpt2_config,
    config_from_checkpoint,
    state_dicts_from_checkpoint,
)
from flash_attn.utils.distributed import all_gather_raw
from flash_attn.utils.pretrained import state_dict_from_pretrained
from flash_attn.utils.generation import update_graph_cache


@pytest.mark.parametrize("model_name", ["baichuan-inc/Baichuan-7B"])
def test_baichuan_state_dict(model_name):
    config = baichuan_config_to_gpt2_config(
        AutoConfig.from_pretrained(model_name, trust_remote_code=True)
    )
    pretrained_state_dict = remap_state_dict_hf_baichuan(
        state_dict_from_pretrained(model_name), config
    )
    model = GPTLMHeadModel(config, device="meta")  # Without device='meta' init is very slow
    state_dict = model.state_dict()
    assert len(state_dict.keys()) == len(pretrained_state_dict.keys())
    assert state_dict.keys() == pretrained_state_dict.keys()
    for k in state_dict.keys():
        assert state_dict[k].shape == pretrained_state_dict[k].shape


@pytest.mark.parametrize("model_name", ["baichuan-inc/Baichuan-7B"])
def test_baichuan_optimized(model_name):
    """Check that our implementation of Baichuan (with all optimizations enabled) matches the
    HF implementation: the output of our forward pass in fp16 should be around the same as the HF
    forward pass in fp16, when compared to the HF forward pass in fp32.
    """
    dtype = torch.float16
    device = "cuda"
    config = baichuan_config_to_gpt2_config(
        AutoConfig.from_pretrained(model_name, trust_remote_code=True)
    )
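    # Enable the optimized code paths: FlashAttention kernels, fused bias + GEMM,
    # fused dropout + residual-add + LayerNorm, and an fp32 residual stream for accuracy.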
    config.use_flash_attn = True
    config.fused_bias_fc = True
    config.fused_mlp = False  # We don't have fused GatedMLP yet
    config.fused_dropout_add_ln = True
    config.residual_in_fp32 = True

    pretrained_state_dict = remap_state_dict_hf_baichuan(
        state_dict_from_pretrained(model_name), config
    )
    model = GPTLMHeadModel(config, device=device, dtype=dtype)
    model.load_state_dict(pretrained_state_dict)
    model.eval()

    torch.manual_seed(0)
    batch_size = 2
    max_seqlen = 256
    seqlens = torch.randint(
        max_seqlen // 2, max_seqlen + 1, (batch_size,), device=device
    )
    input_ids = torch.randint(
        0, config.vocab_size, (batch_size, max_seqlen), dtype=torch.long, device=device
    )
    with torch.no_grad():
        out = model.transformer(input_ids)
        logits = model(input_ids).logits
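    # Free the flash-attn model before loading the fp32 reference to limit peak GPU memory.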
    del model

    # Without device_map, the model is loaded on the CPU, which is very slow
    # Need auto here since the 13B fp32 model doesn't fit in memory on an A100 40GB
    model_ref = AutoModelForCausalLM.from_pretrained(
        model_name, device_map="auto", trust_remote_code=True
    )
    model_ref.eval()
    with torch.no_grad():
        out_ref = model_ref.model(input_ids).last_hidden_state.to(device=device)
        logits_ref = model_ref(input_ids).logits.to(device=device)
    del model_ref

    model_hf = AutoModelForCausalLM.from_pretrained(
        model_name, torch_dtype=dtype, device_map={"": device}, trust_remote_code=True,
    )
    model_hf.eval()
    with torch.no_grad():
        out_hf = model_hf.model(input_ids).last_hidden_state
        logits_hf = model_hf(input_ids).logits
    del model_hf

    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    print(f"HF fp16 max diff: {(out_hf - out_ref).abs().max().item()}")
    print(f"HF fp16 mean diff: {(out_hf - out_ref).abs().mean().item()}")
    assert (out - out_ref).abs().max().item() < 3 * (
        out_hf - out_ref
    ).abs().max().item()

    print(f"Logits max diff: {(logits - logits_ref).abs().max().item()}")
    print(f"Logits mean diff: {(logits - logits_ref).abs().mean().item()}")
    print(f"HF fp16 max diff: {(logits_hf - logits_ref).abs().max().item()}")
    print(f"HF fp16 mean diff: {(logits_hf - logits_ref).abs().mean().item()}")
    assert (logits - logits_ref).abs().max().item() < 3 * (
        logits_hf - logits_ref
    ).abs().max().item()


# torchrun --no_python --nproc_per_node=2 pytest -q -s tests/models/test_baichuan.py -k "test_baichuan_parallel_forward"
@pytest.mark.parametrize("world_size", [2])
@pytest.mark.parametrize("model_name", ["baichuan-inc/Baichuan-7B"])
def test_baichuan_parallel_forward(model_name, world_size):
    """Check that our implementation of Baichuan (with all optimizations enabled) matches the
    HF implementation: the output of our forward pass in fp16 should be around the same as the HF
    forward pass in fp16, when compared to the HF forward pass in fp32.
    """
    from apex.transformer import parallel_state

    dtype = torch.float16
    config = baichuan_config_to_gpt2_config(
        AutoConfig.from_pretrained(model_name, trust_remote_code=True)
    )
    config.use_flash_attn = True
    config.fused_bias_fc = True
    config.fused_mlp = False  # We don't have fused GatedMLP yet
    config.fused_dropout_add_ln = True
    config.residual_in_fp32 = True

    if not torch.distributed.is_initialized():
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
    device = f"cuda:{torch.distributed.get_rank()}"
    assert world_size <= torch.distributed.get_world_size()
    parallel_state.initialize_model_parallel(tensor_model_parallel_size_=world_size)
    rank = parallel_state.get_tensor_model_parallel_rank()
    process_group = parallel_state.get_tensor_model_parallel_group()

    pretrained_state_dict = remap_state_dict_hf_baichuan(
        state_dict_from_pretrained(model_name), config
    )

    model = GPTLMHeadModel(
        config, process_group=process_group, device=device, dtype=dtype
    )
    model.load_state_dict(
        shard_state_dict_tp(pretrained_state_dict, config, world_size, rank)
    )
    model.eval()

    torch.manual_seed(0)
    batch_size = 2
    max_seqlen = 256
    seqlens = torch.randint(
        max_seqlen // 2, max_seqlen + 1, (batch_size,), device=device
    )
    input_ids = torch.randint(
        0, config.vocab_size, (batch_size, max_seqlen), dtype=torch.long, device=device
    )
    with torch.no_grad():
        # The hidden states come back sharded along the sequence dimension
        # (sequence parallelism), so gather them and restore the (batch, seqlen) layout.
        out = model.transformer(input_ids)
        out, _ = all_gather_raw(out, process_group=process_group)
        out = rearrange(out, "(b s) d -> b s d", b=batch_size)
        # The logits are additionally sharded along the vocab dimension: gather the
        # per-rank shards and concatenate them back into the full vocab.
        logits = model(input_ids).logits
        logits = rearrange(logits, "(b s) d -> b s d", b=batch_size)
        logits, _ = all_gather_raw(logits, process_group)
        logits = rearrange(logits, "(n b) ... d -> b ... (n d)", b=batch_size)
    del model
    parallel_state.destroy_model_parallel()

    if rank == 0:
        # Without device_map, the model is loaded on the CPU, which is very slow
        model_ref = AutoModelForCausalLM.from_pretrained(
            model_name, device_map="auto", trust_remote_code=True
        )
        model_ref.eval()
        with torch.no_grad():
            out_ref = model_ref.model(input_ids).last_hidden_state.to(device=device)
            logits_ref = model_ref(input_ids).logits.to(device=device)
        del model_ref

        model_hf = AutoModelForCausalLM.from_pretrained(
            model_name, torch_dtype=dtype, device_map="auto", trust_remote_code=True
        )
        model_hf.eval()
        with torch.no_grad():
            out_hf = model_hf.model(input_ids).last_hidden_state.to(device=device)
            logits_hf = model_hf(input_ids).logits.to(device=device)
        del model_hf

        print(f"Output max diff: {(out - out_ref).abs().max().item()}")
        print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
        print(f"HF fp16 max diff: {(out_hf - out_ref).abs().max().item()}")
        print(f"HF fp16 mean diff: {(out_hf - out_ref).abs().mean().item()}")
        assert (out - out_ref).abs().max().item() < 2 * (
            out_hf - out_ref
        ).abs().max().item()

        print(f"Logits max diff: {(logits - logits_ref).abs().max().item()}")
        print(f"Logits mean diff: {(logits - logits_ref).abs().mean().item()}")
        print(f"HF fp16 max diff: {(logits_hf - logits_ref).abs().max().item()}")
        print(f"HF fp16 mean diff: {(logits_hf - logits_ref).abs().mean().item()}")
        assert (logits - logits_ref).abs().max().item() < 2 * (
            logits_hf - logits_ref
        ).abs().max().item()


@pytest.mark.parametrize("model_name", ["baichuan-inc/Baichuan-7B"])
def test_baichuan_generation(model_name):
    dtype = torch.float16
    device = "cuda"
    config = baichuan_config_to_gpt2_config(
        AutoConfig.from_pretrained(model_name, trust_remote_code=True)
    )
    config.use_flash_attn = True
    config.fused_bias_fc = True
    config.fused_mlp = False  # We don't have fused GatedMLP yet
    config.fused_dropout_add_ln = True
    config.residual_in_fp32 = True

    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    eos_token_id = tokenizer.eos_token_id

    torch.manual_seed(0)
    batch_size = 1
    seqlen = 2048
    max_length = 2048 + 150
    input_ids = torch.randint(
        0, config.vocab_size, (batch_size, seqlen), dtype=torch.long, device=device
    )
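    # Random token IDs are fine here: we compare logits/scores against the HF implementation
    # on the same inputs, not the quality of the generated text.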

    model_hf = AutoModelForCausalLM.from_pretrained(
        model_name, torch_dtype=dtype, device_map={"": device}, trust_remote_code=True
    )
    model_hf.eval()
    print("HF fp16")
    torch.cuda.synchronize()
    start = time.time()
    out_hf = model_hf.generate(
        input_ids=input_ids,
        max_length=max_length,
        return_dict_in_generate=True,
        output_scores=True,
    )
    torch.cuda.synchronize()
    print(f"Prompt processing + decoding time: {(time.time() - start) * 1000:.0f}ms")
    del model_hf

    # Need auto here since the 13B fp32 model doesn't fit in memory on a A100 40GB
    model_ref = AutoModelForCausalLM.from_pretrained(
        model_name, device_map="auto", trust_remote_code=True
    )
    model_ref.eval()
    with torch.no_grad():
        logits_ref = (
            model_ref(out_hf.sequences).logits[:, (seqlen - 1) : -1].to(device=device)
        )
    del model_ref

    pretrained_state_dict = remap_state_dict_hf_baichuan(
        state_dict_from_pretrained(model_name), config
    )
    model = GPTLMHeadModel(config, device=device, dtype=dtype)
    model.load_state_dict(pretrained_state_dict)
    model.eval()

    model(input_ids)  # Warm up, so one-time kernel-loading costs don't skew the timings below
    print("Without CUDA graph")
    torch.cuda.synchronize()
    start = time.time()
    out = model.generate(
        input_ids=input_ids,
        max_length=max_length,
        eos_token_id=eos_token_id,
        return_dict_in_generate=True,
        output_scores=True,
        enable_timing=True,
        teacher_outputs=out_hf.sequences,
    )
    torch.cuda.synchronize()
    print(f"Prompt processing + decoding time: {(time.time() - start) * 1000:.0f}ms")

    # Capture graph outside the timing loop
    batch_size, seqlen_og = input_ids.shape
    model._decoding_cache = update_graph_cache(model, None, batch_size, seqlen_og, max_length)
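    # update_graph_cache pre-captures the single-token decoding step as a CUDA graph (keyed by
    # batch size and sequence length), so generate(..., cg=True) replays the captured graph
    # instead of re-launching every kernel at each decoding step.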
    print("With CUDA graph")
    torch.cuda.synchronize()
    start = time.time()
    out_cg = model.generate(
        input_ids=input_ids,
        max_length=max_length,
        cg=True,
        return_dict_in_generate=True,
        output_scores=True,
        enable_timing=True,
        teacher_outputs=out_hf.sequences,
    )
    torch.cuda.synchronize()
    print(f"Prompt processing + decoding time: {(time.time() - start) * 1000:.0f}ms")

    with torch.no_grad():
        # "parallel" = teacher-forced forward over the full generated sequence in one pass,
        # as opposed to the step-by-step decoding above.
        logits_parallel = model(out_hf.sequences).logits[:, (seqlen - 1) : -1]
    # Each element of .scores is the (batch, vocab) logits for one generated token;
    # stacking gives (batch, num_generated_tokens, vocab).
    logits_hf = torch.stack(out_hf.scores, dim=1)
    logits = torch.stack(out.scores, dim=1)
    logits_cg = torch.stack(out_cg.scores, dim=1)

    del model

    hf_error = (logits_hf - logits_ref).abs().max().item()

    print(f"HF fp16 logits max diff: {hf_error}")
    print(f"Logits max diff: {(logits - logits_ref).abs().max().item() }")
    print(f"Logits CG max diff: {(logits_cg - logits_ref).abs().max().item() }")

    assert (logits_parallel - logits_ref).abs().max().item() < 2 * hf_error
    assert (logits - logits_ref).abs().max().item() < 2 * hf_error
    assert torch.equal(logits_cg, logits)  # CUDA-graph decoding must match regular decoding exactly


# torchrun --no_python --nproc_per_node=2 pytest -q -s tests/models/test_baichuan.py -k "baichuan_parallel_generation"
@pytest.mark.parametrize("world_size", [2])
@pytest.mark.parametrize("model_name", ["baichuan-inc/Baichuan-7B"])
def test_baichuan_parallel_generation(model_name, world_size):
    """Check that our implementation matches the HF implementation:
    the scores in fp16 should be around the same as the HF scores in fp16, when compared to
    the HF scores in fp32.
    """
    from apex.transformer import parallel_state

    dtype = torch.float16
    config = baichuan_config_to_gpt2_config(
        AutoConfig.from_pretrained(model_name, trust_remote_code=True)
    )
    config.use_flash_attn = True
    config.fused_bias_fc = True
    config.fused_mlp = False  # We don't have fused GatedMLP yet
    config.fused_dropout_add_ln = False
    config.residual_in_fp32 = True
    config.pad_vocab_size_multiple = 8 * world_size
    config.sequence_parallel = False  # Need to set this to False for generation
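    # The padded vocab must divide evenly across the tensor-parallel ranks for the sharded
    # embedding / lm_head; sequence parallelism is off since decoding proceeds one token at a
    # time, leaving no sequence dimension to shard.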

    os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
    if not torch.distributed.is_initialized():
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
    device = f"cuda:{torch.distributed.get_rank()}"
    assert world_size <= torch.distributed.get_world_size()
    parallel_state.initialize_model_parallel(tensor_model_parallel_size_=world_size)
    rank = parallel_state.get_tensor_model_parallel_rank()
    process_group = parallel_state.get_tensor_model_parallel_group()

    torch.manual_seed(0)
    batch_size = 1
    seqlen = 100
    max_length = 150
    input_ids = torch.randint(
        0, config.vocab_size, (batch_size, seqlen), dtype=torch.long, device=device
    )

    # Need this, otherwise when we capture the graph the process for GPU 1 would run on
    # both GPU 0 and GPU 1 and things would hang
    torch.cuda.set_device(device)

    pretrained_state_dict = remap_state_dict_hf_baichuan(
        state_dict_from_pretrained(model_name), config
    )

    model = GPTLMHeadModel(
        config, process_group=process_group, device=device, dtype=dtype
    )
    model.load_state_dict(
        shard_state_dict_tp(pretrained_state_dict, config, world_size, rank)
    )
    model.eval()

    print("Without CUDA graph")
    out = model.generate(
        input_ids=input_ids,
        max_length=max_length,
        tensor_parallel=world_size,
        vocab_size=config.vocab_size,
        # teacher_outputs=out_hf.sequences,
        return_dict_in_generate=True,
        output_scores=True,
        enable_timing=True,
    )

    # Capture graph outside the timing loop
    batch_size, seqlen_og = input_ids.shape
    model._decoding_cache = update_graph_cache(model, None, batch_size, seqlen_og, max_length)
    print("With CUDA graph")
    out_cg = model.generate(
        input_ids=input_ids,
        max_length=max_length,
        tensor_parallel=world_size,
        vocab_size=config.vocab_size,
        cg=True,
        # teacher_outputs=out_hf.sequences,
        return_dict_in_generate=True,
        output_scores=True,
        enable_timing=True,
    )
    del model
    parallel_state.destroy_model_parallel()

    if rank == 0:
        # Without device_map, the model is loaded on the CPU, which is very slow
        model_hf = AutoModelForCausalLM.from_pretrained(
            model_name, torch_dtype=dtype, device_map="auto", trust_remote_code=True
        )
        model_hf.eval()
        print("HF fp16")
        torch.cuda.synchronize()
        start = time.time()
        with torch.inference_mode():
            out_hf = model_hf.generate(
                input_ids=input_ids,
                max_length=max_length,
                return_dict_in_generate=True,
                output_scores=True,
            )
        torch.cuda.synchronize()
        print(
            f"Prompt processing + decoding time: {(time.time() - start) * 1000:.0f}ms"
        )
        del model_hf

        model_ref = AutoModelForCausalLM.from_pretrained(
            model_name, device_map="auto", trust_remote_code=True
        )
        model_ref.eval()
        with torch.inference_mode():
            logits_ref = model_ref(out_hf.sequences).logits[:, (seqlen - 1) : -1]
        del model_ref
        logits_hf = torch.stack(out_hf.scores, dim=1)

        logits = torch.stack(out.scores, dim=1)
        logits_cg = torch.stack(out_cg.scores, dim=1)

        hf_error = (logits_hf - logits_ref).abs().max().item()
        print(f"HF fp16 logits max diff: {hf_error}")
        print(f"Logits max diff: {(logits - logits_ref).abs().max().item() }")
        print(f"Logits CG max diff: {(logits_cg - logits_ref).abs().max().item() }")
        assert (logits - logits_ref).abs().max().item() < 2 * hf_error
        assert torch.equal(logits_cg, logits)