# Copyright (c) 2023, Tri Dao.

# To run the huggingface implementation, we first need to convert the weights:
# https://github.com/huggingface/transformers/pull/21955
# python -m transformers.models.llama.convert_llama_weights_to_hf --input_dir $CHECKPOINT_DIR/llama --model_size 7B --output_dir $CHECKPOINT_DIR/llama/7B-hf
# and repeat for 13B, 30B, 65B

import os
import shutil
import time
from pathlib import Path

import pytest
import torch
from einops import rearrange
from flash_attn.models.gpt import GPTLMHeadModel, combine_state_dicts_tp, shard_state_dict_tp
from flash_attn.models.llama import (
    config_from_checkpoint,
    inv_remap_state_dict_hf_llama,
    llama_config_to_gpt2_config,
    remap_state_dict_hf_llama,
    remap_state_dict_meta_llama,
    state_dicts_from_checkpoint,
)
from flash_attn.utils.distributed import all_gather_raw
from flash_attn.utils.generation import update_graph_cache
from flash_attn.utils.pretrained import state_dict_from_pretrained
from transformers import LlamaConfig, LlamaTokenizer
from transformers.models.llama.modeling_llama import LlamaForCausalLM

current_dir = Path(__file__).parent.absolute()


def _pretrained_state_dict_from_checkpoint(checkpoint_path, model_name, config, checkpoint_format):
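    """Load a LLaMA checkpoint and remap it to flash-attn's GPT naming scheme.

    "meta" checkpoints ship as one shard per model-parallel rank and are combined
    before remapping; "hf" checkpoints are read from the converted {model_name}-hf
    directory produced by the conversion script referenced at the top of this file.
    """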
    if checkpoint_format == "meta":
        ckpt_state_dicts = state_dicts_from_checkpoint(checkpoint_path, model_name)
        pretrained_state_dicts = [remap_state_dict_meta_llama(s, config) for s in ckpt_state_dicts]
        pretrained_state_dict = combine_state_dicts_tp(pretrained_state_dicts, config)
    else:
        pretrained_state_dict = state_dict_from_pretrained(
            Path(checkpoint_path) / f"{model_name}-hf"
        )
        pretrained_state_dict = remap_state_dict_hf_llama(pretrained_state_dict, config)
    return pretrained_state_dict


@pytest.mark.parametrize("model_name", ["7B"])
def test_llama_state_dict(model_name):
    checkpoint_path = (
        Path(os.environ.get("CHECKPOINT_DIR", current_dir.parent.parent / "checkpoints")) / "llama"
    )
    config = llama_config_to_gpt2_config(config_from_checkpoint(checkpoint_path, model_name))
    ckpt_state_dicts = state_dicts_from_checkpoint(checkpoint_path, model_name)
    pretrained_state_dict = remap_state_dict_meta_llama(ckpt_state_dicts[0], config)
    model = GPTLMHeadModel(config, device="meta")  # Without device='meta' init is very slow
    state_dict = model.state_dict()
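    # Compare keys and shapes only; numerical agreement is exercised by the forward-pass tests below.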
    assert state_dict.keys() == pretrained_state_dict.keys()
    for k in state_dict.keys():
        assert state_dict[k].shape == pretrained_state_dict[k].shape


@pytest.mark.parametrize("model_name", ["7B", "13B"])
@pytest.mark.parametrize("checkpoint_format", ["meta", "hf"])
def test_llama_optimized(model_name, checkpoint_format):
    """Check that our implementation of LLaMa (with all optimizations enabled) matches the
    HF implementation: the output of our forward pass in fp16 should be around the same as the HF
    forward pass in fp16, when compared to the HF forward pass in fp32.
    """
    checkpoint_path = (
        Path(os.environ.get("CHECKPOINT_DIR", current_dir.parent.parent / "checkpoints")) / "llama"
    )

    dtype = torch.float16
    device = "cuda"
    config = config_from_checkpoint(checkpoint_path, model_name, checkpoint_format)
    config = llama_config_to_gpt2_config(config)
    config.use_flash_attn = True
    config.fused_bias_fc = True
    config.fused_mlp = False  # We don't have fused GatedMLP yet
    config.fused_dropout_add_ln = True
    config.residual_in_fp32 = True

    pretrained_state_dict = _pretrained_state_dict_from_checkpoint(
        checkpoint_path, model_name, config, checkpoint_format
    )
    model = GPTLMHeadModel(config, device=device, dtype=dtype)
    model.load_state_dict(pretrained_state_dict)
    model.eval()

    torch.manual_seed(0)
    batch_size = 2
    max_seqlen = 256
    seqlens = torch.randint(max_seqlen // 2, max_seqlen + 1, (batch_size,), device=device)
    input_ids = torch.randint(
        0, config.vocab_size, (batch_size, max_seqlen), dtype=torch.long, device=device
    )
    with torch.no_grad():
        out = model.transformer(input_ids)
        logits = model(input_ids).logits
    del model

    # Without device_map, the model is loaded on the CPU, which is very slow
    # Need "auto" here since the 13B fp32 model doesn't fit in memory on an A100 40GB
    model_ref = LlamaForCausalLM.from_pretrained(
        Path(checkpoint_path) / f"{model_name}-hf", device_map="auto"
    )
    model_ref.eval()
    with torch.no_grad():
        out_ref = model_ref.model(input_ids).last_hidden_state.to(device=device)
        logits_ref = model_ref(input_ids).logits.to(device=device)
    del model_ref

    model_hf = LlamaForCausalLM.from_pretrained(
        Path(checkpoint_path) / f"{model_name}-hf", torch_dtype=dtype, device_map={"": device}
    )
    model_hf.eval()
    with torch.no_grad():
        out_hf = model_hf.model(input_ids).last_hidden_state
        logits_hf = model_hf(input_ids).logits
    del model_hf

    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    print(f"HF fp16 max diff: {(out_hf - out_ref).abs().max().item()}")
    print(f"HF fp16 mean diff: {(out_hf - out_ref).abs().mean().item()}")
    assert (out - out_ref).abs().max().item() < 3 * (out_hf - out_ref).abs().max().item()

    print(f"Logits max diff: {(logits - logits_ref).abs().max().item()}")
    print(f"Logits mean diff: {(logits - logits_ref).abs().mean().item()}")
    print(f"HF fp16 max diff: {(logits_hf - logits_ref).abs().max().item()}")
    print(f"HF fp16 mean diff: {(logits_hf - logits_ref).abs().mean().item()}")
    assert (logits - logits_ref).abs().max().item() < 3 * (
        logits_hf - logits_ref
    ).abs().max().item()


# torchrun --no_python --nproc_per_node=2 pytest -q -s tests/models/test_llama.py -k "parallel"
@pytest.mark.parametrize("world_size", [2])
@pytest.mark.parametrize("model_name", ["13B"])
@pytest.mark.parametrize("checkpoint_format", ["meta", "hf"])
def test_llama_parallel(model_name, world_size, checkpoint_format):
    """Check that our implementation of LLaMa (with all optimizations enabled) matches the
    HF implementation: the output of our forward pass in fp16 should be around the same as the HF
    forward pass in fp16, when compared to the HF forward pass in fp32.
    """
    from apex.transformer import parallel_state

    checkpoint_path = (
        Path(os.environ.get("CHECKPOINT_DIR", current_dir.parent.parent / "checkpoints")) / "llama"
    )

    dtype = torch.float16
    config = config_from_checkpoint(checkpoint_path, model_name, checkpoint_format)
    config = llama_config_to_gpt2_config(config)
    config.use_flash_attn = True
    config.fused_bias_fc = True
    config.fused_mlp = False  # We don't have fused GatedMLP yet
    config.fused_dropout_add_ln = True
    config.residual_in_fp32 = True

    if not torch.distributed.is_initialized():
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
    device = f"cuda:{torch.distributed.get_rank()}"
    assert world_size <= torch.distributed.get_world_size()
    parallel_state.initialize_model_parallel(tensor_model_parallel_size_=world_size)
    rank = parallel_state.get_tensor_model_parallel_rank()
    process_group = parallel_state.get_tensor_model_parallel_group()

    pretrained_state_dict = _pretrained_state_dict_from_checkpoint(
        checkpoint_path, model_name, config, checkpoint_format
    )
    model = GPTLMHeadModel(config, process_group=process_group, device=device, dtype=dtype)
    model.load_state_dict(shard_state_dict_tp(pretrained_state_dict, config, world_size, rank))
    model.eval()

    torch.manual_seed(0)
    batch_size = 2
    max_seqlen = 256
    seqlens = torch.randint(max_seqlen // 2, max_seqlen + 1, (batch_size,), device=device)
    input_ids = torch.randint(
        0, config.vocab_size, (batch_size, max_seqlen), dtype=torch.long, device=device
    )
    with torch.no_grad():
        out = model.transformer(input_ids)
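        # Hidden states come back sharded along the flattened (batch * seqlen) dimension
        # (sequence parallelism), so gather across ranks before reshaping.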
        out, _ = all_gather_raw(out, process_group=process_group)
        out = rearrange(out, "(b s) d -> b s d", b=batch_size)
        logits = model(input_ids).logits
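        # Logits are sharded along the vocab dimension; gather across ranks, then
        # interleave the shards back into the full vocab.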
        logits = rearrange(logits, "(b s) d -> b s d", b=batch_size)
        logits, _ = all_gather_raw(logits, process_group)
        logits = rearrange(logits, "(n b) ... d -> b ... (n d)", b=batch_size)
    del model

    if rank == 0:
        # Without device_map, the model is loaded on the CPU, which is very slow
        model_ref = LlamaForCausalLM.from_pretrained(
            Path(checkpoint_path) / f"{model_name}-hf", device_map="auto"
        )
        model_ref.eval()
        with torch.no_grad():
            out_ref = model_ref.model(input_ids).last_hidden_state.to(device=device)
            logits_ref = model_ref(input_ids).logits.to(device=device)
        del model_ref

        model_hf = LlamaForCausalLM.from_pretrained(
            Path(checkpoint_path) / f"{model_name}-hf", torch_dtype=dtype, device_map="auto"
        )
        model_hf.eval()
        with torch.no_grad():
            out_hf = model_hf.model(input_ids).last_hidden_state.to(device=device)
            logits_hf = model_hf(input_ids).logits.to(device=device)
        del model_hf

        print(f"Output max diff: {(out - out_ref).abs().max().item()}")
        print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
        print(f"HF fp16 max diff: {(out_hf - out_ref).abs().max().item()}")
        print(f"HF fp16 mean diff: {(out_hf - out_ref).abs().mean().item()}")
        assert (out - out_ref).abs().max().item() < 2 * (out_hf - out_ref).abs().max().item()

        print(f"Logits max diff: {(logits - logits_ref).abs().max().item()}")
        print(f"Logits mean diff: {(logits - logits_ref).abs().mean().item()}")
        print(f"HF fp16 max diff: {(logits_hf - logits_ref).abs().max().item()}")
        print(f"HF fp16 mean diff: {(logits_hf - logits_ref).abs().mean().item()}")
        assert (logits - logits_ref).abs().max().item() < 2 * (
            logits_hf - logits_ref
        ).abs().max().item()


# @pytest.mark.parametrize('model_name', ["7B", "13B"])
@pytest.mark.parametrize("model_name", ["7B"])
@pytest.mark.parametrize("checkpoint_format", ["meta", "hf"])
def test_llama_generation(model_name, checkpoint_format):
    checkpoint_path = (
        Path(os.environ.get("CHECKPOINT_DIR", current_dir.parent.parent / "checkpoints")) / "llama"
    )

    dtype = torch.float16
    device = "cuda"
    config = config_from_checkpoint(checkpoint_path, model_name, checkpoint_format)
    config = llama_config_to_gpt2_config(config)
    config.use_flash_attn = True
    config.fused_bias_fc = True
    config.fused_mlp = False  # We don't have fused GatedMLP yet
    config.fused_dropout_add_ln = True
    config.residual_in_fp32 = True

    tokenizer = LlamaTokenizer.from_pretrained(Path(checkpoint_path) / f"{model_name}-hf")
    eos_token_id = tokenizer.eos_token_id

    torch.manual_seed(0)
    batch_size = 1
    seqlen = 100
    max_length = 150
    input_ids = torch.randint(
        0, config.vocab_size, (batch_size, seqlen), dtype=torch.long, device=device
    )

    model_hf = LlamaForCausalLM.from_pretrained(
        Path(checkpoint_path) / f"{model_name}-hf", torch_dtype=dtype, device_map={"": device}
    )
    model_hf.eval()
    print("HF fp16")
    torch.cuda.synchronize()
    start = time.time()
    out_hf = model_hf.generate(
        input_ids=input_ids, max_length=max_length, return_dict_in_generate=True, output_scores=True
    )
    torch.cuda.synchronize()
    print(f"Prompt processing + decoding time: {(time.time() - start) * 1000:.0f}ms")
    del model_hf

    # Need "auto" here since the 13B fp32 model doesn't fit in memory on an A100 40GB
    model_ref = LlamaForCausalLM.from_pretrained(
        Path(checkpoint_path) / f"{model_name}-hf", device_map="auto"
    )
    model_ref.eval()
    with torch.no_grad():
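        # Logits at positions seqlen-1 .. max_length-2 score the generated tokens.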
        logits_ref = model_ref(out_hf.sequences).logits[:, (seqlen - 1) : -1].to(device=device)
    del model_ref

    pretrained_state_dict = _pretrained_state_dict_from_checkpoint(
        checkpoint_path, model_name, config, checkpoint_format
    )
    model = GPTLMHeadModel(config, device=device, dtype=dtype)
    model.load_state_dict(pretrained_state_dict)
    model.eval()

    print("Without CUDA graph")
    torch.cuda.synchronize()
    start = time.time()
    out = model.generate(
        input_ids=input_ids,
        max_length=max_length,
        eos_token_id=eos_token_id,
        fused_ft_kernel=True,
        return_dict_in_generate=True,
        output_scores=True,
        timing=True,
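        # Teacher forcing: decode along HF's generated tokens so scores line up per position.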
        teacher_outputs=out_hf.sequences,
    )
    torch.cuda.synchronize()
    print(f"Prompt processing + decoding time: {(time.time() - start) * 1000:.0f}ms")

    # Capture graph outside the timing loop
    batch_size, seqlen_og = input_ids.shape
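    # update_graph_cache captures CUDA graphs for the single-token decoding step, which
    # generate(..., cg=True) then replays instead of relaunching individual kernels.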
    model._decoding_cache = update_graph_cache(model, None, batch_size, seqlen_og, max_length)
    print("With CUDA graph")
    torch.cuda.synchronize()
    start = time.time()
    out_cg = model.generate(
        input_ids=input_ids,
        max_length=max_length,
        fused_ft_kernel=True,
        cg=True,
        return_dict_in_generate=True,
        output_scores=True,
        timing=True,
        teacher_outputs=out_hf.sequences,
    )
    torch.cuda.synchronize()
    print(f"Prompt processing + decoding time: {(time.time() - start) * 1000:.0f}ms")

    with torch.no_grad():
        logits_parallel = model(out_hf.sequences).logits[:, (seqlen - 1) : -1]
    logits_hf = torch.stack(out_hf.scores, dim=1)
    logits = torch.stack(out.scores, dim=1)
    logits_cg = torch.stack(out_cg.scores, dim=1)

    del model

    hf_error = (logits_hf - logits_ref).abs().max().item()

    print(f"HF fp16 logits max diff: {hf_error}")
    print(f"Logits max diff: {(logits - logits_ref).abs().max().item()}")
    print(f"Logits CG max diff: {(logits_cg - logits_ref).abs().max().item()}")

    assert (logits_parallel - logits_ref).abs().max().item() < 2 * hf_error
    assert (logits - logits_ref).abs().max().item() < 2 * hf_error
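    # CUDA-graph decoding must produce scores bitwise-identical to regular decoding.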
    assert torch.equal(logits_cg, logits)


# torchrun --no_python --nproc_per_node=2 pytest -q -s tests/models/test_llama.py -k "llama_parallel_generation"
@pytest.mark.parametrize("world_size", [2])
@pytest.mark.parametrize("model_name", ["13B"])
@pytest.mark.parametrize("checkpoint_format", ["meta", "hf"])
def test_llama_parallel_generation(model_name, world_size, checkpoint_format):
    """Check that our implementation matches the HF implementation:
    the scores in fp16 should be around the same as the HF scores in fp16, when compared to
    the HF scores in fp32.
    """
    from apex.transformer import parallel_state

    checkpoint_path = (
        Path(os.environ.get("CHECKPOINT_DIR", current_dir.parent.parent / "checkpoints")) / "llama"
    )

    dtype = torch.float16
    config = config_from_checkpoint(checkpoint_path, model_name, checkpoint_format)
    config = llama_config_to_gpt2_config(config)
    config.use_flash_attn = False
    config.fused_bias_fc = True
    config.fused_mlp = False  # We don't have fused GatedMLP yet
    config.fused_dropout_add_ln = False
    config.residual_in_fp32 = True
    config.pad_vocab_size_multiple = 8 * world_size
    config.sequence_parallel = False  # Need to set this to False for generation

    os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
    if not torch.distributed.is_initialized():
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
    device = f"cuda:{torch.distributed.get_rank()}"
    assert world_size <= torch.distributed.get_world_size()
    parallel_state.initialize_model_parallel(tensor_model_parallel_size_=world_size)
    rank = parallel_state.get_tensor_model_parallel_rank()
    process_group = parallel_state.get_tensor_model_parallel_group()

    torch.manual_seed(0)
    batch_size = 1
    seqlen = 100
    max_length = 150
    input_ids = torch.randint(
        0, config.vocab_size, (batch_size, seqlen), dtype=torch.long, device=device
    )

    # Need this, otherwise when we capture the graph the process for GPU 1 would run on both
    # GPU0 and GPU1 and things would hang
    torch.cuda.set_device(device)

    pretrained_state_dict = _pretrained_state_dict_from_checkpoint(
        checkpoint_path, model_name, config, checkpoint_format
    )
    model = GPTLMHeadModel(config, process_group=process_group, device=device, dtype=dtype)
    model.load_state_dict(shard_state_dict_tp(pretrained_state_dict, config, world_size, rank))
    model.eval()

    print("Without CUDA graph")
    out = model.generate(
        input_ids=input_ids,
        max_length=max_length,
        tensor_parallel=world_size,
        vocab_size=config.vocab_size,
        fused_ft_kernel=True,
        # teacher_outputs=out_hf.sequences,
        return_dict_in_generate=True,
        output_scores=True,
        timing=True,
    )

    # Capture graph outside the timing loop
    batch_size, seqlen_og = input_ids.shape
    model._decoding_cache = update_graph_cache(model, None, batch_size, seqlen_og, max_length)
    print("With CUDA graph")
    out_cg = model.generate(
        input_ids=input_ids,
        max_length=max_length,
        tensor_parallel=world_size,
        vocab_size=config.vocab_size,
        fused_ft_kernel=True,
        cg=True,
        # teacher_outputs=out_hf.sequences,
        return_dict_in_generate=True,
        output_scores=True,
        timing=True,
    )
    del model
    parallel_state.destroy_model_parallel()

    if rank == 0:
        # Without device_map, the model is loaded on the CPU, which is very slow
        model_hf = LlamaForCausalLM.from_pretrained(
            Path(checkpoint_path) / f"{model_name}-hf", torch_dtype=dtype, device_map="auto"
        )
        model_hf.eval()
        print("HF fp16")
        torch.cuda.synchronize()
        start = time.time()
        with torch.inference_mode():
            out_hf = model_hf.generate(
                input_ids=input_ids,
                max_length=max_length,
                return_dict_in_generate=True,
                output_scores=True,
            )
        torch.cuda.synchronize()
        print(f"Prompt processing + decoding time: {(time.time() - start) * 1000:.0f}ms")
        del model_hf

        model_ref = LlamaForCausalLM.from_pretrained(
            Path(checkpoint_path) / f"{model_name}-hf", device_map="auto"
        )
        model_ref.eval()
        with torch.inference_mode():
            logits_ref = model_ref(out_hf.sequences).logits[:, (seqlen - 1) : -1]
        del model_ref
        logits_hf = torch.stack(out_hf.scores, dim=1)

        logits = torch.stack(out.scores, dim=1)
        logits_cg = torch.stack(out_cg.scores, dim=1)

        hf_error = (logits_hf - logits_ref).abs().max().item()
        print(f"HF fp16 logits max diff: {hf_error}")
        print(f"Logits max diff: {(logits - logits_ref).abs().max().item()}")
        assert (logits - logits_ref).abs().max().item() < 2 * hf_error
        print(f"Logits CG max diff: {(logits_cg - logits_ref).abs().max().item()}")
        assert torch.equal(logits_cg, logits)


@torch.no_grad()
@pytest.mark.parametrize("world_size", [2])
def test_llama_parallel_uneven_num_heads(world_size):
    from apex.transformer import parallel_state

    checkpoint_path = (
        Path(os.environ.get("CHECKPOINT_DIR", current_dir.parent.parent / "checkpoints")) / "llama"
    )
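    # Use one more head than ranks so the attention heads cannot be split evenly across GPUs.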
    num_attention_heads = world_size + 1
    model_name = f"teeny-{num_attention_heads}-heads"

    if not torch.distributed.is_initialized():
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
    device = f"cuda:{torch.distributed.get_rank()}"
    assert world_size <= torch.distributed.get_world_size()
    parallel_state.initialize_model_parallel(tensor_model_parallel_size_=world_size)
    rank = parallel_state.get_tensor_model_parallel_rank()
    process_group = parallel_state.get_tensor_model_parallel_group()

    dtype = torch.float16
    llama_config = LlamaConfig(
        # ParallelGatedMlp hidden_features must be divisible by 256.
        hidden_size=256 * num_attention_heads,
        intermediate_size=256 * num_attention_heads * 4,
        num_hidden_layers=4,
        num_attention_heads=num_attention_heads,
        initializer_range=0.5,  # Large init range so weights aren't near zero, which would make the test vacuous.
    )
    config = llama_config_to_gpt2_config(llama_config)
    config.use_flash_attn = True
    config.fused_bias_fc = True
    config.fused_mlp = False  # We don't have fused GatedMLP yet
    config.fused_dropout_add_ln = True
    config.residual_in_fp32 = True

    torch.manual_seed(0)
    batch_size = 2
    max_seqlen = 256
    seqlens = torch.randint(max_seqlen // 2, max_seqlen + 1, (batch_size,), device=device)
    input_ids = torch.randint(
        0, config.vocab_size, (batch_size, max_seqlen), dtype=torch.long, device=device
    )

    # Create a shared test model.
    if rank == 0:
        LlamaForCausalLM(config=llama_config).save_pretrained(checkpoint_path / f"{model_name}-hf")
    torch.distributed.barrier()

    # Run the standard forward pass test.
    pretrained_state_dict = _pretrained_state_dict_from_checkpoint(
        checkpoint_path, model_name, config, checkpoint_format="hf"
    )
    model = GPTLMHeadModel(config, process_group=process_group, device=device, dtype=dtype)
    model.load_state_dict(shard_state_dict_tp(pretrained_state_dict, config, world_size, rank))
    model.eval()

    # TODO: Avoid duplicate code. Modularize the comparison of two forward pass diffs.
    out = model.transformer(input_ids)
    out, _ = all_gather_raw(out, process_group=process_group)
    out = rearrange(out, "(b s) d -> b s d", b=batch_size)
    logits = model(input_ids).logits
    logits = rearrange(logits, "(b s) d -> b s d", b=batch_size)
    logits, _ = all_gather_raw(logits, process_group)
    logits = rearrange(logits, "(n b) ... d -> b ... (n d)", b=batch_size)

    if rank == 0:
        model_ref = LlamaForCausalLM.from_pretrained(
            Path(checkpoint_path) / f"{model_name}-hf", device_map="auto"
        )
        model_ref.eval()
        out_ref = model_ref.model(input_ids).last_hidden_state.to(device=device)
        logits_ref = model_ref(input_ids).logits.to(device=device)
        del model_ref

        model_hf = LlamaForCausalLM.from_pretrained(
            Path(checkpoint_path) / f"{model_name}-hf", torch_dtype=dtype, device_map="auto"
        )
        model_hf.eval()
        out_hf = model_hf.model(input_ids).last_hidden_state.to(device=device)
        logits_hf = model_hf(input_ids).logits.to(device=device)
        del model_hf

        print(f"Output max diff: {(out - out_ref).abs().max().item()}")
        print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
        print(f"HF fp16 max diff: {(out_hf - out_ref).abs().max().item()}")
        print(f"HF fp16 mean diff: {(out_hf - out_ref).abs().mean().item()}")
        assert (out - out_ref).abs().max().item() < 2 * (out_hf - out_ref).abs().max().item()

        print(f"Logits max diff: {(logits - logits_ref).abs().max().item()}")
        print(f"Logits mean diff: {(logits - logits_ref).abs().mean().item()}")
        print(f"HF fp16 max diff: {(logits_hf - logits_ref).abs().max().item()}")
        print(f"HF fp16 mean diff: {(logits_hf - logits_ref).abs().mean().item()}")
        assert (logits - logits_ref).abs().max().item() < 2 * (
            logits_hf - logits_ref
        ).abs().max().item()

        if os.path.exists(checkpoint_path / f"{model_name}-hf"):
            shutil.rmtree(checkpoint_path / f"{model_name}-hf")


@torch.no_grad()
def test_inv_remap_state_dict_hf_llama():
    checkpoint_path = (
        Path(os.environ.get("CHECKPOINT_DIR", current_dir.parent.parent / "checkpoints")) / "llama"
    )
    model_name = "teeny"

    llama_config = LlamaConfig(
        num_attention_heads=2,
        hidden_size=256 * 2,
        intermediate_size=256 * 2 * 4,
        num_hidden_layers=4,
    )
    config = llama_config_to_gpt2_config(llama_config)
    config.use_flash_attn = True
    config.fused_bias_fc = True
    config.fused_mlp = False  # We don't have fused GatedMLP yet
    config.fused_dropout_add_ln = True
    config.residual_in_fp32 = True

    # Set up.
    LlamaForCausalLM(config=llama_config).save_pretrained(checkpoint_path / f"{model_name}-hf")

    # inv_remap_state_dict_hf_llama should be the inverse of remap_state_dict_hf_llama
    state_dict = state_dict_from_pretrained(checkpoint_path / f"{model_name}-hf")
    state_dict = {key: val for key, val in state_dict.items() if "rotary_emb.inv_freq" not in key}
    pretrained_state_dict = remap_state_dict_hf_llama(state_dict, config)
    state_dict_recover = inv_remap_state_dict_hf_llama(pretrained_state_dict, config)

    assert set(state_dict_recover.keys()) == set(state_dict.keys())

    for key in state_dict_recover.keys():
        torch.testing.assert_close(state_dict_recover[key], state_dict[key])

    # Tear down.
    if os.path.exists(checkpoint_path / f"{model_name}-hf"):
        shutil.rmtree(checkpoint_path / f"{model_name}-hf")