import os
import re

import torch
import pytest

from einops import rearrange

from transformers import GPT2Config, GPT2Tokenizer
from transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel as GPT2LMHeadModelHF

from flash_attn.models.gpt import GPTLMHeadModel
from flash_attn.models.gpt import remap_state_dict_gpt2
from flash_attn.utils.pretrained import state_dict_from_pretrained
from flash_attn.utils.distributed import all_gather_raw


@pytest.mark.parametrize('fused_ft_kernel', [False, True])
# @pytest.mark.parametrize('fused_ft_kernel', [True])
@pytest.mark.parametrize('optimized', [False, True])
# @pytest.mark.parametrize('optimized', [False])
@pytest.mark.parametrize('rotary', [False, True])
# @pytest.mark.parametrize('rotary', [False])
@pytest.mark.parametrize('model_name', ["gpt2"])
def test_greedy_decode(model_name, rotary, optimized, fused_ft_kernel):
    """Check that our implementation of GPT2 generation matches the HF implementation:
    compared against the HF scores in fp32, our scores in fp16 should be about as close as
    the HF scores in fp16 are.
    """
    dtype = torch.float16
    device = 'cuda'
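    # Tolerances for comparing the fast decoding path against the step-by-step fp16 reference.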
    rtol, atol = 3e-3, 3e-1
    config = GPT2Config.from_pretrained(model_name)
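    # With rotary, drop the learned position embeddings (n_positions = 0) and use a 64-dim rotary embedding instead.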
    if rotary:
        config.n_positions = 0
        config.rotary_emb_dim = 64
    config.residual_in_fp32 = True
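    # The optimized path enables FlashAttention plus the fused bias-GEMM, fused MLP,
    # and fused dropout + add + LayerNorm kernels.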
    if optimized:
        config.use_flash_attn = True
        config.fused_bias_fc = True
        config.fused_mlp = True
        config.fused_dropout_add_ln = True

    # If rotary, we still load the HF weights but ignore the learned position embeddings
    # (strict=False). The resulting model is nonsense, but that doesn't matter for this test.
    model = GPTLMHeadModel.from_pretrained(model_name, config, strict=not rotary, device=device,
                                           dtype=dtype)
    model.eval()

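    # HF reference models: fp32 as ground truth, fp16 to gauge the expected numerical error.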
    if not rotary:
        model_ref = GPT2LMHeadModelHF.from_pretrained(model_name).to(device=device)
        model_hf = GPT2LMHeadModelHF.from_pretrained(model_name).to(device=device, dtype=dtype)
        model_ref.eval()
        model_hf.eval()

    torch.manual_seed(0)
    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    input_ids = tokenizer("Hello, my dog is cute and ",
                          return_tensors="pt").input_ids.to(device=device)
    max_length = 30
    # input_ids = torch.randint(0, 100, (2, 10), dtype=torch.long, device='cuda')
    # max_length = input_ids.shape[1] + 40

    # Slow generation for reference
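    # At each step, rerun the full forward pass on the growing sequence and greedily take the argmax token.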
    sequences = []
    scores = []
    cur_input_ids = input_ids
    with torch.inference_mode():
        scores.append(model(cur_input_ids).logits[:, -1])
        sequences.append(scores[-1].argmax(dim=-1))
        for _ in range(input_ids.shape[1] + 1, max_length):
            cur_input_ids = torch.cat([cur_input_ids, rearrange(sequences[-1], 'b -> b 1')], dim=-1)
            scores.append(model(cur_input_ids).logits[:, -1])
            sequences.append(scores[-1].argmax(dim=-1))
    sequences = torch.cat([input_ids, torch.stack(sequences, dim=1)], dim=1)
    scores = tuple(scores)

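    # Fast generation with a KV cache via model.generate; fused_ft_kernel enables the fused decoding attention kernel.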
    out = model.generate(input_ids=input_ids, max_length=max_length,
                         fused_ft_kernel=fused_ft_kernel,
                         return_dict_in_generate=True, output_scores=True, timing=True)
    print(out.sequences)
    print(tokenizer.batch_decode(out.sequences.tolist()))
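    # With the fused kernel, also exercise CUDA-graph capture of the decoding step (cg=True).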
    if fused_ft_kernel:
        out_cg = model.generate(input_ids=input_ids, max_length=max_length,
                                fused_ft_kernel=fused_ft_kernel, cg=True,
                                return_dict_in_generate=True, output_scores=True, timing=True)
        print(out_cg.sequences)

    if not rotary:
        out_hf = model_hf.generate(input_ids=input_ids, max_length=max_length,
                                   return_dict_in_generate=True, output_scores=True)
        out_ref = model_ref.generate(input_ids=input_ids, max_length=max_length,
                                     return_dict_in_generate=True, output_scores=True)

        print(f'Scores max diff: {(torch.stack(out.scores, 1) - torch.stack(out_ref.scores, 1)).abs().max().item()}')
        print(f'Scores mean diff: {(torch.stack(out.scores, 1) - torch.stack(out_ref.scores, 1)).abs().mean().item()}')
        print(f'HF fp16 max diff: {(torch.stack(out_hf.scores, 1) - torch.stack(out_ref.scores, 1)).abs().max().item()}')
        print(f'HF fp16 mean diff: {(torch.stack(out_hf.scores, 1) - torch.stack(out_ref.scores, 1)).abs().mean().item()}')
        print(tokenizer.batch_decode(out_ref.sequences.tolist()))

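    # The fast decoding path must reproduce the reference tokens exactly and match its scores within tolerance.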
    assert torch.all(out.sequences == sequences)
    assert torch.allclose(torch.stack(out.scores, dim=1), torch.stack(scores, dim=1),
                          rtol=rtol, atol=atol)
    if not rotary:
        assert torch.all(out.sequences == out_ref.sequences)
        assert torch.all(out.sequences == out_hf.sequences)

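        # Our fp16 scores should be at most 3x further from the fp32 HF scores than HF's own fp16 scores are.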
        assert (torch.stack(out.scores, 1) - torch.stack(out_ref.scores, 1)).abs().max().item() < 3 * (torch.stack(out_hf.scores, 1) - torch.stack(out_ref.scores, 1)).abs().max().item()