gpt2.py
import transformers
import torch
from lm_eval.base import BaseLM


class HFLM(BaseLM):

    def __init__(self, device='cuda', pretrained='gpt2', revision='main', subfolder=None, tokenizer=None, batch_size=1):
        super().__init__()

        assert isinstance(device, str)
        assert isinstance(pretrained, str)
        assert isinstance(batch_size, int)

        if device:
            if device not in ["cuda", "cpu"]:
                device = int(device)
            self._device = torch.device(device)
            print(f"Using device '{device}'")
        else:
            print("Device not specificed")
            print(f"Cuda Available? {torch.cuda.is_available()}")
            self._device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

        # TODO: update this to be less of a hack once subfolder is fixed in HF
        self.gpt2 = transformers.AutoModelForCausalLM.from_pretrained(
            pretrained, revision=revision + ("/" + subfolder if subfolder is not None else "")
        ).to(self.device)
        self.gpt2.eval()

        # pretrained tokenizer for neo is broken for now so just hard-coding this to gpt2
        self.tokenizer = transformers.AutoTokenizer.from_pretrained(
            pretrained if tokenizer is None else tokenizer, revision=revision, subfolder=subfolder)

        assert isinstance(self.tokenizer, (
            transformers.GPT2Tokenizer, transformers.GPT2TokenizerFast,
            transformers.T5Tokenizer, transformers.T5TokenizerFast,
        )), "this tokenizer has not been checked for compatibility yet!"

        self.vocab_size = self.tokenizer.vocab_size

        if isinstance(self.tokenizer, (transformers.GPT2Tokenizer, transformers.GPT2TokenizerFast)):
            assert self.tokenizer.encode('hello\n\nhello') == [31373, 198, 198, 31373], \
                self.tokenizer.encode('hello\n\nhello')

        # multithreading and batching
        self.batch_size_per_gpu = batch_size  # todo: adaptive batch size

        # TODO: fix multi-gpu
        # gpus = torch.cuda.device_count()
        # if gpus > 1:
        #     self.gpt2 = nn.DataParallel(self.gpt2)

    @property
    def eot_token_id(self):
        # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
        return self.tokenizer.eos_token_id

    @property
    def max_length(self):
        try:
            return self.gpt2.config.n_ctx
        except AttributeError:
            # gptneoconfig doesn't have n_ctx apparently
            return self.gpt2.config.max_position_embeddings

    @property
    def max_gen_toks(self):
        return 256

    @property
    def batch_size(self):
        # TODO: fix multi-gpu
        return self.batch_size_per_gpu  # * gpus

    @property
    def device(self):
        # TODO: fix multi-gpu
        return self._device

    def tok_encode(self, string: str):
        return self.tokenizer.encode(string, add_special_tokens=False)
    
    def tok_decode(self, tokens):
        return self.tokenizer.decode(tokens)

    def _model_call(self, inps):
        """
        inps: a torch tensor of shape [batch, sequence]
        the size of sequence may vary from call to call

        returns: a torch tensor of shape [batch, sequence, vocab] with the
        logits returned from the model
        """
        with torch.no_grad():
            return self.gpt2(inps)[0][:, :, :50257]
    
    def _model_generate(self, context, max_length, eos_token_id):
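        # greedy decoding: do_sample=False makes generate() take the argmax token at
        # each step, stopping at eos_token_id or when max_length is reached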
        return self.gpt2.generate(
            context,
            max_length=max_length,
            eos_token_id=eos_token_id,
            do_sample=False
        )


# for backwards compatibility
GPT2LM = HFLM
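

# --- Usage sketch (illustrative only, not part of the harness API) ---
# A minimal example of driving HFLM directly, assuming the default 'gpt2' checkpoint
# can be fetched from the Hugging Face hub and that CPU execution is acceptable.
# The prompt text and generation length are arbitrary.
if __name__ == "__main__":
    lm = HFLM(device="cpu", pretrained="gpt2", batch_size=1)

    # Encode a prompt and build a [batch, sequence] tensor on the model's device.
    tokens = lm.tok_encode("The quick brown fox")
    context = torch.tensor([tokens], device=lm.device)

    # Per-token logits over the GPT-2 vocabulary: shape [1, len(tokens), 50257].
    logits = lm._model_call(context)
    print(logits.shape)

    # Greedy continuation of up to 8 new tokens.
    generated = lm._model_generate(
        context,
        max_length=len(tokens) + 8,
        eos_token_id=lm.eot_token_id,
    )
    print(lm.tok_decode(generated[0].tolist()))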