# coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""
import os
import re
import json
import logging

from .file_utils import cached_path

logger = logging.getLogger(__name__)

PRETRAINED_VOCAB_ARCHIVE_MAP = {
    'openai-gpt': "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-vocab.json",
}
PRETRAINED_MERGES_ARCHIVE_MAP = {
    'openai-gpt': "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-merges.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
    'openai-gpt': 512,
}
VOCAB_NAME = 'vocab.json'
MERGES_NAME = 'merges.txt'

def get_pairs(word):
    """
    Return set of symbol pairs in a word.
    word is represented as tuple of symbols (symbols being variable-length strings)
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
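
# Example (illustrative): get_pairs(('h', 'e', 'l', 'l', 'o</w>')) returns
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')}.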

def text_standardize(text):
    """
    fixes some issues the spacy tokenizer had on books corpus
    also does some whitespace standardization
    """
    text = text.replace('—', '-')
    text = text.replace('–', '-')
    text = text.replace('―', '-')
    text = text.replace('…', '...')
    text = text.replace('´', "'")
    text = re.sub(r'''(-+|~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|\/+|\*+|\[+|\]+|}+|{+|\|+|_+)''', r' \1 ', text)
    text = re.sub(r'\s*\n\s*', ' \n ', text)
    text = re.sub(r'[^\S\n]+', ' ', text)
    return text.strip()
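
# Example (illustrative): text_standardize('It was — fine…') returns
# 'It was - fine...' (dashes and ellipses normalized, whitespace collapsed).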

class OpenAIGPTTokenizer(object):
    """
    mostly a wrapper for a public python bpe tokenizer
    """
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
        """
        Instantiate a PreTrainedBertModel from a pre-trained model file.
        Download and cache the pre-trained model file if needed.
        """
        if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
            vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
            merges_file = PRETRAINED_MERGES_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME)
            merges_file = os.path.join(pretrained_model_name_or_path, MERGES_NAME)
        # redirect to the cache, if necessary
        try:
            resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
            resolved_merges_file = cached_path(merges_file, cache_dir=cache_dir)
        except FileNotFoundError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find files {} and {} "
                "at this path or url.".format(
                    pretrained_model_name_or_path,
                    ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
                    pretrained_model_name_or_path,
                    vocab_file, merges_file))
            return None
        if resolved_vocab_file == vocab_file and resolved_merges_file == merges_file:
            logger.info("loading vocabulary file {}".format(vocab_file))
            logger.info("loading merges file {}".format(merges_file))
        else:
            logger.info("loading vocabulary file {} from cache at {}".format(
                vocab_file, resolved_vocab_file))
            logger.info("loading merges file {} from cache at {}".format(
                merges_file, resolved_merges_file))
        if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
            # if we're using a pretrained model, ensure the tokenizer won't index sequences longer
            # than the number of positional embeddings
            max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
            kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
        # Instantiate tokenizer.
        tokenizer = cls(resolved_vocab_file, resolved_merges_file, *inputs, **kwargs)
        return tokenizer

    def __init__(self, vocab_file, merges_file, special_tokens=None, max_len=None):
        try:
            import ftfy
            import spacy
        except ImportError:
            raise ImportError("Please install ftfy and spacy to use OpenAI GPT tokenizer.")

        self.max_len = max_len if max_len is not None else int(1e12)
        self.nlp = spacy.load('en', disable=['parser', 'tagger', 'ner', 'textcat'])
        self.fix_text = ftfy.fix_text
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        # skip the header on the first line and the trailing empty string
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
        self.set_special_tokens(special_tokens)

    def __len__(self):
        return len(self.encoder) + len(self.special_tokens)

    def set_special_tokens(self, special_tokens):
        """ Add a list of additional tokens to the encoder.
            The additional tokens are indexed starting from the last index of the
            current vocabulary in the order of the `special_tokens` list.
        """
        if not special_tokens:
            self.special_tokens = {}
            self.special_tokens_decoder = {}
            return
        self.special_tokens = dict((tok, len(self.encoder) + i) for i, tok in enumerate(special_tokens))
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens.items()}
        logger.info("Special tokens {}".format(self.special_tokens))

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        # Represent the token as a tuple of symbols, with an end-of-word marker
        # appended to the last character.
        word = tuple(token[:-1]) + (token[-1] + '</w>',)
        pairs = get_pairs(word)

        if not pairs:
            return token + '</w>'

        while True:
            # Always merge the lowest-ranked pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:
                    # `first` does not occur again: copy the rest and stop.
                    new_word.extend(word[i:])
                    break

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            pairs = get_pairs(word)
        word = ' '.join(word)
        if word == '\n  </w>':
            word = '\n</w>'
        self.cache[token] = word
        return word
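
    # Example (illustrative): if none of a token's symbol pairs appear in the
    # merges file, no merges apply and the characters come back space-separated,
    # e.g. bpe('dog') == 'd o g</w>'.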

    def tokenize(self, text):
        """Tokenize a string into BPE sub-word tokens."""
        split_tokens = []
        text = self.nlp(text_standardize(self.fix_text(text)))
        for token in text:
            split_tokens.extend(self.bpe(token.text.lower()).split(' '))
        return split_tokens

    def convert_tokens_to_ids(self, tokens):
        """Converts a sequence of tokens into ids using the vocab."""
        ids = []
        if isinstance(tokens, str):
            if tokens in self.special_tokens:
                return self.special_tokens[tokens]
            else:
                return self.encoder.get(tokens, 0)
        for token in tokens:
            if token in self.special_tokens:
                ids.append(self.special_tokens[token])
            else:
                ids.append(self.encoder.get(token, 0))
        if len(ids) > self.max_len:
            raise ValueError(
                "Token indices sequence length is longer than the specified maximum "
                "sequence length for this OpenAI GPT model ({} > {}). Running this "
                "sequence through the model will result in indexing errors".format(len(ids), self.max_len)
            )
        return ids
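
    # Note: tokens absent from both the special-token map and the base vocabulary
    # fall back to id 0 (the `.get(token, 0)` default above).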

    def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
        """Converts a sequence of ids in BPE tokens using the vocab."""
        tokens = []
        for i in ids:
            if i in self.special_tokens_decoder:
                if not skip_special_tokens:
                    tokens.append(self.special_tokens_decoder[i])
            else:
                tokens.append(self.decoder[i])
        return tokens

    def decode(self, ids, skip_special_tokens=False):
        """Converts a sequence of ids in a string."""
        tokens = self.convert_ids_to_tokens(ids, skip_special_tokens=skip_special_tokens)
        out_string = ''.join(tokens).replace('</w>', ' ')
        return out_string
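
# Example usage (a minimal sketch; assumes network access to download the
# `openai-gpt` vocabulary and merges files, plus ftfy and spacy installed):
#
#   tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
#   tokens = tokenizer.tokenize("Here's a sentence to encode.")
#   ids = tokenizer.convert_tokens_to_ids(tokens)
#   text = tokenizer.decode(ids)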