# coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Megatron tokenizers."""

from abc import ABC
from abc import abstractmethod

from .bert_tokenization import FullTokenizer as FullBertTokenizer
from .gpt2_tokenization import GPT2Tokenizer


def build_tokenizer(args):
    """Initialize tokenizer."""
    if args.rank == 0:
        print('> building {} tokenizer ...'.format(args.tokenizer_type),
              flush=True)

    # Select and instantiate the tokenizer.
    assert args.vocab_file is not None
    if args.tokenizer_type == 'BertWordPieceLowerCase':
        tokenizer = _BertWordPieceTokenizer(vocab_file=args.vocab_file,
                                            lower_case=True)
    elif args.tokenizer_type == 'GPT2BPETokenizer':
        assert args.merge_file is not None
        tokenizer = _GPT2BPETokenizer(args.vocab_file, args.merge_file)
    else:
        raise NotImplementedError('{} tokenizer is not '
                                  'implemented.'.format(args.tokenizer_type))

    # Add vocab size.
    args.padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size,
                                                      args)

    return tokenizer
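# A minimal usage sketch (assumption: the Namespace below stands in for
# Megatron's parsed command-line arguments and lists only the fields this
# module actually reads; the file paths are illustrative):
#
#     from argparse import Namespace
#     args = Namespace(rank=0, tokenizer_type='GPT2BPETokenizer',
#                      vocab_file='gpt2-vocab.json',
#                      merge_file='gpt2-merges.txt',
#                      make_vocab_size_divisible_by=128,
#                      model_parallel_size=1)
#     tokenizer = build_tokenizer(args)
#     token_ids = tokenizer.tokenize('Hello world!')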


def _vocab_size_with_padding(orig_vocab_size, args):
    """Pad vocab size so it is divisible by model parallel size and
    still having GPU friendly size."""

    after = orig_vocab_size
    multiple = args.make_vocab_size_divisible_by * \
               args.model_parallel_size
    while (after % multiple) != 0:
        after += 1
    if args.rank == 0:
        print(' > padded vocab (size: {}) with {} dummy tokens '
              '(new size: {})'.format(
                  orig_vocab_size, after - orig_vocab_size, after), flush=True)
    return after
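# Worked example of the padding arithmetic above (sizes are illustrative):
# with orig_vocab_size=50257 (the GPT-2 vocab), make_vocab_size_divisible_by=128
# and model_parallel_size=8, the multiple is 128 * 8 = 1024, so the vocab is
# padded up to 51200, i.e. 943 dummy tokens are added.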


class AbstractTokenizer(ABC):
    """Abstract class for tokenizer."""

    def __init__(self, name):
        self.name = name
        super().__init__()

    @property
    @abstractmethod
    def vocab_size(self):
        pass

    @property
    @abstractmethod
    def vocab(self):
        """Dictionary from vocab text token to id token."""
        pass

    @property
    @abstractmethod
    def inv_vocab(self):
        """Dictionary from vocab id token to text token."""
        pass

    @abstractmethod
    def tokenize(self, text):
        pass

    @property
    def cls(self):
        raise NotImplementedError('CLS is not provided for {} '
                                  'tokenizer'.format(self.name))

    @property
    def sep(self):
        raise NotImplementedError('SEP is not provided for {} '
                                  'tokenizer'.format(self.name))

    @property
    def pad(self):
        raise NotImplementedError('PAD is not provided for {} '
                                  'tokenizer'.format(self.name))

    @property
    def eod(self):
        raise NotImplementedError('EOD is not provided for {} '
                                  'tokenizer'.format(self.name))

    @property
    def mask(self):
        raise NotImplementedError('MASK is not provided for {} '
                                  'tokenizer'.format(self.name))
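# Note: a concrete subclass must implement vocab_size, vocab, inv_vocab and
# tokenize; the special-token properties (cls, sep, pad, eod, mask) only need
# overriding when the underlying vocabulary actually defines those tokens, as
# the two implementations below illustrate.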


class _BertWordPieceTokenizer(AbstractTokenizer):
    """Original BERT wordpiece tokenizer."""

    def __init__(self, vocab_file, lower_case=True):
        if lower_case:
            name = 'BERT Lower Case'
        else:
            name = 'BERT Upper Case'
        super().__init__(name)
        self.tokenizer = FullBertTokenizer(vocab_file, do_lower_case=lower_case)
        self.cls_id = self.tokenizer.vocab['[CLS]']
        self.sep_id = self.tokenizer.vocab['[SEP]']
        self.pad_id = self.tokenizer.vocab['[PAD]']
        self.mask_id = self.tokenizer.vocab['[MASK]']

    @property
    def vocab_size(self):
        return self.tokenizer.vocab_size()

    @property
    def vocab(self):
        return self.tokenizer.vocab

    @property
    def inv_vocab(self):
        return self.tokenizer.inv_vocab

    def tokenize(self, text):
        text_tokens = self.tokenizer.tokenize(text)
        return self.tokenizer.convert_tokens_to_ids(text_tokens)

    @property
    def cls(self):
        return self.cls_id

    @property
    def sep(self):
        return self.sep_id

    @property
    def pad(self):
        return self.pad_id

    @property
    def mask(self):
        return self.mask_id
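# A minimal sketch of building a BERT-style input sequence with this tokenizer
# (the vocab path and input text are hypothetical):
#
#     tokenizer = _BertWordPieceTokenizer('bert-vocab.txt', lower_case=True)
#     ids = [tokenizer.cls] + tokenizer.tokenize('Hello world!') + [tokenizer.sep]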

class _GPT2BPETokenizer(AbstractTokenizer):
    """Original GPT2 BPE tokenizer."""

    def __init__(self, vocab_file, merge_file):
        name = 'GPT2 BPE'
        super().__init__(name)

        self.tokenizer = GPT2Tokenizer(vocab_file, merge_file, errors='replace',
                                       special_tokens=[], max_len=None)
        self.eod_id = self.tokenizer.encoder['<|endoftext|>']

    @property
    def vocab_size(self):
        return len(self.tokenizer.encoder)

    @property
    def vocab(self):
        return self.tokenizer.encoder

    @property
    def inv_vocab(self):
        return self.tokenizer.decoder

    def tokenize(self, text):
        return self.tokenizer.encode(text)

    @property
    def eod(self):
        return self.eod_id
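# A minimal sketch of encoding a document for GPT-2 style training, with the
# end-of-document id appended (the vocab/merge paths are the conventional
# GPT-2 file names, given here as an assumption):
#
#     tokenizer = _GPT2BPETokenizer('gpt2-vocab.json', 'gpt2-merges.txt')
#     ids = tokenizer.tokenize('Hello world!') + [tokenizer.eod]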