# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.

"""Megatron tokenizers."""

import base64
import json
import math
import types
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional

from megatron.core.datasets.megatron_tokenizer import MegatronTokenizer

from .bert_tokenization import FullTokenizer as FullBertTokenizer
from .gpt2_tokenization import GPT2Tokenizer


def build_tokenizer(args, **kwargs):
    """Initialize tokenizer."""
    if args.rank == 0:
        print('> building {} tokenizer ...'.format(args.tokenizer_type),
              flush=True)

    # Select and instantiate the tokenizer.
    if args.tokenizer_type == 'BertWordPieceLowerCase':
        assert args.vocab_file is not None
        tokenizer = _BertWordPieceTokenizer(vocab_file=args.vocab_file,
                                            lower_case=True,
                                            vocab_extra_ids=args.vocab_extra_ids)
    elif args.tokenizer_type == 'BertWordPieceCase':
        assert args.vocab_file is not None
        tokenizer = _BertWordPieceTokenizer(vocab_file=args.vocab_file,
                                            lower_case=False,
                                            vocab_extra_ids=args.vocab_extra_ids)
    elif args.tokenizer_type == 'GPT2BPETokenizer':
        assert args.vocab_file is not None
        assert args.merge_file is not None
        tokenizer = _GPT2BPETokenizer(args.vocab_file, args.merge_file)
    elif args.tokenizer_type == 'SentencePieceTokenizer':
        assert args.tokenizer_model is not None
        tokenizer = _SentencePieceTokenizer(args.tokenizer_model, vocab_extra_ids=args.vocab_extra_ids)
    elif args.tokenizer_type == 'GPTSentencePieceTokenizer':
        assert args.tokenizer_model is not None
        tokenizer = _GPTSentencePieceTokenizer(args.tokenizer_model)
    elif args.tokenizer_type == 'HuggingFaceTokenizer':
        tokenizer = _HuggingFaceTokenizer(args.tokenizer_model, **kwargs)
    elif args.tokenizer_type == 'Llama2Tokenizer':
        assert args.tokenizer_model is not None
        tokenizer = _Llama2Tokenizer(args.tokenizer_model)
    elif args.tokenizer_type == 'TikTokenizer':
        assert args.tokenizer_model is not None
        assert args.tiktoken_pattern is not None
        assert args.tiktoken_pattern in {"v1", "v2"}
        pattern = PATTERN_TIKTOKEN if args.tiktoken_pattern == "v1" else PATTERN_TIKTOKEN_V2
        tokenizer = CustomTikTokenizer(
            path=args.tokenizer_model,
            pattern=pattern,
            vocab_size=args.vocab_size,
            num_special_tokens=args.tiktoken_num_special_tokens,
            special_tokens=args.tiktoken_special_tokens,
        )
    elif args.tokenizer_type == 'NullTokenizer':
        assert args.vocab_size is not None
        tokenizer = _NullTokenizer(args.vocab_size)
    else:
        raise NotImplementedError('{} tokenizer is not '
                                  'implemented.'.format(args.tokenizer_type))

    # Add vocab size (if not already set from a checkpoint).
    if getattr(args, "padded_vocab_size", None) is None:
        args.padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size,
                                                          args)

    return tokenizer
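
# Example (illustrative only; the vocab/merge paths are placeholders): building a
# GPT-2 BPE tokenizer from argparse-style args, mirroring Megatron's CLI flags.
#
#   from types import SimpleNamespace
#   args = SimpleNamespace(
#       rank=0,
#       tokenizer_type='GPT2BPETokenizer',
#       vocab_file='gpt2-vocab.json',
#       merge_file='gpt2-merges.txt',
#       make_vocab_size_divisible_by=128,
#       tensor_model_parallel_size=1,
#   )
#   tokenizer = build_tokenizer(args)  # also sets args.padded_vocab_size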


def _vocab_size_with_padding(orig_vocab_size, args, logging_enabled=True):
    """Pad vocab size so it is divisible by model parallel size and
    still having GPU friendly size."""

    after = orig_vocab_size
    multiple = args.make_vocab_size_divisible_by * \
        args.tensor_model_parallel_size
    after = int(math.ceil(after / multiple) * multiple)
    if args.rank == 0 and logging_enabled:
        print(' > padded vocab (size: {}) with {} dummy tokens '
              '(new size: {})'.format(
                  orig_vocab_size, after - orig_vocab_size, after), flush=True)
    return after
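
# Worked example: with make_vocab_size_divisible_by=128 and
# tensor_model_parallel_size=1, the GPT-2 vocab of 50257 tokens is padded to
# ceil(50257 / 128) * 128 = 50304, i.e. 47 dummy tokens are added.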


class _HuggingFaceTokenizer(MegatronTokenizer):
    def __init__(self, pretrained_model_name_or_path, **kwargs):
        super().__init__(pretrained_model_name_or_path, **kwargs)
        try:
            import transformers
        except ImportError:
            raise EnvironmentError(f"The transformers library must be installed to use huggingface_tokenizer_provider")

        # TODO(bnorick): download tokenizer once to lustre and use force offline to make sure all tasks read it from there
        self._tokenizer = transformers.AutoTokenizer.from_pretrained(pretrained_model_name_or_path=pretrained_model_name_or_path, **kwargs)
        self._vocab = self._tokenizer.get_vocab()
        self._inv_vocab = {token_id: token for token, token_id in self._vocab.items()}

    @property
    def vocab_size(self):
        return len(self._tokenizer)

    @property
    def vocab(self):
        """Dictionary from vocab text token to id token."""
        return self._vocab

    @property
    def inv_vocab(self):
        """Dictionary from vocab id token to text token."""
        return self._inv_vocab

    @property
    def decoder(self):
        return self._inv_vocab

    def tokenize(self, text, **kwargs):
        return self._tokenizer(text, **kwargs).input_ids

    def detokenize(self, token_ids, **kwargs):
        return self._tokenizer.decode(token_ids, **kwargs)

    @property
    def eod(self):
        return self._tokenizer.eos_token_id
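
# Example (illustrative; "gpt2" is a placeholder for any model name or local path
# that transformers.AutoTokenizer can resolve):
#
#   tok = _HuggingFaceTokenizer("gpt2")
#   ids = tok.tokenize("hello world")   # list of token ids
#   text = tok.detokenize(ids)          # "hello world"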


class _BertWordPieceTokenizer(MegatronTokenizer):
    """Original BERT wordpiece tokenizer."""

    def __init__(self, vocab_file, lower_case=True, vocab_extra_ids=0):
        super().__init__(vocab_file, lower_case=lower_case, vocab_extra_ids=vocab_extra_ids)
        self.tokenizer = FullBertTokenizer(vocab_file, do_lower_case=lower_case)
        self.cls_id = self.tokenizer.vocab['[CLS]']
        self.sep_id = self.tokenizer.vocab['[SEP]']
        self.pad_id = self.tokenizer.vocab['[PAD]']
        self.mask_id = self.tokenizer.vocab['[MASK]']
        self._additional_special_tokens = []

        # (dsachan) Add BOS and EOS tokens
        SPECIAL_TOKENS = {'eos_token': '[EOS]',
                          'bos_token': '[BOS]'}
        self._bos_token = '[BOS]'
        self.add_token(self._bos_token)
        self._bos_token_id = self.vocab.get(self._bos_token)

        self._eos_token = '[EOS]'
        self.add_token(self._eos_token)
        self._eos_token_id = self.vocab.get(self._eos_token)

        # (dsachan) Add additional special tokens
        # These can be used as sentinel tokens in T5 model inputs
        additional_special_tokens = []
        additional_special_tokens.extend(
            ["<extra_id_{}>".format(i) for i in range(vocab_extra_ids)])
        self.add_additional_special_tokens(additional_special_tokens)

    def add_token(self, token):
        if token not in self.vocab:
            self.inv_vocab[self.vocab_size] = token
            # self.vocab_size comes from len(vocab)
            # and it will increase as we add elements
            self.vocab[token] = self.vocab_size

    def add_additional_special_tokens(self, tokens_list):
        setattr(self, "additional_special_tokens", tokens_list)
        for value in tokens_list:
            self.add_token(value)

    @property
    def vocab_size(self):
        return self.tokenizer.vocab_size()

    @property
    def vocab(self):
        return self.tokenizer.vocab

    @property
    def inv_vocab(self):
        return self.tokenizer.inv_vocab

    def tokenize(self, text):
        text_tokens = self.tokenizer.tokenize(text)
        return self.tokenizer.convert_tokens_to_ids(text_tokens)

    def decode(self, ids):
        tokens = self.tokenizer.convert_ids_to_tokens(ids)
        return self.tokenizer.convert_tokens_to_string(tokens)

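    # Illustrative behavior of decode_token_ids() below: ids for
    # ["[CLS]", "hel", "##lo", "[SEP]"] decode to " hello [SEP]", since "##"
    # continuation pieces are merged and [PAD]/[CLS] markers are dropped.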
    def decode_token_ids(self, token_ids):
        tokens = self.tokenizer.convert_ids_to_tokens(token_ids)
        exclude_list = ['[PAD]', '[CLS]']
        non_pads = [t for t in tokens if t not in exclude_list]

        result = ""
        for s in non_pads:
            if s.startswith("##"):
                result += s[2:]
            else:
                result += " " + s

        return result

    @property
    def cls(self):
        return self.cls_id

    @property
    def sep(self):
        return self.sep_id

    @property
    def pad(self):
        return self.pad_id

    @property
    def mask(self):
        return self.mask_id

    @property
    def bos(self):
        """ Id of the beginning of sentence token in the vocabulary."""
        return self._bos_token_id

    @property
    def eos(self):
        """ Id of the end of sentence token in the vocabulary."""
        return self._eos_token_id

    @property
    def bos_token(self):
        """ Beginning of sentence token id """
        return self._bos_token

    @property
    def eos_token(self):
        """ End of sentence token id """
        return self._eos_token

    @property
    def additional_special_tokens(self):
        """ All the additional special tokens you may want to use (list of strings)."""
        return self._additional_special_tokens

    @property
    def additional_special_tokens_ids(self):
        """ Ids of all the additional special tokens in the vocabulary (list of integers)."""
        return [self.vocab.get(token) for token in self._additional_special_tokens]

    @additional_special_tokens.setter
    def additional_special_tokens(self, value):
        self._additional_special_tokens = value


class _GPT2BPETokenizer(MegatronTokenizer):
    """Original GPT2 BPE tokenizer."""

    def __init__(self, vocab_file, merge_file):
        super().__init__(vocab_file, merge_file)

        self.tokenizer = GPT2Tokenizer(vocab_file, merge_file, errors='replace',
                                       special_tokens=[], max_len=None)
        self.eod_id = self.tokenizer.encoder['<|endoftext|>']

    @property
    def vocab_size(self):
        return len(self.tokenizer.encoder)

    @property
    def vocab(self):
        return self.tokenizer.encoder

    @property
    def inv_vocab(self):
        return self.tokenizer.decoder

    def tokenize(self, text):
        return self.tokenizer.encode(text)

    def detokenize(self, token_ids):
        return self.tokenizer.decode(token_ids)

    @property
    def eod(self):
        return self.eod_id


class _SentencePieceTokenizer(MegatronTokenizer):
    """SentencePieceTokenizer-Megatron wrapper"""

    def __init__(self, model_file, vocab_extra_ids=0):
        super().__init__(model_file, vocab_extra_ids=vocab_extra_ids)

        import sentencepiece
        self.tokenizer = sentencepiece.SentencePieceProcessor(model_file=model_file)
        self._initalize(vocab_extra_ids)

    def _populate_vocab(self):
        self._vocab = {}
        self._inv_vocab = {}

        for i in range(len(self.tokenizer)):
            t = self.tokenizer.id_to_piece(i)
            self._inv_vocab[i] = t
            self._vocab[t] = i

    def _initalize(self, vocab_extra_ids):
        self._populate_vocab()
        self._special_tokens = {}
        self._inv_special_tokens = {}

        self._t5_tokens = []

        def _add_special_token(t):
            if t not in self._vocab:
                next_id = len(self._vocab)
                self._vocab[t] = next_id
                self._inv_vocab[next_id] = t
            self._special_tokens[t] = self._vocab[t]
            self._inv_special_tokens[self._vocab[t]] = t

        _add_special_token('<CLS>')
        self._cls_id = self._vocab['<CLS>']
        _add_special_token('<SEP>')
        self._sep_id = self._vocab['<SEP>']
        _add_special_token('<EOD>')
        self._eod_id = self._vocab['<EOD>']
        _add_special_token('<MASK>')
        self._mask_id = self._vocab['<MASK>']

        pad_id = self.tokenizer.pad_id()
        try:
            pad_token = self.tokenizer.id_to_piece(pad_id)
        except IndexError:
            pad_token = '<PAD>'
        _add_special_token(pad_token)
        self._pad_id = self._vocab[pad_token]

        bos_id = self.tokenizer.bos_id()
        try:
            bos_token = self.tokenizer.id_to_piece(bos_id)
        except IndexError:
            bos_token = '<BOS>'
        _add_special_token(bos_token)
        self._bos_id = self._vocab[bos_token]

        eos_id = self.tokenizer.eos_id()
        try:
            eos_token = self.tokenizer.id_to_piece(eos_id)
        except IndexError:
            eos_token = '<EOS>'
        _add_special_token(eos_token)
        self._eos_id = self._vocab[eos_token]

        for i in range(vocab_extra_ids):
            t = "<extra_id_{}>".format(i)
            _add_special_token(t)
            self._t5_tokens += [t]

    @property
    def vocab_size(self):
        return len(self._vocab)

    @property
    def vocab(self):
        return self._vocab

    @property
    def inv_vocab(self):
        return self._inv_vocab

    @property
    def decoder(self):
        return self._inv_vocab

    @property
    def encoder(self):
        return self._vocab

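    # Illustrative behavior of tokenize() below: special tokens are matched
    # greedily as plain substrings, so "foo<CLS>bar" encodes to
    # encode_as_ids("foo") + [self._vocab['<CLS>']] + encode_as_ids("bar").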
    # From:
    # https://github.com/NVIDIA/NeMo/blob/c8fa217e811d60d11d014827c7f3845ff6c99ae7/nemo/collections/common/tokenizers/sentencepiece_tokenizer.py#L89
    def tokenize(self, text):
        ids = []
        idx = 0

        while True:
            indices = {}
            for token in self._special_tokens:
                try:
                    indices[token] = text[idx:].index(token)
                except ValueError:
                    continue
            if len(indices) == 0:
                break

            next_token = min(indices, key=indices.get)
            next_idx = idx + indices[next_token]

            ids.extend(self.tokenizer.encode_as_ids(text[idx:next_idx]))
            ids.append(self._special_tokens[next_token])
            idx = next_idx + len(next_token)

        ids.extend(self.tokenizer.encode_as_ids(text[idx:]))
        return ids

    # From:
    # https://github.com/NVIDIA/NeMo/blob/c8fa217e811d60d11d014827c7f3845ff6c99ae7/nemo/collections/common/tokenizers/sentencepiece_tokenizer.py#L125
    def detokenize(self, ids):
        text = ""
        last_i = 0

        for i, id in enumerate(ids):
            if id in self._inv_special_tokens:
                text += self.tokenizer.decode_ids(ids[last_i:i]) + " "
                text += self._inv_special_tokens[id] + " "
                last_i = i + 1

        text += self.tokenizer.decode_ids(ids[last_i:])
        return text
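
    # Note: detokenize() surrounds special tokens with spaces, so
    # detokenize(tokenize(s)) may differ from s by whitespace near special tokens.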

    @property
    def cls(self):
        return self._cls_id

    @property
    def sep(self):
        return self._sep_id

    @property
    def pad(self):
        return self._pad_id

    @property
    def bos(self):
        return self._bos_id

    @property
    def eod(self):
        return self._eod_id

    @property
    def eos(self):
        return self._eos_id

    @property
    def mask(self):
        return self._mask_id

    @property
    def additional_special_tokens_ids(self):
        return [self.vocab[k] for k in self._t5_tokens]


class _GPTSentencePieceTokenizer(_SentencePieceTokenizer):
    """SentencePieceTokenizer-Megatron wrapper"""

    def __init__(self, model_file):
        super().__init__(model_file, vocab_extra_ids=0)

    def _initalize(self, vocab_extra_ids):
        self._populate_vocab()

        self._pad_id = self.tokenizer.pad_id()
        self._bos_id = self.tokenizer.bos_id()
        self._eos_id = self.tokenizer.eos_id()

    def tokenize(self, text):
        return self.tokenizer.encode_as_ids(text)

    def detokenize(self, ids):
        return self.tokenizer.decode_ids(ids)

    @property
    def cls(self):
        return -1

    @property
    def sep(self):
        return -1

    @property
    def mask(self):
        return -1

    @property
    def eod(self):
        return self._eos_id

    @property
    def additional_special_tokens_ids(self):
        return None


class _Llama2Tokenizer(_SentencePieceTokenizer):
    """SentencePieceTokenizer-Megatron wrapper"""

    def __init__(self, model_file):
        super().__init__(model_file, vocab_extra_ids=0)

    def _initalize(self, vocab_extra_ids):
        self._populate_vocab()

        # BOS / EOS token IDs
        self.n_words: int = self.tokenizer.vocab_size()
        self.bos_id: int = self.tokenizer.bos_id()
        self.eos_id: int = self.tokenizer.eos_id()
        self.pad_id: int = self.tokenizer.pad_id()
        assert self.tokenizer.vocab_size() == self.tokenizer.get_piece_size()

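    # Illustrative: with the defaults (bos=True, eos=False), tokenize("hi") returns
    # [self.bos_id, *piece_ids]; pass eos=True to also append self.eos_id.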
    def tokenize(self, s: str, bos=True, eos=False):
        '''Default args for text completion, not chat/dialog.'''
        assert type(s) is str
        t = self.tokenizer.encode(s)
        if bos:
            t = [self.bos_id] + t
        if eos:
            t = t + [self.eos_id]
        return t

    def detokenize(self, ids):
        return self.tokenizer.decode_ids(ids)

    @property
    def cls(self):
        return -1

    @property
    def sep(self):
        return -1

    @property
    def mask(self):
        return -1

    @property
    def eod(self):
        return self.eos_id

    @property
    def additional_special_tokens_ids(self):
        return None


def reload_mergeable_ranks(
    path: str,
    max_vocab: Optional[int] = None,
) -> Dict[bytes, int]:
    """
    Reload our tokenizer JSON file and convert it to Tiktoken format.
    """
    from ..utils import print_rank_0  # To prevent circular import.

    assert path.endswith(".json")

    # reload vocab
    with open(path, "r") as f:
        vocab = json.load(f)
    assert isinstance(vocab, list)
    print_rank_0(f"Vocab size: {len(vocab)}")
    if max_vocab is not None:
        vocab = vocab[:max_vocab]
        print_rank_0(f"Cutting vocab to first {len(vocab)} tokens.")

    # build ranks
    ranks: Dict[bytes, int] = {}
    for i, x in enumerate(vocab):
        assert x.keys() == {"rank", "token_bytes", "token_str"}
        assert x["rank"] == i
        merge = base64.b64decode(x["token_bytes"])
        assert i >= 256 or merge == bytes([i])
        ranks[merge] = x["rank"]

    # sanity check
    assert len(ranks) == len(vocab)
    assert set(ranks.values()) == set(range(len(ranks)))

    return ranks
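
# Expected vocab JSON layout (illustrative): a list ordered by rank, e.g.
#   [{"rank": 0, "token_bytes": "AA==", "token_str": "\u0000"},
#    {"rank": 1, "token_bytes": "AQ==", "token_str": "\u0001"}, ...]
# where "token_bytes" is the base64-encoded byte sequence of the merge.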


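# Tiktoken-style split regexes (interpreted by tiktoken's regex engine, which
# supports possessive quantifiers, unlike Python's `re`). Roughly: v1 chunks
# letter runs, single digits, punctuation runs, and whitespace; v2 additionally
# breaks letter runs at lower/upper case transitions (e.g. inside camelCase).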
PATTERN_TIKTOKEN = r"[^\r\n\p{L}\p{N}]?+\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]++[\r\n]*|\s*[\r\n]|\s+(?!\S)|\s+"
PATTERN_TIKTOKEN_V2 = r"[^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}]*[\p{Ll}\p{Lm}\p{Lo}\p{M}]+|[^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}]+[\p{Ll}\p{Lm}\p{Lo}\p{M}]*|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n/]*|\s*[\r\n]+|\s+(?!\S)|\s+"

class CustomTikTokenizer(MegatronTokenizer):
    def __init__(
        self,
        path: str,
        pattern: str,
        vocab_size: Optional[int],
        num_special_tokens: int,
        special_tokens: Optional[List[str]],
    ):
        super().__init__(
            path,
            pattern=pattern,
            vocab_size=vocab_size,
            num_special_tokens=num_special_tokens,
            special_tokens=special_tokens
        )
        import tiktoken
        from .. import print_rank_0  # To prevent circular import.

        if vocab_size is None:
            vocab_size = 2**17  # Fallback vocab size is 131072.
        self._vocab_size = vocab_size

        SPECIAL_TOKENS = ["<unk>", "<s>", "</s>"]
        if special_tokens is None:
            special_tokens = SPECIAL_TOKENS.copy()
        assert len(special_tokens) == len(set(special_tokens)), f"Special tokens should be unique: {special_tokens}"
        assert len(special_tokens) <= num_special_tokens < self._vocab_size
        assert set(SPECIAL_TOKENS) <= set(special_tokens), f"Custom special tokens should include {SPECIAL_TOKENS}"

        special_filler = ["<SPECIAL_{id}>".format(id=i) for i in range(len(special_tokens), num_special_tokens)]
        if special_filler:
            print_rank_0(f"Adding special tokens {special_filler[0]}, ..., {special_filler[-1]}")
        special_tokens = special_tokens + special_filler
        assert len(set(special_tokens)) == len(special_tokens) == num_special_tokens, special_tokens
        inner_vocab_size = self._vocab_size - num_special_tokens

        token_to_id_without_special_tokens = reload_mergeable_ranks(path, max_vocab=inner_vocab_size)
        # Create space for special tokens.
        token_to_id_without_special_tokens = {t: i + num_special_tokens for t, i in token_to_id_without_special_tokens.items()}

        special_tokens = {t: i for i, t in enumerate(special_tokens)}
        self._unk_id = special_tokens["<unk>"]
        self._bos_id = special_tokens["<s>"]
        self._eos_id = special_tokens["</s>"]

        # Create tiktoken model.
        self._model = tiktoken.Encoding(
            name=Path(path).parent.name,
            pat_str=pattern,
            mergeable_ranks=token_to_id_without_special_tokens,
            special_tokens=special_tokens,
        )

        # Create final _id_to_token and _token_to_id data structures with special tokens inserted
        # into appropriate locations.
        assert set(token_to_id_without_special_tokens.keys()).isdisjoint(set(special_tokens.keys()))
        self._token_to_id = token_to_id_without_special_tokens.copy()
        self._token_to_id.update(special_tokens)
        self._id_to_token = {v: k for k, v in self._token_to_id.items()}
        assert set(range(self._vocab_size)) == set(self._id_to_token.keys())


    @property
    def bos(self) -> int:
        return self._bos_id

    @property
    def eos(self) -> int:
        return self._eos_id

    @property
    def unk(self) -> int:
        return self._unk_id

    @property
    def eod(self) -> int:
        return self._eos_id

    @property
    def vocab(self):
        return self._token_to_id

    @property
    def inv_vocab(self):
        return self._id_to_token

    def tokenize(self, s: str, bos: bool = False, eos: bool = False) -> List[int]:
        tokens = self._model.encode_ordinary(s)
        if bos:
            tokens = [self.bos, *tokens]
        if eos:
            tokens = [*tokens, self.eos]

        return tokens

    def detokenize(self, tokens: List[int]) -> str:
        return self._model.decode(tokens)

    @property
    def vocab_size(self) -> int:
        return self._vocab_size

    @property
    def encoder(self):
        return self._token_to_id

    @property
    def decoder(self):
        return self._id_to_token
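
# Example (illustrative; the path and sizes are placeholders):
#
#   tok = CustomTikTokenizer(path="/path/to/vocab.json", pattern=PATTERN_TIKTOKEN,
#                            vocab_size=131072, num_special_tokens=1000,
#                            special_tokens=None)  # defaults to <unk>, <s>, </s>
#   tok.tokenize("hello", bos=True)   # -> [tok.bos, *inner_ids]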


class _NullTokenizer(MegatronTokenizer):
    def __init__(self, vocab_size):
        super().__init__(None, vocab_size=vocab_size)
        self._vocab_size_without_eod = int(vocab_size)
        self._eod_id = self._vocab_size_without_eod

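    # Illustrative round trip: tokenize("12 7") -> [12, 7] and
    # detokenize([12, 7]) -> "12 7"; the "vocabulary" is the integers themselves.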
    def tokenize(self, text):
        return [int(x) for x in text.split(' ')]

    def detokenize(self, ids):
        text = [str(x) for x in ids]
        return ' '.join(text)

    @property
    def vocab_size(self):
        return self._vocab_size_without_eod + 1

    @property
    def vocab(self):
        raise NotImplementedError

    @property
    def inv_vocab(self):
        raise NotImplementedError

    @property
    def cls(self):
        return -1

    @property
    def sep(self):
        return -1

    @property
    def mask(self):
        return -1

    @property
    def eod(self):
        return self._eod_id

    @property
    def additional_special_tokens_ids(self):
        return None