# coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for RoBERTa."""
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import sys
import json
import logging
import os
import regex as re
from io import open

from .tokenization_gpt2 import GPT2Tokenizer

try:
    from functools import lru_cache
except ImportError:
    # Just a dummy decorator to get the checks to run on python2
    # because honestly I don't want to support a byte-level unicode BPE tokenizer on python 2 right now.
    def lru_cache():
        return lambda func: func

logger = logging.getLogger(__name__)

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file':
    {
        'roberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-vocab.json",
        'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-vocab.json",
        'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-vocab.json",
    },
    'merges_file':
    {
        'roberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-merges.txt",
        'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-merges.txt",
        'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'roberta-base': 512,
    'roberta-large': 512,
    'roberta-large-mnli': 512,
}


class RobertaTokenizer(GPT2Tokenizer):
    """
    RoBERTa BPE tokenizer, derived from the GPT-2 tokenizer. Peculiarities:
        - Byte-level Byte-Pair-Encoding
        - Requires a space to start the input string => will add a space if there isn't one.
          As a consequence, this tokenizer's `encode` and `decode` methods will not preserve
          the absence of a space at the beginning of a string: `tokenizer.decode(tokenizer.encode("Hello")) == " Hello"`
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, merges_file, errors='replace', bos_token="<s>", eos_token="</s>", sep_token="</s>",
                 cls_token="<s>", unk_token="<unk>", pad_token='<pad>', mask_token='<mask>', **kwargs):
        super(RobertaTokenizer, self).__init__(vocab_file=vocab_file, merges_file=merges_file, errors=errors,
                                               bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
                                               sep_token=sep_token, cls_token=cls_token, pad_token=pad_token,
                                               mask_token=mask_token, **kwargs)

    def add_special_tokens_single_sequence(self, token_ids):
        """
        Adds special tokens to a sequence for sequence classification tasks.
        A RoBERTa sequence has the following format: <s> X </s>
        """
        return [self.cls_token_id] + token_ids + [self.sep_token_id]

    def add_special_tokens_sequence_pair(self, token_ids_0, token_ids_1):
        """
        Adds special tokens to a sequence pair for sequence classification tasks.
        A RoBERTa sequence pair has the following format: <s> A </s></s> B </s>
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1):
        """
        Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
        A RoBERTa sequence pair mask has the following format:
        0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        return len(cls + token_ids_0 + sep + sep) * [0] + len(token_ids_1 + sep) * [1]