Commit 22ac004a authored by LysandreJik

Added documentation and changed parameters for special_tokens_sentences_pair.

parent 912fdff8
pytorch_transformers/tokenization_bert.py
@@ -167,12 +167,20 @@ class BertTokenizer(PreTrainedTokenizer):
         return out_string
 
     def add_special_tokens_single_sentence(self, token_ids):
+        """
+        Adds special tokens to a sequence for sequence classification tasks.
+        A BERT sequence has the following format: [CLS] X [SEP]
+        """
         return [self._convert_token_to_id(self.cls_token)] + token_ids + [self._convert_token_to_id(self.sep_token)]
 
-    def add_special_tokens_sentences_pair(self, *token_ids):
+    def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
+        """
+        Adds special tokens to a sequence pair for sequence classification tasks.
+        A BERT sequence pair has the following format: [CLS] A [SEP] B [SEP]
+        """
         sep = [self._convert_token_to_id(self.sep_token)]
         cls = [self._convert_token_to_id(self.cls_token)]
-        return cls + token_ids[0] + sep + token_ids[1] + sep
+        return cls + token_ids_0 + sep + token_ids_1 + sep
 
     def save_vocabulary(self, vocab_path):
         """Save the tokenizer vocabulary to a directory or file."""
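For illustration, a minimal usage sketch of the new explicit two-argument signature (the pytorch_transformers entry points are the library's usual ones; 'bert-base-uncased' is an assumed checkpoint name, not part of this diff):

    from pytorch_transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Hello world"))
    ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("How are you?"))

    # Both sequences are now passed explicitly instead of through *token_ids.
    pair = tokenizer.add_special_tokens_sentences_pair(ids_a, ids_b)
    # pair has the documented layout: [CLS] A [SEP] B [SEP]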
pytorch_transformers/tokenization_roberta.py
@@ -12,7 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""Tokenization classes for OpenAI GPT."""
+"""Tokenization classes for RoBERTa."""
 
 from __future__ import (absolute_import, division, print_function,
                         unicode_literals)
@@ -57,15 +57,15 @@ PRETRAINED_VOCAB_FILES_MAP = {
 }
 
 PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
-    'roberta-base': 1024,
-    'roberta-large': 1024,
-    'roberta-large-mnli': 1024,
+    'roberta-base': 512,
+    'roberta-large': 512,
+    'roberta-large-mnli': 512,
 }
 
 class RobertaTokenizer(PreTrainedTokenizer):
     """
-    GPT-2 BPE tokenizer. Peculiarities:
+    RoBERTa BPE tokenizer, derived from the GPT-2 tokenizer. Peculiarities:
         - Byte-level BPE
     """
     vocab_files_names = VOCAB_FILES_NAMES
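What the corrected sizes mean in practice, as a sketch (assuming this table still populates the tokenizer's max_len attribute via max_model_input_sizes, as elsewhere in the library):

    from pytorch_transformers import RobertaTokenizer

    tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
    # RoBERTa's usable context window is 512 tokens; the old value of 1024
    # would have let callers build sequences the model cannot embed.
    assert tokenizer.max_len == 512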
@@ -161,12 +161,20 @@ class RobertaTokenizer(PreTrainedTokenizer):
         return text
 
     def add_special_tokens_single_sentence(self, token_ids):
+        """
+        Adds special tokens to a sequence for sequence classification tasks.
+        A RoBERTa sequence has the following format: [CLS] X [SEP]
+        """
         return [self._convert_token_to_id(self.cls_token)] + token_ids + [self._convert_token_to_id(self.sep_token)]
 
-    def add_special_tokens_sentences_pair(self, *token_ids):
+    def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
+        """
+        Adds special tokens to a sequence pair for sequence classification tasks.
+        A RoBERTa sequence pair has the following format: [CLS] A [SEP][SEP] B [SEP]
+        """
         sep = [self._convert_token_to_id(self.sep_token)]
         cls = [self._convert_token_to_id(self.cls_token)]
-        return cls + token_ids[0] + sep + sep + token_ids[1] + sep
+        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
 
     def save_vocabulary(self, save_directory):
         """Save the tokenizer vocabulary and merge files to a directory."""
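Note the doubled separator between the two segments, which is what distinguishes the RoBERTa pair layout from BERT's. A sketch (same assumed entry points as above; RoBERTa's cls/sep tokens render as <s> and </s>):

    from pytorch_transformers import RobertaTokenizer

    tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
    ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Hello world"))
    ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("How are you?"))

    pair = tokenizer.add_special_tokens_sentences_pair(ids_a, ids_b)
    # RoBERTa places two consecutive separators between the segments:
    # <s> A </s></s> B </s>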
pytorch_transformers/tokenization_utils.py
@@ -546,7 +546,7 @@ class PreTrainedTokenizer(object):
     def add_special_tokens_single_sentence(self, token_ids):
         raise NotImplementedError
 
-    def add_special_tokens_sentences_pair(self, *token_ids):
+    def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
         raise NotImplementedError
 
     def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
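The base class only fixes the signatures; each concrete tokenizer supplies the behaviour. A hypothetical minimal subclass, just to show the two hooks (ToyTokenizer and its no-op logic are illustrative, not from this commit):

    from pytorch_transformers import PreTrainedTokenizer

    class ToyTokenizer(PreTrainedTokenizer):
        def add_special_tokens_single_sentence(self, token_ids):
            # Toy behaviour: no special tokens at all.
            return token_ids

        def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
            # Toy behaviour: plain concatenation. The explicit two-argument
            # signature fails fast on wrong arity, unlike the old *token_ids
            # form, which only raised IndexError once token_ids[1] was read.
            return token_ids_0 + token_ids_1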
pytorch_transformers/tokenization_xlm.py
@@ -215,12 +215,20 @@ class XLMTokenizer(PreTrainedTokenizer):
         return out_string
 
     def add_special_tokens_single_sentence(self, token_ids):
+        """
+        Adds special tokens to a sequence for sequence classification tasks.
+        An XLM sequence has the following format: [CLS] X [SEP]
+        """
         return [self._convert_token_to_id(self.cls_token)] + token_ids + [self._convert_token_to_id(self.sep_token)]
 
-    def add_special_tokens_sentences_pair(self, *token_ids):
+    def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
+        """
+        Adds special tokens to a sequence pair for sequence classification tasks.
+        An XLM sequence pair has the following format: [CLS] A [SEP] B [SEP]
+        """
         sep = [self._convert_token_to_id(self.sep_token)]
         cls = [self._convert_token_to_id(self.cls_token)]
-        return cls + token_ids[0] + sep + token_ids[1] + sep
+        return cls + token_ids_0 + sep + token_ids_1 + sep
 
     def save_vocabulary(self, save_directory):
         """Save the tokenizer vocabulary and merge files to a directory."""
pytorch_transformers/tokenization_xlnet.py
@@ -178,14 +178,22 @@ class XLNetTokenizer(PreTrainedTokenizer):
         return out_string
 
     def add_special_tokens_single_sentence(self, token_ids):
-        logger.warning("No method was defined for special tokens and single sentence streams in XLNet. "
-                       "Returning token_ids")
-        return token_ids
+        """
+        Adds special tokens to a sequence for sequence classification tasks.
+        An XLNet sequence has the following format: X [SEP][CLS]
+        """
+        sep = [self._convert_token_to_id(self.sep_token)]
+        cls = [self._convert_token_to_id(self.cls_token)]
+        return token_ids + sep + cls
 
-    def add_special_tokens_sentences_pair(self, *token_ids):
+    def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
+        """
+        Adds special tokens to a sequence pair for sequence classification tasks.
+        An XLNet sequence pair has the following format: A [SEP] B [SEP][CLS]
+        """
         sep = [self._convert_token_to_id(self.sep_token)]
         cls = [self._convert_token_to_id(self.cls_token)]
-        return token_ids[0] + sep + token_ids[1] + sep + cls
+        return token_ids_0 + sep + token_ids_1 + sep + cls
 
     def save_vocabulary(self, save_directory):
         """ Save the sentencepiece vocabulary (copy original file) and special tokens file
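XLNet is the odd one out: its special tokens are appended, so the classification token comes last. A sketch ('xlnet-base-cased' is an assumed checkpoint name, not part of this diff):

    from pytorch_transformers import XLNetTokenizer

    tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
    ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Hello world"))

    single = tokenizer.add_special_tokens_single_sentence(ids)
    # single ends with the sep and cls ids, i.e. X [SEP][CLS]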