Commit a31d4a29 authored by Aymeric Augustin's avatar Aymeric Augustin
Browse files

Reraise ImportError when sentencepiece isn't installed.

Else, the next line fails with a confusing exception because the spm
variable isn't defined.
parent c8b0c1e5
...@@ -100,6 +100,7 @@ class AlbertTokenizer(PreTrainedTokenizer): ...@@ -100,6 +100,7 @@ class AlbertTokenizer(PreTrainedTokenizer):
"You need to install SentencePiece to use AlbertTokenizer: https://github.com/google/sentencepiece" "You need to install SentencePiece to use AlbertTokenizer: https://github.com/google/sentencepiece"
"pip install sentencepiece" "pip install sentencepiece"
) )
raise
self.do_lower_case = do_lower_case self.do_lower_case = do_lower_case
self.remove_space = remove_space self.remove_space = remove_space
...@@ -127,6 +128,7 @@ class AlbertTokenizer(PreTrainedTokenizer): ...@@ -127,6 +128,7 @@ class AlbertTokenizer(PreTrainedTokenizer):
"You need to install SentencePiece to use AlbertTokenizer: https://github.com/google/sentencepiece" "You need to install SentencePiece to use AlbertTokenizer: https://github.com/google/sentencepiece"
"pip install sentencepiece" "pip install sentencepiece"
) )
raise
self.sp_model = spm.SentencePieceProcessor() self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file) self.sp_model.Load(self.vocab_file)
......
...@@ -107,6 +107,7 @@ class T5Tokenizer(PreTrainedTokenizer): ...@@ -107,6 +107,7 @@ class T5Tokenizer(PreTrainedTokenizer):
"https://github.com/google/sentencepiece" "https://github.com/google/sentencepiece"
"pip install sentencepiece" "pip install sentencepiece"
) )
raise
self.vocab_file = vocab_file self.vocab_file = vocab_file
self._extra_ids = extra_ids self._extra_ids = extra_ids
...@@ -132,6 +133,7 @@ class T5Tokenizer(PreTrainedTokenizer): ...@@ -132,6 +133,7 @@ class T5Tokenizer(PreTrainedTokenizer):
"You need to install SentencePiece to use XLNetTokenizer: https://github.com/google/sentencepiece" "You need to install SentencePiece to use XLNetTokenizer: https://github.com/google/sentencepiece"
"pip install sentencepiece" "pip install sentencepiece"
) )
raise
self.sp_model = spm.SentencePieceProcessor() self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file) self.sp_model.Load(self.vocab_file)
......
...@@ -100,6 +100,7 @@ class XLNetTokenizer(PreTrainedTokenizer): ...@@ -100,6 +100,7 @@ class XLNetTokenizer(PreTrainedTokenizer):
"You need to install SentencePiece to use XLNetTokenizer: https://github.com/google/sentencepiece" "You need to install SentencePiece to use XLNetTokenizer: https://github.com/google/sentencepiece"
"pip install sentencepiece" "pip install sentencepiece"
) )
raise
self.do_lower_case = do_lower_case self.do_lower_case = do_lower_case
self.remove_space = remove_space self.remove_space = remove_space
...@@ -127,6 +128,7 @@ class XLNetTokenizer(PreTrainedTokenizer): ...@@ -127,6 +128,7 @@ class XLNetTokenizer(PreTrainedTokenizer):
"You need to install SentencePiece to use XLNetTokenizer: https://github.com/google/sentencepiece" "You need to install SentencePiece to use XLNetTokenizer: https://github.com/google/sentencepiece"
"pip install sentencepiece" "pip install sentencepiece"
) )
raise
self.sp_model = spm.SentencePieceProcessor() self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file) self.sp_model.Load(self.vocab_file)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment