Commit 0ffc8eaf authored by Aymeric Augustin, committed by Julien Chaumond

Enforce target version for black.

This should stabilize formatting: without an explicit --target-version, black infers the supported Python versions from each file's syntax, so its output (for example the trailing comma after **kwargs) can drift.
parent f01b3e66
@@ -101,7 +101,7 @@ jobs:
# we need a version of isort with https://github.com/timothycrosley/isort/pull/1000
- run: sudo pip install git+git://github.com/timothycrosley/isort.git@e63ae06ec7d70b06df9e528357650281a3d3ec22#egg=isort
- run: sudo pip install .[tf,torch,quality]
- run: black --check --line-length 119 examples templates tests src utils
- run: black --check --line-length 119 --target-version py35 examples templates tests src utils
- run: isort --check-only --recursive examples templates tests src utils
- run: flake8 examples templates tests src utils
check_repository_consistency:
@@ -3,14 +3,14 @@
# Check that source code meets quality standards
quality:
black --check --line-length 119 examples templates tests src utils
black --check --line-length 119 --target-version py35 examples templates tests src utils
isort --check-only --recursive examples templates tests src utils
flake8 examples templates tests src utils
# Format source code automatically
style:
black --line-length 119 examples templates tests src utils
black --line-length 119 --target-version py35 examples templates tests src utils
isort --recursive examples templates tests src utils
# Run tests for the library
@@ -325,7 +325,7 @@ class Model2Model(PreTrainedEncoderDecoder):
encoder_pretrained_model_name_or_path=pretrained_model_name_or_path,
decoder_pretrained_model_name_or_path=pretrained_model_name_or_path,
*args,
**kwargs
**kwargs,
)
return model
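
The hunk above is representative of what the new flag changes in the Python sources: when black explodes a call across several lines, it adds a trailing comma after a final *args/**kwargs argument only if every target version allows that syntax (Python 3.5 and later). A minimal sketch of the difference using black's Python API, with the call adapted from the hunk above; the exact output may vary with the installed black version:

    import black

    # A call long enough that black has to split it across several lines,
    # even at the project's line length of 119 characters.
    SRC = (
        "model = cls.from_pretrained("
        "encoder_pretrained_model_name_or_path=pretrained_model_name_or_path, "
        "decoder_pretrained_model_name_or_path=pretrained_model_name_or_path, "
        "*args, **kwargs)\n"
    )

    for target in (black.TargetVersion.PY34, black.TargetVersion.PY35):
        mode = black.FileMode(target_versions={target}, line_length=119)
        print(target.name)
        # Expected: the PY35 run ends the exploded call with "**kwargs,";
        # the PY34 run omits the trailing comma, which is invalid syntax there.
        print(black.format_str(SRC, mode=mode))
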
@@ -250,7 +250,7 @@ class TFPreTrainedModel(tf.keras.Model):
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
**kwargs
**kwargs,
)
else:
model_kwargs = kwargs
@@ -355,7 +355,7 @@ class PreTrainedModel(nn.Module):
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
**kwargs
**kwargs,
)
else:
model_kwargs = kwargs
@@ -643,7 +643,7 @@ class QuestionAnsweringPipeline(Pipeline):
framework=framework,
args_parser=QuestionAnsweringArgumentHandler(),
device=device,
**kwargs
**kwargs,
)
@staticmethod
@@ -87,7 +87,7 @@ class AlbertTokenizer(PreTrainedTokenizer):
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
**kwargs
**kwargs,
)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
@@ -169,7 +169,7 @@ class BertTokenizer(PreTrainedTokenizer):
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
**kwargs
**kwargs,
)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens
@@ -560,7 +560,7 @@ class BertTokenizerFast(PreTrainedTokenizerFast):
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
**kwargs
**kwargs,
)
self._tokenizer = tk.Tokenizer(tk.models.WordPiece.from_files(vocab_file, unk_token=unk_token))
@@ -113,7 +113,7 @@ class BertJapaneseTokenizer(BertTokenizer):
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
**kwargs
**kwargs,
)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens
@@ -76,7 +76,7 @@ class CamembertTokenizer(PreTrainedTokenizer):
pad_token=pad_token,
mask_token=mask_token,
additional_special_tokens=additional_special_tokens,
**kwargs
**kwargs,
)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
self.max_len_sentences_pair = self.max_len - 4 # take into account special tokens
@@ -95,7 +95,7 @@ class RobertaTokenizer(GPT2Tokenizer):
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
**kwargs
**kwargs,
)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
self.max_len_sentences_pair = self.max_len - 4 # take into account special tokens
@@ -96,7 +96,7 @@ class T5Tokenizer(PreTrainedTokenizer):
unk_token=unk_token,
pad_token=pad_token,
additional_special_tokens=additional_special_tokens,
**kwargs
**kwargs,
)
try:
@@ -817,7 +817,7 @@ class PreTrainedTokenizer(object):
truncation_strategy=truncation_strategy,
pad_to_max_length=pad_to_max_length,
return_tensors=return_tensors,
**kwargs
**kwargs,
)
return encoded_inputs["input_ids"]
@@ -586,7 +586,7 @@ class XLMTokenizer(PreTrainedTokenizer):
cls_token=cls_token,
mask_token=mask_token,
additional_special_tokens=additional_special_tokens,
**kwargs
**kwargs,
)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
@@ -83,7 +83,7 @@ class XLMRobertaTokenizer(PreTrainedTokenizer):
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
**kwargs
**kwargs,
)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
self.max_len_sentences_pair = self.max_len - 4 # take into account special tokens
@@ -86,7 +86,7 @@ class XLNetTokenizer(PreTrainedTokenizer):
cls_token=cls_token,
mask_token=mask_token,
additional_special_tokens=additional_special_tokens,
**kwargs
**kwargs,
)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
@@ -115,7 +115,7 @@ class XxxTokenizer(PreTrainedTokenizer):
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
**kwargs
**kwargs,
)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens
@@ -84,7 +84,7 @@ class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer(add_special_tokens=False)
sequence = u"UNwant\u00E9d,running"
sequence = "UNwant\u00E9d,running"
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
@@ -96,7 +96,7 @@ class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer(add_special_tokens=False, add_prefix_space=True)
sequence = u"lower newer"
sequence = "lower newer"
# Testing tokenization
tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
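
The two test hunks (BertTokenizationTest and GPT2TokenizationTest) only drop the redundant u string prefix, which black can normalize away once Python 2 is presumably ruled out by the py35 target. The rewrite is behavior-preserving on Python 3, as this small check illustrates:

    # In Python 3 the u prefix is a no-op, so dropping it does not change
    # what the tokenizer tests feed into tokenize().
    assert u"UNwant\u00E9d,running" == "UNwant\u00E9d,running"
    assert type(u"lower newer") is str
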