"...git@developer.sourcefind.cn:chenpangpang/transformers.git" did not exist on "ee5a6856caec83e7f2f305418f3199b87ea6cc2d"
Commit 0ffc8eaf authored by Aymeric Augustin, committed by Julien Chaumond

Enforce target version for black.

This should stabilize formatting.
parent f01b3e66
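For context (not part of the original commit message): without --target-version, black infers the supported Python versions from each file's syntax, so its output can vary from file to file and run to run. Pinning --target-version py35 tells black that every target accepts a trailing comma after *args/**kwargs in a call, which is why the hunks below add that comma throughout. A minimal, hypothetical sketch of the resulting style (names invented for illustration):

def build_tokenizer(**kwargs):
    # Collects keyword arguments, like the tokenizer __init__ methods below.
    return kwargs

special_tokens = {"mask_token": "<mask>"}
tokens = build_tokenizer(
    pad_token="<pad>",
    cls_token="<cls>",
    **special_tokens,  # trailing comma after ** is valid syntax on Python 3.5+
)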
@@ -101,7 +101,7 @@ jobs:
       # we need a version of isort with https://github.com/timothycrosley/isort/pull/1000
       - run: sudo pip install git+git://github.com/timothycrosley/isort.git@e63ae06ec7d70b06df9e528357650281a3d3ec22#egg=isort
       - run: sudo pip install .[tf,torch,quality]
-      - run: black --check --line-length 119 examples templates tests src utils
+      - run: black --check --line-length 119 --target-version py35 examples templates tests src utils
       - run: isort --check-only --recursive examples templates tests src utils
       - run: flake8 examples templates tests src utils
   check_repository_consistency:
@@ -3,14 +3,14 @@
 # Check that source code meets quality standards
 quality:
-	black --check --line-length 119 examples templates tests src utils
+	black --check --line-length 119 --target-version py35 examples templates tests src utils
 	isort --check-only --recursive examples templates tests src utils
 	flake8 examples templates tests src utils
 
 # Format source code automatically
 style:
-	black --line-length 119 examples templates tests src utils
+	black --line-length 119 --target-version py35 examples templates tests src utils
 	isort --recursive examples templates tests src utils
 
 # Run tests for the library
@@ -325,7 +325,7 @@ class Model2Model(PreTrainedEncoderDecoder):
             encoder_pretrained_model_name_or_path=pretrained_model_name_or_path,
             decoder_pretrained_model_name_or_path=pretrained_model_name_or_path,
             *args,
-            **kwargs
+            **kwargs,
         )
 
         return model
@@ -250,7 +250,7 @@ class TFPreTrainedModel(tf.keras.Model):
                 return_unused_kwargs=True,
                 force_download=force_download,
                 resume_download=resume_download,
-                **kwargs
+                **kwargs,
             )
         else:
             model_kwargs = kwargs
@@ -355,7 +355,7 @@ class PreTrainedModel(nn.Module):
                 force_download=force_download,
                 resume_download=resume_download,
                 proxies=proxies,
-                **kwargs
+                **kwargs,
             )
         else:
             model_kwargs = kwargs
@@ -643,7 +643,7 @@ class QuestionAnsweringPipeline(Pipeline):
             framework=framework,
             args_parser=QuestionAnsweringArgumentHandler(),
             device=device,
-            **kwargs
+            **kwargs,
         )
 
     @staticmethod
@@ -87,7 +87,7 @@ class AlbertTokenizer(PreTrainedTokenizer):
             pad_token=pad_token,
             cls_token=cls_token,
             mask_token=mask_token,
-            **kwargs
+            **kwargs,
         )
         self.max_len_single_sentence = self.max_len - 2  # take into account special tokens
@@ -169,7 +169,7 @@ class BertTokenizer(PreTrainedTokenizer):
             pad_token=pad_token,
             cls_token=cls_token,
             mask_token=mask_token,
-            **kwargs
+            **kwargs,
         )
         self.max_len_single_sentence = self.max_len - 2  # take into account special tokens
         self.max_len_sentences_pair = self.max_len - 3  # take into account special tokens
@@ -560,7 +560,7 @@ class BertTokenizerFast(PreTrainedTokenizerFast):
             pad_token=pad_token,
             cls_token=cls_token,
             mask_token=mask_token,
-            **kwargs
+            **kwargs,
         )
 
         self._tokenizer = tk.Tokenizer(tk.models.WordPiece.from_files(vocab_file, unk_token=unk_token))
@@ -113,7 +113,7 @@ class BertJapaneseTokenizer(BertTokenizer):
             pad_token=pad_token,
             cls_token=cls_token,
             mask_token=mask_token,
-            **kwargs
+            **kwargs,
         )
         self.max_len_single_sentence = self.max_len - 2  # take into account special tokens
         self.max_len_sentences_pair = self.max_len - 3  # take into account special tokens
@@ -76,7 +76,7 @@ class CamembertTokenizer(PreTrainedTokenizer):
             pad_token=pad_token,
             mask_token=mask_token,
             additional_special_tokens=additional_special_tokens,
-            **kwargs
+            **kwargs,
         )
         self.max_len_single_sentence = self.max_len - 2  # take into account special tokens
         self.max_len_sentences_pair = self.max_len - 4  # take into account special tokens
@@ -95,7 +95,7 @@ class RobertaTokenizer(GPT2Tokenizer):
             cls_token=cls_token,
             pad_token=pad_token,
             mask_token=mask_token,
-            **kwargs
+            **kwargs,
         )
         self.max_len_single_sentence = self.max_len - 2  # take into account special tokens
         self.max_len_sentences_pair = self.max_len - 4  # take into account special tokens
@@ -96,7 +96,7 @@ class T5Tokenizer(PreTrainedTokenizer):
             unk_token=unk_token,
             pad_token=pad_token,
             additional_special_tokens=additional_special_tokens,
-            **kwargs
+            **kwargs,
         )
 
         try:
@@ -817,7 +817,7 @@ class PreTrainedTokenizer(object):
             truncation_strategy=truncation_strategy,
             pad_to_max_length=pad_to_max_length,
             return_tensors=return_tensors,
-            **kwargs
+            **kwargs,
         )
 
         return encoded_inputs["input_ids"]
@@ -586,7 +586,7 @@ class XLMTokenizer(PreTrainedTokenizer):
             cls_token=cls_token,
             mask_token=mask_token,
             additional_special_tokens=additional_special_tokens,
-            **kwargs
+            **kwargs,
         )
         self.max_len_single_sentence = self.max_len - 2  # take into account special tokens
@@ -83,7 +83,7 @@ class XLMRobertaTokenizer(PreTrainedTokenizer):
             cls_token=cls_token,
             pad_token=pad_token,
             mask_token=mask_token,
-            **kwargs
+            **kwargs,
         )
         self.max_len_single_sentence = self.max_len - 2  # take into account special tokens
         self.max_len_sentences_pair = self.max_len - 4  # take into account special tokens
@@ -86,7 +86,7 @@ class XLNetTokenizer(PreTrainedTokenizer):
             cls_token=cls_token,
             mask_token=mask_token,
             additional_special_tokens=additional_special_tokens,
-            **kwargs
+            **kwargs,
         )
         self.max_len_single_sentence = self.max_len - 2  # take into account special tokens
@@ -115,7 +115,7 @@ class XxxTokenizer(PreTrainedTokenizer):
             pad_token=pad_token,
             cls_token=cls_token,
             mask_token=mask_token,
-            **kwargs
+            **kwargs,
         )
         self.max_len_single_sentence = self.max_len - 2  # take into account special tokens
         self.max_len_sentences_pair = self.max_len - 3  # take into account special tokens
@@ -84,7 +84,7 @@ class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         tokenizer = self.get_tokenizer()
         rust_tokenizer = self.get_rust_tokenizer(add_special_tokens=False)
 
-        sequence = u"UNwant\u00E9d,running"
+        sequence = "UNwant\u00E9d,running"
 
         tokens = tokenizer.tokenize(sequence)
         rust_tokens = rust_tokenizer.tokenize(sequence)
@@ -96,7 +96,7 @@ class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         tokenizer = self.get_tokenizer()
         rust_tokenizer = self.get_rust_tokenizer(add_special_tokens=False, add_prefix_space=True)
 
-        sequence = u"lower newer"
+        sequence = "lower newer"
 
         # Testing tokenization
         tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
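Side note on the last two hunks (an inference, not stated in the commit): once black targets only Python 3 versions, the u string prefix is redundant and black's string normalization can drop it, hence the change from u"..." to "..." in the tests. A minimal check that the literals are identical at runtime:

# In Python 3 the u prefix is a no-op; both spellings produce the same str.
assert u"UNwant\u00E9d,running" == "UNwant\u00E9d,running"
assert u"lower newer" == "lower newer"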