chenpangpang / transformers · Commits
"tests/utils/test_doc_samples.py" did not exist on "84caa23301f2f25dfca0737198ff26c3d711ed63"
Commit 57420b10 (Unverified), authored Oct 07, 2021 by Alex Hedges; committed by GitHub on Oct 07, 2021
Add missing whitespace to multiline strings (#13916)
Parent: 319beb64
Showing 20 of the commit's 56 changed files, with 39 additions and 39 deletions (+39 −39); the remaining files are on subsequent pages of the diff.
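For context (an illustrative sketch in the style of the messages below, not code from this commit): Python joins adjacent string literals at compile time with no separator, so a multiline message whose fragments lack trailing spaces silently runs words together. That is the bug this commit fixes across the repository's error and log messages:

    # Adjacent string literals are concatenated with NO separator.
    broken = (
        "Configuration for convolutional module is incorrect."
        "It is required that `len(config.attention_layers)` == `config.num_layers`"
    )
    fixed = (
        "Configuration for convolutional module is incorrect. "
        "It is required that `len(config.attention_layers)` == `config.num_layers`"
    )
    print(broken)  # ...incorrect.It is required...  <- words run together
    print(fixed)   # ...incorrect. It is required...

Each hunk below makes the same one-character change: a trailing space (or, in one case, "\n\n") inside the fragment that precedes a continuation line.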
src/transformers/models/canine/modeling_canine.py (+1 −1)
src/transformers/models/clip/feature_extraction_clip.py (+1 −1)
src/transformers/models/cpm/tokenization_cpm.py (+1 −1)
src/transformers/models/cpm/tokenization_cpm_fast.py (+1 −1)
src/transformers/models/deit/feature_extraction_deit.py (+1 −1)
src/transformers/models/detr/feature_extraction_detr.py (+1 −1)
src/transformers/models/electra/modeling_electra.py (+1 −1)
src/transformers/models/electra/modeling_tf_electra.py (+1 −1)
src/transformers/models/encoder_decoder/modeling_encoder_decoder.py (+1 −1)
src/transformers/models/gpt_neo/configuration_gpt_neo.py (+5 −5)
src/transformers/models/hubert/configuration_hubert.py (+3 −3)
src/transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py (+1 −1)
src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py (+6 −6)
src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py (+1 −1)
src/transformers/models/luke/tokenization_luke.py (+4 −4)
src/transformers/models/rag/retrieval_rag.py (+1 −1)
src/transformers/models/roformer/tokenization_roformer.py (+1 −1)
src/transformers/models/roformer/tokenization_utils.py (+1 −1)
src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py (+4 −4)
src/transformers/models/speech_to_text/configuration_speech_to_text.py (+3 −3)
src/transformers/models/canine/modeling_canine.py
@@ -549,7 +549,7 @@ class CanineAttention(nn.Module):
         self.local = local
         if attend_from_chunk_width < attend_from_chunk_stride:
             raise ValueError(
-                "`attend_from_chunk_width` < `attend_from_chunk_stride`"
+                "`attend_from_chunk_width` < `attend_from_chunk_stride` "
                 "would cause sequence positions to get skipped."
             )
         if attend_to_chunk_width < attend_to_chunk_stride:
src/transformers/models/clip/feature_extraction_clip.py
@@ -129,7 +129,7 @@ class CLIPFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
         if not valid_images:
             raise ValueError(
-                "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example),"
+                "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), "
                 "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)."
             )
src/transformers/models/cpm/tokenization_cpm.py
@@ -92,7 +92,7 @@ class CpmTokenizer(XLNetTokenizer):
             import jieba
         except ModuleNotFoundError as error:
             raise error.__class__(
-                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast."
+                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                 "See https://pypi.org/project/jieba/ for installation."
             )
         self.jieba = jieba
src/transformers/models/cpm/tokenization_cpm_fast.py
@@ -95,7 +95,7 @@ class CpmTokenizerFast(XLNetTokenizerFast):
             import jieba
         except ModuleNotFoundError as error:
             raise error.__class__(
-                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast."
+                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                 "See https://pypi.org/project/jieba/ for installation."
             )
         self.jieba = jieba
src/transformers/models/deit/feature_extraction_deit.py
@@ -132,7 +132,7 @@ class DeiTFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
         if not valid_images:
             raise ValueError(
-                "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example),"
+                "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), "
                 "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)."
             )
src/transformers/models/detr/feature_extraction_detr.py
@@ -483,7 +483,7 @@ class DetrFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
         if not valid_images:
             raise ValueError(
-                "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example),"
+                "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), "
                 "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)."
             )
src/transformers/models/electra/modeling_electra.py
@@ -799,7 +799,7 @@ ELECTRA_INPUTS_DOCSTRING = r"""
 @add_start_docstrings(
     "The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to "
     "the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the "
-    "hidden size and embedding size are different."
+    "hidden size and embedding size are different. "
     ""
     "Both the generator and discriminator checkpoints may be loaded into this model.",
     ELECTRA_START_DOCSTRING,
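A subtlety in this hunk (and in the identical TF hunk that follows): the bare "" literal after the changed line is inert, since adjacent literals are joined directly, so the added trailing space is the only thing separating the two sentences. A quick illustrative check with shortened strings, not repository code:

    a = "size are different." "" "Both the generator"
    b = "size are different. " "" "Both the generator"
    assert a == "size are different.Both the generator"
    assert b == "size are different. Both the generator"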
src/transformers/models/electra/modeling_tf_electra.py
@@ -719,7 +719,7 @@ ELECTRA_INPUTS_DOCSTRING = r"""
 @add_start_docstrings(
     "The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to "
     "the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the "
-    "hidden size and embedding size are different."
+    "hidden size and embedding size are different. "
     ""
     "Both the generator and discriminator checkpoints may be loaded into this model.",
     ELECTRA_START_DOCSTRING,
src/transformers/models/encoder_decoder/modeling_encoder_decoder.py
@@ -482,7 +482,7 @@ class EncoderDecoderModel(PreTrainedModel):
     def resize_token_embeddings(self, *args, **kwargs):
         raise NotImplementedError(
-            "Resizing the embedding layers via the EncoderDecoderModel directly is not supported."
+            "Resizing the embedding layers via the EncoderDecoderModel directly is not supported. "
             "Please use the respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or model.decoder.resize_token_embeddings(...))"
         )
src/transformers/models/gpt_neo/configuration_gpt_neo.py
@@ -151,11 +151,11 @@ class GPTNeoConfig(PretrainedConfig):
         if len(self.attention_layers) != self.num_layers:
             raise ValueError(
-                "Configuration for convolutional module is incorrect."
-                "It is required that `len(config.attention_layers)` == `config.num_layers`"
-                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`,"
-                f"`config.num_layers = {self.num_layers}`."
-                "`config.attention_layers` is prepared using `config.attention_types`."
+                "Configuration for convolutional module is incorrect. "
+                "It is required that `len(config.attention_layers)` == `config.num_layers` "
+                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
+                f"`config.num_layers = {self.num_layers}`. "
+                "`config.attention_layers` is prepared using `config.attention_types`. "
                 "Please verify the value of `config.attention_types` argument."
             )
src/transformers/models/hubert/configuration_hubert.py
@@ -211,9 +211,9 @@ class HubertConfig(PretrainedConfig):
             or (len(self.conv_dim) != self.num_feat_extract_layers)
         ):
             raise ValueError(
-                "Configuration for convolutional layers is incorrect."
-                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
-                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
+                "Configuration for convolutional layers is incorrect. "
+                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
+                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                 f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
             )
src/transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py
@@ -181,7 +181,7 @@ class LayoutLMv2FeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionM
         if not valid_images:
             raise ValueError(
-                "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example),"
+                "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), "
                 "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples), "
                 f"but is of type {type(images)}."
             )
src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py
@@ -431,7 +431,7 @@ class LayoutLMv2Tokenizer(PreTrainedTokenizer):
                 raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
             if not isinstance(text_pair, (list, tuple)):
                 raise ValueError(
-                    "words must of type `List[str]` (single pretokenized example),"
+                    "words must of type `List[str]` (single pretokenized example), "
                     "or `List[List[str]]` (batch of pretokenized examples)."
                 )
         else:
@@ -599,7 +599,7 @@ class LayoutLMv2Tokenizer(PreTrainedTokenizer):
         if return_offsets_mapping:
             raise NotImplementedError(
-                "return_offset_mapping is not available when using Python tokenizers."
+                "return_offset_mapping is not available when using Python tokenizers. "
                 "To use this feature, change your tokenizer to one deriving from "
                 "transformers.PreTrainedTokenizerFast."
             )
@@ -838,9 +838,9 @@ class LayoutLMv2Tokenizer(PreTrainedTokenizer):
     ) -> BatchEncoding:
         if return_offsets_mapping:
             raise NotImplementedError(
-                "return_offset_mapping is not available when using Python tokenizers."
+                "return_offset_mapping is not available when using Python tokenizers. "
                 "To use this feature, change your tokenizer to one deriving from "
-                "transformers.PreTrainedTokenizerFast."
+                "transformers.PreTrainedTokenizerFast. "
                 "More information on available tokenizers at "
                 "https://github.com/huggingface/transformers/pull/2674"
             )
@@ -1158,7 +1158,7 @@ class LayoutLMv2Tokenizer(PreTrainedTokenizer):
                     labels = labels[:-num_tokens_to_remove]
                 else:
                     logger.error(
-                        f"We need to remove {num_tokens_to_remove} to truncate the input"
+                        f"We need to remove {num_tokens_to_remove} to truncate the input "
                         f"but the first sequence has a length {len(ids)}. "
                         f"Please select another truncation strategy than {truncation_strategy}, "
                         f"for instance 'longest_first' or 'only_second'."
@@ -1172,7 +1172,7 @@ class LayoutLMv2Tokenizer(PreTrainedTokenizer):
                     pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove]
                 else:
                     logger.error(
-                        f"We need to remove {num_tokens_to_remove} to truncate the input"
+                        f"We need to remove {num_tokens_to_remove} to truncate the input "
                        f"but the second sequence has a length {len(pair_ids)}. "
                         f"Please select another truncation strategy than {truncation_strategy}, "
                         f"for instance 'longest_first' or 'only_first'."
src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py
@@ -235,7 +235,7 @@ class LayoutLMv2TokenizerFast(PreTrainedTokenizerFast):
                 raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
             if not isinstance(text_pair, (list, tuple)):
                 raise ValueError(
-                    "words must of type `List[str]` (single pretokenized example),"
+                    "words must of type `List[str]` (single pretokenized example), "
                     "or `List[List[str]]` (batch of pretokenized examples)."
                 )
         else:
src/transformers/models/luke/tokenization_luke.py
@@ -519,9 +519,9 @@ class LukeTokenizer(RobertaTokenizer):
         if return_offsets_mapping:
             raise NotImplementedError(
-                "return_offset_mapping is not available when using Python tokenizers."
+                "return_offset_mapping is not available when using Python tokenizers. "
                 "To use this feature, change your tokenizer to one deriving from "
-                "transformers.PreTrainedTokenizerFast."
+                "transformers.PreTrainedTokenizerFast. "
                 "More information on available tokenizers at "
                 "https://github.com/huggingface/transformers/pull/2674"
             )
@@ -683,7 +683,7 @@ class LukeTokenizer(RobertaTokenizer):
     ) -> BatchEncoding:
         if return_offsets_mapping:
             raise NotImplementedError(
-                "return_offset_mapping is not available when using Python tokenizers."
+                "return_offset_mapping is not available when using Python tokenizers. "
                 "To use this feature, change your tokenizer to one deriving from "
                 "transformers.PreTrainedTokenizerFast."
             )
@@ -1308,7 +1308,7 @@ class LukeTokenizer(RobertaTokenizer):
         # The model's main input name, usually `input_ids`, has be passed for padding
         if self.model_input_names[0] not in encoded_inputs:
             raise ValueError(
-                "You should supply an encoding or a list of encodings to this method"
+                "You should supply an encoding or a list of encodings to this method "
                 f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}"
             )
src/transformers/models/rag/retrieval_rag.py
@@ -122,7 +122,7 @@ class LegacyIndex(Index):
         except EnvironmentError:
             msg = (
                 f"Can't load '{archive_file}'. Make sure that:\n\n"
-                f"- '{index_path}' is a correct remote path to a directory containing a file named {filename}"
+                f"- '{index_path}' is a correct remote path to a directory containing a file named {filename}\n\n"
                 f"- or '{index_path}' is the correct path to a directory containing a file named {filename}.\n\n"
             )
             raise EnvironmentError(msg)
src/transformers/models/roformer/tokenization_roformer.py
@@ -161,7 +161,7 @@ class RoFormerTokenizer(PreTrainedTokenizer):
             import rjieba
         except ImportError:
             raise ImportError(
-                "You need to install rjieba to use RoFormerTokenizer."
+                "You need to install rjieba to use RoFormerTokenizer. "
                 "See https://pypi.org/project/rjieba/ for installation."
             )
         self.jieba = rjieba
src/transformers/models/roformer/tokenization_utils.py
@@ -32,7 +32,7 @@ class JiebaPreTokenizer:
             import rjieba
         except ImportError:
             raise ImportError(
-                "You need to install rjieba to use RoFormerTokenizer."
+                "You need to install rjieba to use RoFormerTokenizer. "
                 "See https://pypi.org/project/rjieba/ for installation."
             )
         self.jieba = rjieba
src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py
@@ -352,7 +352,7 @@ class SpeechEncoderDecoderModel(PreTrainedModel):
             decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path)
             if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
                 logger.info(
-                    f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model."
+                    f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. "
                     "Cross attention layers are added to {decoder_pretrained_model_name_or_path} "
                     "and randomly initialized if {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
                 )
@@ -363,9 +363,9 @@ class SpeechEncoderDecoderModel(PreTrainedModel):
             if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
                 logger.warning(
-                    f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder."
+                    f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
                     f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
-                    "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config`"
+                    "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
                     "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a `decoder_config` to `.from_encoder_decoder_pretrained(...)`"
                 )
@@ -513,7 +513,7 @@ class SpeechEncoderDecoderModel(PreTrainedModel):
     def resize_token_embeddings(self, *args, **kwargs):
         raise NotImplementedError(
-            "Resizing the embedding layers via the SpeechEncoderDecoderModel directly is not supported."
+            "Resizing the embedding layers via the SpeechEncoderDecoderModel directly is not supported. "
             "Please use the respective methods of the wrapped decoder object (model.decoder.resize_token_embeddings(...))"
         )
src/transformers/models/speech_to_text/configuration_speech_to_text.py
@@ -175,9 +175,9 @@ class Speech2TextConfig(PretrainedConfig):
         if len(self.conv_kernel_sizes) != self.num_conv_layers:
             raise ValueError(
-                "Configuration for convolutional module is incorrect."
-                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers`"
-                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`,"
+                "Configuration for convolutional module is incorrect. "
+                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
+                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                 f"`config.num_conv_layers = {self.num_conv_layers}`."
             )
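Seams like these are easy to miss in review. Below is a rough standard-library checker, a sketch rather than anything shipped with transformers (the helper names find_suspect_concats and _seams are mine): it walks a file's tokens and flags adjacent string literals whose joined edges both lack whitespace. It ignores escape sequences such as \n, and on Python 3.12+ f-strings arrive as FSTRING_* tokens it does not inspect, so expect both false positives (deliberately split tokens such as URLs) and misses:

    import io
    import sys
    import tokenize

    SKIP = (tokenize.NL, tokenize.COMMENT)  # may sit between fragments inside parentheses

    def find_suspect_concats(source):
        """Yield (lineno, fragment) where adjacent string literals would be
        joined with no whitespace at the seam."""
        run = []  # consecutive STRING tokens forming one implicit concatenation
        for tok in tokenize.generate_tokens(io.StringIO(source).readline):
            if tok.type == tokenize.STRING:
                run.append(tok)
            elif tok.type not in SKIP:
                yield from _seams(run)
                run = []
        yield from _seams(run)

    def _seams(run):
        for left, right in zip(run, run[1:]):
            # Crude literal inspection: last character before the closing quote,
            # first character after the opening quote and any f/r/b/u prefix.
            l = left.string.rstrip('"\'')
            r = right.string.lstrip('fFrRbBuU').lstrip('"\'')
            if l and r and not l[-1].isspace() and not r[0].isspace():
                yield right.start[0], left.string

    if __name__ == "__main__":
        for path in sys.argv[1:]:
            with open(path, encoding="utf-8") as fh:
                source = fh.read()
            for lineno, fragment in find_suspect_concats(source):
                print(f"{path}:{lineno}: possible missing space after {fragment!r}")

Run it as, for example, python find_concats.py src/transformers/models/gpt_neo/configuration_gpt_neo.py and treat each hit as a candidate for manual review, not a confirmed bug.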