"vscode:/vscode.git/clone" did not exist on "5d0bf59b4d5be72c8c956e0240a67d7c3100fdaf"
Unverified Commit 57420b10 authored by Alex Hedges, committed by GitHub

Add missing whitespace to multiline strings (#13916)

parent 319beb64
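
For context on the fix: Python implicitly concatenates adjacent string literals with nothing in between, so when a message is split across lines, the last word of one piece and the first word of the next run together unless one of them carries the joining space. A minimal illustration (my own sketch, reusing the CanineAttention message from the first hunk below):

    # Before the fix: nothing separates "stride`" and "would".
    broken = (
        "`attend_from_chunk_width` < `attend_from_chunk_stride`"
        "would cause sequence positions to get skipped."
    )
    print(broken)
    # `attend_from_chunk_width` < `attend_from_chunk_stride`would cause sequence positions to get skipped.

    # After the fix: the first piece ends with a trailing space.
    fixed = (
        "`attend_from_chunk_width` < `attend_from_chunk_stride` "
        "would cause sequence positions to get skipped."
    )
    print(fixed)
    # `attend_from_chunk_width` < `attend_from_chunk_stride` would cause sequence positions to get skipped.
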
@@ -549,7 +549,7 @@ class CanineAttention(nn.Module):
         self.local = local
         if attend_from_chunk_width < attend_from_chunk_stride:
             raise ValueError(
-                "`attend_from_chunk_width` < `attend_from_chunk_stride`"
+                "`attend_from_chunk_width` < `attend_from_chunk_stride` "
                 "would cause sequence positions to get skipped."
             )
         if attend_to_chunk_width < attend_to_chunk_stride:
...
@@ -129,7 +129,7 @@ class CLIPFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
         if not valid_images:
             raise ValueError(
-                "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example),"
+                "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), "
                 "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)."
             )
...
@@ -92,7 +92,7 @@ class CpmTokenizer(XLNetTokenizer):
             import jieba
         except ModuleNotFoundError as error:
             raise error.__class__(
-                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast."
+                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                 "See https://pypi.org/project/jieba/ for installation."
             )
         self.jieba = jieba
...
@@ -95,7 +95,7 @@ class CpmTokenizerFast(XLNetTokenizerFast):
             import jieba
         except ModuleNotFoundError as error:
             raise error.__class__(
-                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast."
+                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                 "See https://pypi.org/project/jieba/ for installation."
             )
         self.jieba = jieba
...
@@ -132,7 +132,7 @@ class DeiTFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
         if not valid_images:
             raise ValueError(
-                "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example),"
+                "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), "
                 "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)."
             )
...
@@ -483,7 +483,7 @@ class DetrFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
         if not valid_images:
             raise ValueError(
-                "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example),"
+                "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), "
                 "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)."
             )
...
@@ -799,7 +799,7 @@ ELECTRA_INPUTS_DOCSTRING = r"""
 @add_start_docstrings(
     "The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to "
     "the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the "
-    "hidden size and embedding size are different."
+    "hidden size and embedding size are different. "
     ""
     "Both the generator and discriminator checkpoints may be loaded into this model.",
     ELECTRA_START_DOCSTRING,
...
@@ -719,7 +719,7 @@ ELECTRA_INPUTS_DOCSTRING = r"""
 @add_start_docstrings(
     "The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to "
     "the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the "
-    "hidden size and embedding size are different."
+    "hidden size and embedding size are different. "
     ""
     "Both the generator and discriminator checkpoints may be loaded into this model.",
     ELECTRA_START_DOCSTRING,
...
@@ -482,7 +482,7 @@ class EncoderDecoderModel(PreTrainedModel):
     def resize_token_embeddings(self, *args, **kwargs):
         raise NotImplementedError(
-            "Resizing the embedding layers via the EncoderDecoderModel directly is not supported."
+            "Resizing the embedding layers via the EncoderDecoderModel directly is not supported. "
             "Please use the respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or model.decoder.resize_token_embeddings(...))"
         )
...
@@ -151,11 +151,11 @@ class GPTNeoConfig(PretrainedConfig):
         if len(self.attention_layers) != self.num_layers:
             raise ValueError(
-                "Configuration for convolutional module is incorrect."
-                "It is required that `len(config.attention_layers)` == `config.num_layers`"
-                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`,"
-                f"`config.num_layers = {self.num_layers}`."
-                "`config.attention_layers` is prepared using `config.attention_types`."
+                "Configuration for convolutional module is incorrect. "
+                "It is required that `len(config.attention_layers)` == `config.num_layers` "
+                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
+                f"`config.num_layers = {self.num_layers}`. "
+                "`config.attention_layers` is prepared using `config.attention_types`. "
                 "Please verify the value of `config.attention_types` argument."
             )
...
@@ -211,9 +211,9 @@ class HubertConfig(PretrainedConfig):
             or (len(self.conv_dim) != self.num_feat_extract_layers)
         ):
             raise ValueError(
-                "Configuration for convolutional layers is incorrect."
-                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
-                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
+                "Configuration for convolutional layers is incorrect. "
+                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
+                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                 f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
             )
...
@@ -181,7 +181,7 @@ class LayoutLMv2FeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionM
         if not valid_images:
             raise ValueError(
-                "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example),"
+                "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), "
                 "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples), "
                 f"but is of type {type(images)}."
             )
...
@@ -431,7 +431,7 @@ class LayoutLMv2Tokenizer(PreTrainedTokenizer):
                 raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
             if not isinstance(text_pair, (list, tuple)):
                 raise ValueError(
-                    "words must of type `List[str]` (single pretokenized example),"
+                    "words must of type `List[str]` (single pretokenized example), "
                     "or `List[List[str]]` (batch of pretokenized examples)."
                 )
         else:
@@ -599,7 +599,7 @@ class LayoutLMv2Tokenizer(PreTrainedTokenizer):
         if return_offsets_mapping:
             raise NotImplementedError(
-                "return_offset_mapping is not available when using Python tokenizers."
+                "return_offset_mapping is not available when using Python tokenizers. "
                 "To use this feature, change your tokenizer to one deriving from "
                 "transformers.PreTrainedTokenizerFast."
             )
@@ -838,9 +838,9 @@ class LayoutLMv2Tokenizer(PreTrainedTokenizer):
     ) -> BatchEncoding:
         if return_offsets_mapping:
             raise NotImplementedError(
-                "return_offset_mapping is not available when using Python tokenizers."
+                "return_offset_mapping is not available when using Python tokenizers. "
                 "To use this feature, change your tokenizer to one deriving from "
-                "transformers.PreTrainedTokenizerFast."
+                "transformers.PreTrainedTokenizerFast. "
                 "More information on available tokenizers at "
                 "https://github.com/huggingface/transformers/pull/2674"
             )
@@ -1158,7 +1158,7 @@ class LayoutLMv2Tokenizer(PreTrainedTokenizer):
                     labels = labels[:-num_tokens_to_remove]
                 else:
                     logger.error(
-                        f"We need to remove {num_tokens_to_remove} to truncate the input"
+                        f"We need to remove {num_tokens_to_remove} to truncate the input "
                         f"but the first sequence has a length {len(ids)}. "
                         f"Please select another truncation strategy than {truncation_strategy}, "
                         f"for instance 'longest_first' or 'only_second'."
@@ -1172,7 +1172,7 @@ class LayoutLMv2Tokenizer(PreTrainedTokenizer):
                     pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove]
                 else:
                     logger.error(
-                        f"We need to remove {num_tokens_to_remove} to truncate the input"
+                        f"We need to remove {num_tokens_to_remove} to truncate the input "
                         f"but the second sequence has a length {len(pair_ids)}. "
                         f"Please select another truncation strategy than {truncation_strategy}, "
                         f"for instance 'longest_first' or 'only_first'."
...
@@ -235,7 +235,7 @@ class LayoutLMv2TokenizerFast(PreTrainedTokenizerFast):
                 raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
             if not isinstance(text_pair, (list, tuple)):
                 raise ValueError(
-                    "words must of type `List[str]` (single pretokenized example),"
+                    "words must of type `List[str]` (single pretokenized example), "
                     "or `List[List[str]]` (batch of pretokenized examples)."
                 )
         else:
...
@@ -519,9 +519,9 @@ class LukeTokenizer(RobertaTokenizer):
         if return_offsets_mapping:
             raise NotImplementedError(
-                "return_offset_mapping is not available when using Python tokenizers."
+                "return_offset_mapping is not available when using Python tokenizers. "
                 "To use this feature, change your tokenizer to one deriving from "
-                "transformers.PreTrainedTokenizerFast."
+                "transformers.PreTrainedTokenizerFast. "
                 "More information on available tokenizers at "
                 "https://github.com/huggingface/transformers/pull/2674"
             )
@@ -683,7 +683,7 @@ class LukeTokenizer(RobertaTokenizer):
     ) -> BatchEncoding:
         if return_offsets_mapping:
             raise NotImplementedError(
-                "return_offset_mapping is not available when using Python tokenizers."
+                "return_offset_mapping is not available when using Python tokenizers. "
                 "To use this feature, change your tokenizer to one deriving from "
                 "transformers.PreTrainedTokenizerFast."
             )
@@ -1308,7 +1308,7 @@ class LukeTokenizer(RobertaTokenizer):
         # The model's main input name, usually `input_ids`, has be passed for padding
         if self.model_input_names[0] not in encoded_inputs:
             raise ValueError(
-                "You should supply an encoding or a list of encodings to this method"
+                "You should supply an encoding or a list of encodings to this method "
                 f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}"
             )
...
@@ -122,7 +122,7 @@ class LegacyIndex(Index):
         except EnvironmentError:
             msg = (
                 f"Can't load '{archive_file}'. Make sure that:\n\n"
-                f"- '{index_path}' is a correct remote path to a directory containing a file named {filename}"
+                f"- '{index_path}' is a correct remote path to a directory containing a file named {filename}\n\n"
                 f"- or '{index_path}' is the correct path to a directory containing a file named {filename}.\n\n"
             )
             raise EnvironmentError(msg)
...
@@ -161,7 +161,7 @@ class RoFormerTokenizer(PreTrainedTokenizer):
             import rjieba
         except ImportError:
             raise ImportError(
-                "You need to install rjieba to use RoFormerTokenizer."
+                "You need to install rjieba to use RoFormerTokenizer. "
                 "See https://pypi.org/project/rjieba/ for installation."
             )
         self.jieba = rjieba
...
@@ -32,7 +32,7 @@ class JiebaPreTokenizer:
             import rjieba
         except ImportError:
             raise ImportError(
-                "You need to install rjieba to use RoFormerTokenizer."
+                "You need to install rjieba to use RoFormerTokenizer. "
                 "See https://pypi.org/project/rjieba/ for installation."
             )
         self.jieba = rjieba
...
@@ -352,7 +352,7 @@ class SpeechEncoderDecoderModel(PreTrainedModel):
             decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path)
             if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
                 logger.info(
-                    f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model."
+                    f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. "
                     "Cross attention layers are added to {decoder_pretrained_model_name_or_path} "
                     "and randomly initialized if {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
                 )
@@ -363,9 +363,9 @@ class SpeechEncoderDecoderModel(PreTrainedModel):
         if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
             logger.warning(
-                f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder."
+                f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
                 f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
-                "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config`"
+                "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
                 "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a `decoder_config` to `.from_encoder_decoder_pretrained(...)`"
             )
@@ -513,7 +513,7 @@ class SpeechEncoderDecoderModel(PreTrainedModel):
     def resize_token_embeddings(self, *args, **kwargs):
         raise NotImplementedError(
-            "Resizing the embedding layers via the SpeechEncoderDecoderModel directly is not supported."
+            "Resizing the embedding layers via the SpeechEncoderDecoderModel directly is not supported. "
            "Please use the respective methods of the wrapped decoder object (model.decoder.resize_token_embeddings(...))"
         )
...
@@ -175,9 +175,9 @@ class Speech2TextConfig(PretrainedConfig):
         if len(self.conv_kernel_sizes) != self.num_conv_layers:
             raise ValueError(
-                "Configuration for convolutional module is incorrect."
-                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers`"
-                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`,"
+                "Configuration for convolutional module is incorrect. "
+                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
+                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                 f"`config.num_conv_layers = {self.num_conv_layers}`."
             )
...
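
All of the hunks above follow the same pattern, so occurrences like these can be found mechanically. Below is a minimal sketch (my own illustration, not the tool used for this commit; `find_missing_join_whitespace` is a hypothetical helper) that tokenizes Python source and flags adjacent string literals where neither piece supplies the joining whitespace. Dedicated linters (e.g. the flake8-implicit-str-concat plugin) cover the same ground.

    # Flag implicit string concatenations where the left piece does not end
    # with whitespace and the right piece does not start with it.
    import ast
    import io
    import tokenize

    def find_missing_join_whitespace(source):
        """Yield (lineno, left, right) for suspicious adjacent string literals."""
        prev = None
        for tok in tokenize.generate_tokens(io.StringIO(source).readline):
            if tok.type == tokenize.STRING:
                if prev is not None:
                    try:
                        left = ast.literal_eval(prev.string)
                        right = ast.literal_eval(tok.string)
                    except (ValueError, SyntaxError):
                        left = right = None  # f-strings etc. are skipped
                    if (isinstance(left, str) and isinstance(right, str)
                            and left and right
                            and not left[-1].isspace() and not right[0].isspace()):
                        yield prev.start[0], prev.string, tok.string
                prev = tok
            elif tok.type not in (tokenize.NL, tokenize.COMMENT):
                prev = None  # anything else breaks the implicit concatenation

    snippet = (
        'raise ValueError(\n'
        '    "Configuration for convolutional module is incorrect."\n'
        '    "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers`"\n'
        ')\n'
    )
    for lineno, left, right in find_missing_join_whitespace(snippet):
        print(f"line {lineno}: missing space between {left} and {right}")
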