Unverified Commit 57420b10 authored by Alex Hedges's avatar Alex Hedges Committed by GitHub
Browse files

Add missing whitespace to multiline strings (#13916)

parent 319beb64
...@@ -189,12 +189,12 @@ class Speech2TextFeatureExtractor(SequenceFeatureExtractor): ...@@ -189,12 +189,12 @@ class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
if sampling_rate is not None: if sampling_rate is not None:
if sampling_rate != self.sampling_rate: if sampling_rate != self.sampling_rate:
raise ValueError( raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}." f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}. "
f"Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}." f"Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}."
) )
else: else:
logger.warning( logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function." "It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." "Failing to do so can result in silent errors that might be hard to debug."
) )
......
...@@ -300,7 +300,7 @@ class SqueezeBertEncoder(nn.Module): ...@@ -300,7 +300,7 @@ class SqueezeBertEncoder(nn.Module):
super().__init__() super().__init__()
assert config.embedding_size == config.hidden_size, ( assert config.embedding_size == config.hidden_size, (
"If you want embedding_size != intermediate hidden_size," "If you want embedding_size != intermediate hidden_size, "
"please insert a Conv1d layer to adjust the number of channels " "please insert a Conv1d layer to adjust the number of channels "
"before the first SqueezeBertModule." "before the first SqueezeBertModule."
) )
......
...@@ -54,8 +54,8 @@ if is_scatter_available(): ...@@ -54,8 +54,8 @@ if is_scatter_available():
from torch_scatter import scatter from torch_scatter import scatter
except OSError: except OSError:
logger.error( logger.error(
"TAPAS models are not usable since `torch_scatter` can't be loaded." "TAPAS models are not usable since `torch_scatter` can't be loaded. "
"It seems you have `torch_scatter` installed with the wrong CUDA version." "It seems you have `torch_scatter` installed with the wrong CUDA version. "
"Please try to reinstall it following the instructions here: https://github.com/rusty1s/pytorch_scatter." "Please try to reinstall it following the instructions here: https://github.com/rusty1s/pytorch_scatter."
) )
......
...@@ -712,7 +712,7 @@ class TapasTokenizer(PreTrainedTokenizer): ...@@ -712,7 +712,7 @@ class TapasTokenizer(PreTrainedTokenizer):
if return_offsets_mapping: if return_offsets_mapping:
raise NotImplementedError( raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers." "return_offset_mapping is not available when using Python tokenizers. "
"To use this feature, change your tokenizer to one deriving from " "To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast." "transformers.PreTrainedTokenizerFast."
) )
...@@ -981,7 +981,7 @@ class TapasTokenizer(PreTrainedTokenizer): ...@@ -981,7 +981,7 @@ class TapasTokenizer(PreTrainedTokenizer):
if return_offsets_mapping: if return_offsets_mapping:
raise NotImplementedError( raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers." "return_offset_mapping is not available when using Python tokenizers. "
"To use this feature, change your tokenizer to one deriving from " "To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast." "transformers.PreTrainedTokenizerFast."
) )
...@@ -1159,7 +1159,7 @@ class TapasTokenizer(PreTrainedTokenizer): ...@@ -1159,7 +1159,7 @@ class TapasTokenizer(PreTrainedTokenizer):
if max_length is not None and len(input_ids) > max_length: if max_length is not None and len(input_ids) > max_length:
raise ValueError( raise ValueError(
"Could not encode the query and table header given the maximum length. Encoding the query and table" "Could not encode the query and table header given the maximum length. Encoding the query and table "
f"header results in a length of {len(input_ids)} which is higher than the max_length of {max_length}" f"header results in a length of {len(input_ids)} which is higher than the max_length of {max_length}"
) )
......
...@@ -236,7 +236,7 @@ class TransfoXLTokenizer(PreTrainedTokenizer): ...@@ -236,7 +236,7 @@ class TransfoXLTokenizer(PreTrainedTokenizer):
except Exception as e: except Exception as e:
raise ValueError( raise ValueError(
f"Unable to parse file {pretrained_vocab_file}. Unknown format. " f"Unable to parse file {pretrained_vocab_file}. Unknown format. "
"If you tried to load a model saved through TransfoXLTokenizerFast," "If you tried to load a model saved through TransfoXLTokenizerFast, "
"please note they are not compatible." "please note they are not compatible."
) from e ) from e
......
...@@ -174,7 +174,7 @@ class VisualBertEmbeddings(nn.Module): ...@@ -174,7 +174,7 @@ class VisualBertEmbeddings(nn.Module):
if visual_position_embeddings.size(1) != visual_embeds.size(1): if visual_position_embeddings.size(1) != visual_embeds.size(1):
if visual_position_embeddings.size(1) < visual_embeds.size(1): if visual_position_embeddings.size(1) < visual_embeds.size(1):
raise ValueError( raise ValueError(
f"Visual position embeddings length: {visual_position_embeddings.size(1)}" f"Visual position embeddings length: {visual_position_embeddings.size(1)} "
f"should be the same as `visual_embeds` length: {visual_embeds.size(1)}" f"should be the same as `visual_embeds` length: {visual_embeds.size(1)}"
) )
visual_position_embeddings = visual_position_embeddings[:, : visual_embeds.size(1), :] visual_position_embeddings = visual_position_embeddings[:, : visual_embeds.size(1), :]
...@@ -973,7 +973,7 @@ class VisualBertForPreTraining(VisualBertPreTrainedModel): ...@@ -973,7 +973,7 @@ class VisualBertForPreTraining(VisualBertPreTrainedModel):
total_size = attention_mask.size(-1) + visual_attention_mask.size(-1) total_size = attention_mask.size(-1) + visual_attention_mask.size(-1)
if labels.size(-1) != total_size: if labels.size(-1) != total_size:
raise ValueError( raise ValueError(
f"The labels provided should have same sequence length as total attention mask." f"The labels provided should have same sequence length as total attention mask. "
f"Found labels with sequence length {labels.size(-1)}, expected {total_size}." f"Found labels with sequence length {labels.size(-1)}, expected {total_size}."
) )
...@@ -986,7 +986,7 @@ class VisualBertForPreTraining(VisualBertPreTrainedModel): ...@@ -986,7 +986,7 @@ class VisualBertForPreTraining(VisualBertPreTrainedModel):
total_size = attention_mask.size(-1) + visual_attention_mask.size(-1) total_size = attention_mask.size(-1) + visual_attention_mask.size(-1)
if labels.size(-1) != total_size: if labels.size(-1) != total_size:
raise ValueError( raise ValueError(
f"The labels provided should have same sequence length as total attention mask." f"The labels provided should have same sequence length as total attention mask. "
f"Found labels with sequence length {labels.size(-1)}, expected {total_size}." f"Found labels with sequence length {labels.size(-1)}, expected {total_size}."
) )
......
...@@ -122,7 +122,7 @@ class ViTFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin): ...@@ -122,7 +122,7 @@ class ViTFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
if not valid_images: if not valid_images:
raise ValueError( raise ValueError(
"Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example)," "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), "
"`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)." "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)."
) )
......
...@@ -237,9 +237,9 @@ class Wav2Vec2Config(PretrainedConfig): ...@@ -237,9 +237,9 @@ class Wav2Vec2Config(PretrainedConfig):
or (len(self.conv_dim) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers)
): ):
raise ValueError( raise ValueError(
"Configuration for convolutional layers is incorrect." "Configuration for convolutional layers is incorrect. "
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)" f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`." f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
) )
......
...@@ -170,12 +170,12 @@ class Wav2Vec2FeatureExtractor(SequenceFeatureExtractor): ...@@ -170,12 +170,12 @@ class Wav2Vec2FeatureExtractor(SequenceFeatureExtractor):
if sampling_rate is not None: if sampling_rate is not None:
if sampling_rate != self.sampling_rate: if sampling_rate != self.sampling_rate:
raise ValueError( raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}." f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}. "
f"Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}." f"Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}."
) )
else: else:
logger.warning( logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function." "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." "Failing to do so can result in silent errors that might be hard to debug."
) )
......
...@@ -1421,7 +1421,7 @@ class Wav2Vec2ForCTC(Wav2Vec2PreTrainedModel): ...@@ -1421,7 +1421,7 @@ class Wav2Vec2ForCTC(Wav2Vec2PreTrainedModel):
raise ValueError( raise ValueError(
f"You are trying to instantiate {self.__class__} with a configuration that " f"You are trying to instantiate {self.__class__} with a configuration that "
"does not define the vocabulary size of the language model head. Please " "does not define the vocabulary size of the language model head. Please "
"instantiate the model as follows: `Wav2Vec2ForCTC.from_pretrained(..., vocab_size=vocab_size)`." "instantiate the model as follows: `Wav2Vec2ForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
"or define `vocab_size` of your model's configuration." "or define `vocab_size` of your model's configuration."
) )
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)
......
...@@ -152,7 +152,7 @@ class XLMProphetNetTokenizer(PreTrainedTokenizer): ...@@ -152,7 +152,7 @@ class XLMProphetNetTokenizer(PreTrainedTokenizer):
import sentencepiece as spm import sentencepiece as spm
except ImportError: except ImportError:
logger.warning( logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece "
"pip install sentencepiece" "pip install sentencepiece"
) )
raise raise
...@@ -191,7 +191,7 @@ class XLMProphetNetTokenizer(PreTrainedTokenizer): ...@@ -191,7 +191,7 @@ class XLMProphetNetTokenizer(PreTrainedTokenizer):
import sentencepiece as spm import sentencepiece as spm
except ImportError: except ImportError:
logger.warning( logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece "
"pip install sentencepiece" "pip install sentencepiece"
) )
raise raise
......
...@@ -109,7 +109,7 @@ class FeaturesManager: ...@@ -109,7 +109,7 @@ class FeaturesManager:
task = FeaturesManager.feature_to_task(feature) task = FeaturesManager.feature_to_task(feature)
if task not in FeaturesManager._TASKS_TO_AUTOMODELS: if task not in FeaturesManager._TASKS_TO_AUTOMODELS:
raise KeyError( raise KeyError(
f"Unknown task: {feature}." f"Unknown task: {feature}. "
f"Possible values are {list(FeaturesManager._TASKS_TO_AUTOMODELS.values())}" f"Possible values are {list(FeaturesManager._TASKS_TO_AUTOMODELS.values())}"
) )
......
...@@ -596,9 +596,9 @@ class PreTrainedTokenizer(PreTrainedTokenizerBase): ...@@ -596,9 +596,9 @@ class PreTrainedTokenizer(PreTrainedTokenizerBase):
if return_offsets_mapping: if return_offsets_mapping:
raise NotImplementedError( raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers." "return_offset_mapping is not available when using Python tokenizers. "
"To use this feature, change your tokenizer to one deriving from " "To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast." "transformers.PreTrainedTokenizerFast. "
"More information on available tokenizers at " "More information on available tokenizers at "
"https://github.com/huggingface/transformers/pull/2674" "https://github.com/huggingface/transformers/pull/2674"
) )
...@@ -673,7 +673,7 @@ class PreTrainedTokenizer(PreTrainedTokenizerBase): ...@@ -673,7 +673,7 @@ class PreTrainedTokenizer(PreTrainedTokenizerBase):
if return_offsets_mapping: if return_offsets_mapping:
raise NotImplementedError( raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers." "return_offset_mapping is not available when using Python tokenizers. "
"To use this feature, change your tokenizer to one deriving from " "To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast." "transformers.PreTrainedTokenizerFast."
) )
......
...@@ -3059,7 +3059,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin): ...@@ -3059,7 +3059,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
pair_ids = pair_ids[:-num_tokens_to_remove] pair_ids = pair_ids[:-num_tokens_to_remove]
else: else:
logger.error( logger.error(
f"We need to remove {num_tokens_to_remove} to truncate the input" f"We need to remove {num_tokens_to_remove} to truncate the input "
f"but the second sequence has a length {len(pair_ids)}. " f"but the second sequence has a length {len(pair_ids)}. "
f"Please select another truncation strategy than {truncation_strategy}, " f"Please select another truncation strategy than {truncation_strategy}, "
f"for instance 'longest_first' or 'only_first'." f"for instance 'longest_first' or 'only_first'."
...@@ -3250,7 +3250,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin): ...@@ -3250,7 +3250,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
""" """
assert already_has_special_tokens and token_ids_1 is None, ( assert already_has_special_tokens and token_ids_1 is None, (
"You cannot use ``already_has_special_tokens=False`` with this tokenizer. " "You cannot use ``already_has_special_tokens=False`` with this tokenizer. "
"Please use a slow (full python) tokenizer to activate this argument." "Please use a slow (full python) tokenizer to activate this argument. "
"Or set `return_special_tokens_mask=True` when calling the encoding method " "Or set `return_special_tokens_mask=True` when calling the encoding method "
"to get the special tokens mask in any tokenizer. " "to get the special tokens mask in any tokenizer. "
) )
......
...@@ -385,7 +385,7 @@ class Trainer: ...@@ -385,7 +385,7 @@ class Trainer:
self.optimizer, self.lr_scheduler = optimizers self.optimizer, self.lr_scheduler = optimizers
if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None): if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
raise RuntimeError( raise RuntimeError(
"Passing a `model_init` is incompatible with providing the `optimizers` argument." "Passing a `model_init` is incompatible with providing the `optimizers` argument. "
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method." "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
) )
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to) default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
...@@ -1737,8 +1737,8 @@ class Trainer: ...@@ -1737,8 +1737,8 @@ class Trainer:
if backend is None: if backend is None:
raise RuntimeError( raise RuntimeError(
"At least one of optuna or ray should be installed. " "At least one of optuna or ray should be installed. "
"To install optuna run `pip install optuna`." "To install optuna run `pip install optuna`. "
"To install ray run `pip install ray[tune]`." "To install ray run `pip install ray[tune]`. "
"To install sigopt run `pip install sigopt`." "To install sigopt run `pip install sigopt`."
) )
backend = HPSearchBackend(backend) backend = HPSearchBackend(backend)
......
...@@ -385,7 +385,7 @@ class TrainingArguments: ...@@ -385,7 +385,7 @@ class TrainingArguments:
default=False, default=False,
metadata={ metadata={
"help": ( "help": (
"Overwrite the content of the output directory." "Overwrite the content of the output directory. "
"Use this to continue training if output_dir points to a checkpoint directory." "Use this to continue training if output_dir points to a checkpoint directory."
) )
}, },
...@@ -420,7 +420,7 @@ class TrainingArguments: ...@@ -420,7 +420,7 @@ class TrainingArguments:
per_gpu_eval_batch_size: Optional[int] = field( per_gpu_eval_batch_size: Optional[int] = field(
default=None, default=None,
metadata={ metadata={
"help": "Deprecated, the use of `--per_device_eval_batch_size` is preferred." "help": "Deprecated, the use of `--per_device_eval_batch_size` is preferred. "
"Batch size per GPU/TPU core/CPU for evaluation." "Batch size per GPU/TPU core/CPU for evaluation."
}, },
) )
...@@ -492,7 +492,7 @@ class TrainingArguments: ...@@ -492,7 +492,7 @@ class TrainingArguments:
default=None, default=None,
metadata={ metadata={
"help": ( "help": (
"Limit the total amount of checkpoints." "Limit the total amount of checkpoints. "
"Deletes the older checkpoints in the output_dir. Default is unlimited checkpoints" "Deletes the older checkpoints in the output_dir. Default is unlimited checkpoints"
) )
}, },
...@@ -514,7 +514,7 @@ class TrainingArguments: ...@@ -514,7 +514,7 @@ class TrainingArguments:
default="O1", default="O1",
metadata={ metadata={
"help": ( "help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
"See details at https://nvidia.github.io/apex/amp.html" "See details at https://nvidia.github.io/apex/amp.html"
) )
}, },
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment