Unverified Commit 57420b10 authored by Alex Hedges, committed by GitHub

Add missing whitespace to multiline strings (#13916)

parent 319beb64
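
Context for the change: Python concatenates adjacent string literals with no separator, so every multiline message below that lacked a trailing space rendered with words run together. A minimal standalone sketch of the effect, reusing one of the messages from this diff (the variable names are illustrative only, not part of the commit):

# Python joins adjacent string literals verbatim, so a missing trailing
# space runs the last word of one line into the first word of the next.
without_space = (
    "Note that TensorFlow allocates more memory than"
    "it might need to speed up computation."
)
with_space = (
    "Note that TensorFlow allocates more memory than "
    "it might need to speed up computation."
)
print(without_space)  # ...allocates more memory thanit might need...
print(with_space)     # ...allocates more memory than it might need...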
......@@ -69,7 +69,7 @@ class PyTorchBenchmarkArguments(BenchmarkArguments):
default="O1",
metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
"See details at https://nvidia.github.io/apex/amp.html"
)
},
......
......@@ -231,10 +231,10 @@ class TensorFlowBenchmark(Benchmark):
def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
logger.info(
"Note that TensorFlow allocates more memory than"
"it might need to speed up computation."
"The memory reported here corresponds to the memory"
"reported by `nvidia-smi`, which can vary depending"
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used."
)
with self.args.strategy.scope():
......
......@@ -801,7 +801,7 @@ class Benchmark(ABC):
info["cpu_ram_mb"] = bytes_to_mega_bytes(psutil.virtual_memory().total)
else:
logger.warning(
"Psutil not installed, we won't log available CPU memory."
"Psutil not installed, we won't log available CPU memory. "
"Install psutil (pip install psutil) to log available CPU memory."
)
info["cpu_ram_mb"] = "N/A"
......
......@@ -314,7 +314,7 @@ class PretrainedConfig(PushToHubMixin):
allowed_problem_types = ("regression", "single_label_classification", "multi_label_classification")
if self.problem_type is not None and self.problem_type not in allowed_problem_types:
raise ValueError(
f"The config parameter `problem_type` was not understood: received {self.problem_type}"
f"The config parameter `problem_type` was not understood: received {self.problem_type} "
"but only 'regression', 'single_label_classification' and 'multi_label_classification' are valid."
)
......
......@@ -444,7 +444,7 @@ if __name__ == "__main__":
type=str,
help="The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name"
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS",
)
parser.add_argument(
......
......@@ -905,7 +905,7 @@ class DataCollatorForWholeWordMask(DataCollatorForLanguageModeling):
"""
if not isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)):
warnings.warn(
"DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers."
"DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers. "
"Please refer to the documentation for more information."
)
......
......@@ -137,7 +137,7 @@ class SequenceFeatureExtractor(FeatureExtractionMixin):
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of :class:`~transformers.BatchFeature` or list of :class:`~transformers.BatchFeature` to this method"
"You should supply an instance of :class:`~transformers.BatchFeature` or list of :class:`~transformers.BatchFeature` to this method "
f"that includes {self.model_input_names[0]}, but you provided {list(processed_features.keys())}"
)
......
......@@ -194,9 +194,9 @@ class BeamSearchScorer(BeamScorer):
if "max_length" in kwargs:
warnings.warn(
"Passing `max_length` to BeamSearchScorer is deprecated and has no effect."
"Passing `max_length` to BeamSearchScorer is deprecated and has no effect. "
"`max_length` should be passed directly to `beam_search(...)`, `beam_sample(...)`"
",or `group_beam_search(...)`."
", or `group_beam_search(...)`."
)
@property
......
......@@ -438,7 +438,7 @@ class NoBadWordsLogitsProcessor(LogitsProcessor):
banned_mask_list.append([idx, token])
else:
logger.error(
f"An invalid bad word ID is defined: {token}. This ID is not contained in the"
f"An invalid bad word ID is defined: {token}. This ID is not contained in the "
f"vocabulary, and is therefore ignored."
)
if not banned_mask_list and self.static_bad_words_mask is None:
......
......@@ -533,7 +533,7 @@ class TFGenerationMixin:
# We cannot generate if the model does not have a LM head
if self.get_output_embeddings() is None:
raise AttributeError(
"You tried to generate sequences with a model that does not have a LM Head."
"You tried to generate sequences with a model that does not have a LM Head. "
"Please use another model class (e.g. `TFOpenAIGPTLMHeadModel`, `TFXLNetLMHeadModel`, `TFGPT2LMHeadModel`, `TFCTRLLMHeadModel`, `TFT5ForConditionalGeneration`, `TFTransfoXLLMHeadModel`)"
)
......
......@@ -935,7 +935,7 @@ class GenerationMixin:
if input_ids.shape[-1] >= max_length:
input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
logger.warning(
f"Input length of {input_ids_string} is {input_ids.shape[-1]}, but ``max_length`` is set to {max_length}."
f"Input length of {input_ids_string} is {input_ids.shape[-1]}, but ``max_length`` is set to {max_length}. "
"This can lead to unexpected behavior. You should consider increasing ``config.max_length`` or ``max_length``."
)
......
......@@ -84,8 +84,8 @@ class HfArgumentParser(ArgumentParser):
# it is provided as a third-party extension mechanism.
if isinstance(field.type, str):
raise ImportError(
"This implementation is not compatible with Postponed Evaluation of Annotations (PEP 563),"
"which can be opted in from Python 3.7 with `from __future__ import annotations`."
"This implementation is not compatible with Postponed Evaluation of Annotations (PEP 563), "
"which can be opted in from Python 3.7 with `from __future__ import annotations`. "
"We will add compatibility when Python 3.9 is released."
)
typestring = str(field.type)
......
......@@ -230,7 +230,7 @@ def load_flax_weights_in_pytorch_model(pt_model, flax_state):
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected"
f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
)
else:
......
......@@ -304,7 +304,7 @@ def booleans_processing(config, **kwargs):
or ("use_cache" in kwargs and kwargs["use_cache"] not in (None, config.use_cache))
):
tf_logger.warning(
"The parameters `output_attentions`, `output_hidden_states` and `use_cache` cannot be updated when calling a model."
"The parameters `output_attentions`, `output_hidden_states` and `use_cache` cannot be updated when calling a model. "
"They have to be set to True/False in the config object (i.e.: `config=XConfig.from_pretrained('name', output_attentions=True)`)."
)
......
......@@ -777,7 +777,7 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMix
if not isinstance(old_embeddings, nn.Embedding):
raise TypeError(
f"Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}."
f"Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}. "
f"You should either use a different resize function or make sure that `old_embeddings` are an instance of {nn.Embedding}."
)
......@@ -848,7 +848,7 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMix
if not isinstance(old_lm_head, nn.Linear):
raise TypeError(
f"Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}."
f"Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}. "
f"You should either use a different resize function or make sure that `old_lm_head` are an instance of {nn.Linear}."
)
......@@ -1344,8 +1344,8 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMix
except (UnicodeDecodeError, ValueError):
raise OSError(
f"Unable to load weights from pytorch checkpoint file for '{pretrained_model_name_or_path}' "
f"at '{resolved_archive_file}'"
"If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "
f"at '{resolved_archive_file}'. "
"If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True."
)
# set dtype to instantiate the model under:
......
......@@ -175,7 +175,7 @@ class BartConfig(PretrainedConfig):
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
"The config can simply be saved and uploaded again to be fixed."
)
......
......@@ -132,7 +132,7 @@ class BeitFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
if not valid_images:
raise ValueError(
"Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example),"
"Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), "
"`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)."
)
......
......@@ -272,7 +272,7 @@ class MecabTokenizer:
dic_dir = unidic.DICDIR
if not os.path.isdir(dic_dir):
raise RuntimeError(
"The unidic dictionary itself is not found."
"The unidic dictionary itself is not found. "
"See https://github.com/polm/unidic-py for installation."
)
......
......@@ -2066,7 +2066,7 @@ class BigBirdModel(BigBirdPreTrainedModel):
"+ additional buffer: config.num_random_blocks * config.block_size "
f"= {max_tokens_to_attend} with config.block_size "
f"= {self.config.block_size}, config.num_random_blocks "
f"= {self.config.num_random_blocks}."
f"= {self.config.num_random_blocks}. "
"Changing attention type to 'original_full'..."
)
self.set_attention_type("original_full")
......
......@@ -1858,7 +1858,7 @@ class BigBirdPegasusEncoder(BigBirdPegasusPreTrainedModel):
"+ additional buffer: config.num_random_blocks * config.block_size "
f"= {max_tokens_to_attend} with config.block_size "
f"= {self.config.block_size}, config.num_random_blocks "
f"= {self.config.num_random_blocks}."
f"= {self.config.num_random_blocks}. "
"Changing attention type to 'original_full'..."
)
self.set_attention_type("original_full")
......