Unverified Commit 65687520 authored by code-review-doctor, committed by GitHub

Fix issue probably-meant-fstring found at https://codereview.doctor (#16913)
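All of the hunks below follow the same pattern: a string literal that contains {expression} placeholders but is missing the f prefix, so Python renders the braces literally instead of interpolating the values. A minimal sketch of the bug class, using the string from the first hunk (the variable values here are made up for illustration):

num_beams, batch_size, batch_beam_size = 4, 2, 6

# Missing `f` prefix: the placeholders are printed verbatim.
print("Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}.")
# -> Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}.

# With the `f` prefix the expressions are evaluated at runtime.
print(f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}.")
# -> Batch dimension of `input_ids` should be 8, but is 6.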

parent fea94d67
@@ -639,7 +639,7 @@ class BARTBeamSearchGenerator(BARTGenerator):
assert (
num_beams * batch_size == batch_beam_size
-), "Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
+), f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
beam_scores[:, 1:] = -1e9
@@ -634,7 +634,7 @@ class PretrainedConfig(PushToHubMixin):
raise EnvironmentError(
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it in the cached "
f"files and it looks like {pretrained_model_name_or_path} is not the path to a directory containing a "
-"{configuration_file} file.\nCheckout your internet connection or see how to run the library in "
+f"{configuration_file} file.\nCheckout your internet connection or see how to run the library in "
"offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'."
)
except EnvironmentError:
@@ -311,7 +311,7 @@ class AutoFeatureExtractor:
raise ValueError(
f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
-"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
+f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
)
@staticmethod
@@ -1050,7 +1050,7 @@ class BartDecoder(BartPretrainedModel):
if attn_mask is not None:
if attn_mask.size()[0] != (len(self.layers)):
raise ValueError(
-"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
+f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
)
for idx, decoder_layer in enumerate(self.layers):
@@ -129,7 +129,7 @@ def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
trace.append("token_type_embeddings")
pointer = getattr(pointer, "token_type_embeddings")
else:
-raise ValueError("Unknown embedding layer with name {full_name}")
+raise ValueError(f"Unknown embedding layer with name {full_name}")
trace.append("weight")
pointer = getattr(pointer, "weight")
elif m_name == "_attention_layer":
@@ -1022,7 +1022,7 @@ class PLBartDecoder(PLBartPreTrainedModel):
if attn_mask is not None:
if attn_mask.size()[0] != (len(self.layers)):
raise ValueError(
-"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
+f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
)
for idx, decoder_layer in enumerate(self.layers):
@@ -757,7 +757,7 @@ class ProphetNetAttention(nn.Module):
batch_size * self.num_attn_heads,
tgt_len,
self.head_dim,
-), "`attn_output` should be of shape {batch_size * self.num_attn_heads, tgt_len, self.head_dim}, but is of shape {attn_output.size()}"
+), f"`attn_output` should be of shape {batch_size * self.num_attn_heads, tgt_len, self.head_dim}, but is of shape {attn_output.size()}"
attn_output = (
attn_output.view(batch_size, self.num_attn_heads, tgt_len, self.head_dim)
@@ -153,7 +153,7 @@ class FlaxXGLMAttention(nn.Module):
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} "
-"and `num_heads`: {self.num_heads})."
+f"and `num_heads`: {self.num_heads})."
)
dense = partial(
@@ -55,7 +55,7 @@ def ffmpeg_microphone(
elif format_for_conversion == "f32le":
size_of_sample = 4
else:
-raise ValueError("Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
+raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
system = platform.system()
if system == "Linux":
@@ -144,7 +144,7 @@ def ffmpeg_microphone_live(
dtype = np.float32
size_of_sample = 4
else:
-raise ValueError("Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
+raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
if stride_length_s is None:
stride_length_s = chunk_length_s / 6
@@ -748,7 +748,7 @@ def has_file(
logger.error(e)
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this "
-"model name. Check the model page at 'https://huggingface.co/{path_or_repo}' for available revisions."
+f"model name. Check the model page at 'https://huggingface.co/{path_or_repo}' for available revisions."
)
except requests.HTTPError:
# We return false for EntryNotFoundError (logical) as well as any connection error.
@@ -266,7 +266,7 @@ class TestTrainerExt(TestCasePlus):
)
self.assertEqual(
-loss_orig, loss_bnb, "loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
+loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
)
# Additionally let's test that the absolute gpu memory difference is larger or about the
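The sites above were flagged automatically. As an illustration only (this is a minimal sketch, not codereview.doctor's actual implementation), such probably-meant-fstring sites can be found by walking a file's AST and flagging plain string literals that contain brace-wrapped expressions. Note it will also flag intentional str.format templates and `{{`-escaped f-string segments, so hits need human review:

import ast
import re
import sys

# Matches a {placeholder} with no nested braces inside.
PLACEHOLDER = re.compile(r"\{[^{}]+\}")

def find_suspect_strings(source: str):
    # Plain string literals parse as ast.Constant; inside an f-string the
    # interpolated {expression} parts parse as ast.FormattedValue instead,
    # so the placeholders of an already-correct f-string do not match here.
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Constant) and isinstance(node.value, str):
            if PLACEHOLDER.search(node.value):
                yield node.lineno, node.value

if __name__ == "__main__":
    for path in sys.argv[1:]:
        with open(path) as f:
            for lineno, value in find_suspect_strings(f.read()):
                print(f"{path}:{lineno}: probably-meant-fstring: {value!r}")

Run against the files touched by this commit, a checker along these lines would report each of the string literals fixed above.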