chenpangpang/transformers
Commit 65687520 (unverified)
Authored Apr 25, 2022 by code-review-doctor, committed by GitHub on Apr 25, 2022
Fix issue probably-meant-fstring found at https://codereview.doctor (#16913)

parent fea94d67
Showing 11 changed files with 12 additions and 12 deletions (+12 -12)
+1 -1  examples/research_projects/onnx/summarization/bart_onnx/generation_onnx.py
+1 -1  src/transformers/configuration_utils.py
+1 -1  src/transformers/models/auto/feature_extraction_auto.py
+1 -1  src/transformers/models/bart/modeling_bart.py
+1 -1  src/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py
+1 -1  src/transformers/models/plbart/modeling_plbart.py
+1 -1  src/transformers/models/prophetnet/modeling_prophetnet.py
+1 -1  src/transformers/models/xglm/modeling_flax_xglm.py
+2 -2  src/transformers/pipelines/audio_utils.py
+1 -1  src/transformers/utils/hub.py
+1 -1  tests/extended/test_trainer_ext.py
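All eleven changes are the same kind of fix flagged by the probably-meant-fstring check: a string literal interpolates names with {...} but is missing the f prefix, so the braces are emitted verbatim instead of being evaluated. A minimal sketch of the difference, reusing names from the first hunk below (the example values are made up and are not part of the commit):

# Hypothetical illustration of the bug class; names mirror generation_onnx.py below.
num_beams, batch_size, batch_beam_size = 4, 2, 7

# Without the f prefix the braces are kept literally in the message:
print("Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}.")
# -> Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}.

# With the f prefix the expressions inside the braces are evaluated:
print(f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}.")
# -> Batch dimension of `input_ids` should be 8, but is 7.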
examples/research_projects/onnx/summarization/bart_onnx/generation_onnx.py

@@ -639,7 +639,7 @@ class BARTBeamSearchGenerator(BARTGenerator):
         assert (
             num_beams * batch_size == batch_beam_size
-        ), "Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
+        ), f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
         beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
         beam_scores[:, 1:] = -1e9
src/transformers/configuration_utils.py

@@ -634,7 +634,7 @@ class PretrainedConfig(PushToHubMixin):
             raise EnvironmentError(
                 f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it in the cached "
                 f"files and it looks like {pretrained_model_name_or_path} is not the path to a directory containing a "
-                "{configuration_file} file.\nCheckout your internet connection or see how to run the library in "
+                f"{configuration_file} file.\nCheckout your internet connection or see how to run the library in "
                 "offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'."
             )
         except EnvironmentError:
src/transformers/models/auto/feature_extraction_auto.py

@@ -311,7 +311,7 @@ class AutoFeatureExtractor:
         raise ValueError(
             f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
             f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
-            "`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
+            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
         )

     @staticmethod
src/transformers/models/bart/modeling_bart.py

@@ -1050,7 +1050,7 @@ class BartDecoder(BartPretrainedModel):
             if attn_mask is not None:
                 if attn_mask.size()[0] != (len(self.layers)):
                     raise ValueError(
-                        "The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
+                        f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
                     )

         for idx, decoder_layer in enumerate(self.layers):
src/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py

@@ -129,7 +129,7 @@ def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
                 trace.append("token_type_embeddings")
                 pointer = getattr(pointer, "token_type_embeddings")
             else:
-                raise ValueError("Unknown embedding layer with name {full_name}")
+                raise ValueError(f"Unknown embedding layer with name {full_name}")
             trace.append("weight")
             pointer = getattr(pointer, "weight")
         elif m_name == "_attention_layer":
src/transformers/models/plbart/modeling_plbart.py

@@ -1022,7 +1022,7 @@ class PLBartDecoder(PLBartPreTrainedModel):
             if attn_mask is not None:
                 if attn_mask.size()[0] != (len(self.layers)):
                     raise ValueError(
-                        "The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
+                        f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
                     )

         for idx, decoder_layer in enumerate(self.layers):
src/transformers/models/prophetnet/modeling_prophetnet.py

@@ -757,7 +757,7 @@ class ProphetNetAttention(nn.Module):
             batch_size * self.num_attn_heads,
             tgt_len,
             self.head_dim,
-        ), "`attn_output` should be of shape {batch_size * self.num_attn_heads, tgt_len, self.head_dim}, but is of shape {attn_output.size()}"
+        ), f"`attn_output` should be of shape {batch_size * self.num_attn_heads, tgt_len, self.head_dim}, but is of shape {attn_output.size()}"

         attn_output = (
             attn_output.view(batch_size, self.num_attn_heads, tgt_len, self.head_dim)
src/transformers/models/xglm/modeling_flax_xglm.py

@@ -153,7 +153,7 @@ class FlaxXGLMAttention(nn.Module):
         if self.head_dim * self.num_heads != self.embed_dim:
             raise ValueError(
                 f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} "
-                "and `num_heads`: {self.num_heads})."
+                f"and `num_heads`: {self.num_heads})."
             )

         dense = partial(
src/transformers/pipelines/audio_utils.py

@@ -55,7 +55,7 @@ def ffmpeg_microphone(
     elif format_for_conversion == "f32le":
         size_of_sample = 4
     else:
-        raise ValueError("Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
+        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

     system = platform.system()
     if system == "Linux":

@@ -144,7 +144,7 @@ def ffmpeg_microphone_live(
         dtype = np.float32
         size_of_sample = 4
     else:
-        raise ValueError("Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
+        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

     if stride_length_s is None:
         stride_length_s = chunk_length_s / 6
src/transformers/utils/hub.py

@@ -748,7 +748,7 @@ def has_file(
         logger.error(e)
         raise EnvironmentError(
             f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this "
-            "model name. Check the model page at 'https://huggingface.co/{path_or_repo}' for available revisions."
+            f"model name. Check the model page at 'https://huggingface.co/{path_or_repo}' for available revisions."
         )
     except requests.HTTPError:
         # We return false for EntryNotFoundError (logical) as well as any connection error.
tests/extended/test_trainer_ext.py

@@ -266,7 +266,7 @@ class TestTrainerExt(TestCasePlus):
         )
         self.assertEqual(
             loss_orig,
             loss_bnb,
-            "loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
+            f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
         )
         # Additionally let's test that the absolute gpu memory difference is larger or about the
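The commit message credits https://codereview.doctor with finding these literals. As a rough idea of how such a check can work, the sketch below walks a module's AST and flags plain string constants whose {...} placeholders reference names that occur elsewhere in the module. This is a hypothetical heuristic for illustration only, not that tool's implementation and not part of this commit.

# probably_meant_fstring.py: hypothetical heuristic sketch, not the codereview.doctor tool.
import ast
import re
import sys

# Matches an identifier right after an opening brace, e.g. "{num_beams * batch_size}".
PLACEHOLDER = re.compile(r"\{([A-Za-z_][A-Za-z0-9_]*)")


def probably_meant_fstring(source: str):
    """Yield (lineno, literal) for plain strings that look like they should be f-strings."""
    tree = ast.parse(source)
    # Every name that occurs anywhere in the module; a deliberately loose stand-in for scope analysis.
    known_names = {node.id for node in ast.walk(tree) if isinstance(node, ast.Name)}
    for node in ast.walk(tree):
        # f-strings parse as ast.JoinedStr, so a plain-str ast.Constant here has no f prefix.
        if isinstance(node, ast.Constant) and isinstance(node.value, str):
            referenced = set(PLACEHOLDER.findall(node.value))
            if referenced & known_names:
                yield node.lineno, node.value


if __name__ == "__main__":
    path = sys.argv[1]
    with open(path) as handle:
        for lineno, literal in probably_meant_fstring(handle.read()):
            print(f"{path}:{lineno}: possible missing f prefix: {literal!r}")

A real checker needs tighter scope handling than this single-module name set to avoid false positives on strings that intentionally keep literal braces, such as str.format templates.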