chenpangpang / transformers · Commits

Unverified commit 648d0deb, authored Mar 02, 2023 by Kashif Rasul, committed by GitHub on Mar 02, 2023
fix typo in Bart's attention (#21898)

Parent: c87654dc
Changes: 22
Showing 20 changed files with 40 additions and 40 deletions (+40 -40)
src/transformers/models/bart/modeling_bart.py  +2 -2
src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py  +2 -2
src/transformers/models/biogpt/modeling_biogpt.py  +2 -2
src/transformers/models/blenderbot/modeling_blenderbot.py  +2 -2
src/transformers/models/blenderbot_small/modeling_blenderbot_small.py  +2 -2
src/transformers/models/data2vec/modeling_data2vec_audio.py  +2 -2
src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py  +2 -2
src/transformers/models/hubert/modeling_hubert.py  +2 -2
src/transformers/models/m2m_100/modeling_m2m_100.py  +2 -2
src/transformers/models/marian/modeling_marian.py  +2 -2
src/transformers/models/mbart/modeling_mbart.py  +2 -2
src/transformers/models/pegasus/modeling_pegasus.py  +2 -2
src/transformers/models/pegasus_x/modeling_pegasus_x.py  +2 -2
src/transformers/models/plbart/modeling_plbart.py  +2 -2
src/transformers/models/sew/modeling_sew.py  +2 -2
src/transformers/models/speech_to_text/modeling_speech_to_text.py  +2 -2
src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py  +2 -2
src/transformers/models/time_series_transformer/modeling_time_series_transformer.py  +2 -2
src/transformers/models/unispeech/modeling_unispeech.py  +2 -2
src/transformers/models/unispeech_sat/modeling_unispeech_sat.py  +2 -2
src/transformers/models/bart/modeling_bart.py (View file @ 648d0deb)

@@ -276,7 +276,7 @@ class BartAttention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -284,7 +284,7 @@ class BartAttention(nn.Module):
         attn_output = attn_output.transpose(1, 2)
         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
         attn_output = self.out_proj(attn_output)
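The same two-line fix repeats in every file below: the f-string in the shape check now reports (bsz * self.num_heads, tgt_len, self.head_dim), matching the tuple the size is actually compared against, and the comment typo "aross" becomes "across". The following is a minimal, self-contained sketch of that shape bookkeeping with illustrative values; it is not the library code, only the error-message line mirrors the diff.

# Minimal sketch of the Bart-style attention shape check touched by this commit.
# Tensor names and sizes here are illustrative, not taken from the library.
import torch

bsz, num_heads, tgt_len, head_dim = 2, 4, 5, 8
embed_dim = num_heads * head_dim

# After torch.bmm(attn_probs, value_states) the heads are folded into the batch
# dimension, so the expected shape is (bsz * num_heads, tgt_len, head_dim).
attn_output = torch.randn(bsz * num_heads, tgt_len, head_dim)

if attn_output.size() != (bsz * num_heads, tgt_len, head_dim):
    raise ValueError(
        # The fix: report (bsz * num_heads, ...) here, matching the comparison above,
        # instead of the old (bsz, num_heads, ...), which described a different shape.
        f"`attn_output` should be of size {(bsz * num_heads, tgt_len, head_dim)}, but is"
        f" {attn_output.size()}"
    )

# Unchanged surrounding logic: restore the per-head layout, then merge heads using the
# configured embed_dim, since attn_output can be partitioned across GPUs under tensor
# parallelism.
attn_output = attn_output.view(bsz, num_heads, tgt_len, head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
assert attn_output.shape == (bsz, tgt_len, embed_dim)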
src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py (View file @ 648d0deb)

@@ -1335,7 +1335,7 @@ class BigBirdPegasusDecoderAttention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -1343,7 +1343,7 @@ class BigBirdPegasusDecoderAttention(nn.Module):
         attn_output = attn_output.transpose(1, 2)
         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
         attn_output = self.out_proj(attn_output)
src/transformers/models/biogpt/modeling_biogpt.py (View file @ 648d0deb)

@@ -237,7 +237,7 @@ class BioGptAttention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -245,7 +245,7 @@ class BioGptAttention(nn.Module):
         attn_output = attn_output.transpose(1, 2)
         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
         attn_output = self.out_proj(attn_output)
src/transformers/models/blenderbot/modeling_blenderbot.py (View file @ 648d0deb)

@@ -263,7 +263,7 @@ class BlenderbotAttention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -271,7 +271,7 @@ class BlenderbotAttention(nn.Module):
         attn_output = attn_output.transpose(1, 2)
         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
         attn_output = self.out_proj(attn_output)
src/transformers/models/blenderbot_small/modeling_blenderbot_small.py (View file @ 648d0deb)

@@ -260,7 +260,7 @@ class BlenderbotSmallAttention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -268,7 +268,7 @@ class BlenderbotSmallAttention(nn.Module):
         attn_output = attn_output.transpose(1, 2)
         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
         attn_output = self.out_proj(attn_output)
src/transformers/models/data2vec/modeling_data2vec_audio.py (View file @ 648d0deb)

@@ -466,7 +466,7 @@ class Data2VecAudioAttention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -474,7 +474,7 @@ class Data2VecAudioAttention(nn.Module):
         attn_output = attn_output.transpose(1, 2)
         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
         attn_output = self.out_proj(attn_output)
src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py (View file @ 648d0deb)

@@ -498,7 +498,7 @@ class GPTSanJapaneseAttention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -506,7 +506,7 @@ class GPTSanJapaneseAttention(nn.Module):
         attn_output = attn_output.transpose(1, 2)
         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
         attn_output = self.out_proj(attn_output)
src/transformers/models/hubert/modeling_hubert.py (View file @ 648d0deb)

@@ -528,7 +528,7 @@ class HubertAttention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -536,7 +536,7 @@ class HubertAttention(nn.Module):
         attn_output = attn_output.transpose(1, 2)
         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
         attn_output = self.out_proj(attn_output)
src/transformers/models/m2m_100/modeling_m2m_100.py (View file @ 648d0deb)

@@ -331,7 +331,7 @@ class M2M100Attention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -339,7 +339,7 @@ class M2M100Attention(nn.Module):
         attn_output = attn_output.transpose(1, 2)
         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
         attn_output = self.out_proj(attn_output)
src/transformers/models/marian/modeling_marian.py (View file @ 648d0deb)

@@ -278,7 +278,7 @@ class MarianAttention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -286,7 +286,7 @@ class MarianAttention(nn.Module):
         attn_output = attn_output.transpose(1, 2)
         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
         attn_output = self.out_proj(attn_output)
src/transformers/models/mbart/modeling_mbart.py (View file @ 648d0deb)

@@ -272,7 +272,7 @@ class MBartAttention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -280,7 +280,7 @@ class MBartAttention(nn.Module):
         attn_output = attn_output.transpose(1, 2)
         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
         attn_output = self.out_proj(attn_output)
src/transformers/models/pegasus/modeling_pegasus.py (View file @ 648d0deb)

@@ -278,7 +278,7 @@ class PegasusAttention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -286,7 +286,7 @@ class PegasusAttention(nn.Module):
         attn_output = attn_output.transpose(1, 2)
         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
         attn_output = self.out_proj(attn_output)
src/transformers/models/pegasus_x/modeling_pegasus_x.py (View file @ 648d0deb)

@@ -287,7 +287,7 @@ class PegasusXAttention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -295,7 +295,7 @@ class PegasusXAttention(nn.Module):
         attn_output = attn_output.transpose(1, 2)
         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
         attn_output = self.out_proj(attn_output)
src/transformers/models/plbart/modeling_plbart.py (View file @ 648d0deb)

@@ -271,7 +271,7 @@ class PLBartAttention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -279,7 +279,7 @@ class PLBartAttention(nn.Module):
         attn_output = attn_output.transpose(1, 2)
         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
         attn_output = self.out_proj(attn_output)
src/transformers/models/sew/modeling_sew.py (View file @ 648d0deb)

@@ -528,7 +528,7 @@ class SEWAttention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -536,7 +536,7 @@ class SEWAttention(nn.Module):
         attn_output = attn_output.transpose(1, 2)
         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
         attn_output = self.out_proj(attn_output)
src/transformers/models/speech_to_text/modeling_speech_to_text.py (View file @ 648d0deb)

@@ -338,7 +338,7 @@ class Speech2TextAttention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -346,7 +346,7 @@ class Speech2TextAttention(nn.Module):
         attn_output = attn_output.transpose(1, 2)
         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
         attn_output = self.out_proj(attn_output)
src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py (View file @ 648d0deb)

@@ -284,7 +284,7 @@ class Speech2Text2Attention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -292,7 +292,7 @@ class Speech2Text2Attention(nn.Module):
         attn_output = attn_output.transpose(1, 2)
         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
         attn_output = self.out_proj(attn_output)
src/transformers/models/time_series_transformer/modeling_time_series_transformer.py (View file @ 648d0deb)

@@ -764,7 +764,7 @@ class TimeSeriesTransformerAttention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -772,7 +772,7 @@ class TimeSeriesTransformerAttention(nn.Module):
         attn_output = attn_output.transpose(1, 2)
         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
         attn_output = self.out_proj(attn_output)
src/transformers/models/unispeech/modeling_unispeech.py (View file @ 648d0deb)

@@ -564,7 +564,7 @@ class UniSpeechAttention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -572,7 +572,7 @@ class UniSpeechAttention(nn.Module):
         attn_output = attn_output.transpose(1, 2)
         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
         attn_output = self.out_proj(attn_output)
src/transformers/models/unispeech_sat/modeling_unispeech_sat.py (View file @ 648d0deb)

@@ -578,7 +578,7 @@ class UniSpeechSatAttention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -586,7 +586,7 @@ class UniSpeechSatAttention(nn.Module):
         attn_output = attn_output.transpose(1, 2)
         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
         attn_output = self.out_proj(attn_output)