Commit 648d0deb (unverified)
Authored Mar 02, 2023 by Kashif Rasul; committed by GitHub on Mar 02, 2023
Parent: c87654dc

fix typo in Bart's attention (#21898)
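The typo in question: the shape check compares `attn_output.size()` against the 3-tuple `(bsz * self.num_heads, tgt_len, self.head_dim)`, but the old error message interpolated the 4-tuple `(bsz, self.num_heads, tgt_len, self.head_dim)`, so a failed check would have reported an expected shape the tensor never has at that point. The title says Bart while the changed files are Wav2Vec2 and Whisper, presumably because `Wav2Vec2Attention` and `WhisperAttention` are kept in sync with `BartAttention` via the repository's `# Copied from` mechanism. A minimal sketch of the corrected check (dimension values are illustrative, not taken from the commit):

```python
import torch

bsz, num_heads, tgt_len, head_dim = 2, 8, 5, 64

# At this point in the forward pass, batch and heads are fused into one
# leading dimension, so the expected shape is 3-D, not 4-D.
attn_output = torch.zeros(bsz * num_heads, tgt_len, head_dim)

if attn_output.size() != (bsz * num_heads, tgt_len, head_dim):
    raise ValueError(
        # Fixed message: the interpolated tuple now matches the tuple the
        # check actually compares against (bsz * num_heads, ...).
        f"`attn_output` should be of size {(bsz * num_heads, tgt_len, head_dim)}, but is"
        f" {attn_output.size()}"
    )
```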
Showing 2 changed files with 4 additions and 4 deletions:

src/transformers/models/wav2vec2/modeling_wav2vec2.py  (+2 −2)
src/transformers/models/whisper/modeling_whisper.py    (+2 −2)
src/transformers/models/wav2vec2/modeling_wav2vec2.py

```diff
@@ -621,7 +621,7 @@ class Wav2Vec2Attention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -629,7 +629,7 @@ class Wav2Vec2Attention(nn.Module):
         attn_output = attn_output.transpose(1, 2)

         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

         attn_output = self.out_proj(attn_output)
```
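For context, here is roughly how `attn_output` flows through the lines these hunks touch; a sketch under assumed dimensions, not the file's actual code (the `.view()` back to 4-D sits between the two hunks):

```python
import torch

bsz, num_heads, tgt_len, head_dim = 2, 8, 5, 64
embed_dim = num_heads * head_dim

attn_output = torch.randn(bsz * num_heads, tgt_len, head_dim)

# Un-fuse batch and heads, then swap the head and sequence dimensions.
attn_output = attn_output.view(bsz, num_heads, tgt_len, head_dim)
attn_output = attn_output.transpose(1, 2)  # (bsz, tgt_len, num_heads, head_dim)

# Fold the heads back into the model dimension. As the fixed comment notes,
# embed_dim comes from the config rather than hidden_states, because
# attn_output can be partitioned across GPUs under tensor parallelism.
attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
assert attn_output.shape == (bsz, tgt_len, embed_dim)
```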
src/transformers/models/whisper/modeling_whisper.py

```diff
@@ -366,7 +366,7 @@ class WhisperAttention(nn.Module):
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
@@ -374,7 +374,7 @@ class WhisperAttention(nn.Module):
         attn_output = attn_output.transpose(1, 2)

         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

         attn_output = self.out_proj(attn_output)
```
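To see why the one-character difference matters when debugging, compare what the two f-strings print (made-up values):

```python
bsz, num_heads, tgt_len, head_dim = 2, 8, 5, 64

old = f"`attn_output` should be of size {(bsz, num_heads, tgt_len, head_dim)}"
new = f"`attn_output` should be of size {(bsz * num_heads, tgt_len, head_dim)}"

print(old)  # ... of size (2, 8, 5, 64)  -> 4-D, never valid at this point
print(new)  # ... of size (16, 5, 64)    -> matches the shape check
```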