chenpangpang / transformers · Commit 5c14fcea
"tests/test_modeling_tf_convbert.py" did not exist on "e983da0e7d91c100e6e35efcb8a69c8cd41d6e09"
Unverified
Commit 5c14fcea, authored Sep 13, 2021 by Patrick von Platen, committed by GitHub on Sep 13, 2021

return attention mask in int32 (#13543)

parent 149c833b
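The commit message does not spell out the motivation, but the change itself is narrow: `np.bool` is NumPy's deprecated alias for the Python builtin `bool` (NumPy ≥ 1.20 emits a `DeprecationWarning` for it), so the old lines built boolean mask arrays, while the new lines build explicit `int32` arrays, the conventional dtype for attention masks. A minimal sketch of the difference (illustration only, not part of the commit):

```python
import numpy as np

mask = [[1, 1, 1, 0], [1, 1, 0, 0]]

as_bool = np.asarray(mask, dtype=bool)       # what np.bool resolved to
as_int32 = np.asarray(mask, dtype=np.int32)  # what the commit switches to

print(as_bool.dtype, as_int32.dtype)      # bool int32
print(as_bool.sum(-1), as_int32.sum(-1))  # [3 2] [3 2]  (length sums still work)
```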
Showing 2 changed files with 4 additions and 4 deletions:

src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py (+2 −2)
src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py (+2 −2)
src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
```diff
@@ -240,12 +240,12 @@ class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
         attention_mask = padded_inputs.get("attention_mask")
         if attention_mask is not None:
-            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.bool) for array in attention_mask]
+            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

         # Utterance-level cepstral mean and variance normalization
         if self.do_ceptral_normalize:
             attention_mask = (
-                np.array(attention_mask, dtype=np.bool)
+                np.array(attention_mask, dtype=np.int32)
                 if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                 else None
             )
```
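One practical consequence (an assumption drawn from how these masks are typically consumed, not stated in the diff): when the returned numpy mask is later converted to a framework tensor, a boolean array becomes a boolean tensor, which breaks the integer arithmetic attention code commonly applies to masks. A hedged PyTorch sketch:

```python
import numpy as np
import torch

mask_bool = np.asarray([[1, 1, 0]], dtype=bool)     # pre-commit behavior
mask_int = np.asarray([[1, 1, 0]], dtype=np.int32)  # post-commit behavior

print(torch.tensor(mask_bool).dtype)  # torch.bool
print(torch.tensor(mask_int).dtype)   # torch.int32

# Patterns like (1 - mask) * -10000 raise a RuntimeError on a torch.bool
# tensor (subtraction is unsupported for bool), but work on an integer one:
t = torch.tensor(mask_int)
print((1 - t) * -10000)  # tensor([[     0,      0, -10000]], dtype=torch.int32)
```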
src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py
```diff
@@ -86,7 +86,7 @@ class Wav2Vec2FeatureExtractor(SequenceFeatureExtractor):
         Every array in the list is normalized to have zero mean and unit variance
         """
         if attention_mask is not None:
-            attention_mask = np.array(attention_mask, np.bool)
+            attention_mask = np.array(attention_mask, np.int32)

             normed_input_values = []
             for vector, length in zip(input_values, attention_mask.sum(-1)):
```
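For context, the mask changed above feeds a per-utterance normalization loop: `attention_mask.sum(-1)` yields each example's true length, and only that prefix of the padded vector is normalized. A rough sketch in that spirit (the slicing and the 1e-5 epsilon are illustrative assumptions, not copied from the library):

```python
import numpy as np

input_values = np.array([[1.0, 2.0, 3.0, 0.0], [4.0, 5.0, 0.0, 0.0]])
attention_mask = np.array([[1, 1, 1, 0], [1, 1, 0, 0]], np.int32)

normed_input_values = []
for vector, length in zip(input_values, attention_mask.sum(-1)):
    sliced = vector[:length]  # keep only the unpadded samples
    normed_input_values.append((sliced - sliced.mean()) / np.sqrt(sliced.var() + 1e-5))

print([v.round(3) for v in normed_input_values])
```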
```diff
@@ -216,7 +216,7 @@ class Wav2Vec2FeatureExtractor(SequenceFeatureExtractor):
         # convert attention_mask to correct format
         attention_mask = padded_inputs.get("attention_mask")
         if attention_mask is not None:
-            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.bool) for array in attention_mask]
+            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

         # zero-mean and unit-variance normalization
         if self.do_normalize:
```
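End to end, the user-visible effect can be checked from the feature extractor itself. A usage sketch, assuming a transformers build containing this commit and access to the public facebook/wav2vec2-base-960h checkpoint:

```python
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")

# Two raw waveforms of different lengths, padded into one batch.
speech = [np.zeros(16000, dtype=np.float32), np.zeros(8000, dtype=np.float32)]
inputs = extractor(speech, sampling_rate=16000, padding=True,
                   return_attention_mask=True)

# After this commit the mask comes back as int32 rather than bool.
print(np.asarray(inputs["attention_mask"]).dtype)  # int32
```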