chenpangpang / transformers

Commit 3499c49c (unverified), authored Feb 15, 2023 by amyeroberts, committed via GitHub on Feb 15, 2023
Skipping more high mem tests - Wav2Vec2 Hubert (#21647)
Skipping more tests
Parent: 0c9c8472

Showing 2 changed files, with 12 additions and 21 deletions:

tests/models/hubert/test_modeling_tf_hubert.py        +6 / -10
tests/models/wav2vec2/test_modeling_tf_wav2vec2.py    +6 / -11
tests/models/hubert/test_modeling_tf_hubert.py
@@ -321,19 +321,15 @@ class TFHubertModelTest(TFModelTesterMixin, unittest.TestCase):
         model = TFHubertModel.from_pretrained("facebook/hubert-base-ls960")
         self.assertIsNotNone(model)
 
-    # We override here as passing a full batch of 13 samples results in OOM errors for CTC
+    @unittest.skip(reason="Fix me! Hubert hits OOM errors when loss is computed on full batch")
     def test_dataset_conversion(self):
-        default_batch_size = self.model_tester.batch_size
-        self.model_tester.batch_size = 2
-        super().test_dataset_conversion()
-        self.model_tester.batch_size = default_batch_size
+        # TODO: (Amy) - check whether skipping CTC model resolves this issue and possible resolutions for CTC
+        pass
 
-    # We override here as passing a full batch of 13 samples results in OOM errors for CTC
+    @unittest.skip(reason="Fix me! Hubert hits OOM errors when loss is computed on full batch")
     def test_keras_fit(self):
-        default_batch_size = self.model_tester.batch_size
-        self.model_tester.batch_size = 2
-        super().test_keras_fit()
-        self.model_tester.batch_size = default_batch_size
+        # TODO: (Amy) - check whether skipping CTC model resolves this issue and possible resolutions for CTC
+        pass
 
     @require_tf
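For context, the change above follows a standard unittest pattern: the memory-heavy tests are inherited from the shared tester mixin, and the model-specific test class overrides them with a skipped no-op so the reason appears in test reports instead of the run hitting OOM. A minimal, self-contained sketch of that pattern (the class and test names below are hypothetical placeholders, not code from the repository):

import unittest


class SharedModelTests(unittest.TestCase):
    # Hypothetical stand-in for a shared tester mixin that defines memory-heavy tests.
    def test_keras_fit(self):
        # Imagine this trains on a full batch and can exhaust memory for some models.
        self.assertTrue(True)


class SpecificModelTest(SharedModelTests):
    # Override the inherited test and skip it with an explicit reason, as the commit does.
    @unittest.skip(reason="Fix me! Hits OOM errors when loss is computed on full batch")
    def test_keras_fit(self):
        pass


if __name__ == "__main__":
    unittest.main()

Running this file reports test_keras_fit as skipped with the given reason rather than executing the inherited body.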
tests/models/wav2vec2/test_modeling_tf_wav2vec2.py
@@ -512,20 +512,15 @@ class TFWav2Vec2RobustModelTest(TFModelTesterMixin, unittest.TestCase):
         model = TFWav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h")
         self.assertIsNotNone(model)
 
-    # We override here as passing a full batch of 13 samples results in OOM errors for CTC
-    @unittest.skip("Fix me!")
+    @unittest.skip(reason="Fix me! Wav2Vec2 hits OOM errors when loss is computed on full batch")
     def test_dataset_conversion(self):
-        default_batch_size = self.model_tester.batch_size
-        self.model_tester.batch_size = 2
-        super().test_dataset_conversion()
-        self.model_tester.batch_size = default_batch_size
+        # TODO: (Amy) - check whether skipping CTC model resolves this issue and possible resolutions for CTC
+        pass
 
-    # We override here as passing a full batch of 13 samples results in OOM errors for CTC
+    @unittest.skip(reason="Fix me! Wav2Vec2 hits OOM errors when loss is computed on full batch")
     def test_keras_fit(self):
-        default_batch_size = self.model_tester.batch_size
-        self.model_tester.batch_size = 2
-        super().test_keras_fit()
-        self.model_tester.batch_size = default_batch_size
+        # TODO: (Amy) - check whether skipping CTC model resolves this issue and possible resolutions for CTC
+        pass
 
     @require_tf
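As a quick local sanity check (not part of the commit), the overridden tests should now be reported as skipped rather than failing with out-of-memory errors. A sketch of such a check, assuming pytest is installed and the command is run from the repository root:

# Hypothetical local check: select the two overridden Hubert tests and show skip reasons.
import pytest

pytest.main([
    "tests/models/hubert/test_modeling_tf_hubert.py",
    "-k", "test_dataset_conversion or test_keras_fit",
    "-rs",  # include skip reasons in the short test summary
])

The same check applies to tests/models/wav2vec2/test_modeling_tf_wav2vec2.py.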