Commit 3c2d4d60 (unverified)
Authored Jun 24, 2024 by Pavel Iakubovskii, committed by GitHub on Jun 24, 2024
Correct @is_flaky test decoration (#31480)
* Correct @is_flaky decorator
Parent: 4b822560
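
Context on the fix (added for clarity, not part of the commit): in transformers.testing_utils, is_flaky is a decorator factory, i.e. a function that takes retry options (for example a maximum number of attempts) and returns the decorator that actually wraps the test. Written bare as @is_flaky, the test function itself is passed into the factory's first parameter and the test name ends up bound to the inner decorator, so in these unittest-style test classes the decorated test can appear to pass without its body ever running; @is_flaky() is the correct form. Below is a minimal sketch of such a retry decorator factory, with an assumed simplified signature rather than the exact transformers implementation.

import functools
import time
from typing import Optional


def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None):
    """Sketch of a retry decorator factory: rerun a failing test up to
    max_attempts times, surfacing the last failure if none succeed."""

    def decorator(test_func):
        @functools.wraps(test_func)
        def wrapper(*args, **kwargs):
            for attempt in range(max_attempts):
                try:
                    return test_func(*args, **kwargs)
                except Exception:
                    if attempt == max_attempts - 1:
                        raise  # out of retries: report the real failure
                    if wait_before_retry is not None:
                        time.sleep(wait_before_retry)

        return wrapper

    return decorator
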
Showing 2 changed files with 5 additions and 5 deletions (+5 -5):

  tests/models/gemma/test_modeling_gemma.py   +1 -1
  tests/test_modeling_common.py               +4 -4
tests/models/gemma/test_modeling_gemma.py

@@ -493,7 +493,7 @@ class GemmaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixi
     @require_flash_attn
     @require_torch_gpu
     @pytest.mark.flash_attn_test
-    @is_flaky
+    @is_flaky()
     @slow
     def test_flash_attn_2_equivalence(self):
         for model_class in self.all_model_classes:
tests/test_modeling_common.py

@@ -3407,7 +3407,7 @@ class ModelTesterMixin:
     @require_torch_gpu
     @mark.flash_attn_test
     @slow
-    @is_flaky
+    @is_flaky()
     def test_flash_attn_2_inference_equivalence(self):
         for model_class in self.all_model_classes:
             if not model_class._supports_flash_attn_2:

@@ -3501,7 +3501,7 @@ class ModelTesterMixin:
     @require_torch_gpu
     @mark.flash_attn_test
     @slow
-    @is_flaky
+    @is_flaky()
     def test_flash_attn_2_inference_equivalence_right_padding(self):
         for model_class in self.all_model_classes:
             if not model_class._supports_flash_attn_2:

@@ -3591,7 +3591,7 @@ class ModelTesterMixin:
     @require_torch_gpu
     @mark.flash_attn_test
     @slow
-    @is_flaky
+    @is_flaky()
     def test_flash_attn_2_generate_left_padding(self):
         for model_class in self.all_generative_model_classes:
             if not model_class._supports_flash_attn_2:

@@ -3635,7 +3635,7 @@ class ModelTesterMixin:
     @require_flash_attn
     @require_torch_gpu
     @mark.flash_attn_test
-    @is_flaky
+    @is_flaky()
     @slow
     def test_flash_attn_2_generate_padding_right(self):
         for model_class in self.all_generative_model_classes:
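
To make the difference concrete, here is a small illustration reusing the simplified is_flaky sketch from above (a hypothetical demo, not the transformers implementation). The bare form binds the test name to the factory's inner decorator and the original body becomes unreachable, while the parenthesized form wraps and runs the test as intended.

# Assumes the simplified is_flaky sketch defined earlier on this page.

@is_flaky            # wrong: is_flaky(test_bare) is called, so test_bare lands in max_attempts
def test_bare():
    raise AssertionError("this failure would never be reported")


@is_flaky()          # correct: the factory returns the decorator that wraps the test
def test_correct():
    return "ran"


print(test_bare.__name__)     # 'decorator' -- the original test body is unreachable
print(test_correct.__name__)  # 'test_correct' -- preserved by functools.wraps
print(test_correct())         # 'ran' (and it would be retried on a transient failure)

In a unittest-style test class such as the ModelTesterMixin subclasses touched here, the bare form would receive self as the function to wrap and simply return the wrapper, so the affected Flash Attention tests could appear to pass without actually exercising anything; adding the parentheses in this commit corrects that.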