chenpangpang / transformers · Commits

Unverified commit 3258ff93, authored Nov 09, 2023 by Yih-Dar, committed by GitHub Nov 09, 2023

use `pytest.mark` directly (#27390)

fix

Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>

parent 791ec370
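The change is the same across all five files: instead of importing the `mark` object out of pytest (`from pytest import mark`) and decorating tests with `@mark.flash_attn_test`, the tests import the `pytest` module and reference the marker through its namespace. A minimal sketch of the new pattern on a stand-alone function (the repository's real tests also stack `@require_flash_attn`, `@require_torch_gpu`/`@require_torch_accelerator`, and `@slow` from `transformers.testing_utils`, omitted here):

import pytest


# Old style, removed in this commit:
#   from pytest import mark
#
#   @mark.flash_attn_test
#   def test_flash_attn_2_inference(): ...

# New style: the marker is referenced through the pytest namespace, which
# makes its origin explicit and avoids shadowing a local `mark` name.
@pytest.mark.flash_attn_test
def test_flash_attn_2_inference():
    ...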
Changes: 5 changed files, with 14 additions and 14 deletions (+14 -14)

tests/models/bark/test_modeling_bark.py              +3 -3
tests/models/distilbert/test_modeling_distilbert.py  +3 -3
tests/models/llama/test_modeling_llama.py            +2 -2
tests/models/mistral/test_modeling_mistral.py        +3 -3
tests/models/whisper/test_modeling_whisper.py        +3 -3
tests/models/bark/test_modeling_bark.py

@@ -20,7 +20,7 @@ import inspect
 import tempfile
 import unittest
 
-from pytest import mark
+import pytest
 
 from transformers import (
     BarkCoarseConfig,
@@ -877,7 +877,7 @@ class BarkFineModelTest(ModelTesterMixin, unittest.TestCase):
     @require_flash_attn
     @require_torch_gpu
-    @mark.flash_attn_test
+    @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_inference(self):
         for model_class in self.all_model_classes:
@@ -936,7 +936,7 @@ class BarkFineModelTest(ModelTesterMixin, unittest.TestCase):
     @require_flash_attn
     @require_torch_gpu
-    @mark.flash_attn_test
+    @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_inference_padding_right(self):
         for model_class in self.all_model_classes:
tests/models/distilbert/test_modeling_distilbert.py

@@ -16,7 +16,7 @@ import os
 import tempfile
 import unittest
 
-from pytest import mark
+import pytest
 
 from transformers import DistilBertConfig, is_torch_available
 from transformers.testing_utils import require_flash_attn, require_torch, require_torch_accelerator, slow, torch_device
@@ -290,7 +290,7 @@ class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa
     # Because DistilBertForMultipleChoice requires inputs with different shapes we need to override this test.
     @require_flash_attn
     @require_torch_accelerator
-    @mark.flash_attn_test
+    @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_inference(self):
         import torch
@@ -344,7 +344,7 @@ class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa
     # Because DistilBertForMultipleChoice requires inputs with different shapes we need to override this test.
     @require_flash_attn
     @require_torch_accelerator
-    @mark.flash_attn_test
+    @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_inference_padding_right(self):
         import torch
tests/models/llama/test_modeling_llama.py

@@ -17,8 +17,8 @@
 import unittest
 
+import pytest
 from parameterized import parameterized
-from pytest import mark
 
 from transformers import LlamaConfig, is_torch_available, set_seed
 from transformers.testing_utils import (
@@ -385,7 +385,7 @@ class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixi
     @require_flash_attn
     @require_torch_gpu
-    @mark.flash_attn_test
+    @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_generate_padding_right(self):
         """
tests/models/mistral/test_modeling_mistral.py

@@ -19,7 +19,7 @@ import gc
 import tempfile
 import unittest
 
-from pytest import mark
+import pytest
 
 from transformers import AutoTokenizer, MistralConfig, is_torch_available
 from transformers.testing_utils import (
@@ -369,7 +369,7 @@ class MistralModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi
     @require_flash_attn
     @require_torch_gpu
-    @mark.flash_attn_test
+    @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_generate_padding_right(self):
         import torch
@@ -403,7 +403,7 @@ class MistralModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi
     @require_flash_attn
     @require_torch_gpu
-    @mark.flash_attn_test
+    @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_inference_padding_right(self):
         import torch
tests/models/whisper/test_modeling_whisper.py

@@ -21,7 +21,7 @@ import tempfile
 import unittest
 
 import numpy as np
-from pytest import mark
+import pytest
 
 import transformers
 from transformers import WhisperConfig
@@ -800,7 +800,7 @@ class WhisperModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi
     @require_flash_attn
     @require_torch_gpu
-    @mark.flash_attn_test
+    @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_inference(self):
         import torch
@@ -845,7 +845,7 @@ class WhisperModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi
     @require_flash_attn
     @require_torch_gpu
-    @mark.flash_attn_test
+    @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_inference_padding_right(self):
         import torch
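`flash_attn_test` is a custom marker, so pytest has to be told about it for `pytest --strict-markers` to accept it and for `PytestUnknownMarkWarning` not to be raised; projects normally declare markers in pytest.ini, setup.cfg, or pyproject.toml, though this diff does not show how transformers does it. As a minimal stand-alone sketch, the same registration can be done in Python from a conftest.py (the description string here is illustrative, not taken from the repository):

# conftest.py -- a minimal sketch, assuming the marker is not already
# declared in pytest.ini, setup.cfg, or pyproject.toml.
def pytest_configure(config):
    # Register the custom marker so pytest recognizes @pytest.mark.flash_attn_test.
    config.addinivalue_line(
        "markers", "flash_attn_test: tests that exercise Flash Attention 2"
    )

Once the marker is registered, the Flash Attention tests can be selected or excluded as a group with pytest's `-m` filter, e.g. `pytest -m flash_attn_test tests/models/llama/` or `pytest -m "not flash_attn_test" tests/`.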