renzhc / diffusers_dcu · Commits · cbc8ced2

Unverified commit cbc8ced2, authored Jul 08, 2025 by Dhruv Nair, committed by GitHub on Jul 08, 2025.
[CI] Fix big GPU test marker (#11786)
* update
* update

Parent commit: 01240fec
Showing 13 changed files, with 9 additions and 23 deletions (+9, -23):
.github/workflows/nightly_tests.yml (+1, -1)
src/diffusers/utils/testing_utils.py (+4, -0)
tests/conftest.py (+4, -0)
tests/lora/test_lora_layers_flux.py (+0, -3)
tests/lora/test_lora_layers_hunyuanvideo.py (+0, -2)
tests/lora/test_lora_layers_sd3.py (+0, -2)
tests/pipelines/controlnet_flux/test_controlnet_flux.py (+0, -2)
tests/pipelines/controlnet_sd3/test_controlnet_sd3.py (+0, -2)
tests/pipelines/flux/test_pipeline_flux.py (+0, -3)
tests/pipelines/flux/test_pipeline_flux_redux.py (+0, -2)
tests/pipelines/mochi/test_mochi.py (+0, -2)
tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py (+0, -2)
tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_img2img.py (+0, -2)
.github/workflows/nightly_tests.yml

@@ -248,7 +248,7 @@ jobs:
           BIG_GPU_MEMORY: 40
         run: |
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
-            -m "big_gpu_with_torch_cuda" \
+            -m "big_accelerator" \
             --make-reports=tests_big_gpu_torch_cuda \
             --report-log=tests_big_gpu_torch_cuda.log \
             tests/
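The only change to the workflow is the marker expression passed to `-m`: the nightly big-GPU job now selects tests marked `big_accelerator` instead of the old `big_gpu_with_torch_cuda` marker. As a rough local sketch (not part of the commit), the same selection can be reproduced through `pytest.main`; it assumes pytest-xdist is installed for the `-n`/`--dist` options and that the marker is registered by `tests/conftest.py` as in the change below.

```python
# Minimal local sketch of the nightly job's test selection. Assumes pytest-xdist
# is installed and that tests/conftest.py registers the "big_accelerator" marker.
import sys

import pytest

if __name__ == "__main__":
    exit_code = pytest.main(
        [
            "-n", "1",                  # one xdist worker, as in the workflow
            "--max-worker-restart=0",
            "--dist=loadfile",
            "-m", "big_accelerator",    # the new marker selected by the CI job
            "tests/",
        ]
    )
    sys.exit(int(exit_code))
```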
src/diffusers/utils/testing_utils.py

@@ -421,6 +421,10 @@ def require_big_accelerator(test_case):
     Decorator marking a test that requires a bigger hardware accelerator (24GB) for execution. Some example pipelines:
     Flux, SD3, Cog, etc.
     """
+    import pytest
+
+    test_case = pytest.mark.big_accelerator(test_case)
+
     if not is_torch_available():
         return unittest.skip("test requires PyTorch")(test_case)
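After this change, `require_big_accelerator` both applies the `pytest.mark.big_accelerator` marker and keeps its existing skip logic, so decorated tests become selectable with `-m big_accelerator` without a separate marker line. A minimal usage sketch follows; the test class and body here are hypothetical and only illustrate the decorator's new behavior.

```python
# Hypothetical test module illustrating the decorator after this commit:
# no explicit @pytest.mark.big_accelerator line is needed anymore, because
# require_big_accelerator injects that marker itself.
import unittest

from diffusers.utils.testing_utils import require_big_accelerator


@require_big_accelerator
class ExampleBigAcceleratorTests(unittest.TestCase):  # hypothetical class
    def test_smoke(self):
        # Runs only when the decorator's hardware checks pass, and is picked
        # up by `pytest -m big_accelerator` through the injected marker.
        self.assertTrue(True)
```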
tests/conftest.py

@@ -30,6 +30,10 @@ sys.path.insert(1, git_repo_path)
 warnings.simplefilter(action="ignore", category=FutureWarning)
 
 
+def pytest_configure(config):
+    config.addinivalue_line("markers", "big_accelerator: marks tests as requiring big accelerator resources")
+
+
 def pytest_addoption(parser):
     from diffusers.utils.testing_utils import pytest_addoption_shared
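Registering the marker in `pytest_configure` declares `big_accelerator` to pytest, so `-m big_accelerator` selection works without `PytestUnknownMarkWarning` noise. As a hedged aside (not part of the commit), once a marker is registered, other hooks or fixtures can inspect it through the standard pytest API, for example:

```python
# Hypothetical autouse fixture showing how a registered marker can be queried;
# illustrative only, not part of this commit.
import pytest


@pytest.fixture(autouse=True)
def _report_big_accelerator(request):
    marker = request.node.get_closest_marker("big_accelerator")
    if marker is not None:
        # e.g. log, enforce memory limits, or collect timing for these tests
        print(f"big_accelerator test: {request.node.nodeid}")
    yield
```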
tests/lora/test_lora_layers_flux.py

@@ -20,7 +20,6 @@ import tempfile
 import unittest
 
 import numpy as np
-import pytest
 import safetensors.torch
 import torch
 from parameterized import parameterized

@@ -813,7 +812,6 @@ class FluxControlLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
 @require_torch_accelerator
 @require_peft_backend
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class FluxLoRAIntegrationTests(unittest.TestCase):
     """internal note: The integration slices were obtained on audace.

@@ -960,7 +958,6 @@ class FluxLoRAIntegrationTests(unittest.TestCase):
 @require_torch_accelerator
 @require_peft_backend
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class FluxControlLoRAIntegrationTests(unittest.TestCase):
     num_inference_steps = 10
     seed = 0
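The remaining test files below follow the same pattern: the now-unused `import pytest` goes away and the explicit `@pytest.mark.big_accelerator` line is dropped, leaving `require_big_accelerator` as the single source of both the skip logic and the marker. A sketch of the resulting decorator stack (decorators imported from `diffusers.utils.testing_utils`, as in the test files; test bodies omitted and unchanged by this commit):

```python
# Post-change decorator stack, sketched; bodies omitted.
import unittest

from diffusers.utils.testing_utils import (
    require_big_accelerator,
    require_peft_backend,
    require_torch_accelerator,
)


@require_torch_accelerator
@require_peft_backend
@require_big_accelerator  # also applies pytest.mark.big_accelerator now
class FluxLoRAIntegrationTests(unittest.TestCase):
    """Sketch only: selectable via `pytest -m big_accelerator`."""
```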
tests/lora/test_lora_layers_hunyuanvideo.py

@@ -17,7 +17,6 @@ import sys
 import unittest
 
 import numpy as np
-import pytest
 import torch
 from transformers import CLIPTextModel, CLIPTokenizer, LlamaModel, LlamaTokenizerFast

@@ -198,7 +197,6 @@ class HunyuanVideoLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
 @require_torch_accelerator
 @require_peft_backend
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class HunyuanVideoLoRAIntegrationTests(unittest.TestCase):
     """internal note: The integration slices were obtained on DGX.
tests/lora/test_lora_layers_sd3.py

@@ -17,7 +17,6 @@ import sys
 import unittest
 
 import numpy as np
-import pytest
 import torch
 from transformers import AutoTokenizer, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel

@@ -139,7 +138,6 @@ class SD3LoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
 @require_torch_accelerator
 @require_peft_backend
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class SD3LoraIntegrationTests(unittest.TestCase):
     pipeline_class = StableDiffusion3Img2ImgPipeline
     repo_id = "stabilityai/stable-diffusion-3-medium-diffusers"
tests/pipelines/controlnet_flux/test_controlnet_flux.py

@@ -17,7 +17,6 @@ import gc
 import unittest
 
 import numpy as np
-import pytest
 import torch
 from huggingface_hub import hf_hub_download
 from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast

@@ -211,7 +210,6 @@ class FluxControlNetPipelineFastTests(unittest.TestCase, PipelineTesterMixin, Fl
 @nightly
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class FluxControlNetPipelineSlowTests(unittest.TestCase):
     pipeline_class = FluxControlNetPipeline
tests/pipelines/controlnet_sd3/test_controlnet_sd3.py

@@ -18,7 +18,6 @@ import unittest
 from typing import Optional
 
 import numpy as np
-import pytest
 import torch
 from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel

@@ -221,7 +220,6 @@ class StableDiffusion3ControlNetPipelineFastTests(unittest.TestCase, PipelineTes
 @slow
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class StableDiffusion3ControlNetPipelineSlowTests(unittest.TestCase):
     pipeline_class = StableDiffusion3ControlNetPipeline
tests/pipelines/flux/test_pipeline_flux.py

@@ -2,7 +2,6 @@ import gc
 import unittest
 
 import numpy as np
-import pytest
 import torch
 from huggingface_hub import hf_hub_download
 from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel

@@ -224,7 +223,6 @@ class FluxPipelineFastTests(
 @nightly
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class FluxPipelineSlowTests(unittest.TestCase):
     pipeline_class = FluxPipeline
     repo_id = "black-forest-labs/FLUX.1-schnell"

@@ -312,7 +310,6 @@ class FluxPipelineSlowTests(unittest.TestCase):
 @slow
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class FluxIPAdapterPipelineSlowTests(unittest.TestCase):
     pipeline_class = FluxPipeline
     repo_id = "black-forest-labs/FLUX.1-dev"
tests/pipelines/flux/test_pipeline_flux_redux.py

@@ -2,7 +2,6 @@ import gc
 import unittest
 
 import numpy as np
-import pytest
 import torch
 from diffusers import FluxPipeline, FluxPriorReduxPipeline

@@ -19,7 +18,6 @@ from diffusers.utils.testing_utils import (
 @slow
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class FluxReduxSlowTests(unittest.TestCase):
     pipeline_class = FluxPriorReduxPipeline
     repo_id = "black-forest-labs/FLUX.1-Redux-dev"
tests/pipelines/mochi/test_mochi.py

@@ -17,7 +17,6 @@ import inspect
 import unittest
 
 import numpy as np
-import pytest
 import torch
 from transformers import AutoTokenizer, T5EncoderModel

@@ -268,7 +267,6 @@ class MochiPipelineFastTests(PipelineTesterMixin, FasterCacheTesterMixin, unitte
 @nightly
 @require_torch_accelerator
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class MochiPipelineIntegrationTests(unittest.TestCase):
     prompt = "A painting of a squirrel eating a burger."
tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py

@@ -2,7 +2,6 @@ import gc
 import unittest
 
 import numpy as np
-import pytest
 import torch
 from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel

@@ -233,7 +232,6 @@ class StableDiffusion3PipelineFastTests(unittest.TestCase, PipelineTesterMixin):
 @slow
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class StableDiffusion3PipelineSlowTests(unittest.TestCase):
     pipeline_class = StableDiffusion3Pipeline
     repo_id = "stabilityai/stable-diffusion-3-medium-diffusers"
tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_img2img.py

@@ -3,7 +3,6 @@ import random
 import unittest
 
 import numpy as np
-import pytest
 import torch
 from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel

@@ -168,7 +167,6 @@ class StableDiffusion3Img2ImgPipelineFastTests(PipelineLatentTesterMixin, unitte
 @slow
 @require_big_accelerator
-@pytest.mark.big_accelerator
 class StableDiffusion3Img2ImgPipelineSlowTests(unittest.TestCase):
     pipeline_class = StableDiffusion3Img2ImgPipeline
     repo_id = "stabilityai/stable-diffusion-3-medium-diffusers"