"git@developer.sourcefind.cn:change/sglang.git" did not exist on "2c05f81f157fdd5e532baea78bb0121a0ba2c1a0"
Unverified commit 1d1e1a28, authored by Sayak Paul and committed by GitHub

Some minor updates to the nightly and push workflows (#9759)

* move LoRA integration tests to nightly.

* remove the "slow" marker from workflow step names where it is not needed.
parent 24c7d578
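
For context, `slow` and `nightly` are environment-gated skip decorators exposed by `diffusers.utils.testing_utils`. The sketch below illustrates how such markers typically work; the flag names `RUN_SLOW` / `RUN_NIGHTLY` and the helper bodies are assumptions for illustration, not the library's exact implementation.

# Rough sketch of environment-gated test markers such as `slow` and `nightly`.
# Assumption: the real helpers in diffusers.utils.testing_utils key off
# RUN_SLOW / RUN_NIGHTLY style environment variables; names and bodies below
# are illustrative only.
import os
import unittest


def _env_flag(name: str) -> bool:
    # Treat "1", "true", "yes", "on" (case-insensitive) as enabled.
    return os.getenv(name, "false").lower() in ("1", "true", "yes", "on")


def slow(test_case):
    # Skipped unless RUN_SLOW is set; meant for tests too heavy to run on every push.
    return unittest.skipUnless(_env_flag("RUN_SLOW"), "test is slow")(test_case)


def nightly(test_case):
    # Skipped unless RUN_NIGHTLY is set; meant for tests that should only run
    # in the scheduled nightly workflow.
    return unittest.skipUnless(_env_flag("RUN_NIGHTLY"), "test is nightly")(test_case)

With gating like this, stacking `@nightly` on top of `@slow` means the decorated LoRA integration classes only execute when both flags are set; assuming the nightly workflow sets both while the per-push slow job sets only `RUN_SLOW`, that is the practical effect of "move LoRA integration tests to nightly".
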
@@ -81,7 +81,7 @@ jobs:
       - name: Environment
         run: |
           python utils/print_env.py
-      - name: Slow PyTorch CUDA checkpoint tests on Ubuntu
+      - name: PyTorch CUDA checkpoint tests on Ubuntu
         env:
           HF_TOKEN: ${{ secrets.HF_TOKEN }}
         # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
@@ -184,7 +184,7 @@ jobs:
       - name: Environment
         run: |
           python utils/print_env.py
-      - name: Run slow Flax TPU tests
+      - name: Run Flax TPU tests
         env:
           HF_TOKEN: ${{ secrets.HF_TOKEN }}
         run: |
@@ -232,7 +232,7 @@ jobs:
       - name: Environment
         run: |
           python utils/print_env.py
-      - name: Run slow ONNXRuntime CUDA tests
+      - name: Run ONNXRuntime CUDA tests
         env:
           HF_TOKEN: ${{ secrets.HF_TOKEN }}
         run: |
@@ -27,6 +27,7 @@ from diffusers import FlowMatchEulerDiscreteScheduler, FluxPipeline, FluxTransfo
 from diffusers.utils.testing_utils import (
     floats_tensor,
     is_peft_available,
+    nightly,
     numpy_cosine_similarity_distance,
     require_peft_backend,
     require_torch_gpu,
@@ -165,9 +166,10 @@ class FluxLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
 @slow
+@nightly
 @require_torch_gpu
 @require_peft_backend
-# @unittest.skip("We cannot run inference on this model with the current CI hardware")
+@unittest.skip("We cannot run inference on this model with the current CI hardware")
 # TODO (DN6, sayakpaul): move these tests to a beefier GPU
 class FluxLoRAIntegrationTests(unittest.TestCase):
     """internal note: The integration slices were obtained on audace.
@@ -34,6 +34,7 @@ from diffusers import (
 from diffusers.utils.import_utils import is_accelerate_available
 from diffusers.utils.testing_utils import (
     load_image,
+    nightly,
     numpy_cosine_similarity_distance,
     require_peft_backend,
     require_torch_gpu,
@@ -207,6 +208,7 @@ class StableDiffusionLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase):
 @slow
+@nightly
 @require_torch_gpu
 @require_peft_backend
 class LoraIntegrationTests(unittest.TestCase):
@@ -113,6 +113,7 @@ class StableDiffusionXLLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase):
 @slow
+@nightly
 @require_torch_gpu
 @require_peft_backend
 class LoraSDXLIntegrationTests(unittest.TestCase):
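
As a usage sketch, a nightly-style local run of the moved suites could look like the following. The test file paths and the `RUN_SLOW` / `RUN_NIGHTLY` flags are assumptions based on the class names in the hunks above; note that the Flux class would still report as skipped because of the `@unittest.skip` shown in the hunk above.

# Hypothetical local reproduction of what the nightly job does for these suites.
# Paths and the environment flags are assumptions; adjust to the repository layout.
import os
import pytest

os.environ["RUN_SLOW"] = "1"      # enable @slow-gated tests
os.environ["RUN_NIGHTLY"] = "1"   # enable @nightly-gated tests

# The env vars are set before collection, so the decorators see them at import time.
raise SystemExit(
    pytest.main(
        [
            "tests/lora/test_lora_layers_sd.py::LoraIntegrationTests",
            "tests/lora/test_lora_layers_sdxl.py::LoraSDXLIntegrationTests",
            "tests/lora/test_lora_layers_flux.py::FluxLoRAIntegrationTests",
            "-v",
        ]
    )
)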