Unverified commit 1070e1a3, authored by Anton Lozhkov, committed by GitHub

[CI] Speed up slow tests (#708)

* [CI] Localize the HF cache

* pip cache

* de-env

* refactor matrix

* fix fast cache

* less onnx steps

* revert

* revert pip cache

* revert pip cache

* remove debugging trigger
parent b35bac4d
@@ -21,7 +21,7 @@ jobs:
     runs-on: [ self-hosted, docker-gpu ]
     container:
       image: python:3.7
-      options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+      options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
     steps:
     - name: Checkout diffusers
...
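The mount change above binds a dedicated host directory, /mnt/hf_cache, to /mnt/cache inside the container, so downloaded models survive across CI runs. For the cache to actually be hit, the Hugging Face libraries have to be pointed at the mount; a minimal sketch, assuming the workflow's env block (outside the hunks shown) sets HF_HOME to the container-side path:

import os

# Assumption: the workflow env (not part of this hunk) points the
# Hugging Face cache at the container-side mount, e.g. HF_HOME=/mnt/cache.
os.environ.setdefault("HF_HOME", "/mnt/cache")

from huggingface_hub import hf_hub_download

# The first CI run downloads into /mnt/cache (backed by /mnt/hf_cache on
# the host); later runs on the same runner reuse the warm cache.
path = hf_hub_download("CompVis/stable-diffusion-v1-4", "model_index.json")
print(path)  # resolves to a file under the mounted cache directory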
@@ -15,14 +15,10 @@ env:
 jobs:
   run_tests_single_gpu:
     name: Diffusers tests
-    strategy:
-      fail-fast: false
-      matrix:
-        machine_type: [ single-gpu ]
-    runs-on: [ self-hosted, docker-gpu, '${{ matrix.machine_type }}' ]
+    runs-on: [ self-hosted, docker-gpu, single-gpu ]
     container:
       image: nvcr.io/nvidia/pytorch:22.07-py3
-      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache
     steps:
     - name: Checkout diffusers
@@ -66,14 +62,10 @@ jobs:
   run_examples_single_gpu:
     name: Examples tests
-    strategy:
-      fail-fast: false
-      matrix:
-        machine_type: [ single-gpu ]
-    runs-on: [ self-hosted, docker-gpu, '${{ matrix.machine_type }}' ]
+    runs-on: [ self-hosted, docker-gpu, single-gpu ]
     container:
       image: nvcr.io/nvidia/pytorch:22.07-py3
-      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache
     steps:
     - name: Checkout diffusers
...
@@ -92,7 +92,7 @@ _deps = [
     "jaxlib>=0.1.65,<=0.3.6",
     "modelcards>=0.1.4",
     "numpy",
-    "onnxruntime-gpu",
+    "onnxruntime",
     "pytest",
     "pytest-timeout",
     "pytest-xdist",
@@ -178,7 +178,7 @@ extras["docs"] = deps_list("hf-doc-builder")
 extras["training"] = deps_list("accelerate", "datasets", "tensorboard", "modelcards")
 extras["test"] = deps_list(
     "datasets",
-    "onnxruntime-gpu",
+    "onnxruntime",
     "pytest",
     "pytest-timeout",
     "pytest-xdist",
...
@@ -17,7 +17,7 @@ deps = {
     "jaxlib": "jaxlib>=0.1.65,<=0.3.6",
     "modelcards": "modelcards>=0.1.4",
     "numpy": "numpy",
-    "onnxruntime-gpu": "onnxruntime-gpu",
+    "onnxruntime": "onnxruntime",
     "pytest": "pytest",
     "pytest-timeout": "pytest-timeout",
     "pytest-xdist": "pytest-xdist",
...
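The onnxruntime-gpu → onnxruntime rename lands in three places (the _deps list in setup.py, the generated dependency table above, and the extras["test"] call) because extras are resolved by dictionary key. A trimmed-down sketch of that pattern; the deps_list helper name matches setup.py, and the shortened table is illustrative only:

deps = {
    "onnxruntime": "onnxruntime",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
}

def deps_list(*pkgs):
    # Each extra is resolved by dictionary key, which is why the rename has
    # to be mirrored everywhere the old key was referenced.
    return [deps[pkg] for pkg in pkgs]

extras = {"test": deps_list("onnxruntime", "pytest", "pytest-timeout", "pytest-xdist")}
print(extras["test"])  # ['onnxruntime', 'pytest', 'pytest-timeout', 'pytest-xdist']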
@@ -1422,18 +1422,18 @@ class PipelineTesterMixin(unittest.TestCase):
     @slow
     def test_stable_diffusion_onnx(self):
         sd_pipe = StableDiffusionOnnxPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CUDAExecutionProvider", use_auth_token=True
+            "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider", use_auth_token=True
         )

         prompt = "A painting of a squirrel eating a burger"
         np.random.seed(0)
-        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=20, output_type="np")
+        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=5, output_type="np")
         image = output.images

         image_slice = image[0, -3:, -3:, -1]

         assert image.shape == (1, 512, 512, 3)
-        expected_slice = np.array([0.0385, 0.0252, 0.0234, 0.0287, 0.0358, 0.0287, 0.0276, 0.0235, 0.0010])
+        expected_slice = np.array([0.3602, 0.3688, 0.3652, 0.3895, 0.3782, 0.3747, 0.3927, 0.4241, 0.4327])
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

     @slow
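Switching the test to CPUExecutionProvider is what allows the dependency swap above: the plain onnxruntime wheel ships only the CPU provider, while CUDAExecutionProvider is registered by the separate onnxruntime-gpu package. A quick check using the standard onnxruntime API:

import onnxruntime as ort

# With the CPU-only wheel this prints ['CPUExecutionProvider'];
# CUDAExecutionProvider only appears when onnxruntime-gpu is installed.
print(ort.get_available_providers())

The expected_slice values change along with the provider and step count, since a different execution provider and fewer denoising steps produce a numerically different image.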
@@ -1592,7 +1592,7 @@ class PipelineTesterMixin(unittest.TestCase):
                 assert latents.shape == (1, 4, 64, 64)
                 latents_slice = latents[0, -3:, -3:, -1]
                 expected_slice = np.array(
-                    [-0.6254, -0.2742, -1.0710, 0.2296, -1.1683, 0.6913, -2.0605, -0.0682, 0.9700]
+                    [-0.5950, -0.3039, -1.1672, 0.1594, -1.1572, 0.6719, -1.9712, -0.0403, 0.9592]
                )
                 assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
@@ -1606,6 +1606,6 @@ class PipelineTesterMixin(unittest.TestCase):
         prompt = "Andromeda galaxy in a bottle"

         np.random.seed(0)
-        pipe(prompt=prompt, num_inference_steps=50, guidance_scale=7.5, callback=test_callback_fn, callback_steps=1)
+        pipe(prompt=prompt, num_inference_steps=5, guidance_scale=7.5, callback=test_callback_fn, callback_steps=1)
         assert test_callback_fn.has_been_called
-        assert number_of_steps == 51
+        assert number_of_steps == 6
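The expected call count follows the num_inference_steps + 1 pattern visible in the assertions (51 calls for 50 steps, 6 for 5), since with callback_steps=1 the callback fires on every scheduler iteration. The counting callback itself is defined outside this hunk; a hedged reconstruction consistent with the assertions:

number_of_steps = 0

def test_callback_fn(step: int, timestep: int, latents) -> None:
    # Hypothetical reconstruction: record that the pipeline invoked the
    # callback, then count invocations for the step-count assertion.
    global number_of_steps
    test_callback_fn.has_been_called = True
    number_of_steps += 1

test_callback_fn.has_been_called = False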