Unverified Commit 4d39b748 authored by Dhruv Nair's avatar Dhruv Nair Committed by GitHub
Browse files

Memory clean up on all Slow Tests (#7514)



* update

* update

---------
Co-authored-by: Sayak Paul <spsayakpaul@gmail.com>
parent fac76169
...@@ -253,6 +253,11 @@ class StableDiffusionPanoramaPipelineFastTests( ...@@ -253,6 +253,11 @@ class StableDiffusionPanoramaPipelineFastTests(
@nightly @nightly
@require_torch_gpu @require_torch_gpu
class StableDiffusionPanoramaNightlyTests(unittest.TestCase): class StableDiffusionPanoramaNightlyTests(unittest.TestCase):
def setUp(self):
    # Clean up VRAM before each test: run the Python garbage collector,
    # then release PyTorch's cached CUDA allocations.
    super().setUp()
    gc.collect()
    torch.cuda.empty_cache()
def tearDown(self): def tearDown(self):
super().tearDown() super().tearDown()
gc.collect() gc.collect()
......
...@@ -28,6 +28,12 @@ from diffusers.utils.testing_utils import floats_tensor, nightly, require_torch_ ...@@ -28,6 +28,12 @@ from diffusers.utils.testing_utils import floats_tensor, nightly, require_torch_
class SafeDiffusionPipelineFastTests(unittest.TestCase): class SafeDiffusionPipelineFastTests(unittest.TestCase):
def setUp(self):
    # clean up the VRAM before each test
    # (gc.collect() drops unreachable Python objects first so that
    # torch.cuda.empty_cache() can actually release their CUDA blocks)
    super().setUp()
    gc.collect()
    torch.cuda.empty_cache()
def tearDown(self): def tearDown(self):
# clean up the VRAM after each test # clean up the VRAM after each test
super().tearDown() super().tearDown()
...@@ -258,6 +264,12 @@ class SafeDiffusionPipelineFastTests(unittest.TestCase): ...@@ -258,6 +264,12 @@ class SafeDiffusionPipelineFastTests(unittest.TestCase):
@nightly @nightly
@require_torch_gpu @require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase): class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
def setUp(self):
    # clean up the VRAM before each test
    # (collect garbage first, then release cached CUDA allocations)
    super().setUp()
    gc.collect()
    torch.cuda.empty_cache()
def tearDown(self): def tearDown(self):
# clean up the VRAM after each test # clean up the VRAM after each test
super().tearDown() super().tearDown()
......
...@@ -146,6 +146,12 @@ class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTes ...@@ -146,6 +146,12 @@ class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTes
@nightly @nightly
@require_torch_gpu @require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase): class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
def setUp(self):
    # clean up the VRAM before each test
    # (collect garbage first, then release cached CUDA allocations)
    super().setUp()
    gc.collect()
    torch.cuda.empty_cache()
def tearDown(self): def tearDown(self):
# clean up the VRAM after each test # clean up the VRAM after each test
super().tearDown() super().tearDown()
......
...@@ -1011,6 +1011,11 @@ class StableDiffusionXLPipelineFastTests( ...@@ -1011,6 +1011,11 @@ class StableDiffusionXLPipelineFastTests(
@slow @slow
class StableDiffusionXLPipelineIntegrationTests(unittest.TestCase): class StableDiffusionXLPipelineIntegrationTests(unittest.TestCase):
def setUp(self):
    # Clean up VRAM before each test: run the Python garbage collector,
    # then release PyTorch's cached CUDA allocations.
    super().setUp()
    gc.collect()
    torch.cuda.empty_cache()
def tearDown(self): def tearDown(self):
super().tearDown() super().tearDown()
gc.collect() gc.collect()
......
...@@ -683,6 +683,11 @@ class StableDiffusionXLMultiAdapterPipelineFastTests( ...@@ -683,6 +683,11 @@ class StableDiffusionXLMultiAdapterPipelineFastTests(
@slow @slow
@require_torch_gpu @require_torch_gpu
class AdapterSDXLPipelineSlowTests(unittest.TestCase): class AdapterSDXLPipelineSlowTests(unittest.TestCase):
def setUp(self):
    # Clean up VRAM before each test: run the Python garbage collector,
    # then release PyTorch's cached CUDA allocations.
    super().setUp()
    gc.collect()
    torch.cuda.empty_cache()
def tearDown(self): def tearDown(self):
super().tearDown() super().tearDown()
gc.collect() gc.collect()
......
...@@ -31,6 +31,12 @@ enable_full_determinism() ...@@ -31,6 +31,12 @@ enable_full_determinism()
class StableDiffusionXLKPipelineIntegrationTests(unittest.TestCase): class StableDiffusionXLKPipelineIntegrationTests(unittest.TestCase):
dtype = torch.float16 dtype = torch.float16
def setUp(self):
    # clean up the VRAM before each test
    # (collect garbage first, then release cached CUDA allocations)
    super().setUp()
    gc.collect()
    torch.cuda.empty_cache()
def tearDown(self): def tearDown(self):
# clean up the VRAM after each test # clean up the VRAM after each test
super().tearDown() super().tearDown()
......
...@@ -188,6 +188,12 @@ class StableUnCLIPPipelineFastTests( ...@@ -188,6 +188,12 @@ class StableUnCLIPPipelineFastTests(
@nightly @nightly
@require_torch_gpu @require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase): class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
def setUp(self):
    # clean up the VRAM before each test
    # (collect garbage first, then release cached CUDA allocations)
    super().setUp()
    gc.collect()
    torch.cuda.empty_cache()
def tearDown(self): def tearDown(self):
# clean up the VRAM after each test # clean up the VRAM after each test
super().tearDown() super().tearDown()
......
...@@ -209,6 +209,12 @@ class StableUnCLIPImg2ImgPipelineFastTests( ...@@ -209,6 +209,12 @@ class StableUnCLIPImg2ImgPipelineFastTests(
@nightly @nightly
@require_torch_gpu @require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase): class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
def setUp(self):
    # clean up the VRAM before each test
    # (collect garbage first, then release cached CUDA allocations)
    super().setUp()
    gc.collect()
    torch.cuda.empty_cache()
def tearDown(self): def tearDown(self):
# clean up the VRAM after each test # clean up the VRAM after each test
super().tearDown() super().tearDown()
......
...@@ -516,6 +516,12 @@ class StableVideoDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCa ...@@ -516,6 +516,12 @@ class StableVideoDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCa
@slow @slow
@require_torch_gpu @require_torch_gpu
class StableVideoDiffusionPipelineSlowTests(unittest.TestCase): class StableVideoDiffusionPipelineSlowTests(unittest.TestCase):
def setUp(self):
    # clean up the VRAM before each test
    # (collect garbage first, then release cached CUDA allocations)
    super().setUp()
    gc.collect()
    torch.cuda.empty_cache()
def tearDown(self): def tearDown(self):
# clean up the VRAM after each test # clean up the VRAM after each test
super().tearDown() super().tearDown()
......
...@@ -1056,6 +1056,12 @@ class CustomPipelineTests(unittest.TestCase): ...@@ -1056,6 +1056,12 @@ class CustomPipelineTests(unittest.TestCase):
class PipelineFastTests(unittest.TestCase): class PipelineFastTests(unittest.TestCase):
def setUp(self):
    # clean up the VRAM before each test
    # (gc.collect() drops unreachable Python objects first so that
    # torch.cuda.empty_cache() can actually release their CUDA blocks)
    super().setUp()
    gc.collect()
    torch.cuda.empty_cache()
def tearDown(self): def tearDown(self):
# clean up the VRAM after each test # clean up the VRAM after each test
super().tearDown() super().tearDown()
...@@ -1673,6 +1679,12 @@ class PipelineFastTests(unittest.TestCase): ...@@ -1673,6 +1679,12 @@ class PipelineFastTests(unittest.TestCase):
@slow @slow
@require_torch_gpu @require_torch_gpu
class PipelineSlowTests(unittest.TestCase): class PipelineSlowTests(unittest.TestCase):
def setUp(self):
    # clean up the VRAM before each test
    # (collect garbage first, then release cached CUDA allocations)
    super().setUp()
    gc.collect()
    torch.cuda.empty_cache()
def tearDown(self): def tearDown(self):
# clean up the VRAM after each test # clean up the VRAM after each test
super().tearDown() super().tearDown()
...@@ -1898,6 +1910,12 @@ class PipelineSlowTests(unittest.TestCase): ...@@ -1898,6 +1910,12 @@ class PipelineSlowTests(unittest.TestCase):
@nightly @nightly
@require_torch_gpu @require_torch_gpu
class PipelineNightlyTests(unittest.TestCase): class PipelineNightlyTests(unittest.TestCase):
def setUp(self):
    # clean up the VRAM before each test
    # (collect garbage first, then release cached CUDA allocations)
    super().setUp()
    gc.collect()
    torch.cuda.empty_cache()
def tearDown(self): def tearDown(self):
# clean up the VRAM after each test # clean up the VRAM after each test
super().tearDown() super().tearDown()
......
...@@ -421,6 +421,12 @@ class UnCLIPPipelineFastTests(PipelineTesterMixin, unittest.TestCase): ...@@ -421,6 +421,12 @@ class UnCLIPPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
@nightly @nightly
class UnCLIPPipelineCPUIntegrationTests(unittest.TestCase): class UnCLIPPipelineCPUIntegrationTests(unittest.TestCase):
def setUp(self):
    # clean up the VRAM before each test
    # (torch.cuda.empty_cache() is a no-op when no CUDA device is
    # present, so this is safe for this CPU-oriented test class too)
    super().setUp()
    gc.collect()
    torch.cuda.empty_cache()
def tearDown(self): def tearDown(self):
# clean up the VRAM after each test # clean up the VRAM after each test
super().tearDown() super().tearDown()
...@@ -453,6 +459,12 @@ class UnCLIPPipelineCPUIntegrationTests(unittest.TestCase): ...@@ -453,6 +459,12 @@ class UnCLIPPipelineCPUIntegrationTests(unittest.TestCase):
@nightly @nightly
@require_torch_gpu @require_torch_gpu
class UnCLIPPipelineIntegrationTests(unittest.TestCase): class UnCLIPPipelineIntegrationTests(unittest.TestCase):
def setUp(self):
    # clean up the VRAM before each test
    # (collect garbage first, then release cached CUDA allocations)
    super().setUp()
    gc.collect()
    torch.cuda.empty_cache()
def tearDown(self): def tearDown(self):
# clean up the VRAM after each test # clean up the VRAM after each test
super().tearDown() super().tearDown()
......
...@@ -496,6 +496,12 @@ class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCa ...@@ -496,6 +496,12 @@ class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCa
@nightly @nightly
@require_torch_gpu @require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase): class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
def setUp(self):
    # clean up the VRAM before each test
    # (collect garbage first, then release cached CUDA allocations)
    super().setUp()
    gc.collect()
    torch.cuda.empty_cache()
def tearDown(self): def tearDown(self):
# clean up the VRAM after each test # clean up the VRAM after each test
super().tearDown() super().tearDown()
......
...@@ -574,6 +574,11 @@ class UniDiffuserPipelineFastTests( ...@@ -574,6 +574,11 @@ class UniDiffuserPipelineFastTests(
@nightly @nightly
@require_torch_gpu @require_torch_gpu
class UniDiffuserPipelineSlowTests(unittest.TestCase): class UniDiffuserPipelineSlowTests(unittest.TestCase):
def setUp(self):
    # Clean up VRAM before each test: run the Python garbage collector,
    # then release PyTorch's cached CUDA allocations.
    super().setUp()
    gc.collect()
    torch.cuda.empty_cache()
def tearDown(self): def tearDown(self):
super().tearDown() super().tearDown()
gc.collect() gc.collect()
...@@ -690,6 +695,11 @@ class UniDiffuserPipelineSlowTests(unittest.TestCase): ...@@ -690,6 +695,11 @@ class UniDiffuserPipelineSlowTests(unittest.TestCase):
@nightly @nightly
@require_torch_gpu @require_torch_gpu
class UniDiffuserPipelineNightlyTests(unittest.TestCase): class UniDiffuserPipelineNightlyTests(unittest.TestCase):
def setUp(self):
    # Clean up VRAM before each test: run the Python garbage collector,
    # then release PyTorch's cached CUDA allocations.
    super().setUp()
    gc.collect()
    torch.cuda.empty_cache()
def tearDown(self): def tearDown(self):
super().tearDown() super().tearDown()
gc.collect() gc.collect()
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment