Unverified Commit 4d39b748 authored by Dhruv Nair, committed by GitHub

Memory clean up on all Slow Tests (#7514)



* update

* update

---------
Co-authored-by: Sayak Paul <spsayakpaul@gmail.com>
parent fac76169
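
The hunks below apply one repeated change: each slow/nightly integration test class (and a few fast-test classes) gains a setUp and tearDown that release Python and CUDA memory between tests. The tearDown bodies are truncated in several hunks; from the visible lines they call super().tearDown() and gc.collect(), and it is assumed here that they mirror setUp by also calling torch.cuda.empty_cache(). A minimal sketch of the pattern, with a hypothetical class name:

import gc
import unittest

import torch


class ExamplePipelineIntegrationTests(unittest.TestCase):
    # Hypothetical class name; illustrates the cleanup pattern this commit adds.
    def setUp(self):
        # clean up the VRAM before each test
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        # clean up the VRAM after each test (empty_cache here is assumed from
        # the truncated hunks, mirroring setUp)
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

Calling gc.collect() first drops Python references to pipelines created by a previous test so that torch.cuda.empty_cache() can actually return the freed blocks to the driver, keeping GPU memory from accumulating across tests in the same process.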
@@ -37,6 +37,12 @@ enable_full_determinism()
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def setUp(self):
        # clean up the VRAM before each test
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
@@ -267,6 +273,12 @@ class SafeDiffusionPipelineFastTests(unittest.TestCase):
@nightly
@require_torch_gpu
class SemanticDiffusionPipelineIntegrationTests(unittest.TestCase):
    def setUp(self):
        # clean up the VRAM before each test
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
@@ -224,6 +224,12 @@ class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
@nightly
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def setUp(self):
        # clean up the VRAM before each test
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
@@ -250,6 +250,12 @@ class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
@nightly
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def setUp(self):
        # clean up the VRAM before each test
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
@@ -311,6 +311,12 @@ class StableCascadeDecoderPipelineFastTests(PipelineTesterMixin, unittest.TestCa
@slow
@require_torch_gpu
class StableCascadeDecoderPipelineIntegrationTests(unittest.TestCase):
    def setUp(self):
        # clean up the VRAM before each test
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
@@ -313,6 +313,12 @@ class StableCascadePriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase
@slow
@require_torch_gpu
class StableCascadePriorPipelineIntegrationTests(unittest.TestCase):
    def setUp(self):
        # clean up the VRAM before each test
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
@@ -1244,6 +1244,11 @@ class StableDiffusionPipelineSlowTests(unittest.TestCase):
@slow
@require_torch_gpu
class StableDiffusionPipelineCkptTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        super().tearDown()
        gc.collect()
@@ -1338,6 +1343,11 @@ class StableDiffusionPipelineCkptTests(unittest.TestCase):
@nightly
@require_torch_gpu
class StableDiffusionPipelineNightlyTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        super().tearDown()
        gc.collect()
@@ -395,6 +395,11 @@ class StableDiffusionImg2ImgPipelineFastTests(
@slow
@require_torch_gpu
class StableDiffusionImg2ImgPipelineSlowTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        super().tearDown()
        gc.collect()
@@ -654,6 +659,11 @@ class StableDiffusionImg2ImgPipelineSlowTests(unittest.TestCase):
@nightly
@require_torch_gpu
class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        super().tearDown()
        gc.collect()
@@ -1089,6 +1089,11 @@ class StableDiffusionInpaintPipelineAsymmetricAutoencoderKLSlowTests(unittest.Te
@nightly
@require_torch_gpu
class StableDiffusionInpaintPipelineNightlyTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        super().tearDown()
        gc.collect()
@@ -271,6 +271,11 @@ class StableDiffusionInstructPix2PixPipelineFastTests(
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        super().tearDown()
        gc.collect()
@@ -205,6 +205,11 @@ class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def setUp(self):
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        super().tearDown()
        gc.collect()
@@ -378,6 +378,11 @@ class StableDiffusionDepth2ImgPipelineFastTests(
@slow
@require_torch_gpu
class StableDiffusionDepth2ImgPipelineSlowTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        super().tearDown()
        gc.collect()
@@ -517,6 +522,11 @@ class StableDiffusionDepth2ImgPipelineSlowTests(unittest.TestCase):
@nightly
@require_torch_gpu
class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        super().tearDown()
        gc.collect()
@@ -293,6 +293,11 @@ class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, Pipeli
@require_torch_gpu
@nightly
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        super().tearDown()
        gc.collect()
@@ -365,6 +370,11 @@ class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
@nightly
@require_torch_gpu
class StableDiffusionDiffEditPipelineNightlyTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        super().tearDown()
        gc.collect()
@@ -156,6 +156,12 @@ class StableDiffusion2InpaintPipelineFastTests(
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def setUp(self):
        # clean up the VRAM before each test
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
@@ -243,6 +243,11 @@ class StableDiffusionLatentUpscalePipelineFastTests(
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        super().tearDown()
        gc.collect()
@@ -40,6 +40,12 @@ enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def setUp(self):
        # clean up the VRAM before each test
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
@@ -377,6 +383,12 @@ class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def setUp(self):
        # clean up the VRAM before each test
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
@@ -45,6 +45,12 @@ enable_full_determinism()
class StableDiffusion2VPredictionPipelineFastTests(unittest.TestCase):
    def setUp(self):
        # clean up the VRAM before each test
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
@@ -254,6 +260,12 @@ class StableDiffusion2VPredictionPipelineFastTests(unittest.TestCase):
@slow
@require_torch_gpu
class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase):
    def setUp(self):
        # clean up the VRAM before each test
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
@@ -593,6 +593,11 @@ class StableDiffusionMultiAdapterPipelineFastTests(AdapterTests, PipelineTesterM
@slow
@require_torch_gpu
class StableDiffusionAdapterPipelineSlowTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        super().tearDown()
        gc.collect()
@@ -164,6 +164,11 @@ class StableDiffusionImageVariationPipelineFastTests(
@slow
@require_torch_gpu
class StableDiffusionImageVariationPipelineSlowTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        super().tearDown()
        gc.collect()
@@ -274,6 +279,11 @@ class StableDiffusionImageVariationPipelineSlowTests(unittest.TestCase):
@nightly
@require_torch_gpu
class StableDiffusionImageVariationPipelineNightlyTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        super().tearDown()
        gc.collect()
@@ -29,6 +29,12 @@ enable_full_determinism()
@nightly
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def setUp(self):
        # clean up the VRAM before each test
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
@@ -207,6 +207,11 @@ class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        super().tearDown()
        gc.collect()
@@ -253,6 +258,11 @@ class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
@nightly
@require_torch_gpu
class StableDiffusionPipelineNightlyTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        super().tearDown()
        gc.collect()
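
Every class touched by this commit repeats the same boilerplate. A possible follow-up (not part of this commit) would be to factor the cleanup into a shared mixin so each test class declares it only once; a hypothetical sketch, with all names invented for illustration:

import gc
import unittest

import torch


class MemoryCleanupMixin:
    # Hypothetical mixin, not present in the diffusers test suite at this commit.
    def setUp(self):
        super().setUp()
        self._free_memory()

    def tearDown(self):
        super().tearDown()
        self._free_memory()

    @staticmethod
    def _free_memory():
        # Drop dangling Python references, then release cached CUDA blocks.
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()


class ExamplePipelineIntegrationTests(MemoryCleanupMixin, unittest.TestCase):
    def test_placeholder(self):
        self.assertTrue(True)

The per-class copies in the diff keep each test file self-contained, at the cost of the repetition visible above.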