renzhc / diffusers_dcu · Commit 79a7ab92
"src/libtorchaudio/utils.cpp" did not exist on "3f02b898ecfa27cbeced1282f1ac7b25466d3877"
Commit 79a7ab92 (unverified)
Authored Dec 07, 2023 by Dhruv Nair; committed by GitHub on Dec 07, 2023
Fix clearing backend cache from device agnostic testing (#6075)
update
Parent: c2717317
Showing 4 changed files with 7 additions and 7 deletions (+7 -7):

tests/models/test_models_prior.py (+1 -1)
tests/models/test_models_unet_2d_condition.py (+1 -1)
tests/models/test_models_vae.py (+3 -3)
tests/pipelines/stable_diffusion_2/test_stable_diffusion.py (+2 -2)
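The change itself is mechanical: each affected tearDown now passes the active torch_device into backend_empty_cache instead of calling it with no argument, so the helper knows which accelerator backend's cache to clear. As a rough sketch only (the real helper lives in diffusers' device-agnostic testing utilities and may be implemented differently), such a dispatcher could look like this:

import torch

# Hedged sketch, not the actual diffusers helper: dispatch on the device
# string that the test suite exposes as `torch_device`.
def backend_empty_cache(device: str) -> None:
    """Free cached accelerator memory for the backend named by `device`."""
    if device.startswith("cuda"):
        torch.cuda.empty_cache()
    elif device.startswith("mps") and hasattr(torch, "mps"):
        torch.mps.empty_cache()
    # A plain CPU run has no backend cache, so there is nothing to clear.

Called as backend_empty_cache(torch_device) from tearDown, this frees VRAM between integration tests regardless of which backend the suite is running on, which is exactly the pattern the diffs below switch to.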
tests/models/test_models_prior.py

@@ -164,7 +164,7 @@ class PriorTransformerIntegrationTests(unittest.TestCase):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        backend_empty_cache()
+        backend_empty_cache(torch_device)

     @parameterized.expand(
         [
tests/models/test_models_unet_2d_condition.py

@@ -869,7 +869,7 @@ class UNet2DConditionModelIntegrationTests(unittest.TestCase):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        backend_empty_cache()
+        backend_empty_cache(torch_device)

     def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
         dtype = torch.float16 if fp16 else torch.float32
tests/models/test_models_vae.py

@@ -485,7 +485,7 @@ class AutoencoderTinyIntegrationTests(unittest.TestCase):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        backend_empty_cache()
+        backend_empty_cache(torch_device)

     def get_file_format(self, seed, shape):
         return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

@@ -565,7 +565,7 @@ class AutoencoderKLIntegrationTests(unittest.TestCase):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        backend_empty_cache()
+        backend_empty_cache(torch_device)

     def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
         dtype = torch.float16 if fp16 else torch.float32

@@ -820,7 +820,7 @@ class AsymmetricAutoencoderKLIntegrationTests(unittest.TestCase):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        backend_empty_cache()
+        backend_empty_cache(torch_device)

     def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
         dtype = torch.float16 if fp16 else torch.float32
tests/pipelines/stable_diffusion_2/test_stable_diffusion.py

@@ -310,7 +310,7 @@ class StableDiffusion2PipelineSlowTests(unittest.TestCase):
     def tearDown(self):
         super().tearDown()
         gc.collect()
-        backend_empty_cache()
+        backend_empty_cache(torch_device)

     def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
         _generator_device = "cpu" if not generator_device.startswith("cuda") else "cuda"

@@ -531,7 +531,7 @@ class StableDiffusion2PipelineNightlyTests(unittest.TestCase):
     def tearDown(self):
         super().tearDown()
         gc.collect()
-        backend_empty_cache()
+        backend_empty_cache(torch_device)

     def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
         _generator_device = "cpu" if not generator_device.startswith("cuda") else "cuda"