Unverified Commit 61719bf2 authored by Anton Lozhkov, committed by GitHub

Fix gpu_id (#1326)

parent b3911f89
@@ -178,7 +178,7 @@ class AltDiffusionImg2ImgPipeline(DiffusionPipeline):
         self.enable_attention_slicing(None)
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.AltDiffusionPipeline.enable_sequential_cpu_offload
-    def enable_sequential_cpu_offload(self):
+    def enable_sequential_cpu_offload(self, gpu_id=0):
         r"""
         Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
         text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
@@ -189,7 +189,7 @@ class AltDiffusionImg2ImgPipeline(DiffusionPipeline):
         else:
             raise ImportError("Please install accelerate via `pip install accelerate`")
 
-        device = torch.device("cuda")
+        device = torch.device(f"cuda:{gpu_id}")
 
         for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
             if cpu_offloaded_model is not None:
...
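The same one-line signature change and device construction repeat in each pipeline below. For context, the full method after the fix plausibly reads as follows; this is a sketch reconstructed from the hunk context above, so the `is_accelerate_available` guard, the docstring tail, and the closing `cpu_offload` call are assumptions about the surrounding code, not verbatim lines from this diff:

```python
import torch

from diffusers.utils import is_accelerate_available


# Method of each affected pipeline class (e.g. StableDiffusionImg2ImgPipeline);
# shown standalone here for readability.
def enable_sequential_cpu_offload(self, gpu_id=0):
    r"""
    Offloads all models to CPU using accelerate, significantly reducing memory
    usage. When called, unet, text_encoder, vae and safety checker have their
    state dicts saved to CPU and are then moved to a `torch.device('meta')`,
    loaded to GPU only when their specific submodule's `forward` is called.
    """
    if is_accelerate_available():
        from accelerate import cpu_offload
    else:
        raise ImportError("Please install accelerate via `pip install accelerate`")

    # The fix: respect the requested device index instead of the hard-coded
    # torch.device("cuda"), which always resolved to cuda:0.
    device = torch.device(f"cuda:{gpu_id}")

    for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
        if cpu_offloaded_model is not None:
            # accelerate's cpu_offload keeps the module on CPU and streams it
            # to `device` only while its forward pass runs.
            cpu_offload(cpu_offloaded_model, device)
```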
@@ -209,7 +209,7 @@ class CycleDiffusionPipeline(DiffusionPipeline):
         self.enable_attention_slicing(None)
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload
-    def enable_sequential_cpu_offload(self):
+    def enable_sequential_cpu_offload(self, gpu_id=0):
         r"""
         Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
         text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
...
@@ -176,7 +176,7 @@ class StableDiffusionImg2ImgPipeline(DiffusionPipeline):
         self.enable_attention_slicing(None)
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload
-    def enable_sequential_cpu_offload(self):
+    def enable_sequential_cpu_offload(self, gpu_id=0):
         r"""
         Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
         text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
...
@@ -169,7 +169,7 @@ class StableDiffusionInpaintPipeline(DiffusionPipeline):
         self.enable_attention_slicing(None)
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload
-    def enable_sequential_cpu_offload(self):
+    def enable_sequential_cpu_offload(self, gpu_id=0):
         r"""
         Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
         text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
...
@@ -189,7 +189,7 @@ class StableDiffusionInpaintPipelineLegacy(DiffusionPipeline):
         self.enable_attention_slicing(None)
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload
-    def enable_sequential_cpu_offload(self):
+    def enable_sequential_cpu_offload(self, gpu_id=0):
         r"""
         Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
         text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
...
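In practice the new argument lets callers pin sequential offload to a specific GPU, e.g. the second card of a multi-GPU machine. A minimal usage sketch; the model id and device index are illustrative, not part of this commit:

```python
import torch

from diffusers import StableDiffusionImg2ImgPipeline

pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)

# Before this fix the execution device was hard-coded to torch.device("cuda"),
# i.e. cuda:0. Now offloaded submodules are streamed to the chosen card.
pipe.enable_sequential_cpu_offload(gpu_id=1)
```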