Unverified commit 51fd3dd2, authored by M. Tolga Cangöz and committed by GitHub

[`Docs`] Remove `.to('cuda')` before `.enable_model_cpu_offload()` (#5795)

Remove `.to('cuda')` before `enable_model_cpu_offload()` and trim trailing whitespace.
parent 98457580
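
The pattern is the same in every snippet below: `enable_model_cpu_offload()` keeps the pipeline on the CPU and moves each sub-model to the GPU only while it is needed, so moving the whole pipeline to CUDA first is unnecessary and undercuts the memory savings. A minimal sketch of the corrected usage, reusing a checkpoint and prompt that appear in this diff:

```py
# Minimal sketch of the corrected pattern (checkpoint, dtype, and kwargs taken from the diff).
import torch
from diffusers import AutoPipelineForText2Image

pipeline = AutoPipelineForText2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
# No pipeline.to("cuda") here: model offloading (via `accelerate`) handles device placement,
# loading each component onto the GPU only while it runs.
pipeline.enable_model_cpu_offload()

image = pipeline("A fantasy landscape, Cinematic lighting").images[0]
```
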
@@ -86,7 +86,7 @@ import torch
 controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16, use_safetensors=True)
 pipe = StableDiffusionControlNetPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16, use_safetensors=True
-).to("cuda")
+)
 pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
 pipe.enable_model_cpu_offload()
@@ -146,7 +146,7 @@ import torch
 controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11f1p_sd15_depth", torch_dtype=torch.float16, use_safetensors=True)
 pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16, use_safetensors=True
-).to("cuda")
+)
 pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
 pipe.enable_model_cpu_offload()
@@ -231,7 +231,7 @@ import torch
 controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16, use_safetensors=True)
 pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16, use_safetensors=True
-).to("cuda")
+)
 pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
 pipe.enable_model_cpu_offload()

@@ -165,7 +165,7 @@ from diffusers import StableDiffusionDiffEditPipeline
 pipeline = StableDiffusionDiffEditPipeline.from_pretrained(
     "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16, use_safetensors=True
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 pipeline.enable_vae_slicing()

@@ -27,7 +27,7 @@ from diffusers.utils import load_image, make_image_grid
 pipeline = AutoPipelineForImage2Image.from_pretrained(
     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16, use_safetensors=True
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -79,7 +79,7 @@ from diffusers.utils import make_image_grid, load_image
 pipeline = AutoPipelineForImage2Image.from_pretrained(
     "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -117,7 +117,7 @@ from diffusers.utils import make_image_grid, load_image
 pipeline = AutoPipelineForImage2Image.from_pretrained(
     "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -157,7 +157,7 @@ from diffusers.utils import make_image_grid, load_image
 pipeline = AutoPipelineForImage2Image.from_pretrained(
     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16, use_safetensors=True
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -204,7 +204,7 @@ from diffusers.utils import make_image_grid, load_image
 pipeline = AutoPipelineForImage2Image.from_pretrained(
     "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -248,7 +248,7 @@ from diffusers.utils import make_image_grid, load_image
 pipeline = AutoPipelineForImage2Image.from_pretrained(
     "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -290,7 +290,7 @@ from diffusers.utils import make_image_grid, load_image
 pipeline = AutoPipelineForImage2Image.from_pretrained(
     "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -335,7 +335,7 @@ from diffusers.utils import make_image_grid
 pipeline = AutoPipelineForText2Image.from_pretrained(
     "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -349,7 +349,7 @@ Now you can pass this generated image to the image-to-image pipeline:
 ```py
 pipeline = AutoPipelineForImage2Image.from_pretrained(
     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16, use_safetensors=True
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -371,7 +371,7 @@ from diffusers.utils import make_image_grid, load_image
 pipeline = AutoPipelineForImage2Image.from_pretrained(
     "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -397,7 +397,7 @@ Pass the latent output from this pipeline to the next pipeline to generate an im
 ```py
 pipeline = AutoPipelineForImage2Image.from_pretrained(
     "ogkalu/Comic-Diffusion", torch_dtype=torch.float16
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -411,7 +411,7 @@ Repeat one more time to generate the final image in a [pixel art style](https://
 ```py
 pipeline = AutoPipelineForImage2Image.from_pretrained(
     "kohbanye/pixel-art-style", torch_dtype=torch.float16
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -434,7 +434,7 @@ from diffusers.utils import make_image_grid, load_image
 pipeline = AutoPipelineForImage2Image.from_pretrained(
     "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -462,7 +462,7 @@ from diffusers import StableDiffusionLatentUpscalePipeline
 upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
     "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-).to("cuda")
+)
 upscaler.enable_model_cpu_offload()
 upscaler.enable_xformers_memory_efficient_attention()
@@ -476,7 +476,7 @@ from diffusers import StableDiffusionUpscalePipeline
 super_res = StableDiffusionUpscalePipeline.from_pretrained(
     "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-).to("cuda")
+)
 super_res.enable_model_cpu_offload()
 super_res.enable_xformers_memory_efficient_attention()
@@ -500,7 +500,7 @@ import torch
 pipeline = AutoPipelineForImage2Image.from_pretrained(
     "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -537,7 +537,7 @@ import torch
 controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11f1p_sd15_depth", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
 pipeline = AutoPipelineForImage2Image.from_pretrained(
     "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -571,7 +571,7 @@ Let's apply a new [style](https://huggingface.co/nitrosocke/elden-ring-diffusion
 ```py
 pipeline = AutoPipelineForImage2Image.from_pretrained(
     "nitrosocke/elden-ring-diffusion", torch_dtype=torch.float16,
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()

@@ -27,7 +27,7 @@ from diffusers.utils import load_image, make_image_grid
 pipeline = AutoPipelineForInpainting.from_pretrained(
     "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -98,7 +98,7 @@ from diffusers.utils import load_image, make_image_grid
 pipeline = AutoPipelineForInpainting.from_pretrained(
     "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16"
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -124,7 +124,7 @@ from diffusers.utils import load_image, make_image_grid
 pipeline = AutoPipelineForInpainting.from_pretrained(
     "diffusers/stable-diffusion-xl-1.0-inpainting-0.1", torch_dtype=torch.float16, variant="fp16"
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -150,7 +150,7 @@ from diffusers.utils import load_image, make_image_grid
 pipeline = AutoPipelineForInpainting.from_pretrained(
     "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -379,7 +379,7 @@ from diffusers.utils import load_image, make_image_grid
 pipeline = AutoPipelineForInpainting.from_pretrained(
     "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16"
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -424,7 +424,7 @@ from diffusers.utils import load_image, make_image_grid
 pipeline = AutoPipelineForInpainting.from_pretrained(
     "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16"
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -464,7 +464,7 @@ from diffusers.utils import load_image, make_image_grid
 pipeline = AutoPipelineForInpainting.from_pretrained(
     "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16"
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -503,7 +503,7 @@ from diffusers.utils import load_image, make_image_grid
 pipeline = AutoPipelineForText2Image.from_pretrained(
     "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -522,7 +522,7 @@ And let's inpaint the masked area with a waterfall:
 ```py
 pipeline = AutoPipelineForInpainting.from_pretrained(
     "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -556,7 +556,7 @@ from diffusers.utils import load_image, make_image_grid
 pipeline = AutoPipelineForInpainting.from_pretrained(
     "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16"
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -577,7 +577,7 @@ Now let's pass the image to another inpainting pipeline with SDXL's refiner mode
 ```py
 pipeline = AutoPipelineForInpainting.from_pretrained(
     "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16"
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -636,7 +636,7 @@ from diffusers.utils import make_image_grid
 pipeline = AutoPipelineForInpainting.from_pretrained(
     "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16,
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -667,7 +667,7 @@ controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_inpai
 # pass ControlNet to the pipeline
 pipeline = StableDiffusionControlNetInpaintPipeline.from_pretrained(
     "runwayml/stable-diffusion-inpainting", controlnet=controlnet, torch_dtype=torch.float16, variant="fp16"
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()
@@ -705,7 +705,7 @@ from diffusers import AutoPipelineForImage2Image
 pipeline = AutoPipelineForImage2Image.from_pretrained(
     "nitrosocke/elden-ring-diffusion", torch_dtype=torch.float16,
-).to("cuda")
+)
 pipeline.enable_model_cpu_offload()
 # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
 pipeline.enable_xformers_memory_efficient_attention()

<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Kandinsky

[[open-in-colab]]
@@ -91,7 +103,7 @@ Use the [`AutoPipelineForText2Image`] to automatically call the combined pipelin
 from diffusers import AutoPipelineForText2Image
 import torch
-pipeline = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16).to("cuda")
+pipeline = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
 pipeline.enable_model_cpu_offload()
 prompt = "A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting"
@@ -107,7 +119,7 @@ image = pipeline(prompt=prompt, negative_prompt=negative_prompt, prior_guidance_
 from diffusers import AutoPipelineForText2Image
 import torch
-pipeline = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16).to("cuda")
+pipeline = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
 pipeline.enable_model_cpu_offload()
 prompt = "A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting"
@@ -217,7 +229,7 @@ from io import BytesIO
 from PIL import Image
 import os
-pipeline = AutoPipelineForImage2Image.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16, use_safetensors=True).to("cuda")
+pipeline = AutoPipelineForImage2Image.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16, use_safetensors=True)
 pipeline.enable_model_cpu_offload()
 prompt = "A fantasy landscape, Cinematic lighting"
@@ -243,7 +255,7 @@ from io import BytesIO
 from PIL import Image
 import os
-pipeline = AutoPipelineForImage2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16).to("cuda")
+pipeline = AutoPipelineForImage2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
 pipeline.enable_model_cpu_offload()
 prompt = "A fantasy landscape, Cinematic lighting"
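
A side note on the comment repeated throughout these snippets: xFormers attention is only worth enabling when PyTorch is older than 2.0 (2.0+ ships `torch.nn.functional.scaled_dot_product_attention`) and the `xformers` package is installed. A small sketch of that check follows; `maybe_enable_xformers` is a hypothetical helper, not a diffusers API.

```py
# Sketch of the rule stated in the repeated comment; `maybe_enable_xformers` is hypothetical.
import importlib.util

import torch.nn.functional as F


def maybe_enable_xformers(pipeline):
    has_sdpa = hasattr(F, "scaled_dot_product_attention")  # built into PyTorch >= 2.0
    has_xformers = importlib.util.find_spec("xformers") is not None
    if not has_sdpa and has_xformers:
        pipeline.enable_xformers_memory_efficient_attention()
```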