"git@developer.sourcefind.cn:OpenDAS/mmcv.git" did not exist on "630b747cb144a9634a83785bae8f99c089851bc5"
Unverified commit 4f5e3e35, authored by Ameer Azam, committed by GitHub

The RunwayML path for v1.5 changed to stable-diffusion-v1-5/[stable-diffusion-v1-5, stable-diffusion-inpainting] (#10476)

* Update pipeline_controlnet.py

* Update pipeline_controlnet_img2img.py

RunwayML took the repositories down, so change all references from runwayml/stable-diffusion-v1-5 to
stable-diffusion-v1-5/stable-diffusion-v1-5

* Update pipeline_controlnet_inpaint.py

* RunwayML take-down: change to sd-legacy

* RunwayML take-down: change to sd-legacy

* RunwayML take-down: change to sd-legacy

* RunwayML take-down: change to sd-legacy

* Update convert_blipdiffusion_to_diffusers.py

style change
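For downstream users, the practical effect of this PR is just a repo-id swap; a minimal sketch of the migration (the repo ids are the ones introduced in this diff, the rest is standard diffusers usage):

```py
import torch
from diffusers import AutoPipelineForText2Image

# "runwayml/stable-diffusion-v1-5" no longer resolves after the take-down;
# the mirrored weights live under the stable-diffusion-v1-5 org instead.
pipeline = AutoPipelineForText2Image.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
image = pipeline("a photo of an astronaut riding a horse on mars").images[0]
```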
Parent commit: 8f2253c5
@@ -160,7 +160,7 @@ to trigger concept `{key}` → use `{tokens}` in your prompt \n
from diffusers import AutoPipelineForText2Image
import torch
{diffusers_imports_pivotal}
-pipeline = AutoPipelineForText2Image.from_pretrained('runwayml/stable-diffusion-v1-5', torch_dtype=torch.float16).to('cuda')
+pipeline = AutoPipelineForText2Image.from_pretrained('stable-diffusion-v1-5/stable-diffusion-v1-5', torch_dtype=torch.float16).to('cuda')
pipeline.load_lora_weights('{repo_id}', weight_name='pytorch_lora_weights.safetensors')
{diffusers_example_pivotal}
image = pipeline('{validation_prompt if validation_prompt else instance_prompt}').images[0]
...
@@ -303,10 +303,9 @@ def save_blip_diffusion_model(model, args):
qformer = get_qformer(model)
qformer.eval()
-text_encoder = ContextCLIPTextModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="text_encoder")
-vae = AutoencoderKL.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="vae")
-unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet")
+text_encoder = ContextCLIPTextModel.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="text_encoder")
+vae = AutoencoderKL.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="vae")
+unet = UNet2DConditionModel.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="unet")
vae.eval()
text_encoder.eval()
scheduler = PNDMScheduler(
@@ -316,7 +315,7 @@ def save_blip_diffusion_model(model, args):
set_alpha_to_one=False,
skip_prk_steps=True,
)
-tokenizer = CLIPTokenizer.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="tokenizer")
+tokenizer = CLIPTokenizer.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="tokenizer")
image_processor = BlipImageProcessor()
blip_diffusion = BlipDiffusionPipeline(
tokenizer=tokenizer,
...
@@ -329,7 +329,7 @@ class FromSingleFileMixin:
>>> # Enable float16 and move to GPU
>>> pipeline = StableDiffusionPipeline.from_single_file(
-... "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt",
+... "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt",
... torch_dtype=torch.float16,
... )
>>> pipeline.to("cuda")
...
@@ -333,7 +333,7 @@ class TextualInversionLoaderMixin:
from diffusers import StableDiffusionPipeline
import torch
-model_id = "runwayml/stable-diffusion-v1-5"
+model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
pipe.load_textual_inversion("sd-concepts-library/cat-toy")
@@ -352,7 +352,7 @@ class TextualInversionLoaderMixin:
from diffusers import StableDiffusionPipeline
import torch
-model_id = "runwayml/stable-diffusion-v1-5"
+model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
pipe.load_textual_inversion("./charturnerv2.pt", token="charturnerv2")
@@ -469,7 +469,7 @@ class TextualInversionLoaderMixin:
from diffusers import AutoPipelineForText2Image
import torch
-pipeline = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5")
+pipeline = AutoPipelineForText2Image.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
# Example 1
pipeline.load_textual_inversion("sd-concepts-library/gta5-artwork")
...
@@ -60,7 +60,7 @@ class ConsistencyDecoderVAE(ModelMixin, ConfigMixin):
>>> vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
>>> pipe = StableDiffusionPipeline.from_pretrained(
-... "runwayml/stable-diffusion-v1-5", vae=vae, torch_dtype=torch.float16
+... "stable-diffusion-v1-5/stable-diffusion-v1-5", vae=vae, torch_dtype=torch.float16
... ).to("cuda")
>>> image = pipe("horse", generator=torch.manual_seed(0)).images[0]
...
@@ -293,7 +293,7 @@ class AutoPipelineForText2Image(ConfigMixin):
If you get the error message below, you need to finetune the weights for your downstream task:
```
-Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
+Some weights of UNet2DConditionModel were not initialized from the model checkpoint at stable-diffusion-v1-5/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
- conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
```
@@ -385,7 +385,7 @@ class AutoPipelineForText2Image(ConfigMixin):
```py
>>> from diffusers import AutoPipelineForText2Image
->>> pipeline = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5")
+>>> pipeline = AutoPipelineForText2Image.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
>>> image = pipeline(prompt).images[0]
```
"""
@@ -448,7 +448,7 @@ class AutoPipelineForText2Image(ConfigMixin):
>>> from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image
>>> pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
-... "runwayml/stable-diffusion-v1-5", requires_safety_checker=False
+... "stable-diffusion-v1-5/stable-diffusion-v1-5", requires_safety_checker=False
... )
>>> pipe_t2i = AutoPipelineForText2Image.from_pipe(pipe_i2i)
@@ -589,7 +589,7 @@ class AutoPipelineForImage2Image(ConfigMixin):
If you get the error message below, you need to finetune the weights for your downstream task:
```
-Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
+Some weights of UNet2DConditionModel were not initialized from the model checkpoint at stable-diffusion-v1-5/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
- conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
```
@@ -681,7 +681,7 @@ class AutoPipelineForImage2Image(ConfigMixin):
```py
>>> from diffusers import AutoPipelineForImage2Image
->>> pipeline = AutoPipelineForImage2Image.from_pretrained("runwayml/stable-diffusion-v1-5")
+>>> pipeline = AutoPipelineForImage2Image.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
>>> image = pipeline(prompt, image).images[0]
```
"""
@@ -756,7 +756,7 @@ class AutoPipelineForImage2Image(ConfigMixin):
>>> from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image
>>> pipe_t2i = AutoPipelineForText2Image.from_pretrained(
-... "runwayml/stable-diffusion-v1-5", requires_safety_checker=False
+... "stable-diffusion-v1-5/stable-diffusion-v1-5", requires_safety_checker=False
... )
>>> pipe_i2i = AutoPipelineForImage2Image.from_pipe(pipe_t2i)
@@ -900,7 +900,7 @@ class AutoPipelineForInpainting(ConfigMixin):
If you get the error message below, you need to finetune the weights for your downstream task:
```
-Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
+Some weights of UNet2DConditionModel were not initialized from the model checkpoint at stable-diffusion-v1-5/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
- conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
```
@@ -992,7 +992,7 @@ class AutoPipelineForInpainting(ConfigMixin):
```py
>>> from diffusers import AutoPipelineForInpainting
->>> pipeline = AutoPipelineForInpainting.from_pretrained("runwayml/stable-diffusion-v1-5")
+>>> pipeline = AutoPipelineForInpainting.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
>>> image = pipeline(prompt, image=init_image, mask_image=mask_image).images[0]
```
"""
...
@@ -80,7 +80,7 @@ EXAMPLE_DOC_STRING = """
>>> # load control net and stable diffusion v1-5
>>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
>>> pipe = StableDiffusionControlNetPipeline.from_pretrained(
-... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
+... "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
... )
>>> # speed up diffusion process with faster scheduler and memory optimization
@@ -198,7 +198,7 @@ class StableDiffusionControlNetPipeline(
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
-Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
+Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
about a model's potential harms.
feature_extractor ([`~transformers.CLIPImageProcessor`]):
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
...
@@ -71,7 +71,7 @@ EXAMPLE_DOC_STRING = """
>>> # load control net and stable diffusion v1-5
>>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
>>> pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
-... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
+... "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
... )
>>> # speed up diffusion process with faster scheduler and memory optimization
@@ -168,7 +168,7 @@ class StableDiffusionControlNetImg2ImgPipeline(
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
-Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
+Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
about a model's potential harms.
feature_extractor ([`~transformers.CLIPImageProcessor`]):
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
...
@@ -83,7 +83,7 @@ EXAMPLE_DOC_STRING = """
... "lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16
... )
>>> pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
-... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
+... "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
... )
>>> pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
@@ -141,9 +141,9 @@ class StableDiffusionControlNetInpaintPipeline(
<Tip>
This pipeline can be used with checkpoints that have been specifically fine-tuned for inpainting
-([runwayml/stable-diffusion-inpainting](https://huggingface.co/runwayml/stable-diffusion-inpainting)) as well as
+([stable-diffusion-v1-5/stable-diffusion-inpainting](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-inpainting)) as well as
default text-to-image Stable Diffusion checkpoints
-([runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5)). Default text-to-image
+([stable-diffusion-v1-5/stable-diffusion-v1-5](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5)). Default text-to-image
Stable Diffusion checkpoints might be preferable for ControlNets that have been fine-tuned on those, such as
[lllyasviel/control_v11p_sd15_inpaint](https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint).
@@ -167,7 +167,7 @@ class StableDiffusionControlNetInpaintPipeline(
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
-Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
+Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
about a model's potential harms.
feature_extractor ([`~transformers.CLIPImageProcessor`]):
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
...
@@ -1622,7 +1622,7 @@ class StableDiffusionXLControlNetInpaintPipeline(
# 8. Check that sizes of mask, masked image and latents match
if num_channels_unet == 9:
-    # default case for runwayml/stable-diffusion-inpainting
+    # default case for stable-diffusion-v1-5/stable-diffusion-inpainting
num_channels_mask = mask.shape[1]
num_channels_masked_image = masked_image_latents.shape[1]
if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
...
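Context for the hunk above: 9 is the input-channel count of the stable-diffusion-v1-5/stable-diffusion-inpainting UNet (compare the `torch.Size([320, 9, 3, 3])` `conv_in` shape quoted in the error messages earlier in this diff). A minimal sketch of the check's arithmetic, with the standard SD channel counts hard-coded here as assumptions rather than read from a live config:

```py
# Standard SD inpainting input layout (assumed values, not read from a config):
num_channels_latents = 4       # VAE latent channels of the image being denoised
num_channels_mask = 1          # single-channel mask, downsampled to latent resolution
num_channels_masked_image = 4  # VAE latents of the masked input image
# The pipeline check above requires these to sum to unet.config.in_channels (9).
assert num_channels_latents + num_channels_mask + num_channels_masked_image == 9
```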
@@ -75,7 +75,7 @@ EXAMPLE_DOC_STRING = """
... "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.float32
... )
>>> pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
-... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, revision="flax", dtype=jnp.float32
+... "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, revision="flax", dtype=jnp.float32
... )
>>> params["controlnet"] = controlnet_params
@@ -132,7 +132,7 @@ class FlaxStableDiffusionControlNetPipeline(FlaxDiffusionPipeline):
[`FlaxDPMSolverMultistepScheduler`].
safety_checker ([`FlaxStableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
-Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
+Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
about a model's potential harms.
feature_extractor ([`~transformers.CLIPImageProcessor`]):
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
...
@@ -237,14 +237,14 @@ class FlaxDiffusionPipeline(ConfigMixin, PushToHubMixin):
If you get the error message below, you need to finetune the weights for your downstream task:
```
-Some weights of FlaxUNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
+Some weights of FlaxUNet2DConditionModel were not initialized from the model checkpoint at stable-diffusion-v1-5/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
```
Parameters:
pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
Can be either:
-    - A string, the *repo id* (for example `runwayml/stable-diffusion-v1-5`) of a pretrained pipeline
+    - A string, the *repo id* (for example `stable-diffusion-v1-5/stable-diffusion-v1-5`) of a pretrained pipeline
hosted on the Hub.
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
using [`~FlaxDiffusionPipeline.save_pretrained`].
@@ -293,7 +293,7 @@ class FlaxDiffusionPipeline(ConfigMixin, PushToHubMixin):
>>> # Requires to be logged in to Hugging Face hub,
>>> # see more in [the documentation](https://huggingface.co/docs/hub/security-tokens)
>>> pipeline, params = FlaxDiffusionPipeline.from_pretrained(
-... "runwayml/stable-diffusion-v1-5",
+... "stable-diffusion-v1-5/stable-diffusion-v1-5",
... variant="bf16",
... dtype=jnp.bfloat16,
... )
@@ -301,7 +301,7 @@ class FlaxDiffusionPipeline(ConfigMixin, PushToHubMixin):
>>> # Download pipeline, but use a different scheduler
>>> from diffusers import FlaxDPMSolverMultistepScheduler
->>> model_id = "runwayml/stable-diffusion-v1-5"
+>>> model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
>>> dpmpp, dpmpp_state = FlaxDPMSolverMultistepScheduler.from_pretrained(
... model_id,
... subfolder="scheduler",
@@ -559,7 +559,7 @@ class FlaxDiffusionPipeline(ConfigMixin, PushToHubMixin):
... )
>>> text2img = FlaxStableDiffusionPipeline.from_pretrained(
-... "runwayml/stable-diffusion-v1-5", variant="bf16", dtype=jnp.bfloat16
+... "stable-diffusion-v1-5/stable-diffusion-v1-5", variant="bf16", dtype=jnp.bfloat16
... )
>>> img2img = FlaxStableDiffusionImg2ImgPipeline(**text2img.components)
```
...
@@ -813,9 +813,9 @@ def _maybe_raise_warning_for_inpainting(pipeline_class, pretrained_model_name_or
"You are using a legacy checkpoint for inpainting with Stable Diffusion, therefore we are loading the"
f" {StableDiffusionInpaintPipelineLegacy} class instead of {StableDiffusionInpaintPipeline}. For"
" better inpainting results, we strongly suggest using Stable Diffusion's official inpainting"
-" checkpoint: https://huggingface.co/runwayml/stable-diffusion-inpainting instead or adapting your"
+" checkpoint: https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-inpainting instead or adapting your"
f" checkpoint {pretrained_model_name_or_path} to the format of"
-" https://huggingface.co/runwayml/stable-diffusion-inpainting. Note that we do not actively maintain"
+" https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-inpainting. Note that we do not actively maintain"
" the {StableDiffusionInpaintPipelineLegacy} class and will likely remove it in version 1.0.0."
)
deprecate("StableDiffusionInpaintPipelineLegacy", "1.0.0", deprecation_message, standard_warn=False)
...
@@ -516,7 +516,7 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin):
If you get the error message below, you need to finetune the weights for your downstream task:
```
-Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
+Some weights of UNet2DConditionModel were not initialized from the model checkpoint at stable-diffusion-v1-5/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
- conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
```
@@ -643,7 +643,7 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin):
>>> # Download pipeline that requires an authorization token
>>> # For more information on access tokens, please refer to this section
>>> # of the documentation](https://huggingface.co/docs/hub/security-tokens)
->>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
+>>> pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
>>> # Use a different scheduler
>>> from diffusers import LMSDiscreteScheduler
@@ -1555,7 +1555,7 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin):
... StableDiffusionInpaintPipeline,
... )
->>> text2img = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
+>>> text2img = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
>>> img2img = StableDiffusionImg2ImgPipeline(**text2img.components)
>>> inpaint = StableDiffusionInpaintPipeline(**text2img.components)
```
@@ -1688,7 +1688,7 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin):
>>> from diffusers import StableDiffusionPipeline
>>> pipe = StableDiffusionPipeline.from_pretrained(
-... "runwayml/stable-diffusion-v1-5",
+... "stable-diffusion-v1-5/stable-diffusion-v1-5",
... torch_dtype=torch.float16,
... use_safetensors=True,
... )
@@ -1735,7 +1735,7 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin):
```py
>>> from diffusers import StableDiffusionPipeline, StableDiffusionSAGPipeline
->>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
+>>> pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
>>> new_pipe = StableDiffusionSAGPipeline.from_pipe(pipe)
```
"""
...
@@ -55,7 +55,7 @@ EXAMPLE_DOC_STRING = """
>>> from diffusers import FlaxStableDiffusionPipeline
>>> pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
-... "runwayml/stable-diffusion-v1-5", variant="bf16", dtype=jax.numpy.bfloat16
+... "stable-diffusion-v1-5/stable-diffusion-v1-5", variant="bf16", dtype=jax.numpy.bfloat16
... )
>>> prompt = "a photo of an astronaut riding a horse on mars"
@@ -100,7 +100,7 @@ class FlaxStableDiffusionPipeline(FlaxDiffusionPipeline):
[`FlaxDPMSolverMultistepScheduler`].
safety_checker ([`FlaxStableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
-Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
+Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
about a model's potential harms.
feature_extractor ([`~transformers.CLIPImageProcessor`]):
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
@@ -141,8 +141,8 @@ class FlaxStableDiffusionPipeline(FlaxDiffusionPipeline):
"The configuration file of the unet has set the default `sample_size` to smaller than"
" 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the"
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
-" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
+" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
+" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
" in the config might lead to incorrect results in future versions. If you have downloaded this"
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
...
@@ -124,7 +124,7 @@ class FlaxStableDiffusionImg2ImgPipeline(FlaxDiffusionPipeline):
[`FlaxDPMSolverMultistepScheduler`].
safety_checker ([`FlaxStableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
-Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
+Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
about a model's potential harms.
feature_extractor ([`~transformers.CLIPImageProcessor`]):
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
...
@@ -127,7 +127,7 @@ class FlaxStableDiffusionInpaintPipeline(FlaxDiffusionPipeline):
[`FlaxDPMSolverMultistepScheduler`].
safety_checker ([`FlaxStableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
-Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
+Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
about a model's potential harms.
feature_extractor ([`~transformers.CLIPImageProcessor`]):
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
@@ -168,8 +168,8 @@ class FlaxStableDiffusionInpaintPipeline(FlaxDiffusionPipeline):
"The configuration file of the unet has set the default `sample_size` to smaller than"
" 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the"
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
-" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
+" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
+" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
" in the config might lead to incorrect results in future versions. If you have downloaded this"
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
...
@@ -78,7 +78,7 @@ class OnnxStableDiffusionImg2ImgPipeline(DiffusionPipeline):
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
-Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
+Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
feature_extractor ([`CLIPImageProcessor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
...
@@ -76,7 +76,7 @@ class OnnxStableDiffusionInpaintPipeline(DiffusionPipeline):
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
-Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
+Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
feature_extractor ([`CLIPImageProcessor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
...
@@ -55,7 +55,7 @@ EXAMPLE_DOC_STRING = """
>>> import torch
>>> from diffusers import StableDiffusionPipeline
->>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
+>>> pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16)
>>> pipe = pipe.to("cuda")
>>> prompt = "a photo of an astronaut riding a horse on mars"
@@ -184,7 +184,7 @@ class StableDiffusionPipeline(
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
-Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
+Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
about a model's potential harms.
feature_extractor ([`~transformers.CLIPImageProcessor`]):
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
@@ -266,8 +266,8 @@ class StableDiffusionPipeline(
"The configuration file of the unet has set the default `sample_size` to smaller than"
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
-" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
+" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
+" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
" in the config might lead to incorrect results in future versions. If you have downloaded this"
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
...
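As a follow-up sanity check after a sweep like this, one can confirm that the replacement repo ids actually resolve on the Hub; a minimal sketch using `huggingface_hub.model_info` (a standard Hub API call; the ids are the two used throughout this diff):

```py
from huggingface_hub import model_info

# Both replacement repos referenced by this PR should resolve on the Hub.
for repo_id in (
    "stable-diffusion-v1-5/stable-diffusion-v1-5",
    "stable-diffusion-v1-5/stable-diffusion-inpainting",
):
    print(repo_id, "->", model_info(repo_id).sha)
```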