Unverified commit 6b04d61c, authored by Kashif Rasul, committed by GitHub

[Styling] stylify using ruff (#5841)

* ruff format

* no need to use doc-builder's black styling as the docs are styled with ruff

* make fix-copies

* comment

* use run_ruff
parent 9c7f7fc4
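The last commit-message bullet mentions a `run_ruff` helper. The repository's actual Makefile target is not shown in this diff, so the following is only a hedged sketch, in Python, of what such a step might do; the helper name and the `src`/`tests` paths are assumptions.

```python
# Hypothetical sketch of a "run ruff" styling step, driven from Python.
# Assumes ruff is installed (pip install ruff); directory names are assumptions.
import subprocess


def run_ruff(paths=("src", "tests")):
    # Auto-fix lint violations first (may still report leftovers), then apply the
    # ruff formatter, which replaces the previous black-based formatting.
    subprocess.run(["ruff", "check", "--fix", *paths], check=False)
    subprocess.run(["ruff", "format", *paths], check=True)


if __name__ == "__main__":
    run_ruff()
```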
@@ -49,6 +49,7 @@ class LDMTextToImagePipeline(DiffusionPipeline):
             A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
             [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
     """
+
     model_cpu_offload_seq = "bert->unet->vqvae"
 
     def __init__(
...
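The `model_cpu_offload_seq` attribute that appears throughout these hunks declares the order in which a pipeline's sub-models are moved to the GPU when model offloading is enabled. A minimal usage sketch, not part of the commit (the checkpoint id is only an example; requires `accelerate` and a CUDA device):

```python
import torch
from diffusers import DiffusionPipeline

# Example checkpoint id; any checkpoint with a model_index.json works.
pipe = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256", torch_dtype=torch.float32)

# Sub-models are moved to the GPU one at a time, following the order declared in
# model_cpu_offload_seq (here "bert->unet->vqvae"), and back to CPU when idle.
pipe.enable_model_cpu_offload()

image = pipe("a painting of a squirrel eating a burger", num_inference_steps=25).images[0]
```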
@@ -177,6 +177,7 @@ class PaintByExamplePipeline(DiffusionPipeline):
             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
     """
+
     # TODO: feature_extractor is required to encode initial images (if they are in PIL format),
     # we should give a descriptive message if the pipeline doesn't have one.
...
@@ -112,6 +112,7 @@ class FlaxDiffusionPipeline(ConfigMixin, PushToHubMixin):
         - **config_name** ([`str`]) -- The configuration filename that stores the class and module names of all the
           diffusion pipeline's components.
     """
+
     config_name = "model_index.json"
 
     def register_modules(self, **kwargs):
...
@@ -542,6 +542,7 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin):
         - **_optional_components** (`List[str]`) -- List of all optional components that don't have to be passed to the
           pipeline to function (should be overridden by subclasses).
     """
+
     config_name = "model_index.json"
     model_cpu_offload_seq = None
     _optional_components = []
...
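`config_name = "model_index.json"` is the file `DiffusionPipeline.from_pretrained` reads to discover which classes make up a checkpoint. A short sketch of how that base-class machinery surfaces in user code, not part of this commit (the checkpoint id is only an example):

```python
from diffusers import DiffusionPipeline

# from_pretrained downloads model_index.json (the config_name declared above), then
# instantiates every component listed in it (unet, vae, scheduler, text_encoder, ...).
pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

print(pipe.config)             # the parsed model_index.json: component name -> class
print(pipe.components.keys())  # the instantiated components themselves
```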
@@ -120,8 +120,21 @@ class PixArtAlphaPipeline(DiffusionPipeline):
         scheduler ([`SchedulerMixin`]):
             A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
     """
+
     bad_punct_regex = re.compile(
-        r"[" + "#®•©™&@·º½¾¿¡§~" + "\)" + "\(" + "\]" + "\[" + "\}" + "\{" + "\|" + "\\" + "\/" + "\*" + r"]{1,}"
+        r"["
+        + "#®•©™&@·º½¾¿¡§~"
+        + r"\)"
+        + r"\("
+        + r"\]"
+        + r"\["
+        + r"\}"
+        + r"\{"
+        + r"\|"
+        + "\\"
+        + r"\/"
+        + r"\*"
+        + r"]{1,}"
     )  # noqa
 
     _optional_components = ["tokenizer", "text_encoder"]
...
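This hunk only changes how the pattern string is assembled, not the pattern itself: pieces like `r"\)"` are byte-for-byte equal to the old non-raw `"\)"` escapes (which is what the `# noqa` was silencing). A quick check, not part of the commit, confirms the two constructions build the identical regex:

```python
import re

# Single-line construction as removed by the commit (raw strings used here so the
# snippet does not trigger invalid-escape warnings; the resulting string is equal).
single_line = (
    r"[" + "#®•©™&@·º½¾¿¡§~" + r"\)" + r"\(" + r"\]" + r"\[" + r"\}" + r"\{" + r"\|" + "\\" + r"\/" + r"\*" + r"]{1,}"
)

# Multi-line construction as emitted by ruff format.
multi_line = (
    r"["
    + "#®•©™&@·º½¾¿¡§~"
    + r"\)"
    + r"\("
    + r"\]"
    + r"\["
    + r"\}"
    + r"\{"
    + r"\|"
    + "\\"
    + r"\/"
    + r"\*"
    + r"]{1,}"
)

assert single_line == multi_line
# The pattern matches runs of the listed punctuation characters.
assert re.fullmatch(multi_line, "###(){}[]") is not None
```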
@@ -35,6 +35,7 @@ class ScoreSdeVePipeline(DiffusionPipeline):
         scheduler ([`ScoreSdeVeScheduler`]):
             A `ScoreSdeVeScheduler` to be used in combination with `unet` to denoise the encoded image.
     """
+
     unet: UNet2DModel
     scheduler: ScoreSdeVeScheduler
...
@@ -54,6 +54,7 @@ class SpectrogramDiffusionPipeline(DiffusionPipeline):
             A scheduler to be used in combination with `decoder` to denoise the encoded audio latents.
         melgan ([`OnnxRuntimeModel`]):
     """
+
     _optional_components = ["melgan"]
 
     def __init__(
...
@@ -148,6 +148,7 @@ class CycleDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lor
         feature_extractor ([`~transformers.CLIPImageProcessor`]):
             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
     _optional_components = ["safety_checker", "feature_extractor"]
...
@@ -33,10 +33,7 @@ logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
 # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess with 8->64
 def preprocess(image):
-    deprecation_message = (
-        "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use"
-        " VaeImageProcessor.preprocess(...) instead"
-    )
+    deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) instead"
     deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False)
     if isinstance(image, torch.Tensor):
         return image
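The deprecation message points users at `VaeImageProcessor`. A hedged migration sketch, not part of this commit (the file name is a placeholder):

```python
from PIL import Image
from diffusers.image_processor import VaeImageProcessor

image = Image.open("input.png").convert("RGB")  # placeholder path

processor = VaeImageProcessor()       # defaults to vae_scale_factor=8
tensor = processor.preprocess(image)  # torch tensor scaled to [-1, 1], shape (1, 3, H, W)
```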
@@ -85,6 +82,7 @@ class OnnxStableDiffusionImg2ImgPipeline(DiffusionPipeline):
         feature_extractor ([`CLIPImageProcessor`]):
             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
     """
+
     vae_encoder: OnnxRuntimeModel
     vae_decoder: OnnxRuntimeModel
     text_encoder: OnnxRuntimeModel
...
@@ -80,6 +80,7 @@ class OnnxStableDiffusionInpaintPipeline(DiffusionPipeline):
         feature_extractor ([`CLIPImageProcessor`]):
             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
     """
+
     vae_encoder: OnnxRuntimeModel
     vae_decoder: OnnxRuntimeModel
     text_encoder: OnnxRuntimeModel
...
@@ -66,6 +66,7 @@ class OnnxStableDiffusionInpaintPipelineLegacy(DiffusionPipeline):
         feature_extractor ([`CLIPImageProcessor`]):
             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
     """
+
     _optional_components = ["safety_checker", "feature_extractor"]
     _is_onnx = True
...
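The `_is_onnx = True` flag marks these pipelines as ONNX Runtime backed, so they are loaded with an execution provider instead of being moved to a torch device. A hedged usage sketch, not part of this commit (the checkpoint id, revision, and provider are only examples; requires the `onnxruntime` package):

```python
from diffusers import OnnxStableDiffusionImg2ImgPipeline

# ONNX pipelines take an onnxruntime execution provider rather than .to("cuda").
pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    revision="onnx",
    provider="CPUExecutionProvider",
)
```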
@@ -102,6 +102,7 @@ class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
         feature_extractor ([`~transformers.CLIPImageProcessor`]):
             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
     _optional_components = ["safety_checker", "feature_extractor"]
     _exclude_from_cpu_offload = ["safety_checker"]
...
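`_optional_components` lists the components `from_pretrained` will accept as `None`, and `_exclude_from_cpu_offload` keeps the named components out of the offload sequence. A short sketch of how those attributes surface in user code, not part of this commit (the checkpoint id is only an example):

```python
import torch
from diffusers import StableDiffusionPipeline

# safety_checker is in _optional_components, so it may be dropped at load time.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,
    safety_checker=None,
    requires_safety_checker=False,
)

# When a safety checker is present, _exclude_from_cpu_offload keeps it out of the
# text_encoder->unet->vae offload sequence enabled here.
pipe.enable_model_cpu_offload()
```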
@@ -196,6 +196,7 @@ class StableDiffusionAttendAndExcitePipeline(DiffusionPipeline, TextualInversion
         feature_extractor ([`~transformers.CLIPImageProcessor`]):
             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
     _optional_components = ["safety_checker", "feature_extractor"]
     _exclude_from_cpu_offload = ["safety_checker"]
...
@@ -95,6 +95,7 @@ class StableDiffusionDepth2ImgPipeline(DiffusionPipeline, TextualInversionLoader
             A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
             [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
     _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds", "depth_mask"]
...
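`_callback_tensor_inputs` names the tensors a step-end callback is allowed to request through `callback_on_step_end_tensor_inputs`. A hedged sketch of that hook, not part of this commit (the checkpoint id and image URL are placeholders, and it assumes a diffusers version where this pipeline exposes `callback_on_step_end`):

```python
import torch
from diffusers import StableDiffusionDepth2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-depth", torch_dtype=torch.float16
).to("cuda")
init_image = load_image("https://example.com/room.png")  # placeholder URL


# Hypothetical callback: disable classifier-free guidance after 20 steps.
def cutoff_guidance(pipeline, step, timestep, callback_kwargs):
    if step == 20:
        pipeline._guidance_scale = 1.0
    return callback_kwargs


image = pipe(
    "a photo of a modern living room",
    image=init_image,
    callback_on_step_end=cutoff_guidance,
    # Only names listed in _callback_tensor_inputs may be requested here.
    callback_on_step_end_tensor_inputs=["latents", "depth_mask"],
).images[0]
```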
@@ -273,6 +273,7 @@ class StableDiffusionDiffEditPipeline(DiffusionPipeline, TextualInversionLoaderM
         feature_extractor ([`~transformers.CLIPImageProcessor`]):
             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
     _optional_components = ["safety_checker", "feature_extractor", "inverse_scheduler"]
     _exclude_from_cpu_offload = ["safety_checker"]
...
@@ -125,6 +125,7 @@ class StableDiffusionGLIGENPipeline(DiffusionPipeline):
         feature_extractor ([`~transformers.CLIPImageProcessor`]):
             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
     """
+
     _optional_components = ["safety_checker", "feature_extractor"]
     model_cpu_offload_seq = "text_encoder->unet->vae"
     _exclude_from_cpu_offload = ["safety_checker"]
...
@@ -177,6 +177,7 @@ class StableDiffusionGLIGENTextImagePipeline(DiffusionPipeline):
         feature_extractor ([`~transformers.CLIPImageProcessor`]):
             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
     _optional_components = ["safety_checker", "feature_extractor"]
     _exclude_from_cpu_offload = ["safety_checker"]
...
@@ -62,6 +62,7 @@ class StableDiffusionImageVariationPipeline(DiffusionPipeline):
         feature_extractor ([`~transformers.CLIPImageProcessor`]):
             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
     """
+
     # TODO: feature_extractor is required to encode images (if they are in PIL format),
     # we should give a descriptive message if the pipeline doesn't have one.
     _optional_components = ["safety_checker"]
...
@@ -139,6 +139,7 @@ class StableDiffusionImg2ImgPipeline(
         feature_extractor ([`~transformers.CLIPImageProcessor`]):
             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
     _optional_components = ["safety_checker", "feature_extractor"]
     _exclude_from_cpu_offload = ["safety_checker"]
...
@@ -202,6 +202,7 @@ class StableDiffusionInpaintPipeline(
         feature_extractor ([`~transformers.CLIPImageProcessor`]):
             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
     _optional_components = ["safety_checker", "feature_extractor"]
     _exclude_from_cpu_offload = ["safety_checker"]
...