Unverified Commit 041501ae authored by Sayak Paul, committed by GitHub

[docs] remove docstrings from repeated methods in `lora_pipeline.py` (#12393)

* start unbloating docstrings (save_lora_weights).

* load_lora_weights()

* lora_state_dict

* fuse_lora

* unfuse_lora

* load_lora_into_transformer
parent 9c094458
@@ -621,33 +621,7 @@ class StableDiffusionXLLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
"""
- Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
+ See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
`self.text_encoder`.
All kwargs are forwarded to `self.lora_state_dict`.
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is
loaded.
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is
loaded into `self.unet`.
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state
dict is loaded into `self.text_encoder`.
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
kwargs (`dict`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
""" """
if not USE_PEFT_BACKEND: if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.") raise ValueError("PEFT backend is required for this method.")
...@@ -967,35 +941,7 @@ class StableDiffusionXLLoraLoaderMixin(LoraBaseMixin): ...@@ -967,35 +941,7 @@ class StableDiffusionXLLoraLoaderMixin(LoraBaseMixin):
text_encoder_2_lora_adapter_metadata=None, text_encoder_2_lora_adapter_metadata=None,
): ):
r""" r"""
Save the LoRA parameters corresponding to the UNet and text encoder. See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
Arguments:
save_directory (`str` or `os.PathLike`):
Directory to save LoRA parameters to. Will be created if it doesn't exist.
unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
State dict of the LoRA layers corresponding to the `unet`.
text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text
encoder LoRA state dict because it comes from 🤗 Transformers.
text_encoder_2_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
State dict of the LoRA layers corresponding to the `text_encoder_2`. Must explicitly pass the text
encoder LoRA state dict because it comes from 🤗 Transformers.
is_main_process (`bool`, *optional*, defaults to `True`):
Whether the process calling this is the main process or not. Useful during distributed training and you
need to call this function on all processes. In this case, set `is_main_process=True` only on the main
process to avoid race conditions.
save_function (`Callable`):
The function to use to save the state dictionary. Useful during distributed training when you need to
replace `torch.save` with another method. Can be configured with the environment variable
`DIFFUSERS_SAVE_MODE`.
safe_serialization (`bool`, *optional*, defaults to `True`):
Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
unet_lora_adapter_metadata:
LoRA adapter metadata associated with the unet to be serialized with the state dict.
text_encoder_lora_adapter_metadata:
LoRA adapter metadata associated with the text encoder to be serialized with the state dict.
text_encoder_2_lora_adapter_metadata:
LoRA adapter metadata associated with the second text encoder to be serialized with the state dict.
""" """
lora_layers = {} lora_layers = {}
lora_metadata = {} lora_metadata = {}
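The hunk above trims the `save_lora_weights` parameter docs; the canonical list (`save_directory`, the per-component `*_lora_layers` dicts, `is_main_process`, `save_function`, `safe_serialization`) lives on `StableDiffusionLoraLoaderMixin`. A hedged sketch of a typical call; the `*_lora_layers` dicts are assumed to be LoRA-only state dicts you already hold (for example, collected from PEFT-wrapped models during training), and the output directory is a placeholder.

```py
from diffusers import StableDiffusionXLPipeline

# Assumption: these names refer to dicts mapping LoRA parameter names to tensors/modules,
# as described by the docstring being deduplicated in this hunk.
StableDiffusionXLPipeline.save_lora_weights(
    save_directory="./my-sdxl-lora",                        # created if it doesn't exist
    unet_lora_layers=unet_lora_layers,
    text_encoder_lora_layers=text_encoder_lora_layers,
    text_encoder_2_lora_layers=text_encoder_2_lora_layers,
    safe_serialization=True,                                # write .safetensors instead of pickle
)
```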
@@ -1036,35 +982,7 @@ class StableDiffusionXLLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
- Fuses the LoRA parameters into the original parameters of the corresponding blocks.
+ See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into.
lora_scale (`float`, defaults to 1.0):
Controls how much to influence the outputs with the LoRA parameters.
safe_fusing (`bool`, defaults to `False`):
Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
adapter_names (`List[str]`, *optional*):
Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.
Example:
```py
from diffusers import DiffusionPipeline
import torch
pipeline = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipeline.fuse_lora(lora_scale=0.7)
```
""" """
super().fuse_lora( super().fuse_lora(
components=components, components=components,
...@@ -1076,21 +994,7 @@ class StableDiffusionXLLoraLoaderMixin(LoraBaseMixin): ...@@ -1076,21 +994,7 @@ class StableDiffusionXLLoraLoaderMixin(LoraBaseMixin):
def unfuse_lora(self, components: List[str] = ["unet", "text_encoder", "text_encoder_2"], **kwargs): def unfuse_lora(self, components: List[str] = ["unet", "text_encoder", "text_encoder_2"], **kwargs):
r""" r"""
Reverses the effect of See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
[`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
unfuse_unet (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
unfuse_text_encoder (`bool`, defaults to `True`):
Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
LoRA parameters then it won't have any effect.
""" """
super().unfuse_lora(components=components, **kwargs) super().unfuse_lora(components=components, **kwargs)
...@@ -1116,51 +1020,7 @@ class SD3LoraLoaderMixin(LoraBaseMixin): ...@@ -1116,51 +1020,7 @@ class SD3LoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Return state dict for lora weights and the network alphas. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
<Tip warning={true}>
We support loading A1111 formatted LoRA checkpoints in a limited capacity.
This function is experimental and might change in the future.
</Tip>
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
Can be either:
- A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
the Hub.
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
with [`ModelMixin.save_pretrained`].
- A [torch state
dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
cache_dir (`Union[str, os.PathLike]`, *optional*):
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
is not used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
local_files_only (`bool`, *optional*, defaults to `False`):
Whether to only load local model weights and configuration files or not. If set to `True`, the model
won't be downloaded from the Hub.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
`diffusers-cli login` (stored in `~/.huggingface`) is used.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.
subfolder (`str`, *optional*, defaults to `""`):
The subfolder location of a model file within a larger model repository on the Hub or locally.
return_lora_metadata (`bool`, *optional*, defaults to False):
When enabled, additionally return the LoRA adapter metadata, typically found in the state dict.
""" """
# Load the main state dict first which has the LoRA layers for either of # Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both. # transformer and text encoder or both.
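The `lora_state_dict` docstring trimmed here describes returning the LoRA state dict and network alphas. As a hedged illustration of what that classmethod gives you without touching any model weights, assuming the SDXL mixin (whose variant returns the state dict together with any network alphas) and reusing the pixel-art checkpoint from the `fuse_lora` examples:

```py
from diffusers import StableDiffusionXLPipeline

# Downloads/converts the checkpoint only; no pipeline weights are modified.
state_dict, network_alphas = StableDiffusionXLPipeline.lora_state_dict(
    "nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors"
)
print(len(state_dict), "LoRA tensors")
print(sorted(state_dict)[:5])  # peek at a few parameter names
```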
@@ -1214,30 +1074,7 @@ class SD3LoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
"""
- Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
+ See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
`self.text_encoder`.
All kwargs are forwarded to `self.lora_state_dict`.
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is
loaded.
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state
dict is loaded into `self.transformer`.
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
kwargs (`dict`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
""" """
if not USE_PEFT_BACKEND: if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.") raise ValueError("PEFT backend is required for this method.")
@@ -1306,26 +1143,7 @@ class SD3LoraLoaderMixin(LoraBaseMixin):
metadata=None,
):
"""
- This will load the LoRA layers specified in `state_dict` into `transformer`.
+ See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
Parameters:
state_dict (`dict`):
A standard state dict containing the lora layer parameters. The keys can either be indexed directly
into the unet or prefixed with an additional `unet` which can be used to distinguish between text
encoder lora layers.
transformer (`SD3Transformer2DModel`):
The Transformer model to load the LoRA layers into.
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
""" """
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError( raise ValueError(
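For the lower-level `load_lora_into_transformer` path referenced in this hunk, a hedged sketch: obtain a LoRA state dict first, then inject only the transformer layers under an explicit adapter name. `pipe` is assumed to be an SD3 pipeline and `state_dict` a LoRA state dict you already have (for example, from `lora_state_dict` as shown earlier).

```py
# Classmethod on the mixin, so it can also be called through the pipeline instance.
pipe.load_lora_into_transformer(
    state_dict,
    transformer=pipe.transformer,
    adapter_name="my_adapter",  # hypothetical adapter name
)
```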
@@ -1420,35 +1238,7 @@ class SD3LoraLoaderMixin(LoraBaseMixin):
text_encoder_2_lora_adapter_metadata=None,
):
r"""
- Save the LoRA parameters corresponding to the UNet and text encoder.
+ See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
Arguments:
save_directory (`str` or `os.PathLike`):
Directory to save LoRA parameters to. Will be created if it doesn't exist.
transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
State dict of the LoRA layers corresponding to the `transformer`.
text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text
encoder LoRA state dict because it comes from 🤗 Transformers.
text_encoder_2_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
State dict of the LoRA layers corresponding to the `text_encoder_2`. Must explicitly pass the text
encoder LoRA state dict because it comes from 🤗 Transformers.
is_main_process (`bool`, *optional*, defaults to `True`):
Whether the process calling this is the main process or not. Useful during distributed training and you
need to call this function on all processes. In this case, set `is_main_process=True` only on the main
process to avoid race conditions.
save_function (`Callable`):
The function to use to save the state dictionary. Useful during distributed training when you need to
replace `torch.save` with another method. Can be configured with the environment variable
`DIFFUSERS_SAVE_MODE`.
safe_serialization (`bool`, *optional*, defaults to `True`):
Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
transformer_lora_adapter_metadata:
LoRA adapter metadata associated with the transformer to be serialized with the state dict.
text_encoder_lora_adapter_metadata:
LoRA adapter metadata associated with the text encoder to be serialized with the state dict.
text_encoder_2_lora_adapter_metadata:
LoRA adapter metadata associated with the second text encoder to be serialized with the state dict.
""" """
lora_layers = {} lora_layers = {}
lora_metadata = {} lora_metadata = {}
...@@ -1490,35 +1280,7 @@ class SD3LoraLoaderMixin(LoraBaseMixin): ...@@ -1490,35 +1280,7 @@ class SD3LoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Fuses the LoRA parameters into the original parameters of the corresponding blocks. See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into.
lora_scale (`float`, defaults to 1.0):
Controls how much to influence the outputs with the LoRA parameters.
safe_fusing (`bool`, defaults to `False`):
Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
adapter_names (`List[str]`, *optional*):
Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.
Example:
```py
from diffusers import DiffusionPipeline
import torch
pipeline = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipeline.fuse_lora(lora_scale=0.7)
```
""" """
super().fuse_lora( super().fuse_lora(
components=components, components=components,
...@@ -1531,21 +1293,7 @@ class SD3LoraLoaderMixin(LoraBaseMixin): ...@@ -1531,21 +1293,7 @@ class SD3LoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.StableDiffusionXLLoraLoaderMixin.unfuse_lora with unet->transformer # Copied from diffusers.loaders.lora_pipeline.StableDiffusionXLLoraLoaderMixin.unfuse_lora with unet->transformer
def unfuse_lora(self, components: List[str] = ["transformer", "text_encoder", "text_encoder_2"], **kwargs): def unfuse_lora(self, components: List[str] = ["transformer", "text_encoder", "text_encoder_2"], **kwargs):
r""" r"""
Reverses the effect of See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
[`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
unfuse_text_encoder (`bool`, defaults to `True`):
Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
LoRA parameters then it won't have any effect.
""" """
super().unfuse_lora(components=components, **kwargs) super().unfuse_lora(components=components, **kwargs)
...@@ -1567,51 +1315,7 @@ class AuraFlowLoraLoaderMixin(LoraBaseMixin): ...@@ -1567,51 +1315,7 @@ class AuraFlowLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Return state dict for lora weights and the network alphas. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
<Tip warning={true}>
We support loading A1111 formatted LoRA checkpoints in a limited capacity.
This function is experimental and might change in the future.
</Tip>
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
Can be either:
- A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
the Hub.
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
with [`ModelMixin.save_pretrained`].
- A [torch state
dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
cache_dir (`Union[str, os.PathLike]`, *optional*):
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
is not used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
local_files_only (`bool`, *optional*, defaults to `False`):
Whether to only load local model weights and configuration files or not. If set to `True`, the model
won't be downloaded from the Hub.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
`diffusers-cli login` (stored in `~/.huggingface`) is used.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.
subfolder (`str`, *optional*, defaults to `""`):
The subfolder location of a model file within a larger model repository on the Hub or locally.
return_lora_metadata (`bool`, *optional*, defaults to False):
When enabled, additionally return the LoRA adapter metadata, typically found in the state dict.
""" """
# Load the main state dict first which has the LoRA layers for either of # Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both. # transformer and text encoder or both.
...@@ -1666,25 +1370,7 @@ class AuraFlowLoraLoaderMixin(LoraBaseMixin): ...@@ -1666,25 +1370,7 @@ class AuraFlowLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
""" """
Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
`self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See
[`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state
dict is loaded into `self.transformer`.
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
kwargs (`dict`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
""" """
if not USE_PEFT_BACKEND: if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.") raise ValueError("PEFT backend is required for this method.")
...@@ -1730,26 +1416,7 @@ class AuraFlowLoraLoaderMixin(LoraBaseMixin): ...@@ -1730,26 +1416,7 @@ class AuraFlowLoraLoaderMixin(LoraBaseMixin):
metadata=None, metadata=None,
): ):
""" """
This will load the LoRA layers specified in `state_dict` into `transformer`. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
Parameters:
state_dict (`dict`):
A standard state dict containing the lora layer parameters. The keys can either be indexed directly
into the unet or prefixed with an additional `unet` which can be used to distinguish between text
encoder lora layers.
transformer (`AuraFlowTransformer2DModel`):
The Transformer model to load the LoRA layers into.
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
""" """
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError( raise ValueError(
...@@ -1781,25 +1448,7 @@ class AuraFlowLoraLoaderMixin(LoraBaseMixin): ...@@ -1781,25 +1448,7 @@ class AuraFlowLoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None, transformer_lora_adapter_metadata: Optional[dict] = None,
): ):
r""" r"""
Save the LoRA parameters corresponding to the transformer. See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
Arguments:
save_directory (`str` or `os.PathLike`):
Directory to save LoRA parameters to. Will be created if it doesn't exist.
transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
State dict of the LoRA layers corresponding to the `transformer`.
is_main_process (`bool`, *optional*, defaults to `True`):
Whether the process calling this is the main process or not. Useful during distributed training and you
need to call this function on all processes. In this case, set `is_main_process=True` only on the main
process to avoid race conditions.
save_function (`Callable`):
The function to use to save the state dictionary. Useful during distributed training when you need to
replace `torch.save` with another method. Can be configured with the environment variable
`DIFFUSERS_SAVE_MODE`.
safe_serialization (`bool`, *optional*, defaults to `True`):
Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
transformer_lora_adapter_metadata:
LoRA adapter metadata associated with the transformer to be serialized with the state dict.
""" """
lora_layers = {} lora_layers = {}
lora_metadata = {} lora_metadata = {}
...@@ -1831,35 +1480,7 @@ class AuraFlowLoraLoaderMixin(LoraBaseMixin): ...@@ -1831,35 +1480,7 @@ class AuraFlowLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Fuses the LoRA parameters into the original parameters of the corresponding blocks. See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into.
lora_scale (`float`, defaults to 1.0):
Controls how much to influence the outputs with the LoRA parameters.
safe_fusing (`bool`, defaults to `False`):
Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
adapter_names (`List[str]`, *optional*):
Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.
Example:
```py
from diffusers import DiffusionPipeline
import torch
pipeline = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipeline.fuse_lora(lora_scale=0.7)
```
""" """
super().fuse_lora( super().fuse_lora(
components=components, components=components,
...@@ -1872,18 +1493,7 @@ class AuraFlowLoraLoaderMixin(LoraBaseMixin): ...@@ -1872,18 +1493,7 @@ class AuraFlowLoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.SanaLoraLoaderMixin.unfuse_lora # Copied from diffusers.loaders.lora_pipeline.SanaLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer", "text_encoder"], **kwargs): def unfuse_lora(self, components: List[str] = ["transformer", "text_encoder"], **kwargs):
r""" r"""
Reverses the effect of See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
[`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
""" """
super().unfuse_lora(components=components, **kwargs) super().unfuse_lora(components=components, **kwargs)
...@@ -1910,50 +1520,7 @@ class FluxLoraLoaderMixin(LoraBaseMixin): ...@@ -1910,50 +1520,7 @@ class FluxLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Return state dict for lora weights and the network alphas. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
<Tip warning={true}>
We support loading A1111 formatted LoRA checkpoints in a limited capacity.
This function is experimental and might change in the future.
</Tip>
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
Can be either:
- A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
the Hub.
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
with [`ModelMixin.save_pretrained`].
- A [torch state
dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
cache_dir (`Union[str, os.PathLike]`, *optional*):
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
is not used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
local_files_only (`bool`, *optional*, defaults to `False`):
Whether to only load local model weights and configuration files or not. If set to `True`, the model
won't be downloaded from the Hub.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
`diffusers-cli login` (stored in `~/.huggingface`) is used.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.
subfolder (`str`, *optional*, defaults to `""`):
The subfolder location of a model file within a larger model repository on the Hub or locally.
return_lora_metadata (`bool`, *optional*, defaults to False):
When enabled, additionally return the LoRA adapter metadata, typically found in the state dict.
""" """
# Load the main state dict first which has the LoRA layers for either of # Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both. # transformer and text encoder or both.
...@@ -2207,30 +1774,7 @@ class FluxLoraLoaderMixin(LoraBaseMixin): ...@@ -2207,30 +1774,7 @@ class FluxLoraLoaderMixin(LoraBaseMixin):
hotswap: bool = False, hotswap: bool = False,
): ):
""" """
This will load the LoRA layers specified in `state_dict` into `transformer`. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
Parameters:
state_dict (`dict`):
A standard state dict containing the lora layer parameters. The keys can either be indexed directly
into the unet or prefixed with an additional `unet` which can be used to distinguish between text
encoder lora layers.
network_alphas (`Dict[str, float]`):
The value of the network alpha used for stable learning and preventing underflow. This value has the
same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this
link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning).
transformer (`FluxTransformer2DModel`):
The Transformer model to load the LoRA layers into.
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
""" """
if low_cpu_mem_usage and not is_peft_version(">=", "0.13.1"): if low_cpu_mem_usage and not is_peft_version(">=", "0.13.1"):
raise ValueError( raise ValueError(
...@@ -2435,35 +1979,7 @@ class FluxLoraLoaderMixin(LoraBaseMixin): ...@@ -2435,35 +1979,7 @@ class FluxLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Fuses the LoRA parameters into the original parameters of the corresponding blocks. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into.
lora_scale (`float`, defaults to 1.0):
Controls how much to influence the outputs with the LoRA parameters.
safe_fusing (`bool`, defaults to `False`):
Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
adapter_names (`List[str]`, *optional*):
Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.
Example:
```py
from diffusers import DiffusionPipeline
import torch
pipeline = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipeline.fuse_lora(lora_scale=0.7)
```
""" """
transformer = getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer transformer = getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer
...@@ -2806,30 +2322,7 @@ class AmusedLoraLoaderMixin(StableDiffusionLoraLoaderMixin): ...@@ -2806,30 +2322,7 @@ class AmusedLoraLoaderMixin(StableDiffusionLoraLoaderMixin):
hotswap: bool = False, hotswap: bool = False,
): ):
""" """
This will load the LoRA layers specified in `state_dict` into `transformer`. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
Parameters:
state_dict (`dict`):
A standard state dict containing the lora layer parameters. The keys can either be indexed directly
into the unet or prefixed with an additional `unet` which can be used to distinguish between text
encoder lora layers.
network_alphas (`Dict[str, float]`):
The value of the network alpha used for stable learning and preventing underflow. This value has the
same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this
link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning).
transformer (`UVit2DModel`):
The Transformer model to load the LoRA layers into.
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
""" """
if low_cpu_mem_usage and not is_peft_version(">=", "0.13.1"): if low_cpu_mem_usage and not is_peft_version(">=", "0.13.1"):
raise ValueError( raise ValueError(
...@@ -2979,51 +2472,7 @@ class CogVideoXLoraLoaderMixin(LoraBaseMixin): ...@@ -2979,51 +2472,7 @@ class CogVideoXLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Return state dict for lora weights and the network alphas. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
<Tip warning={true}>
We support loading A1111 formatted LoRA checkpoints in a limited capacity.
This function is experimental and might change in the future.
</Tip>
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
Can be either:
- A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
the Hub.
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
with [`ModelMixin.save_pretrained`].
- A [torch state
dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
cache_dir (`Union[str, os.PathLike]`, *optional*):
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
is not used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
local_files_only (`bool`, *optional*, defaults to `False`):
Whether to only load local model weights and configuration files or not. If set to `True`, the model
won't be downloaded from the Hub.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
`diffusers-cli login` (stored in `~/.huggingface`) is used.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.
subfolder (`str`, *optional*, defaults to `""`):
The subfolder location of a model file within a larger model repository on the Hub or locally.
return_lora_metadata (`bool`, *optional*, defaults to False):
When enabled, additionally return the LoRA adapter metadata, typically found in the state dict.
""" """
# Load the main state dict first which has the LoRA layers for either of # Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both. # transformer and text encoder or both.
...@@ -3077,25 +2526,7 @@ class CogVideoXLoraLoaderMixin(LoraBaseMixin): ...@@ -3077,25 +2526,7 @@ class CogVideoXLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
""" """
Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
`self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See
[`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state
dict is loaded into `self.transformer`.
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
kwargs (`dict`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
""" """
if not USE_PEFT_BACKEND: if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.") raise ValueError("PEFT backend is required for this method.")
...@@ -3141,26 +2572,7 @@ class CogVideoXLoraLoaderMixin(LoraBaseMixin): ...@@ -3141,26 +2572,7 @@ class CogVideoXLoraLoaderMixin(LoraBaseMixin):
metadata=None, metadata=None,
): ):
""" """
This will load the LoRA layers specified in `state_dict` into `transformer`. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
Parameters:
state_dict (`dict`):
A standard state dict containing the lora layer parameters. The keys can either be indexed directly
into the unet or prefixed with an additional `unet` which can be used to distinguish between text
encoder lora layers.
transformer (`CogVideoXTransformer3DModel`):
The Transformer model to load the LoRA layers into.
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
""" """
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError( raise ValueError(
...@@ -3180,7 +2592,6 @@ class CogVideoXLoraLoaderMixin(LoraBaseMixin): ...@@ -3180,7 +2592,6 @@ class CogVideoXLoraLoaderMixin(LoraBaseMixin):
) )
@classmethod @classmethod
# Adapted from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.save_lora_weights without support for text encoder
def save_lora_weights( def save_lora_weights(
cls, cls,
save_directory: Union[str, os.PathLike], save_directory: Union[str, os.PathLike],
...@@ -3192,25 +2603,7 @@ class CogVideoXLoraLoaderMixin(LoraBaseMixin): ...@@ -3192,25 +2603,7 @@ class CogVideoXLoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None, transformer_lora_adapter_metadata: Optional[dict] = None,
): ):
r""" r"""
Save the LoRA parameters corresponding to the transformer. See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
Arguments:
save_directory (`str` or `os.PathLike`):
Directory to save LoRA parameters to. Will be created if it doesn't exist.
transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
State dict of the LoRA layers corresponding to the `transformer`.
is_main_process (`bool`, *optional*, defaults to `True`):
Whether the process calling this is the main process or not. Useful during distributed training and you
need to call this function on all processes. In this case, set `is_main_process=True` only on the main
process to avoid race conditions.
save_function (`Callable`):
The function to use to save the state dictionary. Useful during distributed training when you need to
replace `torch.save` with another method. Can be configured with the environment variable
`DIFFUSERS_SAVE_MODE`.
safe_serialization (`bool`, *optional*, defaults to `True`):
Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
transformer_lora_adapter_metadata:
LoRA adapter metadata associated with the transformer to be serialized with the state dict.
""" """
lora_layers = {} lora_layers = {}
lora_metadata = {} lora_metadata = {}
...@@ -3241,35 +2634,7 @@ class CogVideoXLoraLoaderMixin(LoraBaseMixin): ...@@ -3241,35 +2634,7 @@ class CogVideoXLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Fuses the LoRA parameters into the original parameters of the corresponding blocks. See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into.
lora_scale (`float`, defaults to 1.0):
Controls how much to influence the outputs with the LoRA parameters.
safe_fusing (`bool`, defaults to `False`):
Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
adapter_names (`List[str]`, *optional*):
Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.
Example:
```py
from diffusers import DiffusionPipeline
import torch
pipeline = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipeline.fuse_lora(lora_scale=0.7)
```
""" """
super().fuse_lora( super().fuse_lora(
components=components, components=components,
...@@ -3281,18 +2646,7 @@ class CogVideoXLoraLoaderMixin(LoraBaseMixin): ...@@ -3281,18 +2646,7 @@ class CogVideoXLoraLoaderMixin(LoraBaseMixin):
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r""" r"""
Reverses the effect of See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
[`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
""" """
super().unfuse_lora(components=components, **kwargs) super().unfuse_lora(components=components, **kwargs)
...@@ -3314,51 +2668,7 @@ class Mochi1LoraLoaderMixin(LoraBaseMixin): ...@@ -3314,51 +2668,7 @@ class Mochi1LoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Return state dict for lora weights and the network alphas. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
<Tip warning={true}>
We support loading A1111 formatted LoRA checkpoints in a limited capacity.
This function is experimental and might change in the future.
</Tip>
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
Can be either:
- A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
the Hub.
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
with [`ModelMixin.save_pretrained`].
- A [torch state
dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
cache_dir (`Union[str, os.PathLike]`, *optional*):
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
is not used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
local_files_only (`bool`, *optional*, defaults to `False`):
Whether to only load local model weights and configuration files or not. If set to `True`, the model
won't be downloaded from the Hub.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
`diffusers-cli login` (stored in `~/.huggingface`) is used.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.
subfolder (`str`, *optional*, defaults to `""`):
The subfolder location of a model file within a larger model repository on the Hub or locally.
return_lora_metadata (`bool`, *optional*, defaults to False):
When enabled, additionally return the LoRA adapter metadata, typically found in the state dict.
""" """
# Load the main state dict first which has the LoRA layers for either of # Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both. # transformer and text encoder or both.
...@@ -3413,25 +2723,7 @@ class Mochi1LoraLoaderMixin(LoraBaseMixin): ...@@ -3413,25 +2723,7 @@ class Mochi1LoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
""" """
Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
`self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See
[`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state
dict is loaded into `self.transformer`.
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
kwargs (`dict`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
""" """
if not USE_PEFT_BACKEND: if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.") raise ValueError("PEFT backend is required for this method.")
...@@ -3477,26 +2769,7 @@ class Mochi1LoraLoaderMixin(LoraBaseMixin): ...@@ -3477,26 +2769,7 @@ class Mochi1LoraLoaderMixin(LoraBaseMixin):
metadata=None, metadata=None,
): ):
""" """
This will load the LoRA layers specified in `state_dict` into `transformer`. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
Parameters:
state_dict (`dict`):
A standard state dict containing the lora layer parameters. The keys can either be indexed directly
into the unet or prefixed with an additional `unet` which can be used to distinguish between text
encoder lora layers.
transformer (`MochiTransformer3DModel`):
The Transformer model to load the LoRA layers into.
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
""" """
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError( raise ValueError(
...@@ -3528,25 +2801,7 @@ class Mochi1LoraLoaderMixin(LoraBaseMixin): ...@@ -3528,25 +2801,7 @@ class Mochi1LoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None, transformer_lora_adapter_metadata: Optional[dict] = None,
): ):
r""" r"""
Save the LoRA parameters corresponding to the transformer. See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
Arguments:
save_directory (`str` or `os.PathLike`):
Directory to save LoRA parameters to. Will be created if it doesn't exist.
transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
State dict of the LoRA layers corresponding to the `transformer`.
is_main_process (`bool`, *optional*, defaults to `True`):
Whether the process calling this is the main process or not. Useful during distributed training and you
need to call this function on all processes. In this case, set `is_main_process=True` only on the main
process to avoid race conditions.
save_function (`Callable`):
The function to use to save the state dictionary. Useful during distributed training when you need to
replace `torch.save` with another method. Can be configured with the environment variable
`DIFFUSERS_SAVE_MODE`.
safe_serialization (`bool`, *optional*, defaults to `True`):
Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
transformer_lora_adapter_metadata:
LoRA adapter metadata associated with the transformer to be serialized with the state dict.
""" """
lora_layers = {} lora_layers = {}
lora_metadata = {} lora_metadata = {}
...@@ -3578,35 +2833,7 @@ class Mochi1LoraLoaderMixin(LoraBaseMixin): ...@@ -3578,35 +2833,7 @@ class Mochi1LoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Fuses the LoRA parameters into the original parameters of the corresponding blocks. See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into.
lora_scale (`float`, defaults to 1.0):
Controls how much to influence the outputs with the LoRA parameters.
safe_fusing (`bool`, defaults to `False`):
Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
adapter_names (`List[str]`, *optional*):
Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.
Example:
```py
from diffusers import DiffusionPipeline
import torch
pipeline = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipeline.fuse_lora(lora_scale=0.7)
```
""" """
super().fuse_lora( super().fuse_lora(
components=components, components=components,
...@@ -3619,20 +2846,9 @@ class Mochi1LoraLoaderMixin(LoraBaseMixin): ...@@ -3619,20 +2846,9 @@ class Mochi1LoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r""" r"""
Reverses the effect of See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
[`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). """
super().unfuse_lora(components=components, **kwargs)
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
"""
super().unfuse_lora(components=components, **kwargs)
class LTXVideoLoraLoaderMixin(LoraBaseMixin): class LTXVideoLoraLoaderMixin(LoraBaseMixin):
@@ -3651,50 +2867,7 @@ class LTXVideoLoraLoaderMixin(LoraBaseMixin):
**kwargs,
):
r"""
- Return state dict for lora weights and the network alphas.
+ See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
<Tip warning={true}>
We support loading A1111 formatted LoRA checkpoints in a limited capacity.
This function is experimental and might change in the future.
</Tip>
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
Can be either:
- A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
the Hub.
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
with [`ModelMixin.save_pretrained`].
- A [torch state
dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
cache_dir (`Union[str, os.PathLike]`, *optional*):
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
is not used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
local_files_only (`bool`, *optional*, defaults to `False`):
Whether to only load local model weights and configuration files or not. If set to `True`, the model
won't be downloaded from the Hub.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
`diffusers-cli login` (stored in `~/.huggingface`) is used.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.
subfolder (`str`, *optional*, defaults to `""`):
The subfolder location of a model file within a larger model repository on the Hub or locally.
return_lora_metadata (`bool`, *optional*, defaults to False):
When enabled, additionally return the LoRA adapter metadata, typically found in the state dict.
""" """
# Load the main state dict first which has the LoRA layers for either of # Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both. # transformer and text encoder or both.
...@@ -3753,25 +2926,7 @@ class LTXVideoLoraLoaderMixin(LoraBaseMixin): ...@@ -3753,25 +2926,7 @@ class LTXVideoLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
""" """
Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
`self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See
[`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state
dict is loaded into `self.transformer`.
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
kwargs (`dict`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
""" """
if not USE_PEFT_BACKEND: if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.") raise ValueError("PEFT backend is required for this method.")
...@@ -3817,26 +2972,7 @@ class LTXVideoLoraLoaderMixin(LoraBaseMixin): ...@@ -3817,26 +2972,7 @@ class LTXVideoLoraLoaderMixin(LoraBaseMixin):
metadata=None, metadata=None,
): ):
""" """
This will load the LoRA layers specified in `state_dict` into `transformer`. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
Parameters:
state_dict (`dict`):
A standard state dict containing the lora layer parameters. The keys can either be indexed directly
into the transformer or prefixed with an additional `transformer`, which can be used to distinguish them from
text encoder lora layers.
transformer (`LTXVideoTransformer3DModel`):
The Transformer model to load the LoRA layers into.
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
""" """
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError( raise ValueError(
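A lower-level sketch that combines `lora_state_dict` with `load_lora_into_transformer`, for cases where only the transformer should receive the adapter. The repo ids and adapter name are placeholders:
```py
import torch
from diffusers import LTXPipeline

pipe = LTXPipeline.from_pretrained("Lightricks/LTX-Video", torch_dtype=torch.bfloat16)

# Fetch the raw LoRA state dict first (placeholder repo id).
state_dict = LTXPipeline.lora_state_dict("your-username/ltx-video-lora")

# Then inject only the transformer layers, under an explicit adapter name.
LTXPipeline.load_lora_into_transformer(
    state_dict,
    transformer=pipe.transformer,
    adapter_name="my_lora",
)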
...@@ -3868,25 +3004,7 @@ class LTXVideoLoraLoaderMixin(LoraBaseMixin): ...@@ -3868,25 +3004,7 @@ class LTXVideoLoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None, transformer_lora_adapter_metadata: Optional[dict] = None,
): ):
r""" r"""
Save the LoRA parameters corresponding to the transformer. See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
Arguments:
save_directory (`str` or `os.PathLike`):
Directory to save LoRA parameters to. Will be created if it doesn't exist.
transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
State dict of the LoRA layers corresponding to the `transformer`.
is_main_process (`bool`, *optional*, defaults to `True`):
Whether the process calling this is the main process or not. Useful during distributed training and you
need to call this function on all processes. In this case, set `is_main_process=True` only on the main
process to avoid race conditions.
save_function (`Callable`):
The function to use to save the state dictionary. Useful during distributed training when you need to
replace `torch.save` with another method. Can be configured with the environment variable
`DIFFUSERS_SAVE_MODE`.
safe_serialization (`bool`, *optional*, defaults to `True`):
Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
transformer_lora_adapter_metadata:
LoRA adapter metadata associated with the transformer to be serialized with the state dict.
""" """
lora_layers = {} lora_layers = {}
lora_metadata = {} lora_metadata = {}
...@@ -3918,35 +3036,7 @@ class LTXVideoLoraLoaderMixin(LoraBaseMixin): ...@@ -3918,35 +3036,7 @@ class LTXVideoLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Fuses the LoRA parameters into the original parameters of the corresponding blocks. See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into.
lora_scale (`float`, defaults to 1.0):
Controls how much to influence the outputs with the LoRA parameters.
safe_fusing (`bool`, defaults to `False`):
Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
adapter_names (`List[str]`, *optional*):
Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.
Example:
```py
from diffusers import DiffusionPipeline
import torch
pipeline = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipeline.fuse_lora(lora_scale=0.7)
```
""" """
super().fuse_lora( super().fuse_lora(
components=components, components=components,
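To complement the SDXL example kept in the docstring above, a fuse/unfuse round trip on a video pipeline; the LoRA repo and adapter name are placeholders, and `unfuse_lora()` restores the original transformer weights afterwards:
```py
import torch
from diffusers import LTXPipeline

pipe = LTXPipeline.from_pretrained("Lightricks/LTX-Video", torch_dtype=torch.bfloat16).to("cuda")
pipe.load_lora_weights("your-username/ltx-video-lora", adapter_name="style")  # placeholder

# Bake the LoRA into the transformer weights at 70% strength, run inference, then undo it.
pipe.fuse_lora(components=["transformer"], lora_scale=0.7)
_ = pipe("a paper boat drifting down a rainy street", num_inference_steps=25)
pipe.unfuse_lora(components=["transformer"])
```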
...@@ -3959,18 +3049,7 @@ class LTXVideoLoraLoaderMixin(LoraBaseMixin): ...@@ -3959,18 +3049,7 @@ class LTXVideoLoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r""" r"""
Reverses the effect of See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
[`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the transformer LoRA parameters.
""" """
super().unfuse_lora(components=components, **kwargs) super().unfuse_lora(components=components, **kwargs)
...@@ -3992,51 +3071,7 @@ class SanaLoraLoaderMixin(LoraBaseMixin): ...@@ -3992,51 +3071,7 @@ class SanaLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Return state dict for lora weights and the network alphas. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
<Tip warning={true}>
We support loading A1111 formatted LoRA checkpoints in a limited capacity.
This function is experimental and might change in the future.
</Tip>
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
Can be either:
- A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
the Hub.
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
with [`ModelMixin.save_pretrained`].
- A [torch state
dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
cache_dir (`Union[str, os.PathLike]`, *optional*):
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
is not used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
local_files_only (`bool`, *optional*, defaults to `False`):
Whether to only load local model weights and configuration files or not. If set to `True`, the model
won't be downloaded from the Hub.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
`diffusers-cli login` (stored in `~/.huggingface`) is used.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.
subfolder (`str`, *optional*, defaults to `""`):
The subfolder location of a model file within a larger model repository on the Hub or locally.
return_lora_metadata (`bool`, *optional*, defaults to False):
When enabled, additionally return the LoRA adapter metadata, typically found in the state dict.
""" """
# Load the main state dict first which has the LoRA layers for either of # Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both. # transformer and text encoder or both.
...@@ -4091,25 +3126,7 @@ class SanaLoraLoaderMixin(LoraBaseMixin): ...@@ -4091,25 +3126,7 @@ class SanaLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
""" """
Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
`self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See
[`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state
dict is loaded into `self.transformer`.
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
kwargs (`dict`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
""" """
if not USE_PEFT_BACKEND: if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.") raise ValueError("PEFT backend is required for this method.")
...@@ -4155,26 +3172,7 @@ class SanaLoraLoaderMixin(LoraBaseMixin): ...@@ -4155,26 +3172,7 @@ class SanaLoraLoaderMixin(LoraBaseMixin):
metadata=None, metadata=None,
): ):
""" """
This will load the LoRA layers specified in `state_dict` into `transformer`. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
Parameters:
state_dict (`dict`):
A standard state dict containing the lora layer parameters. The keys can either be indexed directly
into the transformer or prefixed with an additional `transformer`, which can be used to distinguish them from
text encoder lora layers.
transformer (`SanaTransformer2DModel`):
The Transformer model to load the LoRA layers into.
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
""" """
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError( raise ValueError(
...@@ -4206,25 +3204,7 @@ class SanaLoraLoaderMixin(LoraBaseMixin): ...@@ -4206,25 +3204,7 @@ class SanaLoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None, transformer_lora_adapter_metadata: Optional[dict] = None,
): ):
r""" r"""
Save the LoRA parameters corresponding to the transformer. See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
Arguments:
save_directory (`str` or `os.PathLike`):
Directory to save LoRA parameters to. Will be created if it doesn't exist.
transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
State dict of the LoRA layers corresponding to the `transformer`.
is_main_process (`bool`, *optional*, defaults to `True`):
Whether the process calling this is the main process or not. Useful during distributed training and you
need to call this function on all processes. In this case, set `is_main_process=True` only on the main
process to avoid race conditions.
save_function (`Callable`):
The function to use to save the state dictionary. Useful during distributed training when you need to
replace `torch.save` with another method. Can be configured with the environment variable
`DIFFUSERS_SAVE_MODE`.
safe_serialization (`bool`, *optional*, defaults to `True`):
Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
transformer_lora_adapter_metadata:
LoRA adapter metadata associated with the transformer to be serialized with the state dict.
""" """
lora_layers = {} lora_layers = {}
lora_metadata = {} lora_metadata = {}
...@@ -4256,59 +3236,20 @@ class SanaLoraLoaderMixin(LoraBaseMixin): ...@@ -4256,59 +3236,20 @@ class SanaLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Fuses the LoRA parameters into the original parameters of the corresponding blocks. See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into.
lora_scale (`float`, defaults to 1.0):
Controls how much to influence the outputs with the LoRA parameters.
safe_fusing (`bool`, defaults to `False`):
Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
adapter_names (`List[str]`, *optional*):
Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.
Example:
```py
from diffusers import DiffusionPipeline
import torch
pipeline = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipeline.fuse_lora(lora_scale=0.7)
```
""" """
super().fuse_lora(
components=components,
lora_scale=lora_scale,
safe_fusing=safe_fusing,
adapter_names=adapter_names,
**kwargs,
)
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r""" r"""
Reverses the effect of See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
[`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the transformer LoRA parameters.
""" """
super().unfuse_lora(components=components, **kwargs) super().unfuse_lora(components=components, **kwargs)
...@@ -4329,50 +3270,7 @@ class HunyuanVideoLoraLoaderMixin(LoraBaseMixin): ...@@ -4329,50 +3270,7 @@ class HunyuanVideoLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Return state dict for lora weights and the network alphas. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
<Tip warning={true}>
We support loading original format HunyuanVideo LoRA checkpoints.
This function is experimental and might change in the future.
</Tip>
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
Can be either:
- A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
the Hub.
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
with [`ModelMixin.save_pretrained`].
- A [torch state
dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
cache_dir (`Union[str, os.PathLike]`, *optional*):
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
is not used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
local_files_only (`bool`, *optional*, defaults to `False`):
Whether to only load local model weights and configuration files or not. If set to `True`, the model
won't be downloaded from the Hub.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
`diffusers-cli login` (stored in `~/.huggingface`) is used.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.
subfolder (`str`, *optional*, defaults to `""`):
The subfolder location of a model file within a larger model repository on the Hub or locally.
return_lora_metadata (`bool`, *optional*, defaults to False):
When enabled, additionally return the LoRA adapter metadata, typically found in the state dict.
""" """
# Load the main state dict first which has the LoRA layers for either of # Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both. # transformer and text encoder or both.
...@@ -4431,25 +3329,7 @@ class HunyuanVideoLoraLoaderMixin(LoraBaseMixin): ...@@ -4431,25 +3329,7 @@ class HunyuanVideoLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
""" """
Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
`self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See
[`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state
dict is loaded into `self.transformer`.
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
kwargs (`dict`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
""" """
if not USE_PEFT_BACKEND: if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.") raise ValueError("PEFT backend is required for this method.")
...@@ -4495,26 +3375,7 @@ class HunyuanVideoLoraLoaderMixin(LoraBaseMixin): ...@@ -4495,26 +3375,7 @@ class HunyuanVideoLoraLoaderMixin(LoraBaseMixin):
metadata=None, metadata=None,
): ):
""" """
This will load the LoRA layers specified in `state_dict` into `transformer`. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
Parameters:
state_dict (`dict`):
A standard state dict containing the lora layer parameters. The keys can either be indexed directly
into the transformer or prefixed with an additional `transformer`, which can be used to distinguish them from
text encoder lora layers.
transformer (`HunyuanVideoTransformer3DModel`):
The Transformer model to load the LoRA layers into.
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
""" """
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError( raise ValueError(
...@@ -4546,25 +3407,7 @@ class HunyuanVideoLoraLoaderMixin(LoraBaseMixin): ...@@ -4546,25 +3407,7 @@ class HunyuanVideoLoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None, transformer_lora_adapter_metadata: Optional[dict] = None,
): ):
r""" r"""
Save the LoRA parameters corresponding to the transformer. See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
Arguments:
save_directory (`str` or `os.PathLike`):
Directory to save LoRA parameters to. Will be created if it doesn't exist.
transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
State dict of the LoRA layers corresponding to the `transformer`.
is_main_process (`bool`, *optional*, defaults to `True`):
Whether the process calling this is the main process or not. Useful during distributed training and you
need to call this function on all processes. In this case, set `is_main_process=True` only on the main
process to avoid race conditions.
save_function (`Callable`):
The function to use to save the state dictionary. Useful during distributed training when you need to
replace `torch.save` with another method. Can be configured with the environment variable
`DIFFUSERS_SAVE_MODE`.
safe_serialization (`bool`, *optional*, defaults to `True`):
Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
transformer_lora_adapter_metadata:
LoRA adapter metadata associated with the transformer to be serialized with the state dict.
""" """
lora_layers = {} lora_layers = {}
lora_metadata = {} lora_metadata = {}
...@@ -4596,35 +3439,7 @@ class HunyuanVideoLoraLoaderMixin(LoraBaseMixin): ...@@ -4596,35 +3439,7 @@ class HunyuanVideoLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Fuses the LoRA parameters into the original parameters of the corresponding blocks. See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into.
lora_scale (`float`, defaults to 1.0):
Controls how much to influence the outputs with the LoRA parameters.
safe_fusing (`bool`, defaults to `False`):
Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
adapter_names (`List[str]`, *optional*):
Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.
Example:
```py
from diffusers import DiffusionPipeline
import torch
pipeline = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipeline.fuse_lora(lora_scale=0.7)
```
""" """
super().fuse_lora( super().fuse_lora(
components=components, components=components,
...@@ -4637,18 +3452,7 @@ class HunyuanVideoLoraLoaderMixin(LoraBaseMixin): ...@@ -4637,18 +3452,7 @@ class HunyuanVideoLoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r""" r"""
Reverses the effect of See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
[`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the transformer LoRA parameters.
""" """
super().unfuse_lora(components=components, **kwargs) super().unfuse_lora(components=components, **kwargs)
...@@ -4669,50 +3473,7 @@ class Lumina2LoraLoaderMixin(LoraBaseMixin): ...@@ -4669,50 +3473,7 @@ class Lumina2LoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Return state dict for lora weights and the network alphas. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
<Tip warning={true}>
We support loading A1111 formatted LoRA checkpoints in a limited capacity.
This function is experimental and might change in the future.
</Tip>
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
Can be either:
- A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
the Hub.
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
with [`ModelMixin.save_pretrained`].
- A [torch state
dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
cache_dir (`Union[str, os.PathLike]`, *optional*):
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
is not used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
local_files_only (`bool`, *optional*, defaults to `False`):
Whether to only load local model weights and configuration files or not. If set to `True`, the model
won't be downloaded from the Hub.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
`diffusers-cli login` (stored in `~/.huggingface`) is used.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.
subfolder (`str`, *optional*, defaults to `""`):
The subfolder location of a model file within a larger model repository on the Hub or locally.
return_lora_metadata (`bool`, *optional*, defaults to False):
When enabled, additionally return the LoRA adapter metadata, typically found in the state dict.
""" """
# Load the main state dict first which has the LoRA layers for either of # Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both. # transformer and text encoder or both.
...@@ -4772,25 +3533,7 @@ class Lumina2LoraLoaderMixin(LoraBaseMixin): ...@@ -4772,25 +3533,7 @@ class Lumina2LoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
""" """
Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
`self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See
[`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state
dict is loaded into `self.transformer`.
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
kwargs (`dict`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
""" """
if not USE_PEFT_BACKEND: if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.") raise ValueError("PEFT backend is required for this method.")
...@@ -4836,26 +3579,7 @@ class Lumina2LoraLoaderMixin(LoraBaseMixin): ...@@ -4836,26 +3579,7 @@ class Lumina2LoraLoaderMixin(LoraBaseMixin):
metadata=None, metadata=None,
): ):
""" """
This will load the LoRA layers specified in `state_dict` into `transformer`. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
Parameters:
state_dict (`dict`):
A standard state dict containing the lora layer parameters. The keys can either be indexed directly
into the transformer or prefixed with an additional `transformer`, which can be used to distinguish them from
text encoder lora layers.
transformer (`Lumina2Transformer2DModel`):
The Transformer model to load the LoRA layers into.
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
""" """
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError( raise ValueError(
...@@ -4887,25 +3611,7 @@ class Lumina2LoraLoaderMixin(LoraBaseMixin): ...@@ -4887,25 +3611,7 @@ class Lumina2LoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None, transformer_lora_adapter_metadata: Optional[dict] = None,
): ):
r""" r"""
Save the LoRA parameters corresponding to the transformer. See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
Arguments:
save_directory (`str` or `os.PathLike`):
Directory to save LoRA parameters to. Will be created if it doesn't exist.
transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
State dict of the LoRA layers corresponding to the `transformer`.
is_main_process (`bool`, *optional*, defaults to `True`):
Whether the process calling this is the main process or not. Useful during distributed training and you
need to call this function on all processes. In this case, set `is_main_process=True` only on the main
process to avoid race conditions.
save_function (`Callable`):
The function to use to save the state dictionary. Useful during distributed training when you need to
replace `torch.save` with another method. Can be configured with the environment variable
`DIFFUSERS_SAVE_MODE`.
safe_serialization (`bool`, *optional*, defaults to `True`):
Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
transformer_lora_adapter_metadata:
LoRA adapter metadata associated with the transformer to be serialized with the state dict.
""" """
lora_layers = {} lora_layers = {}
lora_metadata = {} lora_metadata = {}
...@@ -4937,35 +3643,7 @@ class Lumina2LoraLoaderMixin(LoraBaseMixin): ...@@ -4937,35 +3643,7 @@ class Lumina2LoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Fuses the LoRA parameters into the original parameters of the corresponding blocks. See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into.
lora_scale (`float`, defaults to 1.0):
Controls how much to influence the outputs with the LoRA parameters.
safe_fusing (`bool`, defaults to `False`):
Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
adapter_names (`List[str]`, *optional*):
Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.
Example:
```py
from diffusers import DiffusionPipeline
import torch
pipeline = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipeline.fuse_lora(lora_scale=0.7)
```
""" """
super().fuse_lora( super().fuse_lora(
components=components, components=components,
...@@ -4978,18 +3656,7 @@ class Lumina2LoraLoaderMixin(LoraBaseMixin): ...@@ -4978,18 +3656,7 @@ class Lumina2LoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.SanaLoraLoaderMixin.unfuse_lora # Copied from diffusers.loaders.lora_pipeline.SanaLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r""" r"""
Reverses the effect of See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
[`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the transformer LoRA parameters.
""" """
super().unfuse_lora(components=components, **kwargs) super().unfuse_lora(components=components, **kwargs)
...@@ -5010,50 +3677,7 @@ class WanLoraLoaderMixin(LoraBaseMixin): ...@@ -5010,50 +3677,7 @@ class WanLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Return state dict for lora weights and the network alphas. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
<Tip warning={true}>
We support loading A1111 formatted LoRA checkpoints in a limited capacity.
This function is experimental and might change in the future.
</Tip>
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
Can be either:
- A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
the Hub.
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
with [`ModelMixin.save_pretrained`].
- A [torch state
dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
cache_dir (`Union[str, os.PathLike]`, *optional*):
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
is not used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
local_files_only (`bool`, *optional*, defaults to `False`):
Whether to only load local model weights and configuration files or not. If set to `True`, the model
won't be downloaded from the Hub.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
`diffusers-cli login` (stored in `~/.huggingface`) is used.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.
subfolder (`str`, *optional*, defaults to `""`):
The subfolder location of a model file within a larger model repository on the Hub or locally.
return_lora_metadata (`bool`, *optional*, defaults to False):
When enabled, additionally return the LoRA adapter metadata, typically found in the state dict.
""" """
# Load the main state dict first which has the LoRA layers for either of # Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both. # transformer and text encoder or both.
...@@ -5159,25 +3783,7 @@ class WanLoraLoaderMixin(LoraBaseMixin): ...@@ -5159,25 +3783,7 @@ class WanLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
""" """
Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
`self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See
[`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state
dict is loaded into `self.transformer`.
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
kwargs (`dict`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
""" """
if not USE_PEFT_BACKEND: if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.") raise ValueError("PEFT backend is required for this method.")
...@@ -5247,26 +3853,7 @@ class WanLoraLoaderMixin(LoraBaseMixin): ...@@ -5247,26 +3853,7 @@ class WanLoraLoaderMixin(LoraBaseMixin):
metadata=None, metadata=None,
): ):
""" """
This will load the LoRA layers specified in `state_dict` into `transformer`. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
Parameters:
state_dict (`dict`):
A standard state dict containing the lora layer parameters. The keys can either be indexed directly
into the transformer or prefixed with an additional `transformer`, which can be used to distinguish them from
text encoder lora layers.
transformer (`WanTransformer3DModel`):
The Transformer model to load the LoRA layers into.
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
""" """
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError( raise ValueError(
...@@ -5298,25 +3885,7 @@ class WanLoraLoaderMixin(LoraBaseMixin): ...@@ -5298,25 +3885,7 @@ class WanLoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None, transformer_lora_adapter_metadata: Optional[dict] = None,
): ):
r""" r"""
Save the LoRA parameters corresponding to the transformer. See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
Arguments:
save_directory (`str` or `os.PathLike`):
Directory to save LoRA parameters to. Will be created if it doesn't exist.
transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
State dict of the LoRA layers corresponding to the `transformer`.
is_main_process (`bool`, *optional*, defaults to `True`):
Whether the process calling this is the main process or not. Useful during distributed training and you
need to call this function on all processes. In this case, set `is_main_process=True` only on the main
process to avoid race conditions.
save_function (`Callable`):
The function to use to save the state dictionary. Useful during distributed training when you need to
replace `torch.save` with another method. Can be configured with the environment variable
`DIFFUSERS_SAVE_MODE`.
safe_serialization (`bool`, *optional*, defaults to `True`):
Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
transformer_lora_adapter_metadata:
LoRA adapter metadata associated with the transformer to be serialized with the state dict.
""" """
lora_layers = {} lora_layers = {}
lora_metadata = {} lora_metadata = {}
...@@ -5348,35 +3917,7 @@ class WanLoraLoaderMixin(LoraBaseMixin): ...@@ -5348,35 +3917,7 @@ class WanLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Fuses the LoRA parameters into the original parameters of the corresponding blocks. See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into.
lora_scale (`float`, defaults to 1.0):
Controls how much to influence the outputs with the LoRA parameters.
safe_fusing (`bool`, defaults to `False`):
Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
adapter_names (`List[str]`, *optional*):
Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.
Example:
```py
from diffusers import DiffusionPipeline
import torch
pipeline = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipeline.fuse_lora(lora_scale=0.7)
```
""" """
super().fuse_lora( super().fuse_lora(
components=components, components=components,
...@@ -5389,18 +3930,7 @@ class WanLoraLoaderMixin(LoraBaseMixin): ...@@ -5389,18 +3930,7 @@ class WanLoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r""" r"""
Reverses the effect of See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
[`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the transformer LoRA parameters.
""" """
super().unfuse_lora(components=components, **kwargs) super().unfuse_lora(components=components, **kwargs)
...@@ -5422,50 +3952,7 @@ class SkyReelsV2LoraLoaderMixin(LoraBaseMixin): ...@@ -5422,50 +3952,7 @@ class SkyReelsV2LoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Return state dict for lora weights and the network alphas. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
<Tip warning={true}>
We support loading A1111 formatted LoRA checkpoints in a limited capacity.
This function is experimental and might change in the future.
</Tip>
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
Can be either:
- A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
the Hub.
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
with [`ModelMixin.save_pretrained`].
- A [torch state
dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
cache_dir (`Union[str, os.PathLike]`, *optional*):
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
is not used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
local_files_only (`bool`, *optional*, defaults to `False`):
Whether to only load local model weights and configuration files or not. If set to `True`, the model
won't be downloaded from the Hub.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
`diffusers-cli login` (stored in `~/.huggingface`) is used.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.
subfolder (`str`, *optional*, defaults to `""`):
The subfolder location of a model file within a larger model repository on the Hub or locally.
return_lora_metadata (`bool`, *optional*, defaults to False):
When enabled, additionally return the LoRA adapter metadata, typically found in the state dict.
""" """
# Load the main state dict first which has the LoRA layers for either of # Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both. # transformer and text encoder or both.
...@@ -5573,25 +4060,7 @@ class SkyReelsV2LoraLoaderMixin(LoraBaseMixin): ...@@ -5573,25 +4060,7 @@ class SkyReelsV2LoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
""" """
Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
`self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See
[`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state
dict is loaded into `self.transformer`.
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
kwargs (`dict`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
""" """
if not USE_PEFT_BACKEND: if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.") raise ValueError("PEFT backend is required for this method.")
...@@ -5661,26 +4130,7 @@ class SkyReelsV2LoraLoaderMixin(LoraBaseMixin): ...@@ -5661,26 +4130,7 @@ class SkyReelsV2LoraLoaderMixin(LoraBaseMixin):
metadata=None, metadata=None,
): ):
""" """
This will load the LoRA layers specified in `state_dict` into `transformer`. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
Parameters:
state_dict (`dict`):
A standard state dict containing the lora layer parameters. The keys can either be indexed directly
into the transformer or prefixed with an additional `transformer`, which can be used to distinguish them from
text encoder lora layers.
transformer (`SkyReelsV2Transformer3DModel`):
The Transformer model to load the LoRA layers into.
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
""" """
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError( raise ValueError(
...@@ -5712,25 +4162,7 @@ class SkyReelsV2LoraLoaderMixin(LoraBaseMixin): ...@@ -5712,25 +4162,7 @@ class SkyReelsV2LoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None, transformer_lora_adapter_metadata: Optional[dict] = None,
): ):
r""" r"""
Save the LoRA parameters corresponding to the transformer. See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
Arguments:
save_directory (`str` or `os.PathLike`):
Directory to save LoRA parameters to. Will be created if it doesn't exist.
transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
State dict of the LoRA layers corresponding to the `transformer`.
is_main_process (`bool`, *optional*, defaults to `True`):
Whether the process calling this is the main process or not. Useful during distributed training and you
need to call this function on all processes. In this case, set `is_main_process=True` only on the main
process to avoid race conditions.
save_function (`Callable`):
The function to use to save the state dictionary. Useful during distributed training when you need to
replace `torch.save` with another method. Can be configured with the environment variable
`DIFFUSERS_SAVE_MODE`.
safe_serialization (`bool`, *optional*, defaults to `True`):
Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
transformer_lora_adapter_metadata:
LoRA adapter metadata associated with the transformer to be serialized with the state dict.
""" """
lora_layers = {} lora_layers = {}
lora_metadata = {} lora_metadata = {}
...@@ -5762,35 +4194,7 @@ class SkyReelsV2LoraLoaderMixin(LoraBaseMixin): ...@@ -5762,35 +4194,7 @@ class SkyReelsV2LoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Fuses the LoRA parameters into the original parameters of the corresponding blocks. See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into.
lora_scale (`float`, defaults to 1.0):
Controls how much to influence the outputs with the LoRA parameters.
safe_fusing (`bool`, defaults to `False`):
Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
adapter_names (`List[str]`, *optional*):
Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.
Example:
```py
from diffusers import DiffusionPipeline
import torch
pipeline = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipeline.fuse_lora(lora_scale=0.7)
```
""" """
super().fuse_lora( super().fuse_lora(
components=components, components=components,
...@@ -5803,18 +4207,7 @@ class SkyReelsV2LoraLoaderMixin(LoraBaseMixin): ...@@ -5803,18 +4207,7 @@ class SkyReelsV2LoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r""" r"""
Reverses the effect of See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
[`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the transformer LoRA parameters.
""" """
super().unfuse_lora(components=components, **kwargs) super().unfuse_lora(components=components, **kwargs)
...@@ -5836,51 +4229,7 @@ class CogView4LoraLoaderMixin(LoraBaseMixin): ...@@ -5836,51 +4229,7 @@ class CogView4LoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Return state dict for lora weights and the network alphas. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
<Tip warning={true}>
We support loading A1111 formatted LoRA checkpoints in a limited capacity.
This function is experimental and might change in the future.
</Tip>
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
Can be either:
- A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
the Hub.
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
with [`ModelMixin.save_pretrained`].
- A [torch state
dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
cache_dir (`Union[str, os.PathLike]`, *optional*):
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
is not used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
local_files_only (`bool`, *optional*, defaults to `False`):
Whether to only load local model weights and configuration files or not. If set to `True`, the model
won't be downloaded from the Hub.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
`diffusers-cli login` (stored in `~/.huggingface`) is used.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.
subfolder (`str`, *optional*, defaults to `""`):
The subfolder location of a model file within a larger model repository on the Hub or locally.
return_lora_metadata (`bool`, *optional*, defaults to False):
When enabled, additionally return the LoRA adapter metadata, typically found in the state dict.
""" """
# Load the main state dict first which has the LoRA layers for either of # Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both. # transformer and text encoder or both.
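With the `lora_state_dict` docstring reduced to a cross-reference, a short sketch of inspecting a checkpoint without building a pipeline may help. The LoRA repo id below is a placeholder, and the return shape is guarded because some mixins also return network alphas or metadata.

```py
# Sketch: peek at a LoRA checkpoint's keys without instantiating the pipeline.
from diffusers import CogView4Pipeline

out = CogView4Pipeline.lora_state_dict("your-org/cogview4-lora")  # placeholder repo id
state_dict = out[0] if isinstance(out, tuple) else out
print(len(state_dict), "LoRA tensors")
print(sorted(state_dict)[:5])  # typically keys prefixed with "transformer."
```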
...@@ -5935,25 +4284,7 @@ class CogView4LoraLoaderMixin(LoraBaseMixin): ...@@ -5935,25 +4284,7 @@ class CogView4LoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
""" """
Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
`self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See
[`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state
dict is loaded into `self.transformer`.
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
kwargs (`dict`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
""" """
if not USE_PEFT_BACKEND: if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.") raise ValueError("PEFT backend is required for this method.")
...@@ -5999,26 +4330,7 @@ class CogView4LoraLoaderMixin(LoraBaseMixin): ...@@ -5999,26 +4330,7 @@ class CogView4LoraLoaderMixin(LoraBaseMixin):
metadata=None, metadata=None,
): ):
""" """
This will load the LoRA layers specified in `state_dict` into `transformer`. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
Parameters:
state_dict (`dict`):
A standard state dict containing the lora layer parameters. The keys can either be indexed directly
into the unet or prefixed with an additional `unet` which can be used to distinguish between text
encoder lora layers.
transformer (`CogView4Transformer2DModel`):
The Transformer model to load the LoRA layers into.
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
""" """
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError( raise ValueError(
...@@ -6050,25 +4362,7 @@ class CogView4LoraLoaderMixin(LoraBaseMixin): ...@@ -6050,25 +4362,7 @@ class CogView4LoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None, transformer_lora_adapter_metadata: Optional[dict] = None,
): ):
r""" r"""
Save the LoRA parameters corresponding to the transformer. See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
Arguments:
save_directory (`str` or `os.PathLike`):
Directory to save LoRA parameters to. Will be created if it doesn't exist.
transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
State dict of the LoRA layers corresponding to the `transformer`.
is_main_process (`bool`, *optional*, defaults to `True`):
Whether the process calling this is the main process or not. Useful during distributed training and you
need to call this function on all processes. In this case, set `is_main_process=True` only on the main
process to avoid race conditions.
save_function (`Callable`):
The function to use to save the state dictionary. Useful during distributed training when you need to
replace `torch.save` with another method. Can be configured with the environment variable
`DIFFUSERS_SAVE_MODE`.
safe_serialization (`bool`, *optional*, defaults to `True`):
Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
transformer_lora_adapter_metadata:
LoRA adapter metadata associated with the transformer to be serialized with the state dict.
""" """
lora_layers = {} lora_layers = {}
lora_metadata = {} lora_metadata = {}
...@@ -6100,35 +4394,7 @@ class CogView4LoraLoaderMixin(LoraBaseMixin): ...@@ -6100,35 +4394,7 @@ class CogView4LoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Fuses the LoRA parameters into the original parameters of the corresponding blocks. See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into.
lora_scale (`float`, defaults to 1.0):
Controls how much to influence the outputs with the LoRA parameters.
safe_fusing (`bool`, defaults to `False`):
Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
adapter_names (`List[str]`, *optional*):
Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.
Example:
```py
from diffusers import DiffusionPipeline
import torch
pipeline = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipeline.fuse_lora(lora_scale=0.7)
```
""" """
super().fuse_lora( super().fuse_lora(
components=components, components=components,
...@@ -6141,18 +4407,7 @@ class CogView4LoraLoaderMixin(LoraBaseMixin): ...@@ -6141,18 +4407,7 @@ class CogView4LoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r""" r"""
Reverses the effect of See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
[`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
""" """
super().unfuse_lora(components=components, **kwargs) super().unfuse_lora(components=components, **kwargs)
...@@ -6162,61 +4417,18 @@ class HiDreamImageLoraLoaderMixin(LoraBaseMixin): ...@@ -6162,61 +4417,18 @@ class HiDreamImageLoraLoaderMixin(LoraBaseMixin):
Load LoRA layers into [`HiDreamImageTransformer2DModel`]. Specific to [`HiDreamImagePipeline`]. Load LoRA layers into [`HiDreamImageTransformer2DModel`]. Specific to [`HiDreamImagePipeline`].
""" """
_lora_loadable_modules = ["transformer"] _lora_loadable_modules = ["transformer"]
transformer_name = TRANSFORMER_NAME transformer_name = TRANSFORMER_NAME
@classmethod @classmethod
@validate_hf_hub_args @validate_hf_hub_args
def lora_state_dict( def lora_state_dict(
cls, cls,
pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
**kwargs, **kwargs,
): ):
r""" r"""
Return state dict for lora weights and the network alphas. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
<Tip warning={true}>
We support loading A1111 formatted LoRA checkpoints in a limited capacity.
This function is experimental and might change in the future.
</Tip>
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
Can be either:
- A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
the Hub.
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
with [`ModelMixin.save_pretrained`].
- A [torch state
dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
cache_dir (`Union[str, os.PathLike]`, *optional*):
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
is not used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
local_files_only (`bool`, *optional*, defaults to `False`):
Whether to only load local model weights and configuration files or not. If set to `True`, the model
won't be downloaded from the Hub.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
`diffusers-cli login` (stored in `~/.huggingface`) is used.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.
subfolder (`str`, *optional*, defaults to `""`):
The subfolder location of a model file within a larger model repository on the Hub or locally.
return_lora_metadata (`bool`, *optional*, defaults to False):
When enabled, additionally return the LoRA adapter metadata, typically found in the state dict.
""" """
# Load the main state dict first which has the LoRA layers for either of # Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both. # transformer and text encoder or both.
...@@ -6275,25 +4487,7 @@ class HiDreamImageLoraLoaderMixin(LoraBaseMixin): ...@@ -6275,25 +4487,7 @@ class HiDreamImageLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
""" """
Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
`self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See
[`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state
dict is loaded into `self.transformer`.
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
kwargs (`dict`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
""" """
if not USE_PEFT_BACKEND: if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.") raise ValueError("PEFT backend is required for this method.")
...@@ -6339,26 +4533,7 @@ class HiDreamImageLoraLoaderMixin(LoraBaseMixin): ...@@ -6339,26 +4533,7 @@ class HiDreamImageLoraLoaderMixin(LoraBaseMixin):
metadata=None, metadata=None,
): ):
""" """
This will load the LoRA layers specified in `state_dict` into `transformer`. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
Parameters:
state_dict (`dict`):
A standard state dict containing the lora layer parameters. The keys can either be indexed directly
into the unet or prefixed with an additional `unet` which can be used to distinguish between text
encoder lora layers.
transformer (`HiDreamImageTransformer2DModel`):
The Transformer model to load the LoRA layers into.
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
""" """
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError( raise ValueError(
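The split path, `lora_state_dict` followed by `load_lora_into_transformer`, can be handy when only the bare transformer is at hand. A sketch under assumed repo ids:

```py
# Sketch: load LoRA layers directly into a standalone transformer.
import torch
from diffusers import HiDreamImagePipeline, HiDreamImageTransformer2DModel

transformer = HiDreamImageTransformer2DModel.from_pretrained(
    "HiDream-ai/HiDream-I1-Full", subfolder="transformer", torch_dtype=torch.bfloat16
)

state_dict = HiDreamImagePipeline.lora_state_dict("your-org/hidream-lora")  # placeholder repo id
state_dict = state_dict[0] if isinstance(state_dict, tuple) else state_dict
HiDreamImagePipeline.load_lora_into_transformer(
    state_dict,
    transformer=transformer,
    adapter_name="style",
)
```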
...@@ -6390,25 +4565,7 @@ class HiDreamImageLoraLoaderMixin(LoraBaseMixin): ...@@ -6390,25 +4565,7 @@ class HiDreamImageLoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None, transformer_lora_adapter_metadata: Optional[dict] = None,
): ):
r""" r"""
Save the LoRA parameters corresponding to the transformer. See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
Arguments:
save_directory (`str` or `os.PathLike`):
Directory to save LoRA parameters to. Will be created if it doesn't exist.
transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
State dict of the LoRA layers corresponding to the `transformer`.
is_main_process (`bool`, *optional*, defaults to `True`):
Whether the process calling this is the main process or not. Useful during distributed training and you
need to call this function on all processes. In this case, set `is_main_process=True` only on the main
process to avoid race conditions.
save_function (`Callable`):
The function to use to save the state dictionary. Useful during distributed training when you need to
replace `torch.save` with another method. Can be configured with the environment variable
`DIFFUSERS_SAVE_MODE`.
safe_serialization (`bool`, *optional*, defaults to `True`):
Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
transformer_lora_adapter_metadata:
LoRA adapter metadata associated with the transformer to be serialized with the state dict.
""" """
lora_layers = {} lora_layers = {}
lora_metadata = {} lora_metadata = {}
...@@ -6440,35 +4597,7 @@ class HiDreamImageLoraLoaderMixin(LoraBaseMixin): ...@@ -6440,35 +4597,7 @@ class HiDreamImageLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Fuses the LoRA parameters into the original parameters of the corresponding blocks. See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into.
lora_scale (`float`, defaults to 1.0):
Controls how much to influence the outputs with the LoRA parameters.
safe_fusing (`bool`, defaults to `False`):
Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
adapter_names (`List[str]`, *optional*):
Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.
Example:
```py
from diffusers import DiffusionPipeline
import torch
pipeline = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipeline.fuse_lora(lora_scale=0.7)
```
""" """
super().fuse_lora( super().fuse_lora(
components=components, components=components,
...@@ -6481,18 +4610,7 @@ class HiDreamImageLoraLoaderMixin(LoraBaseMixin): ...@@ -6481,18 +4610,7 @@ class HiDreamImageLoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.SanaLoraLoaderMixin.unfuse_lora # Copied from diffusers.loaders.lora_pipeline.SanaLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r""" r"""
Reverses the effect of See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
[`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
""" """
super().unfuse_lora(components=components, **kwargs) super().unfuse_lora(components=components, **kwargs)
...@@ -6513,51 +4631,7 @@ class QwenImageLoraLoaderMixin(LoraBaseMixin): ...@@ -6513,51 +4631,7 @@ class QwenImageLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Return state dict for lora weights and the network alphas. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details.
<Tip warning={true}>
We support loading A1111 formatted LoRA checkpoints in a limited capacity.
This function is experimental and might change in the future.
</Tip>
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
Can be either:
- A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
the Hub.
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
with [`ModelMixin.save_pretrained`].
- A [torch state
dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
cache_dir (`Union[str, os.PathLike]`, *optional*):
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
is not used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
local_files_only (`bool`, *optional*, defaults to `False`):
Whether to only load local model weights and configuration files or not. If set to `True`, the model
won't be downloaded from the Hub.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
`diffusers-cli login` (stored in `~/.huggingface`) is used.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.
subfolder (`str`, *optional*, defaults to `""`):
The subfolder location of a model file within a larger model repository on the Hub or locally.
return_lora_metadata (`bool`, *optional*, defaults to False):
When enabled, additionally return the LoRA adapter metadata, typically found in the state dict.
""" """
# Load the main state dict first which has the LoRA layers for either of # Load the main state dict first which has the LoRA layers for either of
# transformer and text encoder or both. # transformer and text encoder or both.
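Because `return_lora_metadata` is only described in the removed text, a small sketch; the repo id is a placeholder and the call is assumed to return a `(state_dict, metadata)` pair when the flag is set.

```py
# Sketch: also retrieve the LoRA adapter metadata stored alongside the weights.
from diffusers import QwenImagePipeline

state_dict, metadata = QwenImagePipeline.lora_state_dict(
    "your-org/qwen-image-lora", return_lora_metadata=True
)
print(metadata)  # e.g. the serialized peft LoraConfig arguments
```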
...@@ -6618,25 +4692,7 @@ class QwenImageLoraLoaderMixin(LoraBaseMixin): ...@@ -6618,25 +4692,7 @@ class QwenImageLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
""" """
Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for more details.
`self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See
[`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state
dict is loaded into `self.transformer`.
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
kwargs (`dict`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
""" """
if not USE_PEFT_BACKEND: if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.") raise ValueError("PEFT backend is required for this method.")
...@@ -6682,26 +4738,7 @@ class QwenImageLoraLoaderMixin(LoraBaseMixin): ...@@ -6682,26 +4738,7 @@ class QwenImageLoraLoaderMixin(LoraBaseMixin):
metadata=None, metadata=None,
): ):
""" """
This will load the LoRA layers specified in `state_dict` into `transformer`. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
Parameters:
state_dict (`dict`):
A standard state dict containing the lora layer parameters. The keys can either be indexed directly
into the unet or prefixed with an additional `unet` which can be used to distinguish between text
encoder lora layers.
transformer (`QwenImageTransformer2DModel`):
The Transformer model to load the LoRA layers into.
adapter_name (`str`, *optional*):
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
`default_{i}` where i is the total number of adapters being loaded.
low_cpu_mem_usage (`bool`, *optional*):
Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
weights.
hotswap (`bool`, *optional*):
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
metadata (`dict`):
Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
from the state dict.
""" """
if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
raise ValueError( raise ValueError(
...@@ -6733,25 +4770,7 @@ class QwenImageLoraLoaderMixin(LoraBaseMixin): ...@@ -6733,25 +4770,7 @@ class QwenImageLoraLoaderMixin(LoraBaseMixin):
transformer_lora_adapter_metadata: Optional[dict] = None, transformer_lora_adapter_metadata: Optional[dict] = None,
): ):
r""" r"""
Save the LoRA parameters corresponding to the transformer. See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
Arguments:
save_directory (`str` or `os.PathLike`):
Directory to save LoRA parameters to. Will be created if it doesn't exist.
transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
State dict of the LoRA layers corresponding to the `transformer`.
is_main_process (`bool`, *optional*, defaults to `True`):
Whether the process calling this is the main process or not. Useful during distributed training and you
need to call this function on all processes. In this case, set `is_main_process=True` only on the main
process to avoid race conditions.
save_function (`Callable`):
The function to use to save the state dictionary. Useful during distributed training when you need to
replace `torch.save` with another method. Can be configured with the environment variable
`DIFFUSERS_SAVE_MODE`.
safe_serialization (`bool`, *optional*, defaults to `True`):
Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
transformer_lora_adapter_metadata:
LoRA adapter metadata associated with the transformer to be serialized with the state dict.
""" """
lora_layers = {} lora_layers = {}
lora_metadata = {} lora_metadata = {}
...@@ -6783,35 +4802,7 @@ class QwenImageLoraLoaderMixin(LoraBaseMixin): ...@@ -6783,35 +4802,7 @@ class QwenImageLoraLoaderMixin(LoraBaseMixin):
**kwargs, **kwargs,
): ):
r""" r"""
Fuses the LoRA parameters into the original parameters of the corresponding blocks. See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into.
lora_scale (`float`, defaults to 1.0):
Controls how much to influence the outputs with the LoRA parameters.
safe_fusing (`bool`, defaults to `False`):
Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
adapter_names (`List[str]`, *optional*):
Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.
Example:
```py
from diffusers import DiffusionPipeline
import torch
pipeline = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipeline.fuse_lora(lora_scale=0.7)
```
""" """
super().fuse_lora( super().fuse_lora(
components=components, components=components,
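Continuing the QwenImage sketch above (so `pipe` already holds a "style" adapter), selective fusing with the arguments the removed docstring documented:

```py
# Sketch: fuse only a selected adapter, with a NaN safety check.
pipe.fuse_lora(
    components=["transformer"],
    lora_scale=0.8,
    safe_fusing=True,          # skip fusing weights that would introduce NaNs
    adapter_names=["style"],   # fuse just this adapter
)
```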
...@@ -6824,18 +4815,7 @@ class QwenImageLoraLoaderMixin(LoraBaseMixin): ...@@ -6824,18 +4815,7 @@ class QwenImageLoraLoaderMixin(LoraBaseMixin):
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
r""" r"""
Reverses the effect of See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
[`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
""" """
super().unfuse_lora(components=components, **kwargs) super().unfuse_lora(components=components, **kwargs)