Unverified Commit 78b87dc2 authored by Sayak Paul, committed by GitHub

[LoRA] make LoRAs trained with `peft` loadable when `peft` isn't installed (#6306)

* spit out the diffusers-native format from the get-go.

* rejig the peft_to_diffusers mapping.
parent 0af12f1f
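
Why this makes the LoRAs loadable without `peft`: `get_peft_model_state_dict` emits `peft`-style key names (`.lora_A` / `.lora_B`), which diffusers previously had to remap at load time through its `peft` integration. Converting to the diffusers-native convention (`.lora.down` / `.lora.up`) at save time means the plain LoRA loader can consume the checkpoint directly. A minimal sketch of the conversion, assuming `convert_state_dict_to_diffusers` infers the source layout from the key names when `original_type` isn't passed (the toy keys below are illustrative; real keys carry the full module path, e.g. `unet.down_blocks.0. ... .to_q.lora_A.weight`):

```python
import torch
from diffusers.utils import convert_state_dict_to_diffusers

# Toy peft-style LoRA state dict (illustrative keys and shapes).
peft_state_dict = {
    "to_q.lora_A.weight": torch.zeros(4, 320),  # rank-4 "down" projection
    "to_q.lora_B.weight": torch.zeros(320, 4),  # rank-4 "up" projection
}

# The peft layout is detected from the ".lora_A" keys and remapped
# through PEFT_TO_DIFFUSERS (extended by this commit, see below).
diffusers_state_dict = convert_state_dict_to_diffusers(peft_state_dict)
print(sorted(diffusers_state_dict))
# Expected, per the PEFT_TO_DIFFUSERS mapping extended below:
# ['to_q.lora.down.weight', 'to_q.lora.up.weight']
```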
```diff
@@ -54,7 +54,7 @@ from diffusers import (
 from diffusers.loaders import LoraLoaderMixin
 from diffusers.optimization import get_scheduler
 from diffusers.training_utils import compute_snr
-from diffusers.utils import check_min_version, is_wandb_available
+from diffusers.utils import check_min_version, convert_state_dict_to_diffusers, is_wandb_available
 from diffusers.utils.import_utils import is_xformers_available
```
```diff
@@ -1019,11 +1019,15 @@ def main(args):
             for model in models:
                 if isinstance(model, type(accelerator.unwrap_model(unet))):
-                    unet_lora_layers_to_save = get_peft_model_state_dict(model)
+                    unet_lora_layers_to_save = convert_state_dict_to_diffusers(get_peft_model_state_dict(model))
                 elif isinstance(model, type(accelerator.unwrap_model(text_encoder_one))):
-                    text_encoder_one_lora_layers_to_save = get_peft_model_state_dict(model)
+                    text_encoder_one_lora_layers_to_save = convert_state_dict_to_diffusers(
+                        get_peft_model_state_dict(model)
+                    )
                 elif isinstance(model, type(accelerator.unwrap_model(text_encoder_two))):
-                    text_encoder_two_lora_layers_to_save = get_peft_model_state_dict(model)
+                    text_encoder_two_lora_layers_to_save = convert_state_dict_to_diffusers(
+                        get_peft_model_state_dict(model)
+                    )
                 else:
                     raise ValueError(f"unexpected save model: {model.__class__}")
```
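
For context, these collected per-model dicts are what the save hook hands to the pipeline's LoRA serializer; roughly as follows, following the `save_lora_weights` classmethod on the SDXL pipeline, with `output_dir` as provided by the hook (the exact call site sits outside this hunk):

```python
from diffusers import StableDiffusionXLPipeline

# Roughly how the collected layer dicts get written to disk; since the
# conversion above now runs first, the file on disk never contains
# peft-style keys.
StableDiffusionXLPipeline.save_lora_weights(
    output_dir,
    unet_lora_layers=unet_lora_layers_to_save,
    text_encoder_lora_layers=text_encoder_one_lora_layers_to_save,
    text_encoder_2_lora_layers=text_encoder_two_lora_layers_to_save,
)
```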
```diff
@@ -1615,13 +1619,17 @@ def main(args):
     if accelerator.is_main_process:
         unet = accelerator.unwrap_model(unet)
         unet = unet.to(torch.float32)
-        unet_lora_layers = get_peft_model_state_dict(unet)
+        unet_lora_layers = convert_state_dict_to_diffusers(get_peft_model_state_dict(unet))
 
         if args.train_text_encoder:
             text_encoder_one = accelerator.unwrap_model(text_encoder_one)
-            text_encoder_lora_layers = get_peft_model_state_dict(text_encoder_one.to(torch.float32))
+            text_encoder_lora_layers = convert_state_dict_to_diffusers(
+                get_peft_model_state_dict(text_encoder_one.to(torch.float32))
+            )
             text_encoder_two = accelerator.unwrap_model(text_encoder_two)
-            text_encoder_2_lora_layers = get_peft_model_state_dict(text_encoder_two.to(torch.float32))
+            text_encoder_2_lora_layers = convert_state_dict_to_diffusers(
+                get_peft_model_state_dict(text_encoder_two.to(torch.float32))
+            )
         else:
             text_encoder_lora_layers = None
             text_encoder_2_lora_layers = None
```
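
The payoff once training finishes: the saved LoRA loads back through the standard loader even on a machine without `peft`. A minimal sketch (the base-model ID and output path below are placeholders):

```python
import torch
from diffusers import StableDiffusionXLPipeline

# Load the base model and attach the trained LoRA; no peft import is
# needed, because the checkpoint keys are already diffusers-native.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
pipe.load_lora_weights("path/to/output_dir")  # placeholder path
```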
```diff
@@ -79,6 +79,14 @@ PEFT_TO_DIFFUSERS = {
     ".v_proj.lora_A": ".v_proj.lora_linear_layer.down",
     ".out_proj.lora_B": ".out_proj.lora_linear_layer.up",
     ".out_proj.lora_A": ".out_proj.lora_linear_layer.down",
+    "to_k.lora_A": "to_k.lora.down",
+    "to_k.lora_B": "to_k.lora.up",
+    "to_q.lora_A": "to_q.lora.down",
+    "to_q.lora_B": "to_q.lora.up",
+    "to_v.lora_A": "to_v.lora.down",
+    "to_v.lora_B": "to_v.lora.up",
+    "to_out.0.lora_A": "to_out.0.lora.down",
+    "to_out.0.lora_B": "to_out.0.lora.up",
 }
 
 DIFFUSERS_OLD_TO_DIFFUSERS = {
```
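
The mapping itself is just a substring-rewrite table: the new entries cover the unet attention projections (`to_q`/`to_k`/`to_v`/`to_out.0`), complementing the existing text-encoder entries. A hypothetical, stripped-down version of how such a table gets applied (diffusers' own conversion helper is more involved, but the core rename loop looks like this):

```python
def rename_keys(state_dict, mapping):
    # Hypothetical helper: rewrite each key using the first matching
    # old-pattern -> new-pattern pair; keys with no match pass through.
    renamed = {}
    for key, value in state_dict.items():
        for old, new in mapping.items():
            if old in key:
                key = key.replace(old, new)
                break  # first matching pattern wins
        renamed[key] = value
    return renamed
```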