Commit 182c323c authored by Muyang Li, committed by Zhekai Zhang

[minor] fix the lora scaling issue

parent bf0813a6
@@ -50,6 +50,11 @@ def comfyui2diffusers(
         new_k = new_k.replace("_txt_mlp_0", ".ff_context.net.0.proj")
         new_k = new_k.replace("_txt_mlp_2", ".ff_context.net.2")
         new_k = new_k.replace("_txt_mod_lin", ".norm1_context.linear")
+        if "lora_down" in k:
+            alpha = tensors[k.replace("lora_down.weight", "alpha")]
+            rank = v.shape[0]
+            v = v * alpha / rank
+            max_rank = max(max_rank, rank)
         new_tensors[new_k] = v
     else:
         assert "lora_unet_single_blocks" in k
...
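For context: ComfyUI-style LoRA checkpoints store a per-module alpha tensor next to each lora_down.weight, and the effective weight update is scaled by alpha / rank. This commit folds that scale into the down-projection weight during conversion, so downstream code that ignores alpha still applies the update at the right magnitude. A minimal sketch of the same scaling applied at merge time (the helper name and tensor arguments are illustrative, not part of this repository):

    import torch

    def merge_lora_weight(base_w, lora_down, lora_up, alpha):
        """Illustrative helper: apply a LoRA update with alpha/rank scaling.

        Shapes: base_w (out, in); lora_up (out, rank); lora_down (rank, in).
        """
        rank = lora_down.shape[0]   # rank is the first dim of lora_down, as in the diff
        scale = alpha / rank        # the same factor the commit folds into v
        return base_w + scale * (lora_up @ lora_down)

Folding the scale into lora_down (as the conversion does) is equivalent to applying it at merge time, since the product is linear in either factor.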