Commit c7f41661 authored by muyangli
Browse files

[minor] Fix a bug when converting LoRAs

parent c03abb58
__version__ = "0.0.2beta5"
__version__ = "0.0.2beta6"
......@@ -54,7 +54,7 @@ if __name__ == "__main__":
lora_format = args.lora_format
if lora_format == "diffusers":
extra_lora_dict = load_state_dict_in_safetensors(args.lora_path, filter_prefix="transformer.")
extra_lora_dict = load_state_dict_in_safetensors(args.lora_path)
else:
if lora_format == "comfyui":
extra_lora_dict = comfyui2diffusers(args.lora_path)
......@@ -62,7 +62,7 @@ if __name__ == "__main__":
extra_lora_dict = xlab2diffusers(args.lora_path)
else:
raise NotImplementedError(f"LoRA format {lora_format} is not supported.")
extra_lora_dict = filter_state_dict(extra_lora_dict, filter_prefix="transformer.")
extra_lora_dict = filter_state_dict(extra_lora_dict)
converted = convert_to_nunchaku_flux_lowrank_dict(
base_model=orig_state_dict,
......
......@@ -55,7 +55,7 @@ def load_state_dict_in_safetensors(
return state_dict
def filter_state_dict(state_dict: dict[str, torch.Tensor], filter_prefix: str) -> dict[str, torch.Tensor]:
def filter_state_dict(state_dict: dict[str, torch.Tensor], filter_prefix: str = "") -> dict[str, torch.Tensor]:
"""Filter state dict.
Args:
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment