Commit a2d3d6af (unverified) in renzhc/diffusers_dcu
Authored Mar 07, 2025 by Sayak Paul, committed by GitHub on Mar 07, 2025
[LoRA] remove full key prefix from peft. (#11004)
remove full key prefix from peft.
Parent: 363d1ab7
Showing 1 changed file with 4 additions and 15 deletions.
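For context, the "full key prefix" in the title is PEFT's FULLY_QUALIFIED_PATTERN_KEY_PREFIX constant, which the removed code tried to import from peft.utils.constants. Before this commit, rank entries could be written with that prefix prepended to the state-dict key; afterwards they always use the plain key. A minimal sketch of the two keying schemes, with a made-up module key and a placeholder standing in for the prefix's real value:

# Illustrative only: "<prefix>" is a placeholder for FULLY_QUALIFIED_PATTERN_KEY_PREFIX,
# and the key below is a made-up example, not taken from a real checkpoint.
key = "blocks.0.attn.to_q.lora_B.weight"
rank_dim = 4

rank_before = {f"<prefix>{key}": rank_dim}  # old path, when the constant was importable
rank_after = {key: rank_dim}                # new path after this commit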
src/diffusers/loaders/peft.py (+4, -15)
@@ -192,11 +192,6 @@ class PeftAdapterMixin:
         from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict
         from peft.tuners.tuners_utils import BaseTunerLayer
 
-        try:
-            from peft.utils.constants import FULLY_QUALIFIED_PATTERN_KEY_PREFIX
-        except ImportError:
-            FULLY_QUALIFIED_PATTERN_KEY_PREFIX = None
-
         cache_dir = kwargs.pop("cache_dir", None)
         force_download = kwargs.pop("force_download", False)
         proxies = kwargs.pop("proxies", None)
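The block removed above is an optional-import guard: it probed whether the installed PEFT version exposes FULLY_QUALIFIED_PATTERN_KEY_PREFIX and fell back to None otherwise, so later code could branch on it. A self-contained sketch of that detection pattern as it stood before this commit (not diffusers' exact call sites):

# Guarded import this commit deletes; runs with or without a recent PEFT installed.
try:
    from peft.utils.constants import FULLY_QUALIFIED_PATTERN_KEY_PREFIX
except ImportError:
    FULLY_QUALIFIED_PATTERN_KEY_PREFIX = None

if FULLY_QUALIFIED_PATTERN_KEY_PREFIX:
    print("PEFT exposes the fully qualified pattern key prefix")
else:
    print("Constant unavailable: fall back to plain keys")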
@@ -261,22 +256,16 @@ class PeftAdapterMixin:
                 # Cannot figure out rank from lora layers that don't have atleast 2 dimensions.
                 # Bias layers in LoRA only have a single dimension
                 if "lora_B" in key and val.ndim > 1:
-                    # Support to handle cases where layer patterns are treated as full layer names
-                    # was added later in PEFT. So, we handle it accordingly.
-                    # TODO: when we fix the minimal PEFT version for Diffusers,
-                    # we should remove `_maybe_adjust_config()`.
-                    if FULLY_QUALIFIED_PATTERN_KEY_PREFIX:
-                        rank[f"{FULLY_QUALIFIED_PATTERN_KEY_PREFIX}{key}"] = val.shape[1]
-                    else:
-                        rank[key] = val.shape[1]
+                    # TODO: revisit this after https://github.com/huggingface/peft/pull/2382 is merged.
+                    rank[key] = val.shape[1]
 
             if network_alphas is not None and len(network_alphas) >= 1:
                 alpha_keys = [k for k in network_alphas.keys() if k.startswith(f"{prefix}.")]
                 network_alphas = {k.replace(f"{prefix}.", ""): v for k, v in network_alphas.items() if k in alpha_keys}
 
             lora_config_kwargs = get_peft_kwargs(rank, network_alpha_dict=network_alphas, peft_state_dict=state_dict)
-            if not FULLY_QUALIFIED_PATTERN_KEY_PREFIX:
-                lora_config_kwargs = _maybe_adjust_config(lora_config_kwargs)
+            # TODO: revisit this after https://github.com/huggingface/peft/pull/2382 is merged.
+            lora_config_kwargs = _maybe_adjust_config(lora_config_kwargs)
 
             if "use_dora" in lora_config_kwargs:
                 if lora_config_kwargs["use_dora"]:
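Taken together, the second hunk means the adapter-loading path in PeftAdapterMixin now records every LoRA rank under the plain state-dict key and always passes the resulting kwargs through _maybe_adjust_config(). A small runnable sketch of the rank-collection loop as it behaves after this commit, using a toy state dict (keys and shapes are made up for illustration):

import torch

# Toy LoRA state dict; keys and shapes are illustrative, not from a real checkpoint.
state_dict = {
    "blocks.0.attn.to_q.lora_A.weight": torch.zeros(4, 64),  # (rank, in_features)
    "blocks.0.attn.to_q.lora_B.weight": torch.zeros(64, 4),  # (out_features, rank)
    "blocks.0.attn.to_q.lora_B.bias": torch.zeros(64),       # 1-D bias, skipped below
}

rank = {}
for key, val in state_dict.items():
    # Mirrors the post-commit check: rank is read only from lora_B tensors with
    # at least 2 dimensions, since bias entries are 1-D.
    if "lora_B" in key and val.ndim > 1:
        rank[key] = val.shape[1]  # plain key, no FULLY_QUALIFIED_PATTERN_KEY_PREFIX

print(rank)  # {'blocks.0.attn.to_q.lora_B.weight': 4}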