renzhc / diffusers_dcu · Commits · b98b314b

Unverified commit b98b314b, authored Dec 18, 2023 by Sayak Paul and committed via GitHub on Dec 18, 2023.

[Training] remove deprecated method from lora scripts. (#6207)

Remove the deprecated method from the LoRA training scripts.

Parent: 74558ff6
Showing 4 changed files with 0 additions and 132 deletions (+0 -132).
examples/dreambooth/train_dreambooth_lora.py (+0 -33)
examples/dreambooth/train_dreambooth_lora_sdxl.py (+0 -33)
examples/text_to_image/train_text_to_image_lora.py (+0 -33)
examples/text_to_image/train_text_to_image_lora_sdxl.py (+0 -33)
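Each of the four scripts deletes the same module-level helper, text_encoder_lora_state_dict, which manually flattened the legacy lora_linear_layer parameters of every text-encoder attention projection into a diffusers-format state dict. With the training scripts now rewritten on top of PEFT (as the removed TODO anticipated), the same state dict can be obtained from PEFT utilities instead. A minimal sketch of that replacement path, not part of this commit; the checkpoint name, rank, and variable names are illustrative assumptions:

# Sketch only: gather the text-encoder LoRA state dict via PEFT instead of the
# removed helper. Assumes the encoder is given LoRA adapters beforehand.
from peft import LoraConfig, get_peft_model_state_dict
from transformers import CLIPTextModel
from diffusers.utils import convert_state_dict_to_diffusers

text_encoder = CLIPTextModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="text_encoder"  # illustrative base model
)
# Attach LoRA adapters to the attention projections (rank/targets chosen for illustration).
text_encoder.add_adapter(
    LoraConfig(r=4, lora_alpha=4, target_modules=["q_proj", "k_proj", "v_proj", "out_proj"])
)

# Collect only the LoRA parameters and convert the PEFT key layout into the
# serialization format diffusers expects.
text_encoder_lora_layers = convert_state_dict_to_diffusers(
    get_peft_model_state_dict(text_encoder)
)

The resulting dict can then be passed as text_encoder_lora_layers to StableDiffusionPipeline.save_lora_weights, which is why the hand-rolled helper removed below is no longer needed.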
examples/dreambooth/train_dreambooth_lora.py
@@ -64,39 +64,6 @@ check_min_version("0.25.0.dev0")
 logger = get_logger(__name__)
 
-# TODO: This function should be removed once training scripts are rewritten in PEFT
-def text_encoder_lora_state_dict(text_encoder):
-    state_dict = {}
-
-    def text_encoder_attn_modules(text_encoder):
-        from transformers import CLIPTextModel, CLIPTextModelWithProjection
-
-        attn_modules = []
-
-        if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
-            for i, layer in enumerate(text_encoder.text_model.encoder.layers):
-                name = f"text_model.encoder.layers.{i}.self_attn"
-                mod = layer.self_attn
-                attn_modules.append((name, mod))
-
-        return attn_modules
-
-    for name, module in text_encoder_attn_modules(text_encoder):
-        for k, v in module.q_proj.lora_linear_layer.state_dict().items():
-            state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v
-
-        for k, v in module.k_proj.lora_linear_layer.state_dict().items():
-            state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v
-
-        for k, v in module.v_proj.lora_linear_layer.state_dict().items():
-            state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v
-
-        for k, v in module.out_proj.lora_linear_layer.state_dict().items():
-            state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v
-
-    return state_dict
-
 def save_model_card(
     repo_id: str,
     images=None,
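The deleted helper was only used when serializing the trained adapters, so its removal does not change how the saved weights are consumed. A brief usage sketch for loading the output of this DreamBooth LoRA script; the base checkpoint, output directory, and prompt are placeholders, not taken from this commit:

# Sketch only: load LoRA weights produced by the updated training script.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16  # illustrative base model
).to("cuda")
pipe.load_lora_weights("path/to/lora-output-dir")  # loads UNet and text-encoder LoRA layers

image = pipe("a photo of sks dog in a bucket", num_inference_steps=25).images[0]
image.save("dog.png")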
examples/dreambooth/train_dreambooth_lora_sdxl.py
@@ -64,39 +64,6 @@ check_min_version("0.25.0.dev0")
 logger = get_logger(__name__)
 
-# TODO: This function should be removed once training scripts are rewritten in PEFT
-def text_encoder_lora_state_dict(text_encoder):
-    state_dict = {}
-
-    def text_encoder_attn_modules(text_encoder):
-        from transformers import CLIPTextModel, CLIPTextModelWithProjection
-
-        attn_modules = []
-
-        if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
-            for i, layer in enumerate(text_encoder.text_model.encoder.layers):
-                name = f"text_model.encoder.layers.{i}.self_attn"
-                mod = layer.self_attn
-                attn_modules.append((name, mod))
-
-        return attn_modules
-
-    for name, module in text_encoder_attn_modules(text_encoder):
-        for k, v in module.q_proj.lora_linear_layer.state_dict().items():
-            state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v
-
-        for k, v in module.k_proj.lora_linear_layer.state_dict().items():
-            state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v
-
-        for k, v in module.v_proj.lora_linear_layer.state_dict().items():
-            state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v
-
-        for k, v in module.out_proj.lora_linear_layer.state_dict().items():
-            state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v
-
-    return state_dict
-
 def save_model_card(
     repo_id: str,
     images=None,
examples/text_to_image/train_text_to_image_lora.py
@@ -54,39 +54,6 @@ check_min_version("0.25.0.dev0")
 logger = get_logger(__name__, log_level="INFO")
 
-# TODO: This function should be removed once training scripts are rewritten in PEFT
-def text_encoder_lora_state_dict(text_encoder):
-    state_dict = {}
-
-    def text_encoder_attn_modules(text_encoder):
-        from transformers import CLIPTextModel, CLIPTextModelWithProjection
-
-        attn_modules = []
-
-        if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
-            for i, layer in enumerate(text_encoder.text_model.encoder.layers):
-                name = f"text_model.encoder.layers.{i}.self_attn"
-                mod = layer.self_attn
-                attn_modules.append((name, mod))
-
-        return attn_modules
-
-    for name, module in text_encoder_attn_modules(text_encoder):
-        for k, v in module.q_proj.lora_linear_layer.state_dict().items():
-            state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v
-
-        for k, v in module.k_proj.lora_linear_layer.state_dict().items():
-            state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v
-
-        for k, v in module.v_proj.lora_linear_layer.state_dict().items():
-            state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v
-
-        for k, v in module.out_proj.lora_linear_layer.state_dict().items():
-            state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v
-
-    return state_dict
-
 def save_model_card(repo_id: str, images=None, base_model=str, dataset_name=str, repo_folder=None):
     img_str = ""
     for i, image in enumerate(images):
examples/text_to_image/train_text_to_image_lora_sdxl.py
@@ -63,39 +63,6 @@ check_min_version("0.25.0.dev0")
 logger = get_logger(__name__)
 
-# TODO: This function should be removed once training scripts are rewritten in PEFT
-def text_encoder_lora_state_dict(text_encoder):
-    state_dict = {}
-
-    def text_encoder_attn_modules(text_encoder):
-        from transformers import CLIPTextModel, CLIPTextModelWithProjection
-
-        attn_modules = []
-
-        if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
-            for i, layer in enumerate(text_encoder.text_model.encoder.layers):
-                name = f"text_model.encoder.layers.{i}.self_attn"
-                mod = layer.self_attn
-                attn_modules.append((name, mod))
-
-        return attn_modules
-
-    for name, module in text_encoder_attn_modules(text_encoder):
-        for k, v in module.q_proj.lora_linear_layer.state_dict().items():
-            state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v
-
-        for k, v in module.k_proj.lora_linear_layer.state_dict().items():
-            state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v
-
-        for k, v in module.v_proj.lora_linear_layer.state_dict().items():
-            state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v
-
-        for k, v in module.out_proj.lora_linear_layer.state_dict().items():
-            state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v
-
-    return state_dict
-
 def save_model_card(
     repo_id: str,
     images=None,