Commit 17470057 authored by Patrick von Platen

make style

parent 3979aac9
@@ -848,7 +848,7 @@ class LoraLoaderMixin:
         """
         # Loop over the original attention modules.
         for name, _ in self.text_encoder.named_modules():
-            if any([x in name for x in TEXT_ENCODER_TARGET_MODULES]):
+            if any(x in name for x in TEXT_ENCODER_TARGET_MODULES):
                 # Retrieve the module and its corresponding LoRA processor.
                 module = self.text_encoder.get_submodule(name)
                 # Construct a new function that performs the LoRA merging. We will monkey patch
@@ -46,7 +46,7 @@ def create_unet_lora_layers(unet: nn.Module):
 def create_text_encoder_lora_layers(text_encoder: nn.Module):
     text_lora_attn_procs = {}
     for name, module in text_encoder.named_modules():
-        if any([x in name for x in TEXT_ENCODER_TARGET_MODULES]):
+        if any(x in name for x in TEXT_ENCODER_TARGET_MODULES):
             text_lora_attn_procs[name] = LoRAAttnProcessor(hidden_size=module.out_features, cross_attention_dim=None)
     text_encoder_lora_layers = AttnProcsLayers(text_lora_attn_procs)
     return text_encoder_lora_layers
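
Both hunks make the same style cleanup: `any([x in name for x in TEXT_ENCODER_TARGET_MODULES])` materializes the entire list before `any()` runs, whereas the generator form `any(x in name for x in TEXT_ENCODER_TARGET_MODULES)` short-circuits on the first match and allocates no intermediate list. A minimal sketch of the difference, using a made-up `is_target` helper and sample module names (not from the diffusers codebase):

# Sketch: any() over a generator short-circuits; over a list comprehension it
# cannot, because the full list is built before any() ever runs.

def is_target(name, seen):
    # Hypothetical predicate standing in for the `x in name` membership checks.
    seen.append(name)
    return "q_proj" in name

names = ["q_proj.0", "k_proj.0", "v_proj.0"]

evaluated = []
any([is_target(n, evaluated) for n in names])  # list form: all 3 names checked
print(len(evaluated))  # 3

evaluated = []
any(is_target(n, evaluated) for n in names)    # generator form: stops at first hit
print(len(evaluated))  # 1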