Unverified Commit 20e4b6a6 authored by Sayak Paul, committed by GitHub

[LoRA] change to warning from info when notifying the users about a LoRA no-op (#11044)

* move to warning.

* test related changes.
parent 4ea9f89b
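
For context, a minimal sketch of how the no-op surfaces to an end user (the checkpoint id is a placeholder; the no-op state dict mirrors the one used in the tests below). Because the library's default verbosity is WARNING, an info-level message stays hidden for most users, so emitting the no-op at warning level makes it visible without any logging setup:

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("some/lora-capable-checkpoint")  # placeholder model id

# A LoRA state dict whose keys match none of the pipeline's components: loading it is a no-op.
no_op_state_dict = {"lora_foo": torch.tensor(2.0), "lora_bar": torch.tensor(3.0)}

# Previously reported via logger.info() (hidden at the default WARNING verbosity);
# after this commit the no-op is reported via logger.warning() and shown by default.
pipe.load_lora_weights(no_op_state_dict)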
@@ -423,8 +423,12 @@ def _load_lora_into_text_encoder(
     # Unsafe code />
     if prefix is not None and not state_dict:
-        logger.info(
-            f"No LoRA keys associated to {text_encoder.__class__.__name__} found with the {prefix=}. This is safe to ignore if LoRA state dict didn't originally have any {text_encoder.__class__.__name__} related params. Open an issue if you think it's unexpected: https://github.com/huggingface/diffusers/issues/new"
+        logger.warning(
+            f"No LoRA keys associated to {text_encoder.__class__.__name__} found with the {prefix=}. "
+            "This is safe to ignore if LoRA state dict didn't originally have any "
+            f"{text_encoder.__class__.__name__} related params. You can also try specifying `prefix=None` "
+            "to resolve the warning. Otherwise, open an issue if you think it's unexpected: "
+            "https://github.com/huggingface/diffusers/issues/new"
         )
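The no-op branch above fires when prefix filtering leaves the state dict empty. A minimal, self-contained sketch of that mechanism (the key names and tensor shapes are hypothetical, not taken from the diff):

import torch

# Hypothetical LoRA state dict that only targets the transformer, not the text encoder.
full_state_dict = {"transformer.blocks.0.attn.to_q.lora_A.weight": torch.zeros(4, 16)}

prefix = "text_encoder"
# Keep only the keys under the requested prefix, as the loader does conceptually.
state_dict = {k: v for k, v in full_state_dict.items() if k.startswith(f"{prefix}.")}

# state_dict is now empty, so `prefix is not None and not state_dict` is True
# and the warning above is emitted.
print(len(state_dict))  # 0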
@@ -354,8 +354,12 @@ class PeftAdapterMixin:
         # Unsafe code />
         if prefix is not None and not state_dict:
-            logger.info(
-                f"No LoRA keys associated to {self.__class__.__name__} found with the {prefix=}. This is safe to ignore if LoRA state dict didn't originally have any {self.__class__.__name__} related params. Open an issue if you think it's unexpected: https://github.com/huggingface/diffusers/issues/new"
+            logger.warning(
+                f"No LoRA keys associated to {self.__class__.__name__} found with the {prefix=}. "
+                "This is safe to ignore if LoRA state dict didn't originally have any "
+                f"{self.__class__.__name__} related params. You can also try specifying `prefix=None` "
+                "to resolve the warning. Otherwise, open an issue if you think it's unexpected: "
+                "https://github.com/huggingface/diffusers/issues/new"
             )

     def save_lora_adapter(
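The rewritten message points users at `prefix=None` as a way to resolve the warning when the missing keys are expected. A hedged usage sketch, not taken from the diff: the model attribute and the state dict variable are placeholders, and the surrounding method accepting a dict plus a `prefix` keyword is assumed from the hunk above:

# Option 1: tell the loader not to filter by prefix (placeholder names, assumed signature).
pipe.transformer.load_lora_adapter(unprefixed_lora_state_dict, prefix=None)

# Option 2: lower the library verbosity if the no-op is intentional and the warning is noise.
from diffusers.utils import logging
logging.set_verbosity_error()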
@@ -1961,7 +1961,7 @@ class PeftLoraLoaderMixinTests:
         no_op_state_dict = {"lora_foo": torch.tensor(2.0), "lora_bar": torch.tensor(3.0)}
         logger = logging.get_logger("diffusers.loaders.peft")
-        logger.setLevel(logging.INFO)
+        logger.setLevel(logging.WARNING)
         with CaptureLogger(logger) as cap_logger:
             pipe.load_lora_weights(no_op_state_dict)
@@ -1981,7 +1981,7 @@ class PeftLoraLoaderMixinTests:
         prefix = "text_encoder_2"
         logger = logging.get_logger("diffusers.loaders.lora_base")
-        logger.setLevel(logging.INFO)
+        logger.setLevel(logging.WARNING)
         with CaptureLogger(logger) as cap_logger:
             self.pipeline_class.load_lora_into_text_encoder(
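The test hunks raise the captured level to WARNING to match the new logging call. A minimal sketch of the capture-and-assert pattern they rely on (the asserted substring comes from the new message; `pipe` is assumed to be a loaded pipeline as in the surrounding test):

import torch
from diffusers.utils import logging
from diffusers.utils.testing_utils import CaptureLogger

logger = logging.get_logger("diffusers.loaders.peft")
logger.setLevel(logging.WARNING)

no_op_state_dict = {"lora_foo": torch.tensor(2.0), "lora_bar": torch.tensor(3.0)}
with CaptureLogger(logger) as cap_logger:
    pipe.load_lora_weights(no_op_state_dict)  # `pipe` assumed from the surrounding test

# The warning emitted by the loader should now appear in the captured output.
assert "No LoRA keys associated to" in cap_logger.out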