"...text-generation-inference.git" did not exist on "521de6cacd2af42caa1f93c75a34460a6ecddf9e"
Unverified Commit c6ae8837 authored by Sayak Paul's avatar Sayak Paul Committed by GitHub
Browse files

remove print statements from attention processor. (#3592)

parent 5559d042
...@@ -222,9 +222,6 @@ class Attention(nn.Module):
                )
                processor.load_state_dict(self.processor.state_dict())
                processor.to(self.processor.to_q_lora.up.weight.device)
print(
f"is_lora is set to {is_lora}, type: LoRAXFormersAttnProcessor: {isinstance(processor, LoRAXFormersAttnProcessor)}"
)
            elif is_custom_diffusion:
                processor = CustomDiffusionXFormersAttnProcessor(
                    train_kv=self.processor.train_kv,
...@@ -262,7 +259,6 @@ class Attention(nn.Module):
            # We use the AttnProcessor2_0 by default when torch 2.x is used which uses
            # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention
            # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1
print("Still defaulting to: AttnProcessor2_0 :O")
            processor = (
                AttnProcessor2_0()
                if hasattr(F, "scaled_dot_product_attention") and self.scale_qk
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment