Unverified commit 0a0bb526 authored by Sayak Paul, committed by GitHub

[LoRA deprecation] LoRA deprecation trilogy (#6450)

* debug

* debug

* more debug

* more more debug

* remove tests for LoRAAttnProcessors.

* rename
parent 2fada8dc
@@ -1496,7 +1496,7 @@ class UNet2DConditionLoRAModelTests(unittest.TestCase):
         inputs_dict = self.dummy_input
         return init_dict, inputs_dict

-    def test_lora_processors(self):
+    def test_lora_at_different_scales(self):
         # enable deterministic behavior for gradient checkpointing
         init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
@@ -1514,9 +1514,6 @@ class UNet2DConditionLoRAModelTests(unittest.TestCase):
         model.load_attn_procs(lora_params)
         model.to(torch_device)

-        # test that attn processors can be set to itself
-        model.set_attn_processor(model.attn_processors)
-
         with torch.no_grad():
             sample2 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.0}).sample
             sample3 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample
@@ -1595,7 +1592,7 @@ class UNet2DConditionLoRAModelTests(unittest.TestCase):
 @deprecate_after_peft_backend
-class UNet3DConditionModelTests(unittest.TestCase):
+class UNet3DConditionLoRAModelTests(unittest.TestCase):
     model_class = UNet3DConditionModel
     main_input_name = "sample"
@@ -1638,7 +1635,7 @@ class UNet3DConditionModelTests(unittest.TestCase):
         inputs_dict = self.dummy_input
         return init_dict, inputs_dict

-    def test_lora_processors(self):
+    def test_lora_at_different_scales(self):
         init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
         init_dict["attention_head_dim"] = 8
@@ -1655,9 +1652,6 @@ class UNet3DConditionModelTests(unittest.TestCase):
         model.load_attn_procs(unet_lora_params)
         model.to(torch_device)

-        # test that attn processors can be set to itself
-        model.set_attn_processor(model.attn_processors)
-
         with torch.no_grad():
             sample2 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.0}).sample
             sample3 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample
...
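
For context, a minimal sketch (not part of this diff) of the behavior the renamed test_lora_at_different_scales checks: once LoRA attention processors are attached via load_attn_procs, the LoRA contribution is modulated at inference time through cross_attention_kwargs={"scale": ...}, so different scales should yield different samples. The names check_lora_scales, model, and inputs_dict below are hypothetical stand-ins for the test fixtures.

import torch

def check_lora_scales(model, inputs_dict):
    # Assumes LoRA weights were already attached, e.g. via
    # model.load_attn_procs(lora_params).
    with torch.no_grad():
        # scale 0.0 disables the LoRA update entirely
        sample_off = model(**inputs_dict, cross_attention_kwargs={"scale": 0.0}).sample
        # scale 0.5 blends in half of the LoRA update
        sample_half = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample
    # With non-trivial LoRA weights loaded, the two outputs should differ.
    assert not torch.allclose(sample_off, sample_half, atol=1e-4)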