"tests/git@developer.sourcefind.cn:OpenDAS/mmcv.git" did not exist on "761f725b70f4edb9ca7e9108dc8bcf7397093d8d"
Unverified commit 57084dac authored by Tolga Cangöz, committed by GitHub

Remove unnecessary lines (#8569)



* Remove unused line


---------
Co-authored-by: Sayak Paul <spsayakpaul@gmail.com>
parent 70611a10
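
The hunks below all remove the same two kinds of dead code: bare self-assignments such as `x = x`, and a doubled target in a chained assignment such as `x = x = f()`. A minimal sketch of why both forms are no-ops (names are illustrative, not from the diff):

def f():
    return 42

# Chained assignment binds the same value to every target, so the extra
# `x =` in `x = x = f()` is redundant.
x = x = f()
assert x == 42

# A bare self-assignment just rebinds a name to the object it already holds.
x = x
assert x == 42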
@@ -467,8 +467,6 @@ def make_emblist(self, prompts):
 def split_dims(xs, height, width):
-    xs = xs
-
     def repeat_div(x, y):
         while y > 0:
             x = math.ceil(x / 2)
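For context, `repeat_div` in the surrounding file halves a value with ceiling rounding `y` times (e.g. a latent dimension across downsampling levels). A standalone sketch, assuming the loop decrements `y` on each pass (the decrement falls outside the shown context):

import math

def repeat_div(x, y):
    # Ceil-halve x, y times: the size of a dimension after y 2x reductions.
    while y > 0:
        x = math.ceil(x / 2)
        y = y - 1
    return x

assert repeat_div(64, 3) == 8  # 64 -> 32 -> 16 -> 8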
@@ -1112,9 +1112,7 @@ class FusedJointAttnProcessor2_0:
         key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
         value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

-        hidden_states = hidden_states = F.scaled_dot_product_attention(
-            query, key, value, dropout_p=0.0, is_causal=False
-        )
+        hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False)

         hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
         hidden_states = hidden_states.to(query.dtype)
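The attention code around this hunk follows the standard multi-head layout for `torch.nn.functional.scaled_dot_product_attention`: split the channel dimension into heads, attend, then merge the heads back. A self-contained sketch with illustrative shapes:

import torch
import torch.nn.functional as F

batch_size, seq_len, heads, head_dim = 2, 16, 8, 64

query = torch.randn(batch_size, seq_len, heads * head_dim)
key = torch.randn(batch_size, seq_len, heads * head_dim)
value = torch.randn(batch_size, seq_len, heads * head_dim)

# (batch, seq, heads * head_dim) -> (batch, heads, seq, head_dim)
query = query.view(batch_size, -1, heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, heads, head_dim).transpose(1, 2)

hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False)

# Merge heads back: (batch, heads, seq, head_dim) -> (batch, seq, heads * head_dim)
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, heads * head_dim)
assert hidden_states.shape == (batch_size, seq_len, heads * head_dim)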
@@ -308,8 +308,6 @@ class SD3ControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin):
                 "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
             )

-        height, width = hidden_states.shape[-2:]
-
         hidden_states = self.pos_embed(hidden_states)  # takes care of adding positional embeddings too.
         temb = self.time_text_embed(timestep, pooled_projections)
         encoder_hidden_states = self.context_embedder(encoder_hidden_states)
@@ -478,9 +478,7 @@ class StableCascadeUNet(ModelMixin, ConfigMixin, FromOriginalModelMixin):
                             create_custom_forward(block), x, r_embed, use_reentrant=False
                         )
                     else:
-                        x = x = torch.utils.checkpoint.checkpoint(
-                            create_custom_forward(block), use_reentrant=False
-                        )
+                        x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), use_reentrant=False)
                 if i < len(repmap):
                     x = repmap[i](x)
             level_outputs.insert(0, x)
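For context, `torch.utils.checkpoint.checkpoint` trades memory for compute by re-running the wrapped block during the backward pass instead of caching its activations. A minimal sketch of the wrapper pattern used here (module and shapes are illustrative):

import torch

def create_custom_forward(module):
    # checkpoint() replays this wrapper with the saved positional inputs.
    def custom_forward(*inputs):
        return module(*inputs)
    return custom_forward

block = torch.nn.Linear(8, 8)
x = torch.randn(4, 8, requires_grad=True)

# use_reentrant=False selects the non-reentrant checkpoint implementation,
# which PyTorch recommends for new code.
x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, use_reentrant=False)
x.sum().backward()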
@@ -661,7 +661,6 @@ class SemanticStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
                         noise_guidance_edit_tmp = torch.einsum(
                             "cb,cbijk->bijk", concept_weights_tmp, noise_guidance_edit_tmp
                         )
-                        noise_guidance_edit_tmp = noise_guidance_edit_tmp
                         noise_guidance = noise_guidance + noise_guidance_edit_tmp

                         self.sem_guidance[i] = noise_guidance_edit_tmp.detach().cpu()
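The einsum in this hunk computes a per-concept weighted sum of edit directions: "cb,cbijk->bijk" multiplies a (concept, batch) weight matrix into a (concept, batch, i, j, k) tensor and sums out the concept axis. A standalone sketch with illustrative shapes:

import torch

concepts, batch = 3, 2
concept_weights_tmp = torch.randn(concepts, batch)               # "cb"
noise_guidance_edit_tmp = torch.randn(concepts, batch, 4, 8, 8)  # "cbijk"

# Weight each concept's guidance and sum over the concept axis c.
out = torch.einsum("cb,cbijk->bijk", concept_weights_tmp, noise_guidance_edit_tmp)
assert out.shape == (batch, 4, 8, 8)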
@@ -153,7 +153,6 @@ class SD3LoRATests(unittest.TestCase):
         pipe = self.pipeline_class(**components)
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
-        inputs = self.get_dummy_inputs(torch_device)

         pipe.transformer.add_adapter(transformer_config)
         self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer")
@@ -144,9 +144,6 @@ class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
 class PriorTransformerIntegrationTests(unittest.TestCase):
     def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
         torch.manual_seed(seed)
-        batch_size = batch_size
-        embedding_dim = embedding_dim
-        num_embeddings = num_embeddings
         hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
@@ -142,7 +142,7 @@ class StableDiffusionAttendAndExcitePipelineFastTests(
             generator = torch.manual_seed(seed)
         else:
             generator = torch.Generator(device=device).manual_seed(seed)
-        inputs = inputs = {
+        inputs = {
             "prompt": "a cat and a frog",
             "token_indices": [2, 5],
             "generator": generator,
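The seeding branch kept as context here is the usual device-aware generator pattern in these tests: some backends (notably MPS) do not accept a device-bound `torch.Generator`, so the global seed is used instead. A minimal sketch, assuming a `device` string as in the tests:

import torch

device = "cpu"  # illustrative; the tests use the detected torch_device
seed = 0

if str(device).startswith("mps"):
    generator = torch.manual_seed(seed)
else:
    generator = torch.Generator(device=device).manual_seed(seed)

noise = torch.randn(1, 4, generator=generator, device=device)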
@@ -538,7 +538,6 @@ class StableDiffusionMultiAdapterPipelineFastTests(AdapterTests, PipelineTesterMixin, unittest.TestCase):
         # batchify inputs
         batched_inputs = {}
-        batch_size = batch_size
         for name, value in inputs.items():
             if name in self.batch_params:
                 # prompt is string
@@ -574,7 +574,6 @@ class StableDiffusionXLMultiAdapterPipelineFastTests(
         # batchify inputs
         batched_inputs = {}
-        batch_size = batch_size
         for name, value in inputs.items():
             if name in self.batch_params:
                 # prompt is string
@@ -89,9 +89,6 @@ class EDMEulerSchedulerTest(SchedulerCommonTest):
             scheduler_config = self.get_scheduler_config()
             scheduler = scheduler_class(**scheduler_config)

-            sample = self.dummy_sample
-            residual = 0.1 * sample
-
             with tempfile.TemporaryDirectory() as tmpdirname:
                 scheduler.save_config(tmpdirname)
                 new_scheduler = scheduler_class.from_pretrained(tmpdirname)
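The surrounding test exercises a config round-trip: save the scheduler's config to a temporary directory, reload it with `from_pretrained`, and compare. A minimal sketch using the public diffusers API (the concrete scheduler class is chosen for illustration):

import tempfile

from diffusers import EDMEulerScheduler

scheduler = EDMEulerScheduler()

with tempfile.TemporaryDirectory() as tmpdirname:
    # save_config writes scheduler_config.json; from_pretrained reads it back.
    scheduler.save_config(tmpdirname)
    new_scheduler = EDMEulerScheduler.from_pretrained(tmpdirname)

assert new_scheduler.config.sigma_min == scheduler.config.sigma_min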