Unverified commit 54dac3a8, authored by hlky, committed by GitHub

Fix enable_sequential_cpu_offload in CogView4Pipeline (#11195)

* Fix enable_sequential_cpu_offload in CogView4Pipeline

* make fix-copies
parent e5c6027e
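
For context, a minimal repro sketch of the path this commit fixes (the model ID and prompt are illustrative, and a CUDA device is assumed). With sequential CPU offload, accelerate hooks keep each submodule's weights off the accelerator until that submodule's own forward pass runs, so `self.text_encoder.device` does not report the pipeline's execution device, and moving the input IDs to it misplaces them:

import torch
from diffusers import CogView4Pipeline

# Model ID and prompt are illustrative; a CUDA device is assumed.
pipe = CogView4Pipeline.from_pretrained("THUDM/CogView4-6B", torch_dtype=torch.bfloat16)

# Sequential offload wraps each component with accelerate hooks that
# stream its weights to the GPU only for that component's forward pass.
pipe.enable_sequential_cpu_offload()

# Before this commit, prompt encoding moved the token IDs to
# `self.text_encoder.device` (not the execution device under offload);
# with the fix they are moved to the pipeline-level `device` instead.
image = pipe("a photo of an astronaut riding a horse").images[0]
image.save("astronaut.png")
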
@@ -213,9 +213,7 @@ class CogView4Pipeline(DiffusionPipeline, CogView4LoraLoaderMixin):
                 device=text_input_ids.device,
             )
             text_input_ids = torch.cat([pad_ids, text_input_ids], dim=1)
-        prompt_embeds = self.text_encoder(
-            text_input_ids.to(self.text_encoder.device), output_hidden_states=True
-        ).hidden_states[-2]
+        prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=True).hidden_states[-2]
         prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
         return prompt_embeds
@@ -216,9 +216,7 @@ class CogView4ControlPipeline(DiffusionPipeline):
                 device=text_input_ids.device,
            )
             text_input_ids = torch.cat([pad_ids, text_input_ids], dim=1)
-        prompt_embeds = self.text_encoder(
-            text_input_ids.to(self.text_encoder.device), output_hidden_states=True
-        ).hidden_states[-2]
+        prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=True).hidden_states[-2]
         prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
         return prompt_embeds
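
The same change lands in both pipelines because CogView4ControlPipeline's prompt-encoding code is copied from CogView4Pipeline and regenerated via `make fix-copies`. As a standalone sketch of why a hook-managed module's reported device is the wrong target for inputs, here is accelerate's `cpu_offload` (which backs diffusers' sequential offload) applied to a toy module; requires CUDA, and the exact device reported between calls is an implementation detail:

import torch
from accelerate import cpu_offload

encoder = torch.nn.Linear(8, 8)
cpu_offload(encoder, execution_device=torch.device("cuda:0"))

# Between calls the hook leaves the parameters off-device, so the
# module's reported parameter device is not cuda:0 here.
print(next(encoder.parameters()).device)

# Inputs should target the execution device; the hook streams the
# weights onto cuda:0 only for the duration of the forward pass.
out = encoder(torch.randn(1, 8, device="cuda:0"))
print(out.device)  # cuda:0
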