Unverified Commit c0f5346a authored by co63oc's avatar co63oc Committed by GitHub
Browse files

Fix typo: "procecss" → "process" (#6591)



* Fix words

* Fix

---------
Co-authored-by: YiYi Xu <yixu310@gmail.com>
parent 087daee2
...@@ -321,7 +321,7 @@ class StableUnCLIPImg2ImgPipeline(DiffusionPipeline, TextualInversionLoaderMixin ...@@ -321,7 +321,7 @@ class StableUnCLIPImg2ImgPipeline(DiffusionPipeline, TextualInversionLoaderMixin
batch_size = prompt_embeds.shape[0] batch_size = prompt_embeds.shape[0]
if prompt_embeds is None: if prompt_embeds is None:
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer) prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
...@@ -403,7 +403,7 @@ class StableUnCLIPImg2ImgPipeline(DiffusionPipeline, TextualInversionLoaderMixin ...@@ -403,7 +403,7 @@ class StableUnCLIPImg2ImgPipeline(DiffusionPipeline, TextualInversionLoaderMixin
else: else:
uncond_tokens = negative_prompt uncond_tokens = negative_prompt
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
......
...@@ -356,7 +356,7 @@ class StableDiffusionAttendAndExcitePipeline(DiffusionPipeline, TextualInversion ...@@ -356,7 +356,7 @@ class StableDiffusionAttendAndExcitePipeline(DiffusionPipeline, TextualInversion
batch_size = prompt_embeds.shape[0] batch_size = prompt_embeds.shape[0]
if prompt_embeds is None: if prompt_embeds is None:
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer) prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
...@@ -438,7 +438,7 @@ class StableDiffusionAttendAndExcitePipeline(DiffusionPipeline, TextualInversion ...@@ -438,7 +438,7 @@ class StableDiffusionAttendAndExcitePipeline(DiffusionPipeline, TextualInversion
else: else:
uncond_tokens = negative_prompt uncond_tokens = negative_prompt
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
......
...@@ -498,7 +498,7 @@ class StableDiffusionDiffEditPipeline(DiffusionPipeline, TextualInversionLoaderM ...@@ -498,7 +498,7 @@ class StableDiffusionDiffEditPipeline(DiffusionPipeline, TextualInversionLoaderM
batch_size = prompt_embeds.shape[0] batch_size = prompt_embeds.shape[0]
if prompt_embeds is None: if prompt_embeds is None:
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer) prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
...@@ -580,7 +580,7 @@ class StableDiffusionDiffEditPipeline(DiffusionPipeline, TextualInversionLoaderM ...@@ -580,7 +580,7 @@ class StableDiffusionDiffEditPipeline(DiffusionPipeline, TextualInversionLoaderM
else: else:
uncond_tokens = negative_prompt uncond_tokens = negative_prompt
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
......
...@@ -295,7 +295,7 @@ class StableDiffusionGLIGENPipeline(DiffusionPipeline): ...@@ -295,7 +295,7 @@ class StableDiffusionGLIGENPipeline(DiffusionPipeline):
batch_size = prompt_embeds.shape[0] batch_size = prompt_embeds.shape[0]
if prompt_embeds is None: if prompt_embeds is None:
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer) prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
...@@ -377,7 +377,7 @@ class StableDiffusionGLIGENPipeline(DiffusionPipeline): ...@@ -377,7 +377,7 @@ class StableDiffusionGLIGENPipeline(DiffusionPipeline):
else: else:
uncond_tokens = negative_prompt uncond_tokens = negative_prompt
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
......
...@@ -320,7 +320,7 @@ class StableDiffusionGLIGENTextImagePipeline(DiffusionPipeline): ...@@ -320,7 +320,7 @@ class StableDiffusionGLIGENTextImagePipeline(DiffusionPipeline):
batch_size = prompt_embeds.shape[0] batch_size = prompt_embeds.shape[0]
if prompt_embeds is None: if prompt_embeds is None:
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer) prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
...@@ -402,7 +402,7 @@ class StableDiffusionGLIGENTextImagePipeline(DiffusionPipeline): ...@@ -402,7 +402,7 @@ class StableDiffusionGLIGENTextImagePipeline(DiffusionPipeline):
else: else:
uncond_tokens = negative_prompt uncond_tokens = negative_prompt
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
......
...@@ -238,7 +238,7 @@ class StableDiffusionKDiffusionPipeline(DiffusionPipeline, TextualInversionLoade ...@@ -238,7 +238,7 @@ class StableDiffusionKDiffusionPipeline(DiffusionPipeline, TextualInversionLoade
batch_size = prompt_embeds.shape[0] batch_size = prompt_embeds.shape[0]
if prompt_embeds is None: if prompt_embeds is None:
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer) prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
...@@ -320,7 +320,7 @@ class StableDiffusionKDiffusionPipeline(DiffusionPipeline, TextualInversionLoade ...@@ -320,7 +320,7 @@ class StableDiffusionKDiffusionPipeline(DiffusionPipeline, TextualInversionLoade
else: else:
uncond_tokens = negative_prompt uncond_tokens = negative_prompt
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
......
...@@ -325,7 +325,7 @@ class StableDiffusionXLKDiffusionPipeline( ...@@ -325,7 +325,7 @@ class StableDiffusionXLKDiffusionPipeline(
prompt_2 = prompt_2 or prompt prompt_2 = prompt_2 or prompt
prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
prompt_embeds_list = [] prompt_embeds_list = []
prompts = [prompt, prompt_2] prompts = [prompt, prompt_2]
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
......
...@@ -292,7 +292,7 @@ class StableDiffusionLDM3DPipeline( ...@@ -292,7 +292,7 @@ class StableDiffusionLDM3DPipeline(
batch_size = prompt_embeds.shape[0] batch_size = prompt_embeds.shape[0]
if prompt_embeds is None: if prompt_embeds is None:
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer) prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
...@@ -374,7 +374,7 @@ class StableDiffusionLDM3DPipeline( ...@@ -374,7 +374,7 @@ class StableDiffusionLDM3DPipeline(
else: else:
uncond_tokens = negative_prompt uncond_tokens = negative_prompt
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
......
...@@ -250,7 +250,7 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM ...@@ -250,7 +250,7 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
batch_size = prompt_embeds.shape[0] batch_size = prompt_embeds.shape[0]
if prompt_embeds is None: if prompt_embeds is None:
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer) prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
...@@ -332,7 +332,7 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM ...@@ -332,7 +332,7 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
else: else:
uncond_tokens = negative_prompt uncond_tokens = negative_prompt
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
......
...@@ -271,7 +271,7 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin, ...@@ -271,7 +271,7 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin,
batch_size = prompt_embeds.shape[0] batch_size = prompt_embeds.shape[0]
if prompt_embeds is None: if prompt_embeds is None:
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer) prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
...@@ -353,7 +353,7 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin, ...@@ -353,7 +353,7 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin,
else: else:
uncond_tokens = negative_prompt uncond_tokens = negative_prompt
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
......
...@@ -385,7 +385,7 @@ class StableDiffusionXLPipeline( ...@@ -385,7 +385,7 @@ class StableDiffusionXLPipeline(
prompt_2 = prompt_2 or prompt prompt_2 = prompt_2 or prompt
prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
prompt_embeds_list = [] prompt_embeds_list = []
prompts = [prompt, prompt_2] prompts = [prompt, prompt_2]
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
......
...@@ -407,7 +407,7 @@ class StableDiffusionXLImg2ImgPipeline( ...@@ -407,7 +407,7 @@ class StableDiffusionXLImg2ImgPipeline(
prompt_2 = prompt_2 or prompt prompt_2 = prompt_2 or prompt
prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
prompt_embeds_list = [] prompt_embeds_list = []
prompts = [prompt, prompt_2] prompts = [prompt, prompt_2]
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
......
...@@ -618,7 +618,7 @@ class StableDiffusionXLInpaintPipeline( ...@@ -618,7 +618,7 @@ class StableDiffusionXLInpaintPipeline(
prompt_2 = prompt_2 or prompt prompt_2 = prompt_2 or prompt
prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
prompt_embeds_list = [] prompt_embeds_list = []
prompts = [prompt, prompt_2] prompts = [prompt, prompt_2]
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
......
...@@ -326,7 +326,7 @@ class StableDiffusionXLInstructPix2PixPipeline( ...@@ -326,7 +326,7 @@ class StableDiffusionXLInstructPix2PixPipeline(
if prompt_embeds is None: if prompt_embeds is None:
prompt_2 = prompt_2 or prompt prompt_2 = prompt_2 or prompt
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
prompt_embeds_list = [] prompt_embeds_list = []
prompts = [prompt, prompt_2] prompts = [prompt, prompt_2]
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
......
...@@ -358,7 +358,7 @@ class StableDiffusionAdapterPipeline(DiffusionPipeline): ...@@ -358,7 +358,7 @@ class StableDiffusionAdapterPipeline(DiffusionPipeline):
batch_size = prompt_embeds.shape[0] batch_size = prompt_embeds.shape[0]
if prompt_embeds is None: if prompt_embeds is None:
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer) prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
...@@ -440,7 +440,7 @@ class StableDiffusionAdapterPipeline(DiffusionPipeline): ...@@ -440,7 +440,7 @@ class StableDiffusionAdapterPipeline(DiffusionPipeline):
else: else:
uncond_tokens = negative_prompt uncond_tokens = negative_prompt
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
......
...@@ -399,7 +399,7 @@ class StableDiffusionXLAdapterPipeline( ...@@ -399,7 +399,7 @@ class StableDiffusionXLAdapterPipeline(
prompt_2 = prompt_2 or prompt prompt_2 = prompt_2 or prompt
prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
prompt_embeds_list = [] prompt_embeds_list = []
prompts = [prompt, prompt_2] prompts = [prompt, prompt_2]
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
......
...@@ -256,7 +256,7 @@ class TextToVideoSDPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lora ...@@ -256,7 +256,7 @@ class TextToVideoSDPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lora
batch_size = prompt_embeds.shape[0] batch_size = prompt_embeds.shape[0]
if prompt_embeds is None: if prompt_embeds is None:
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer) prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
...@@ -338,7 +338,7 @@ class TextToVideoSDPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lora ...@@ -338,7 +338,7 @@ class TextToVideoSDPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lora
else: else:
uncond_tokens = negative_prompt uncond_tokens = negative_prompt
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
......
...@@ -332,7 +332,7 @@ class VideoToVideoSDPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lor ...@@ -332,7 +332,7 @@ class VideoToVideoSDPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lor
batch_size = prompt_embeds.shape[0] batch_size = prompt_embeds.shape[0]
if prompt_embeds is None: if prompt_embeds is None:
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer) prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
...@@ -414,7 +414,7 @@ class VideoToVideoSDPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lor ...@@ -414,7 +414,7 @@ class VideoToVideoSDPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lor
else: else:
uncond_tokens = negative_prompt uncond_tokens = negative_prompt
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
......
...@@ -838,7 +838,7 @@ class TextToVideoZeroPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo ...@@ -838,7 +838,7 @@ class TextToVideoZeroPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
batch_size = prompt_embeds.shape[0] batch_size = prompt_embeds.shape[0]
if prompt_embeds is None: if prompt_embeds is None:
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer) prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
...@@ -920,7 +920,7 @@ class TextToVideoZeroPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo ...@@ -920,7 +920,7 @@ class TextToVideoZeroPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
else: else:
uncond_tokens = negative_prompt uncond_tokens = negative_prompt
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin): if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
......
...@@ -685,7 +685,7 @@ class TextToVideoZeroSDXLPipeline( ...@@ -685,7 +685,7 @@ class TextToVideoZeroSDXLPipeline(
prompt_2 = prompt_2 or prompt prompt_2 = prompt_2 or prompt
prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
# textual inversion: procecss multi-vector tokens if necessary # textual inversion: process multi-vector tokens if necessary
prompt_embeds_list = [] prompt_embeds_list = []
prompts = [prompt, prompt_2] prompts = [prompt, prompt_2]
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment