import os
from typing import List, Tuple, Callable, Optional, Union, Dict

import torch
import torch.distributed
from diffusers import LattePipeline
from diffusers.pipelines.latte.pipeline_latte import (
    LattePipelineOutput,
    retrieve_timesteps,
)
from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
from diffusers.utils import deprecate

from xfuser.config import EngineConfig
from xfuser.core.distributed import (
    get_cfg_group,
    get_classifier_free_guidance_rank,
    get_classifier_free_guidance_world_size,
    get_data_parallel_rank,
    get_data_parallel_world_size,
    get_pipeline_parallel_rank,
    get_pipeline_parallel_world_size,
    get_pp_group,
    get_runtime_state,
    get_sequence_parallel_world_size,
    get_sp_group,
    get_world_group,
    initialize_runtime_state,
    is_dp_last_group,
    is_pipeline_first_stage,
)
from xfuser.model_executor.pipelines import xFuserPipelineBaseWrapper
from .register import xFuserPipelineWrapperRegister


@xFuserPipelineWrapperRegister.register(LattePipeline)
class xFuserLattePipeline(xFuserPipelineBaseWrapper):

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
        engine_config: EngineConfig,
        **kwargs,
    ):
        pipeline = LattePipeline.from_pretrained(
            pretrained_model_name_or_path, **kwargs
        )
        return cls(pipeline, engine_config)
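
    # Construction sketch (illustrative, not part of the public API): the
    # `engine_config` normally comes from xFuser's CLI helpers rather than
    # being built by hand, e.g.
    #     engine_config, input_config = xFuserArgs.from_cli_args(args).create_config()
    # after which the wrapper is created just like the underlying diffusers
    # pipeline (the model id below is only an example):
    #     pipe = xFuserLattePipeline.from_pretrained(
    #         "maxin-cn/Latte-1",
    #         engine_config=engine_config,
    #         torch_dtype=torch.float16,
    #     )
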
    @torch.no_grad()
    @xFuserPipelineBaseWrapper.enable_data_parallel
    @xFuserPipelineBaseWrapper.check_to_use_naive_forward
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        negative_prompt: str = "",
        num_inference_steps: int = 50,
        timesteps: Optional[List[int]] = None,
        guidance_scale: float = 7.5,
        num_images_per_prompt: int = 1,
        video_length: int = 16,
        height: int = 512,
        width: int = 512,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: str = "pil",
        return_dict: bool = True,
        callback_on_step_end: Optional[
            Union[
                Callable[[int, int, Dict], None],
                PipelineCallback,
                MultiPipelineCallbacks,
            ]
        ] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        clean_caption: bool = True,
        mask_feature: bool = True,
        enable_temporal_attentions: bool = True,
        decode_chunk_size: Optional[int] = None,
        num_pipeline_warmup_steps: Optional[int] = 3,
        **kwargs,
    ) -> Union[LattePipelineOutput, Tuple]:
        """
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the video generation. If not defined, one has to pass
                `prompt_embeds` instead.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the video generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if
                `guidance_scale` is less than `1`).
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality video at
                the expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process. If not defined, equally spaced
                `num_inference_steps` timesteps are used. Must be in descending order.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2 of the
                [Imagen paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting
                `guidance_scale > 1`. A higher guidance scale encourages the model to generate videos that are
                closely linked to the text `prompt`, usually at the expense of lower video quality.
            video_length (`int`, *optional*, defaults to 16):
                The number of video frames to generate. Defaults to 16 frames, which at 8 frames per second
                amounts to 2 seconds of video.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of videos to generate per prompt.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated video. Falls back to
                `self.transformer.config.sample_size * self.vae_scale_factor` when unset.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated video. Falls back to
                `self.transformer.config.sample_size * self.vae_scale_factor` when unset.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the [DDIM paper](https://arxiv.org/abs/2010.02502). Only
                applies to [`schedulers.DDIMScheduler`]; ignored for other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, text embeddings will be generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. For Latte this negative prompt should be `""`. If not
                provided, `negative_prompt_embeds` will be generated from the `negative_prompt` input argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated video. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.latte.pipeline_latte.LattePipelineOutput`] instead of a
                plain tuple.
            callback_on_step_end (`Callable[[int, int, Dict], None]`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
                A callback function or a list of callback functions to be called at the end of each denoising step.
            callback_on_step_end_tensor_inputs (`List[str]`, *optional*):
                A list of tensor inputs that should be passed to the callback function. If not defined, all tensor
                inputs will be passed.
            clean_caption (`bool`, *optional*, defaults to `True`):
                Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and
                `ftfy` to be installed. If the dependencies are not installed, the embeddings will be created from
                the raw prompt.
            mask_feature (`bool`, *optional*, defaults to `True`):
                If set to `True`, the text embeddings will be masked.
            enable_temporal_attentions (`bool`, *optional*, defaults to `True`):
                Whether to enable temporal attentions.
            decode_chunk_size (`int`, *optional*):
                The number of frames to decode at a time. A higher chunk size leads to better temporal
                consistency at the expense of more memory usage. By default, the decoder decodes all frames at
                once for maximal quality. For lower memory usage, reduce `decode_chunk_size`.
            num_pipeline_warmup_steps (`int`, *optional*, defaults to 3):
                The number of denoising steps used to warm up the pipeline-parallel schedule before steady-state
                execution.

        Examples:
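            A minimal sketch: it assumes `engine_config` was created from xFuser's CLI helpers and that the
            script runs under `torchrun`; the model id and prompt are illustrative.

            ```py
            >>> import torch
            >>> pipe = xFuserLattePipeline.from_pretrained(
            ...     "maxin-cn/Latte-1", engine_config=engine_config, torch_dtype=torch.float16
            ... ).to("cuda")
            >>> output = pipe(prompt="a dog running on the beach", video_length=16)
            >>> if output.frames is not None:  # only the last data-parallel group holds frames
            ...     frames = output.frames
            ```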

        Returns:
            [`~pipelines.latte.pipeline_latte.LattePipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.latte.pipeline_latte.LattePipelineOutput`] is returned,
                otherwise a `tuple` is returned where the first element is a list with the generated frames.
        """
        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        # 0. Default frame count and decode chunk size
        num_frames = video_length
        decode_chunk_size = (
            decode_chunk_size if decode_chunk_size is not None else num_frames
        )

        # 1. Check inputs. Raise error if not correct
        height = height or self.transformer.config.sample_size * self.vae_scale_factor
        width = width or self.transformer.config.sample_size * self.vae_scale_factor
        self.check_inputs(
            prompt,
            height,
            width,
            negative_prompt,
            callback_on_step_end_tensor_inputs,
            prompt_embeds,
            negative_prompt_embeds,
        )
        self._guidance_scale = guidance_scale
        self._interrupt = False

        # 2. Determine the batch size
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # Here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier-free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # Set runtime state input parameters
        get_runtime_state().set_video_input_parameters(
            height=height,
            width=width,
            num_frames=num_frames,
            batch_size=batch_size,
            num_inference_steps=num_inference_steps,
        )

        # 3. Encode input prompt
        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
            prompt,
            do_classifier_free_guidance,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            device=device,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            clean_caption=clean_caption,
            mask_feature=mask_feature,
        )
        if do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)

        # 4. Prepare timesteps
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler, num_inference_steps, device, timesteps
        )
        self._num_timesteps = len(timesteps)

        # 5. Prepare latents
        latent_channels = self.transformer.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            latent_channels,
            num_frames,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7. Denoising loop
        num_warmup_steps = max(
            len(timesteps) - num_inference_steps * self.scheduler.order, 0
        )

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            latents = self._init_video_sync_pipeline(latents)
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                latent_model_input = (
                    torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                )
                latent_model_input = self.scheduler.scale_model_input(
                    latent_model_input, t
                )

                current_timestep = t
                if not torch.is_tensor(current_timestep):
                    # TODO: this requires a sync between CPU and GPU, so try to pass
                    # timesteps as tensors if you can.
                    # This would be a good case for the `match` statement (Python 3.10+).
                    is_mps = latent_model_input.device.type == "mps"
                    if isinstance(current_timestep, float):
                        dtype = torch.float32 if is_mps else torch.float64
                    else:
                        dtype = torch.int32 if is_mps else torch.int64
                    current_timestep = torch.tensor(
                        [current_timestep],
                        dtype=dtype,
                        device=latent_model_input.device,
                    )
                elif len(current_timestep.shape) == 0:
                    current_timestep = current_timestep[None].to(
                        latent_model_input.device
                    )
                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                current_timestep = current_timestep.expand(latent_model_input.shape[0])

                noise_pred = self.transformer(
                    latent_model_input,
                    encoder_hidden_states=prompt_embeds,
                    timestep=current_timestep,
                    enable_temporal_attentions=enable_temporal_attentions,
                    return_dict=False,
                )[0]

                # perform classifier-free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (
                        noise_pred_text - noise_pred_uncond
                    )

                # The transformer predicts both the noise and a learned variance along the
                # channel dimension; keep only the noise half unless the scheduler consumes
                # the learned variance.
                if not (
                    hasattr(self.scheduler.config, "variance_type")
                    and self.scheduler.config.variance_type in ["learned", "learned_range"]
                ):
                    noise_pred = noise_pred.chunk(2, dim=1)[0]

                # compute previous video: x_t -> x_t-1
                latents = self.scheduler.step(
                    noise_pred, t, latents, **extra_step_kwargs, return_dict=False
                )[0]

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop(
                        "negative_prompt_embeds", negative_prompt_embeds
                    )

                if i == len(timesteps) - 1 or (
                    (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
                ):
                    progress_bar.update()
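
        # Note on the gather below: with sequence parallelism each rank holds
        # only its shard of the latent tokens (one shard per pipeline patch).
        # The all-gather collects every rank's shards, and the reordering
        # stitches them back together along the height/token dimension
        # (dim=-2) so every rank ends up with the full latent tensor before
        # decoding.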
        if get_sequence_parallel_world_size() > 1:
            sp_degree = get_sequence_parallel_world_size()
            sp_latents_list = get_sp_group().all_gather(latents, separate_tensors=True)
            latents_list = []
            for pp_patch_idx in range(get_runtime_state().num_pipeline_patch):
                latents_list += [
                    sp_latents_list[sp_patch_idx][
                        :,
                        :,
                        :,
                        get_runtime_state()
                        .pp_patches_start_idx_local[pp_patch_idx] : get_runtime_state()
                        .pp_patches_start_idx_local[pp_patch_idx + 1],
                        :,
                    ]
                    for sp_patch_idx in range(sp_degree)
                ]
            latents = torch.cat(latents_list, dim=-2)

        if is_dp_last_group():
            if output_type not in ("latents", "latent"):
                video = self.decode_latents(
                    latents, num_frames, decode_chunk_size=decode_chunk_size
                )
                video = self.video_processor.postprocess_video(
                    video=video, output_type=output_type
                )
            else:
                video = latents
        else:
            # Ranks outside the last data-parallel group hold no final frames.
            video = None

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (video,)

        return LattePipelineOutput(frames=video)

    @property
    def interrupt(self):
        return self._interrupt
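

# ---------------------------------------------------------------------------
# Minimal launch sketch (illustrative, not part of the wrapper). It assumes
# the CLI helpers `FlexibleArgumentParser` and `xFuserArgs` exposed by xfuser,
# as used in the project's example scripts; the model id and prompt are
# placeholders. Run under torchrun so the distributed groups are initialized,
# e.g.: torchrun --nproc_per_node=2 this_script.py --model maxin-cn/Latte-1
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from xfuser import xFuserArgs
    from xfuser.config import FlexibleArgumentParser

    parser = FlexibleArgumentParser(description="xFuser Latte example")
    args = xFuserArgs.add_cli_args(parser).parse_args()
    engine_config, input_config = xFuserArgs.from_cli_args(args).create_config()

    pipe = xFuserLattePipeline.from_pretrained(
        pretrained_model_name_or_path=args.model,
        engine_config=engine_config,
        torch_dtype=torch.float16,
    ).to(f"cuda:{get_world_group().local_rank}")

    output = pipe(
        prompt="a dog wagging its tail on a sunny beach",
        num_inference_steps=50,
        video_length=16,
    )
    # Only the last data-parallel group carries decoded frames.
    if output is not None and output.frames is not None:
        print(f"rank {get_world_group().rank}: generated video frames")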