Commit c0b36010 authored by PengGao, committed by GitHub
Browse files

style: remove unused import (#184)

parent f6d4fc85
import math
import torch
import torch.cuda.amp as amp
from loguru import logger
from lightx2v.models.networks.wan.infer.post_infer import WanPostInfer
......
import math
import torch
from loguru import logger
from lightx2v.models.networks.wan.infer.pre_infer import WanPreInfer
......
......@@ -5,7 +5,7 @@ import torch
from lightx2v.utils.envs import *
from ..transformer_infer import WanTransformerInfer
from ..utils import apply_rotary_emb, compute_freqs, compute_freqs_causvid
from ..utils import apply_rotary_emb, compute_freqs_causvid
class WanTransformerInferCausVid(WanTransformerInfer):
......
import torch
from diffusers.models.embeddings import TimestepEmbedding
from lightx2v.utils.envs import *
......
import torch
import torch.distributed as dist
from loguru import logger
from lightx2v.utils.envs import *
......
import imageio
import numpy as np
from diffusers.utils import export_to_video
from lightx2v.models.input_encoders.hf.t5_v1_1_xxl.model import T5EncoderModel_v1_1_xxl
from lightx2v.models.networks.cogvideox.model import CogvideoxModel
from lightx2v.models.runners.default_runner import DefaultRunner
from lightx2v.models.schedulers.cogvideox.scheduler import CogvideoxXDPMScheduler
from lightx2v.models.video_encoders.hf.cogvideox.model import CogvideoxVAE
from lightx2v.utils.profiler import ProfilingContext
from lightx2v.utils.registry_factory import RUNNER_REGISTER
......
......@@ -3,7 +3,6 @@ import os
import numpy as np
import torch
import torchvision
from PIL import Image
from lightx2v.models.input_encoders.hf.clip.model import TextEncoderHFClipModel
from lightx2v.models.input_encoders.hf.llama.model import TextEncoderHFLlamaModel
......@@ -13,7 +12,6 @@ from lightx2v.models.runners.default_runner import DefaultRunner
from lightx2v.models.schedulers.hunyuan.feature_caching.scheduler import HunyuanSchedulerAdaCaching, HunyuanSchedulerCustomCaching, HunyuanSchedulerTaylorCaching, HunyuanSchedulerTeaCaching
from lightx2v.models.schedulers.hunyuan.scheduler import HunyuanScheduler
from lightx2v.models.video_encoders.hf.autoencoder_kl_causal_3d.model import VideoEncoderKLCausal3DModel
from lightx2v.utils.profiler import ProfilingContext
from lightx2v.utils.registry_factory import RUNNER_REGISTER
from lightx2v.utils.utils import save_videos_grid
......
import gc
import os
import numpy as np
import torch
import torch.distributed as dist
import torchvision.transforms.functional as TF
from PIL import Image
from loguru import logger
from lightx2v.models.input_encoders.hf.t5.model import T5EncoderModel
from lightx2v.models.input_encoders.hf.xlm_roberta.model import CLIPModel
from lightx2v.models.networks.wan.causvid_model import WanCausVidModel
from lightx2v.models.networks.wan.lora_adapter import WanLoraWrapper
from lightx2v.models.networks.wan.model import WanModel
from lightx2v.models.runners.default_runner import DefaultRunner
from lightx2v.models.runners.wan.wan_runner import WanRunner
from lightx2v.models.schedulers.wan.scheduler import WanScheduler
from lightx2v.models.schedulers.wan.step_distill.scheduler import WanStepDistillScheduler
from lightx2v.models.video_encoders.hf.wan.vae import WanVAE
from lightx2v.utils.profiler import ProfilingContext, ProfilingContext4Debug
from lightx2v.utils.profiler import ProfilingContext4Debug
from lightx2v.utils.registry_factory import RUNNER_REGISTER
......
import os
import numpy as np
import torch
import torchvision.transforms.functional as TF
from PIL import Image
from loguru import logger
from lightx2v.models.input_encoders.hf.t5.model import T5EncoderModel
from lightx2v.models.input_encoders.hf.xlm_roberta.model import CLIPModel
from lightx2v.models.networks.wan.distill_model import WanDistillModel
from lightx2v.models.networks.wan.lora_adapter import WanLoraWrapper
from lightx2v.models.networks.wan.model import WanModel
from lightx2v.models.runners.wan.wan_runner import WanRunner
from lightx2v.models.schedulers.wan.step_distill.scheduler import WanStepDistillScheduler
from lightx2v.models.video_encoders.hf.wan.vae import WanVAE
from lightx2v.models.video_encoders.hf.wan.vae_tiny import WanVAE_tiny
from lightx2v.utils.profiler import ProfilingContext
from lightx2v.utils.registry_factory import RUNNER_REGISTER
from lightx2v.utils.utils import cache_video
@RUNNER_REGISTER("wan2.1_distill")
......
......@@ -24,7 +24,6 @@ from lightx2v.models.schedulers.wan.scheduler import WanScheduler
from lightx2v.models.video_encoders.hf.wan.vae import WanVAE
from lightx2v.models.video_encoders.hf.wan.vae_2_2 import Wan2_2_VAE
from lightx2v.models.video_encoders.hf.wan.vae_tiny import WanVAE_tiny
from lightx2v.utils.profiler import ProfilingContext
from lightx2v.utils.registry_factory import RUNNER_REGISTER
from lightx2v.utils.utils import *
from lightx2v.utils.utils import best_output_size, cache_video
......
......@@ -3,7 +3,6 @@ import os
import numpy as np
import torch
import torch.distributed as dist
import torchvision.transforms.functional as TF
from PIL import Image
from loguru import logger
......
import torch
from ..scheduler import HunyuanScheduler
......
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
......
import gc
import math
import os
from typing import List, Optional, Tuple, Union
import warnings
import numpy as np
import torch
from diffusers import (
FlowMatchEulerDiscreteScheduler as FlowMatchEulerDiscreteSchedulerBase, # pyright: ignore
)
from diffusers.configuration_utils import register_to_config
from loguru import logger
from torch import Tensor
from lightx2v.models.schedulers.scheduler import BaseScheduler
......
import torch
from lightx2v.models.schedulers.wan.scheduler import WanScheduler
class WanScheduler4ChangingResolutionInterface:
def __new__(cls, father_scheduler, config):
......
import gc
import math
from typing import List, Optional, Union
import numpy as np
......
import math
from typing import List, Optional, Tuple, Union
from typing import Union
import numpy as np
import torch
from lightx2v.models.schedulers.wan.scheduler import WanScheduler
......
import torch
import torch.nn as nn
# Module-level compute device: prefer the GPU when CUDA is available,
# otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# NOTE(review): appears to be a lazily-populated cache of backward-warp
# coordinate grids (empty at import time) — confirm against the functions
# in this module that read/write it, which are outside this view.
backwarp_tenGrid = {}
......
......@@ -2,7 +2,6 @@ import os
from typing import List, Optional, Tuple
import torch
from loguru import logger
from torch.nn import functional as F
from lightx2v.utils.profiler import ProfilingContext
......
import gc
import os
from collections import namedtuple
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment