Commit 75c03057 authored by helloyongyang

Remove the t2v / i2v distinction and unify the interface

parent efb4d161
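
In practice, code that previously imported from the task-specific lightx2v.text2v / lightx2v.image2v namespaces now imports from the unified lightx2v.models package. A minimal before/after sketch, using only import paths that appear in the hunks below:

# Before: separate namespaces per task
# from lightx2v.text2v.models.text_encoders.hf.t5.model import T5EncoderModel
# from lightx2v.image2v.models.wan.model import CLIPModel

# After: one unified namespace for all input encoders
from lightx2v.models.input_encoders.hf.t5.model import T5EncoderModel
from lightx2v.models.input_encoders.hf.xlm_roberta.model import CLIPModel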
@@ -5,8 +5,8 @@ import argparse
 import torch
 from loguru import logger
-from lightx2v.text2v.models.video_encoders.hf.autoencoder_kl_causal_3d.autoencoder_kl_causal_3d import AutoencoderKLCausal3D
-from lightx2v.text2v.models.video_encoders.trt.autoencoder_kl_causal_3d.trt_vae_infer import HyVaeTrtModelInfer
+from lightx2v.models.video_encoders.hf.autoencoder_kl_causal_3d.autoencoder_kl_causal_3d import AutoencoderKLCausal3D
+from lightx2v.models.video_encoders.trt.autoencoder_kl_causal_3d.trt_vae_infer import HyVaeTrtModelInfer

 def parse_args():
...
@@ -10,25 +10,25 @@ import torchvision.transforms.functional as TF
 import numpy as np
 from contextlib import contextmanager
 from PIL import Image
-from lightx2v.text2v.models.text_encoders.hf.llama.model import TextEncoderHFLlamaModel
-from lightx2v.text2v.models.text_encoders.hf.clip.model import TextEncoderHFClipModel
-from lightx2v.text2v.models.text_encoders.hf.t5.model import T5EncoderModel
-from lightx2v.text2v.models.text_encoders.hf.llava.model import TextEncoderHFLlavaModel
+from lightx2v.models.input_encoders.hf.llama.model import TextEncoderHFLlamaModel
+from lightx2v.models.input_encoders.hf.clip.model import TextEncoderHFClipModel
+from lightx2v.models.input_encoders.hf.t5.model import T5EncoderModel
+from lightx2v.models.input_encoders.hf.llava.model import TextEncoderHFLlavaModel
+from lightx2v.models.input_encoders.hf.xlm_roberta.model import CLIPModel

-from lightx2v.text2v.models.schedulers.hunyuan.scheduler import HunyuanScheduler
-from lightx2v.text2v.models.schedulers.hunyuan.feature_caching.scheduler import HunyuanSchedulerTaylorCaching, HunyuanSchedulerTeaCaching
-from lightx2v.text2v.models.schedulers.wan.scheduler import WanScheduler
-from lightx2v.text2v.models.schedulers.wan.feature_caching.scheduler import WanSchedulerTeaCaching
+from lightx2v.models.schedulers.hunyuan.scheduler import HunyuanScheduler
+from lightx2v.models.schedulers.hunyuan.feature_caching.scheduler import HunyuanSchedulerTaylorCaching, HunyuanSchedulerTeaCaching
+from lightx2v.models.schedulers.wan.scheduler import WanScheduler
+from lightx2v.models.schedulers.wan.feature_caching.scheduler import WanSchedulerTeaCaching

-from lightx2v.text2v.models.networks.hunyuan.model import HunyuanModel
-from lightx2v.text2v.models.networks.wan.model import WanModel
-from lightx2v.text2v.models.networks.wan.lora_adapter import WanLoraWrapper
+from lightx2v.models.networks.hunyuan.model import HunyuanModel
+from lightx2v.models.networks.wan.model import WanModel
+from lightx2v.models.networks.wan.lora_adapter import WanLoraWrapper

-from lightx2v.text2v.models.video_encoders.hf.autoencoder_kl_causal_3d.model import VideoEncoderKLCausal3DModel
-from lightx2v.text2v.models.video_encoders.hf.wan.vae import WanVAE
+from lightx2v.models.video_encoders.hf.autoencoder_kl_causal_3d.model import VideoEncoderKLCausal3DModel
+from lightx2v.models.video_encoders.hf.wan.vae import WanVAE

 from lightx2v.utils.utils import save_videos_grid, seed_all, cache_video
 from lightx2v.common.ops import *
-from lightx2v.image2v.models.wan.model import CLIPModel
 from lightx2v.utils.set_config import set_config
...
@@ -9,7 +9,7 @@ import torch.nn.functional as F
 import torchvision.transforms as T
 from lightx2v.attentions import attention
-from lightx2v.text2v.models.text_encoders.hf.t5.tokenizer import HuggingfaceTokenizer
+from lightx2v.models.input_encoders.hf.t5.tokenizer import HuggingfaceTokenizer
 from .xlm_roberta import XLMRoberta
...