wan_distill_runner.py 10 KB
Newer Older
1
2
import os

PengGao's avatar
PengGao committed
3
4
from loguru import logger

5
from lightx2v.models.networks.wan.distill_model import WanDistillModel
6
from lightx2v.models.networks.wan.lora_adapter import WanLoraWrapper
helloyongyang's avatar
helloyongyang committed
7
from lightx2v.models.networks.wan.model import WanModel
8
9
from lightx2v.models.runners.wan.wan_runner import MultiModelStruct, WanRunner
from lightx2v.models.schedulers.wan.step_distill.scheduler import Wan22StepDistillScheduler, WanStepDistillScheduler
10
from lightx2v.utils.profiler import *
PengGao's avatar
PengGao committed
11
from lightx2v.utils.registry_factory import RUNNER_REGISTER
12
13
14
15
16
17
18


@RUNNER_REGISTER("wan2.1_distill")
class WanDistillRunner(WanRunner):
    """Runner for the step-distilled Wan 2.1 model.

    Behaves like ``WanRunner`` except that the transformer is loaded as a
    ``WanDistillModel`` — unless LoRA configs are supplied, in which case the
    base ``WanModel`` is loaded and the LoRA weights are merged in — and the
    scheduler is the step-distillation scheduler.
    """

    def __init__(self, config):
        super().__init__(config)

    def load_transformer(self):
        """Build and return the transformer model.

        When ``lora_configs`` is present and non-empty in the config, load the
        base ``WanModel`` and apply each configured LoRA at its requested
        strength; otherwise load the distilled checkpoint directly.

        Returns:
            The initialized transformer model.
        """
        # .get() already covers both "key missing" and "empty list"; the
        # original double check (get() and [] indexing) was redundant.
        if self.config.get("lora_configs"):
            model = WanModel(
                self.config["model_path"],
                self.config,
                self.init_device,
            )
            lora_wrapper = WanLoraWrapper(model)
            for lora_config in self.config["lora_configs"]:
                lora_path = lora_config["path"]
                strength = lora_config.get("strength", 1.0)
                lora_name = lora_wrapper.load_lora(lora_path)
                lora_wrapper.apply_lora(lora_name, strength)
                logger.info(f"Loaded LoRA: {lora_name} with strength: {strength}")
        else:
            model = WanDistillModel(self.config["model_path"], self.config, self.init_device)
        return model

    def init_scheduler(self):
        """Create the step-distillation scheduler.

        Raises:
            NotImplementedError: if ``feature_caching`` is anything other than
                "NoCaching" — feature caching is not supported for distill runs.
        """
        if self.config["feature_caching"] == "NoCaching":
            self.scheduler = WanStepDistillScheduler(self.config)
        else:
            raise NotImplementedError(f"Unsupported feature_caching type: {self.config['feature_caching']}")
42
43
44
45
46
47
48
49
50
51
52


class MultiDistillModelStruct(MultiModelStruct):
    """Pair of distilled Wan 2.2 MoE experts (high-noise / low-noise).

    Routes each denoising step to one of two models based on the scheduler's
    step index: steps before ``boundary_step_index`` use the high-noise expert
    (index 0), later steps use the low-noise expert (index 1).  When
    model-granularity CPU offload is enabled, the experts are ping-ponged
    between CPU and GPU as the active index changes.
    """

    def __init__(self, model_list, config, boundary_step_index=2):
        # [high_noise_model, low_noise_model]; entries may be None when lazy
        # loading is enabled (they are materialized on first use in infer()).
        self.model = model_list
        assert len(self.model) == 2, "MultiModelStruct only supports 2 models now."
        self.config = config
        self.boundary_step_index = boundary_step_index
        # -1 means no expert has been activated yet.
        self.cur_model_index = -1
        logger.info(f"boundary step index: {self.boundary_step_index}")

    # NOTE: label typo "Swtich" fixed to "Switch".
    @ProfilingContext4DebugL2("Switch models in infer_main costs")
    def get_current_model_index(self):
        """Select the expert for the current step and update ``cur_model_index``."""
        if self.scheduler.step_index < self.boundary_step_index:
            logger.info(f"using - HIGH - noise model at step_index {self.scheduler.step_index + 1}")
            self._activate(0)
        else:
            logger.info(f"using - LOW - noise model at step_index {self.scheduler.step_index + 1}")
            self._activate(1)

    def _activate(self, target_index):
        """Make ``target_index`` the active expert, handling CPU offload.

        With model-granularity CPU offload, the newly active expert is moved
        to GPU; when switching away from the other expert, that one is pushed
        back to CPU first.  Both branches of the original code were identical
        up to the index, so they are unified here.
        """
        if self.config.get("cpu_offload", False) and self.config.get("offload_granularity", "block") == "model":
            other_index = 1 - target_index
            if self.cur_model_index == -1:
                # First activation: nothing to offload yet.
                self.to_cuda(model_index=target_index)
            elif self.cur_model_index == other_index:
                # Switching experts: evict the old one before loading the new.
                self.offload_cpu(model_index=other_index)
                self.to_cuda(model_index=target_index)
        self.cur_model_index = target_index

    def infer(self, inputs):
        """Run one inference step on the expert selected for the current step.

        With lazy loading (``lazy_load`` / ``unload_modules``), a missing
        expert is constructed on first use from the checkpoint paths attached
        to this struct by the runner.
        """
        self.get_current_model_index()
        if self.model[self.cur_model_index] is None:
            self.model[self.cur_model_index] = self._build_lazy_model(self.cur_model_index)
        self.model[self.cur_model_index].infer(inputs)

    def _build_lazy_model(self, index):
        """Construct one expert from the runner-provided paths (lazy load).

        Relies on ``high_noise_model_path`` / ``low_noise_model_path`` and
        ``init_device`` having been set on this struct by the runner.
        """
        if index == 0:
            path, model_type = self.high_noise_model_path, "wan2.2_moe_high_noise"
        else:
            path, model_type = self.low_noise_model_path, "wan2.2_moe_low_noise"
        model = WanDistillModel(
            path,
            self.config,
            self.init_device,
            model_type=model_type,
        )
        model.set_scheduler(self.scheduler)
        return model

105
106
107
108
109

@RUNNER_REGISTER("wan2.2_moe_distill")
class Wan22MoeDistillRunner(WanDistillRunner):
    """Runner for the distilled Wan 2.2 MoE model (high/low noise experts)."""

    def __init__(self, config):
        super().__init__(config)

        # Resolve the high-noise expert checkpoint.
        if self.config.get("dit_quantized", False) and self.config.get("high_noise_quantized_ckpt", None):
            self.high_noise_model_path = self.config["high_noise_quantized_ckpt"]
        elif self.config.get("high_noise_original_ckpt", None):
            # NOTE(review): unlike the low-noise branch below, this fallback is
            # taken even when dit_quantized is set — confirm the asymmetry is
            # intentional before unifying the two conditions.
            self.high_noise_model_path = self.config["high_noise_original_ckpt"]
        else:
            self.high_noise_model_path = self._find_model_dir("high_noise_model", "High noise model")

        # Resolve the low-noise expert checkpoint.
        if self.config.get("dit_quantized", False) and self.config.get("low_noise_quantized_ckpt", None):
            self.low_noise_model_path = self.config["low_noise_quantized_ckpt"]
        elif not self.config.get("dit_quantized", False) and self.config.get("low_noise_original_ckpt", None):
            self.low_noise_model_path = self.config["low_noise_original_ckpt"]
        else:
            # Bug fix: the original validated high_noise_model_path here, so a
            # missing low-noise directory went undetected.
            self.low_noise_model_path = self._find_model_dir("low_noise_model", "Low noise model")

    def _find_model_dir(self, subdir, label):
        """Locate ``subdir`` under model_path, falling back to distill_models/.

        Raises:
            FileNotFoundError: if neither candidate directory exists.
        """
        path = os.path.join(self.config["model_path"], subdir)
        if not os.path.isdir(path):
            path = os.path.join(self.config["model_path"], "distill_models", subdir)
            if not os.path.isdir(path):
                raise FileNotFoundError(f"{label} not found under {self.config['model_path']}")
        return path

    def load_transformer(self):
        """Build both noise experts, or defer them when lazy loading is on.

        Returns:
            A ``MultiDistillModelStruct`` wrapping [high_noise, low_noise].
            With ``lazy_load`` or ``unload_modules`` enabled, the struct holds
            [None, None] plus the checkpoint paths and device needed to build
            each expert on first use.
        """
        if not self.config.get("lazy_load", False) and not self.config.get("unload_modules", False):
            high_noise_model = self._build_expert(
                "high_noise_model", self.high_noise_model_path, "wan2.2_moe_high_noise", "High noise model"
            )
            low_noise_model = self._build_expert(
                "low_noise_model", self.low_noise_model_path, "wan2.2_moe_low_noise", "Low noise model"
            )
            return MultiDistillModelStruct([high_noise_model, low_noise_model], self.config, self.config["boundary_step_index"])

        model_struct = MultiDistillModelStruct([None, None], self.config, self.config["boundary_step_index"])
        model_struct.low_noise_model_path = self.low_noise_model_path
        model_struct.high_noise_model_path = self.high_noise_model_path
        model_struct.init_device = self.init_device
        return model_struct

    def _build_expert(self, lora_target, model_path, model_type, log_label):
        """Build one expert model.

        When a LoRA config entry names this expert (``lora_target``), load the
        base ``WanModel`` and merge in every matching LoRA; otherwise load the
        distilled checkpoint directly.
        """
        matching_loras = [lc for lc in self.config.get("lora_configs") or [] if lc.get("name", "") == lora_target]
        if matching_loras:
            model = WanModel(
                model_path,
                self.config,
                self.init_device,
                model_type=model_type,
            )
            lora_wrapper = WanLoraWrapper(model)
            for lora_config in matching_loras:
                lora_path = lora_config["path"]
                strength = lora_config.get("strength", 1.0)
                lora_name = lora_wrapper.load_lora(lora_path)
                lora_wrapper.apply_lora(lora_name, strength)
                logger.info(f"{log_label} loaded LoRA: {lora_name} with strength: {strength}")
        else:
            model = WanDistillModel(
                model_path,
                self.config,
                self.init_device,
                model_type=model_type,
            )
        return model

    def init_scheduler(self):
        """Create the Wan 2.2 step-distillation scheduler.

        Raises:
            NotImplementedError: if ``feature_caching`` is anything other than
                "NoCaching".
        """
        if self.config["feature_caching"] == "NoCaching":
            self.scheduler = Wan22StepDistillScheduler(self.config)
        else:
            raise NotImplementedError(f"Unsupported feature_caching type: {self.config['feature_caching']}")