import folder_paths
import comfy.sd
import comfy.model_sampling
import torch

class LCM(comfy.model_sampling.EPS):
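    """EPS-style denoiser for latent consistency (LCM) models.

    Applies the consistency-model boundary condition: the one-step x0 estimate is
    blended with the noisy input via c_skip/c_out, using sigma_data = 0.5 and a
    timestep scaling of 10.0 (the usual LCM defaults).
    """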
    def calculate_denoised(self, sigma, model_output, model_input):
        timestep = self.timestep(sigma).view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
        sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
        x0 = model_input - model_output * sigma

        sigma_data = 0.5
        scaled_timestep = timestep * 10.0 #timestep_scaling

        c_skip = sigma_data**2 / (scaled_timestep**2 + sigma_data**2)
        c_out = scaled_timestep / (scaled_timestep**2 + sigma_data**2) ** 0.5

        return c_out * x0 + c_skip * model_input

class ModelSamplingDiscreteDistilled(comfy.model_sampling.ModelSamplingDiscrete):
    original_timesteps = 50

    def __init__(self, model_config=None):
        super().__init__(model_config)

        self.skip_steps = self.num_timesteps // self.original_timesteps

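        # Keep only every `skip_steps`-th sigma so the schedule matches the
        # original_timesteps (50) steps the distilled model was trained on.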
        sigmas_valid = torch.zeros((self.original_timesteps), dtype=torch.float32)
        for x in range(self.original_timesteps):
            sigmas_valid[self.original_timesteps - 1 - x] = self.sigmas[self.num_timesteps - 1 - x * self.skip_steps]

        self.set_sigmas(sigmas_valid)

    def timestep(self, sigma):
        log_sigma = sigma.log()
        dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None]
        return (dists.abs().argmin(dim=0).view(sigma.shape) * self.skip_steps + (self.skip_steps - 1)).to(sigma.device)

    def sigma(self, timestep):
        t = torch.clamp(((timestep.float().to(self.log_sigmas.device) - (self.skip_steps - 1)) / self.skip_steps).float(), min=0, max=(len(self.sigmas) - 1))
        low_idx = t.floor().long()
        high_idx = t.ceil().long()
        w = t.frac()
        log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx]
        return log_sigma.exp().to(timestep.device)


def rescale_zero_terminal_snr_sigmas(sigmas):
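    """Rescale a sigma schedule so the final timestep has (near) zero terminal SNR.

    Converts sigmas to alpha_bar, shifts and rescales so alpha_bar is ~0 at the last
    timestep while the first timestep is unchanged, then converts back to sigmas.
    This is the schedule fix from "Common Diffusion Noise Schedules and Sample Steps
    are Flawed" (Lin et al., 2023); the ModelSamplingDiscrete node applies it when
    zsnr is enabled.
    """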
    alphas_cumprod = 1 / ((sigmas * sigmas) + 1)
    alphas_bar_sqrt = alphas_cumprod.sqrt()

    # Store old values.
    alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
    alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()

    # Shift so the last timestep is zero.
    alphas_bar_sqrt -= (alphas_bar_sqrt_T)

    # Scale so the first timestep is back to the old value.
    alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)

    # Convert alphas_bar_sqrt to betas
    alphas_bar = alphas_bar_sqrt**2  # Revert sqrt
    alphas_bar[-1] = 4.8973451890853435e-08  # keep the last alpha_bar slightly above zero to avoid an infinite sigma
    return ((1 - alphas_bar) / alphas_bar) ** 0.5

class ModelSamplingDiscrete:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "sampling": (["eps", "v_prediction", "lcm"],),
                              "zsnr": ("BOOLEAN", {"default": False}),
                              }}

    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "advanced/model"

    def patch(self, model, sampling, zsnr):
        m = model.clone()

        sampling_base = comfy.model_sampling.ModelSamplingDiscrete
        if sampling == "eps":
            sampling_type = comfy.model_sampling.EPS
        elif sampling == "v_prediction":
            sampling_type = comfy.model_sampling.V_PREDICTION
        elif sampling == "lcm":
            sampling_type = LCM
            sampling_base = ModelSamplingDiscreteDistilled

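        # Compose the schedule base (discrete or distilled) with the prediction-type
        # mixin (EPS / V_PREDICTION / LCM) into a single model_sampling class.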
        class ModelSamplingAdvanced(sampling_base, sampling_type):
            pass

        model_sampling = ModelSamplingAdvanced()
        if zsnr:
            model_sampling.set_sigmas(rescale_zero_terminal_snr_sigmas(model_sampling.sigmas))

        m.add_object_patch("model_sampling", model_sampling)
        return (m, )

class ModelSamplingContinuousEDM:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "sampling": (["v_prediction", "eps"],),
                              "sigma_max": ("FLOAT", {"default": 120.0, "min": 0.0, "max": 1000.0, "step":0.001, "round": False}),
                              "sigma_min": ("FLOAT", {"default": 0.002, "min": 0.0, "max": 1000.0, "step":0.001, "round": False}),
                              }}

    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "advanced/model"

    def patch(self, model, sampling, sigma_max, sigma_min):
        m = model.clone()

        if sampling == "eps":
            sampling_type = comfy.model_sampling.EPS
        elif sampling == "v_prediction":
            sampling_type = comfy.model_sampling.V_PREDICTION

        class ModelSamplingAdvanced(comfy.model_sampling.ModelSamplingContinuousEDM, sampling_type):
            pass

        model_sampling = ModelSamplingAdvanced(model.model.model_config)
        model_sampling.set_sigma_range(sigma_min, sigma_max)
        m.add_object_patch("model_sampling", model_sampling)
        return (m, )

class RescaleCFG:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "multiplier": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "advanced/model"

    def patch(self, model, multiplier):
        def rescale_cfg(args):
            cond = args["cond"]
            uncond = args["uncond"]
            cond_scale = args["cond_scale"]
            sigma = args["sigma"]
            sigma = sigma.view(sigma.shape[:1] + (1,) * (cond.ndim - 1))
            x_orig = args["input"]

            # rescale CFG has to be done on the v-prediction model output, so convert to v-pred space first
            x = x_orig / (sigma * sigma + 1.0)
            cond = ((x - (x_orig - cond)) * (sigma ** 2 + 1.0) ** 0.5) / (sigma)
            uncond = ((x - (x_orig - uncond)) * (sigma ** 2 + 1.0) ** 0.5) / (sigma)

            # classifier-free guidance, then rescale so the std matches the cond
            # prediction and blend with the plain CFG result by `multiplier`
            x_cfg = uncond + cond_scale * (cond - uncond)
            ro_pos = torch.std(cond, dim=(1,2,3), keepdim=True)
            ro_cfg = torch.std(x_cfg, dim=(1,2,3), keepdim=True)

            x_rescaled = x_cfg * (ro_pos / ro_cfg)
            x_final = multiplier * x_rescaled + (1.0 - multiplier) * x_cfg

            return x_orig - (x - x_final * sigma / (sigma * sigma + 1.0) ** 0.5)

        m = model.clone()
        m.set_model_sampler_cfg_function(rescale_cfg)
        return (m, )

NODE_CLASS_MAPPINGS = {
    "ModelSamplingDiscrete": ModelSamplingDiscrete,
    "ModelSamplingContinuousEDM": ModelSamplingContinuousEDM,
    "RescaleCFG": RescaleCFG,
}