from .k_diffusion import sampling as k_diffusion_sampling
from .k_diffusion import external as k_diffusion_external
from .extra_samplers import uni_pc
import torch
from comfy import model_management
from .ldm.models.diffusion.ddim import DDIMSampler
from .ldm.modules.diffusionmodules.util import make_ddim_timesteps
import math

def lcm(a, b): #TODO: eventually replace by math.lcm (added in python3.9)
    return abs(a*b) // math.gcd(a, b)

#The main sampling function shared by all the samplers
#Returns predicted noise
def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, cond_concat=None, model_options={}, seed=None):
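        #for one conditioning entry, gather the cropped input, blend mask and strength, conditioning dict, area, and any control/gligen patches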
        def get_area_and_mult(cond, x_in, cond_concat_in, timestep_in):
            area = (x_in.shape[2], x_in.shape[3], 0, 0)
            strength = 1.0
            if 'area' in cond[1]:
                area = cond[1]['area']
            if 'strength' in cond[1]:
                strength = cond[1]['strength']

            adm_cond = None
            if 'adm_encoded' in cond[1]:
                adm_cond = cond[1]['adm_encoded']

            input_x = x_in[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]]
            if 'mask' in cond[1]:
                # The mask should already have been scaled to the latent size when sampling began (see resolve_cond_masks)
                mask_strength = 1.0
                if "mask_strength" in cond[1]:
                    mask_strength = cond[1]["mask_strength"]
                mask = cond[1]['mask']
                assert mask.shape[1] == x_in.shape[2]
                assert mask.shape[2] == x_in.shape[3]
                mask = mask[:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] * mask_strength
                mask = mask.unsqueeze(1).repeat(input_x.shape[0] // mask.shape[0], input_x.shape[1], 1, 1)
            else:
                mask = torch.ones_like(input_x)
            mult = mask * strength

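            #rectangular areas without an explicit mask get their edges feathered over an 8 pixel ramp so neighbouring areas blend smoothly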
            if 'mask' not in cond[1]:
                rr = 8
                if area[2] != 0:
                    for t in range(rr):
                        mult[:,:,t:1+t,:] *= ((1.0/rr) * (t + 1))
                if (area[0] + area[2]) < x_in.shape[2]:
                    for t in range(rr):
                        mult[:,:,area[0] - 1 - t:area[0] - t,:] *= ((1.0/rr) * (t + 1))
                if area[3] != 0:
                    for t in range(rr):
                        mult[:,:,:,t:1+t] *= ((1.0/rr) * (t + 1))
                if (area[1] + area[3]) < x_in.shape[3]:
                    for t in range(rr):
                        mult[:,:,:,area[1] - 1 - t:area[1] - t] *= ((1.0/rr) * (t + 1))

            conditioning = {}
            conditioning['c_crossattn'] = cond[0]
            if cond_concat_in is not None and len(cond_concat_in) > 0:
                cropped = []
                for x in cond_concat_in:
                    cr = x[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]]
                    cropped.append(cr)
                conditioning['c_concat'] = torch.cat(cropped, dim=1)

            if adm_cond is not None:
                conditioning['c_adm'] = adm_cond

            control = None
            if 'control' in cond[1]:
                control = cond[1]['control']

            patches = None
            if 'gligen' in cond[1]:
                gligen = cond[1]['gligen']
                patches = {}
                gligen_type = gligen[0]
                gligen_model = gligen[1]
                if gligen_type == "position":
                    gligen_patch = gligen_model.set_position(input_x.shape, gligen[2], input_x.device)
                else:
                    gligen_patch = gligen_model.set_empty(input_x.shape, input_x.device)

                patches['middle_patch'] = [gligen_patch]

            return (input_x, mult, conditioning, area, control, patches)

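        #conditionings can only share a batch if their tensors have compatible shapes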
        def cond_equal_size(c1, c2):
            if c1 is c2:
                return True
            if c1.keys() != c2.keys():
                return False
            if 'c_crossattn' in c1:
                s1 = c1['c_crossattn'].shape
                s2 = c2['c_crossattn'].shape
                if s1 != s2:
                    if s1[0] != s2[0] or s1[2] != s2[2]: #these 2 cases should not happen
                        return False

                    mult_min = lcm(s1[1], s2[1])
                    diff = mult_min // min(s1[1], s2[1])
                    if diff > 4: #arbitrary limit on the padding because it's probably going to impact performance negatively if it's too much
                        return False
            if 'c_concat' in c1:
                if c1['c_concat'].shape != c2['c_concat'].shape:
                    return False
            if 'c_adm' in c1:
                if c1['c_adm'].shape != c2['c_adm'].shape:
                    return False
            return True

        def can_concat_cond(c1, c2):
            if c1[0].shape != c2[0].shape:
                return False

            #control
            if (c1[4] is None) != (c2[4] is None):
                return False
            if c1[4] is not None:
                if c1[4] is not c2[4]:
                    return False

            #patches
            if (c1[5] is None) != (c2[5] is None):
                return False
            if (c1[5] is not None):
                if c1[5] is not c2[5]:
                    return False

            return cond_equal_size(c1[2], c2[2])

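        #merge a list of conditioning dicts into one batched dict, repeating shorter c_crossattn tensors up to a common token length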
        def cond_cat(c_list):
            c_crossattn = []
            c_concat = []
            c_adm = []
            crossattn_max_len = 0
            for x in c_list:
                if 'c_crossattn' in x:
                    c = x['c_crossattn']
                    if crossattn_max_len == 0:
                        crossattn_max_len = c.shape[1]
                    else:
                        crossattn_max_len = lcm(crossattn_max_len, c.shape[1])
                    c_crossattn.append(c)
                if 'c_concat' in x:
                    c_concat.append(x['c_concat'])
                if 'c_adm' in x:
                    c_adm.append(x['c_adm'])
            out = {}
            c_crossattn_out = []
            for c in c_crossattn:
                if c.shape[1] < crossattn_max_len:
                    c = c.repeat(1, crossattn_max_len // c.shape[1], 1) #padding with repeat doesn't change result
                c_crossattn_out.append(c)

            if len(c_crossattn_out) > 0:
                out['c_crossattn'] = [torch.cat(c_crossattn_out)]
            if len(c_concat) > 0:
                out['c_concat'] = [torch.cat(c_concat)]
            if len(c_adm) > 0:
                out['c_adm'] = torch.cat(c_adm)
            return out

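        #run the model over every cond and uncond entry, batching compatible entries together while staying under max_total_area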
        def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_total_area, cond_concat_in, model_options):
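            #weighted output accumulators; the counts start slightly above zero so the final division never divides by zero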
            out_cond = torch.zeros_like(x_in)
            out_count = torch.ones_like(x_in)/100000.0

            out_uncond = torch.zeros_like(x_in)
            out_uncond_count = torch.ones_like(x_in)/100000.0

            COND = 0
            UNCOND = 1

            to_run = []
            for x in cond:
                p = get_area_and_mult(x, x_in, cond_concat_in, timestep)
                if p is None:
                    continue

                to_run += [(p, COND)]
            for x in uncond:
                p = get_area_and_mult(x, x_in, cond_concat_in, timestep)
                if p is None:
                    continue

                to_run += [(p, UNCOND)]

            while len(to_run) > 0:
                first = to_run[0]
                first_shape = first[0][0].shape
                to_batch_temp = []
                for x in range(len(to_run)):
                    if can_concat_cond(to_run[x][0], first[0]):
                        to_batch_temp += [x]

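                #indices are popped from to_run below, so keep them in descending order to avoid shifting the remaining ones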
                to_batch_temp.reverse()
                to_batch = to_batch_temp[:1]

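                #pick the largest fraction of the compatible conds (all, then 1/2, 1/3, ...) whose total area still fits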
                for i in range(1, len(to_batch_temp) + 1):
                    batch_amount = to_batch_temp[:len(to_batch_temp)//i]
                    if (len(batch_amount) * first_shape[0] * first_shape[2] * first_shape[3] < max_total_area):
                        to_batch = batch_amount
                        break

                input_x = []
                mult = []
                c = []
                cond_or_uncond = []
                area = []
                control = None
                patches = None
                for x in to_batch:
                    o = to_run.pop(x)
                    p = o[0]
                    input_x += [p[0]]
                    mult += [p[1]]
                    c += [p[2]]
                    area += [p[3]]
                    cond_or_uncond += [o[1]]
                    control = p[4]
                    patches = p[5]

                batch_chunks = len(cond_or_uncond)
                input_x = torch.cat(input_x)
                c = cond_cat(c)
                timestep_ = torch.cat([timestep] * batch_chunks)

                if control is not None:
                    c['control'] = control.get_control(input_x, timestep_, c, len(cond_or_uncond))

                transformer_options = {}
                if 'transformer_options' in model_options:
                    transformer_options = model_options['transformer_options'].copy()

                if patches is not None:
                    if "patches" in transformer_options:
                        cur_patches = transformer_options["patches"].copy()
                        for p in patches:
                            if p in cur_patches:
                                cur_patches[p] = cur_patches[p] + patches[p]
                            else:
                                cur_patches[p] = patches[p]
                    else:
                        transformer_options["patches"] = patches

                c['transformer_options'] = transformer_options

                output = model_function(input_x, timestep_, **c).chunk(batch_chunks)
                del input_x

                model_management.throw_exception_if_processing_interrupted()

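                #composite each chunk of the output (and its blend weight) back into the full-size cond or uncond accumulator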
                for o in range(batch_chunks):
                    if cond_or_uncond[o] == COND:
                        out_cond[:,:,area[o][2]:area[o][0] + area[o][2],area[o][3]:area[o][1] + area[o][3]] += output[o] * mult[o]
                        out_count[:,:,area[o][2]:area[o][0] + area[o][2],area[o][3]:area[o][1] + area[o][3]] += mult[o]
                    else:
                        out_uncond[:,:,area[o][2]:area[o][0] + area[o][2],area[o][3]:area[o][1] + area[o][3]] += output[o] * mult[o]
                        out_uncond_count[:,:,area[o][2]:area[o][0] + area[o][2],area[o][3]:area[o][1] + area[o][3]] += mult[o]
                del mult

            out_cond /= out_count
            del out_count
            out_uncond /= out_uncond_count
            del out_uncond_count

            return out_cond, out_uncond


        max_total_area = model_management.maximum_batch_area()
        cond, uncond = calc_cond_uncond_batch(model_function, cond, uncond, x, timestep, max_total_area, cond_concat, model_options)
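        #standard classifier-free guidance, unless model_options supplies a custom cfg function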
        if "sampler_cfg_function" in model_options:
            args = {"cond": cond, "uncond": uncond, "cond_scale": cond_scale, "timestep": timestep}
            return model_options["sampler_cfg_function"](args)
        else:
            return uncond + (cond - uncond) * cond_scale


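#k-diffusion denoiser wrapper for v-prediction models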
class CompVisVDenoiser(k_diffusion_external.DiscreteVDDPMDenoiser):
    def __init__(self, model, quantize=False, device='cpu'):
        super().__init__(model, model.alphas_cumprod, quantize=quantize)

    def get_v(self, x, t, cond, **kwargs):
        return self.inner_model.apply_model(x, t, cond, **kwargs)


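#exposes the cfg-combined sampling_function through a model-like apply_model interface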
class CFGNoisePredictor(torch.nn.Module):
    def __init__(self, model):
        super().__init__()
        self.inner_model = model
        self.alphas_cumprod = model.alphas_cumprod
    def apply_model(self, x, timestep, cond, uncond, cond_scale, cond_concat=None, model_options={}, seed=None):
        out = sampling_function(self.inner_model.apply_model, x, timestep, uncond, cond, cond_scale, cond_concat, model_options=model_options, seed=seed)
        return out


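#inpainting wrapper: every step, the region outside the denoise mask is reset to the re-noised original latent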
class KSamplerX0Inpaint(torch.nn.Module):
    def __init__(self, model):
        super().__init__()
        self.inner_model = model
    def forward(self, x, sigma, uncond, cond, cond_scale, denoise_mask, cond_concat=None, model_options={}, seed=None):
        if denoise_mask is not None:
            latent_mask = 1. - denoise_mask
            x = x * denoise_mask + (self.latent_image + self.noise * sigma.reshape([sigma.shape[0]] + [1] * (len(self.noise.shape) - 1))) * latent_mask
        out = self.inner_model(x, sigma, cond=cond, uncond=uncond, cond_scale=cond_scale, cond_concat=cond_concat, model_options=model_options, seed=seed)
        if denoise_mask is not None:
            out *= denoise_mask
            out += self.latent_image * latent_mask
        return out

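#scheduler that picks evenly spaced sigmas from the model's schedule, ending at 0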
def simple_scheduler(model, steps):
    sigs = []
    ss = len(model.sigmas) / steps
    for x in range(steps):
        sigs += [float(model.sigmas[-(1 + int(x * ss))])]
    sigs += [0.0]
    return torch.FloatTensor(sigs)

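#scheduler built from uniform ddim timesteps converted back into sigmas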
def ddim_scheduler(model, steps):
    sigs = []
    ddim_timesteps = make_ddim_timesteps(ddim_discr_method="uniform", num_ddim_timesteps=steps, num_ddpm_timesteps=model.inner_model.inner_model.num_timesteps, verbose=False)
    for x in range(len(ddim_timesteps) - 1, -1, -1):
        ts = ddim_timesteps[x]
        if ts > 999:
            ts = 999
        sigs.append(model.t_to_sigma(torch.tensor(ts)))
    sigs += [0.0]
    return torch.FloatTensor(sigs)

def blank_inpaint_image_like(latent_image):
    blank_image = torch.ones_like(latent_image)
    # these are the values for "zero" in pixel space translated to latent space
    blank_image[:,0] *= 0.8223
    blank_image[:,1] *= -0.6876
    blank_image[:,2] *= 0.6364
    blank_image[:,3] *= 0.1380
    return blank_image

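#compute an integer bounding box (x_min, y_min, x_max, y_max) for each mask in the batch and flag the all-zero ones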
def get_mask_aabb(masks):
    if masks.numel() == 0:
        return torch.zeros((0, 4), device=masks.device, dtype=torch.int)

    b = masks.shape[0]

    bounding_boxes = torch.zeros((b, 4), device=masks.device, dtype=torch.int)
    is_empty = torch.zeros((b), device=masks.device, dtype=torch.bool)
    for i in range(b):
        mask = masks[i]
        if mask.numel() == 0:
            continue
        if not torch.any(mask != 0):
            is_empty[i] = True
            continue
        y, x = torch.where(mask)
        bounding_boxes[i, 0] = torch.min(x)
        bounding_boxes[i, 1] = torch.min(y)
        bounding_boxes[i, 2] = torch.max(x)
        bounding_boxes[i, 3] = torch.max(y)

    return bounding_boxes, is_empty

def resolve_cond_masks(conditions, h, w, device):
    # We need to decide on an area outside the sampling loop in order to properly generate opposite areas of equal sizes.
    # While we're doing this, we can also resolve the mask device and scaling for performance reasons
    for i in range(len(conditions)):
        c = conditions[i]
        if 'mask' in c[1]:
            mask = c[1]['mask']
            mask = mask.to(device=device)
            modified = c[1].copy()
            if len(mask.shape) == 2:
                mask = mask.unsqueeze(0)
            if mask.shape[1] != h or mask.shape[2] != w:
                mask = torch.nn.functional.interpolate(mask.unsqueeze(1), size=(h, w), mode='bilinear', align_corners=False).squeeze(1)

            if modified.get("set_area_to_bounds", False):
                bounds = torch.max(torch.abs(mask),dim=0).values.unsqueeze(0)
                boxes, is_empty = get_mask_aabb(bounds)
                if is_empty[0]:
                    # Use the minimum possible size for efficiency reasons. (Since the mask is all-0, this becomes a noop anyway)
                    modified['area'] = (8, 8, 0, 0)
                else:
                    box = boxes[0]
                    H, W, Y, X = (box[3] - box[1] + 1, box[2] - box[0] + 1, box[1], box[0])
                    H = max(8, H)
                    W = max(8, W)
                    area = (int(H), int(W), int(Y), int(X))
                    modified['area'] = area

            modified['mask'] = mask
            conditions[i] = [c[0], modified]

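#make sure conds has an entry matching the 'area' of c; if none exists, clone the smallest cond enclosing that area and give it c's parameters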
def create_cond_with_same_area_if_none(conds, c):
    if 'area' not in c[1]:
        return

    c_area = c[1]['area']
    smallest = None
    for x in conds:
        if 'area' in x[1]:
            a = x[1]['area']
            if c_area[2] >= a[2] and c_area[3] >= a[3]:
                if a[0] + a[2] >= c_area[0] + c_area[2]:
                    if a[1] + a[3] >= c_area[1] + c_area[3]:
                        if smallest is None:
                            smallest = x
                        elif 'area' not in smallest[1]:
                            smallest = x
                        else:
                            if smallest[1]['area'][0] * smallest[1]['area'][1] > a[0] * a[1]:
                                smallest = x
        else:
            if smallest is None:
                smallest = x
    if smallest is None:
        return
    if 'area' in smallest[1]:
        if smallest[1]['area'] == c_area:
            return
    n = c[1].copy()
    conds += [[smallest[0], n]]

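#when conds carry an extra input (e.g. control or gligen) that no uncond has, copy it onto uncond entries of equal area so both sides match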
def apply_empty_x_to_equal_area(conds, uncond, name, uncond_fill_func):
    cond_cnets = []
    cond_other = []
    uncond_cnets = []
    uncond_other = []
    for t in range(len(conds)):
        x = conds[t]
        if 'area' not in x[1]:
            if name in x[1] and x[1][name] is not None:
                cond_cnets.append(x[1][name])
            else:
                cond_other.append((x, t))
    for t in range(len(uncond)):
        x = uncond[t]
        if 'area' not in x[1]:
            if name in x[1] and x[1][name] is not None:
                uncond_cnets.append(x[1][name])
            else:
                uncond_other.append((x, t))

    if len(uncond_cnets) > 0:
        return

    for x in range(len(cond_cnets)):
        temp = uncond_other[x % len(uncond_other)]
        o = temp[0]
        if name in o[1] and o[1][name] is not None:
            n = o[1].copy()
            n[name] = uncond_fill_func(cond_cnets, x)
            uncond += [[o[0], n]]
        else:
            n = o[1].copy()
            n[name] = uncond_fill_func(cond_cnets, x)
            uncond[temp[1]] = [o[0], n]

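#precompute each cond's adm conditioning and cache the batched result under 'adm_encoded'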
def encode_adm(model, conds, batch_size, width, height, device, prompt_type):
    for t in range(len(conds)):
        x = conds[t]
        adm_out = None
        if 'adm' in x[1]:
            adm_out = x[1]["adm"]
        else:
            params = x[1].copy()
            params["width"] = params.get("width", width * 8)
            params["height"] = params.get("height", height * 8)
            params["prompt_type"] = params.get("prompt_type", prompt_type)
            adm_out = model.encode_adm(device=device, **params)

        if adm_out is not None:
            x[1] = x[1].copy()
            x[1]["adm_encoded"] = torch.cat([adm_out] * batch_size).to(device)

    return conds


class KSampler:
    SCHEDULERS = ["normal", "karras", "exponential", "simple", "ddim_uniform"]
    SAMPLERS = ["euler", "euler_ancestral", "heun", "dpm_2", "dpm_2_ancestral",
                "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu",
                "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "ddim", "uni_pc", "uni_pc_bh2"]

    def __init__(self, model, steps, device, sampler=None, scheduler=None, denoise=None, model_options={}):
        self.model = model
        self.model_denoise = CFGNoisePredictor(self.model)
        if self.model.parameterization == "v":
            self.model_wrap = CompVisVDenoiser(self.model_denoise, quantize=True)
        else:
            self.model_wrap = k_diffusion_external.CompVisDenoiser(self.model_denoise, quantize=True)
        self.model_wrap.parameterization = self.model.parameterization
        self.model_k = KSamplerX0Inpaint(self.model_wrap)
        self.device = device
        if scheduler not in self.SCHEDULERS:
            scheduler = self.SCHEDULERS[0]
        if sampler not in self.SAMPLERS:
            sampler = self.SAMPLERS[0]
        self.scheduler = scheduler
        self.sampler = sampler
        self.sigma_min=float(self.model_wrap.sigma_min)
        self.sigma_max=float(self.model_wrap.sigma_max)
        self.set_steps(steps, denoise)
        self.denoise = denoise
        self.model_options = model_options

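    #build the sigma schedule for the selected scheduler; dpm_2 samplers get one extra step and drop the penultimate sigma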
    def calculate_sigmas(self, steps):
        sigmas = None

        discard_penultimate_sigma = False
        if self.sampler in ['dpm_2', 'dpm_2_ancestral']:
            steps += 1
            discard_penultimate_sigma = True

        if self.scheduler == "karras":
            sigmas = k_diffusion_sampling.get_sigmas_karras(n=steps, sigma_min=self.sigma_min, sigma_max=self.sigma_max)
        elif self.scheduler == "exponential":
            sigmas = k_diffusion_sampling.get_sigmas_exponential(n=steps, sigma_min=self.sigma_min, sigma_max=self.sigma_max)
        elif self.scheduler == "normal":
            sigmas = self.model_wrap.get_sigmas(steps)
        elif self.scheduler == "simple":
            sigmas = simple_scheduler(self.model_wrap, steps)
        elif self.scheduler == "ddim_uniform":
            sigmas = ddim_scheduler(self.model_wrap, steps)
        else:
            print("error: invalid scheduler", self.scheduler)

        if discard_penultimate_sigma:
            sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
        return sigmas

    def set_steps(self, steps, denoise=None):
        self.steps = steps
        if denoise is None or denoise > 0.9999:
            self.sigmas = self.calculate_sigmas(steps).to(self.device)
        else:
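            #partial denoise: build a longer schedule and keep only its last steps + 1 sigmas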
            new_steps = int(steps/denoise)
            sigmas = self.calculate_sigmas(new_steps).to(self.device)
            self.sigmas = sigmas[-(steps + 1):]

    def sample(self, noise, positive, negative, cfg, latent_image=None, start_step=None, last_step=None, force_full_denoise=False, denoise_mask=None, sigmas=None, callback=None, disable_pbar=False, seed=None):
        if sigmas is None:
            sigmas = self.sigmas
        sigma_min = self.sigma_min

        if last_step is not None and last_step < (len(sigmas) - 1):
            sigma_min = sigmas[last_step]
            sigmas = sigmas[:last_step + 1]
            if force_full_denoise:
                sigmas[-1] = 0

        if start_step is not None:
            if start_step < (len(sigmas) - 1):
                sigmas = sigmas[start_step:]
            else:
                if latent_image is not None:
                    return latent_image
                else:
                    return torch.zeros_like(noise)

        positive = positive[:]
        negative = negative[:]

        resolve_cond_masks(positive, noise.shape[2], noise.shape[3], self.device)
        resolve_cond_masks(negative, noise.shape[2], noise.shape[3], self.device)

        #make sure each cond area has an opposite one with the same area
        for c in positive:
            create_cond_with_same_area_if_none(negative, c)
        for c in negative:
            create_cond_with_same_area_if_none(positive, c)

        apply_empty_x_to_equal_area(positive, negative, 'control', lambda cond_cnets, x: cond_cnets[x])
        apply_empty_x_to_equal_area(positive, negative, 'gligen', lambda cond_cnets, x: cond_cnets[x])

        if self.model.is_adm():
            positive = encode_adm(self.model, positive, noise.shape[0], noise.shape[3], noise.shape[2], self.device, "positive")
            negative = encode_adm(self.model, negative, noise.shape[0], noise.shape[3], noise.shape[2], self.device, "negative")

        if latent_image is not None:
            latent_image = self.model.process_latent_in(latent_image)

        extra_args = {"cond":positive, "uncond":negative, "cond_scale": cfg, "model_options": self.model_options, "seed":seed}

        cond_concat = None
        if hasattr(self.model, 'concat_keys'): #inpaint
            cond_concat = []
            for ck in self.model.concat_keys:
                if denoise_mask is not None:
                    if ck == "mask":
                        cond_concat.append(denoise_mask[:,:1])
                    elif ck == "masked_image":
                        cond_concat.append(latent_image) #NOTE: the latent_image should be masked by the mask in pixel space
                else:
                    if ck == "mask":
                        cond_concat.append(torch.ones_like(noise)[:,:1])
                    elif ck == "masked_image":
                        cond_concat.append(blank_inpaint_image_like(noise))
            extra_args["cond_concat"] = cond_concat

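        #max_denoise is only True when sampling the full schedule, i.e. starting from pure noise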
        if sigmas[0] != self.sigmas[0] or (self.denoise is not None and self.denoise < 1.0):
            max_denoise = False
        else:
            max_denoise = True

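        #dispatch to the selected sampler: uni_pc variants, ddim, or one of the k-diffusion samplers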
        if self.sampler == "uni_pc":
            samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=max_denoise, extra_args=extra_args, noise_mask=denoise_mask, callback=callback, disable=disable_pbar)
        elif self.sampler == "uni_pc_bh2":
            samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=max_denoise, extra_args=extra_args, noise_mask=denoise_mask, callback=callback, variant='bh2', disable=disable_pbar)
        elif self.sampler == "ddim":
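            #ddim expects ascending timesteps, so build them from the descending sigma schedule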
            timesteps = []
            for s in range(sigmas.shape[0]):
                timesteps.insert(0, self.model_wrap.sigma_to_t(sigmas[s]))
            noise_mask = None
            if denoise_mask is not None:
                noise_mask = 1.0 - denoise_mask

            ddim_callback = None
            if callback is not None:
                total_steps = len(timesteps) - 1
                ddim_callback = lambda pred_x0, i: callback(i, pred_x0, None, total_steps)

            sampler = DDIMSampler(self.model, device=self.device)
            sampler.make_schedule_timesteps(ddim_timesteps=timesteps, verbose=False)
            z_enc = sampler.stochastic_encode(latent_image, torch.tensor([len(timesteps) - 1] * noise.shape[0]).to(self.device), noise=noise, max_denoise=max_denoise)
            samples, _ = sampler.sample_custom(ddim_timesteps=timesteps,
                                                    conditioning=positive,
                                                    batch_size=noise.shape[0],
                                                    shape=noise.shape[1:],
                                                    verbose=False,
                                                    unconditional_guidance_scale=cfg,
                                                    unconditional_conditioning=negative,
                                                    eta=0.0,
                                                    x_T=z_enc,
                                                    x0=latent_image,
                                                    img_callback=ddim_callback,
                                                    denoise_function=sampling_function,
                                                    extra_args=extra_args,
                                                    mask=noise_mask,
                                                    to_zero=sigmas[-1]==0,
                                                    end_step=sigmas.shape[0] - 1,
                                                    disable_pbar=disable_pbar)

        else:
            extra_args["denoise_mask"] = denoise_mask
            self.model_k.latent_image = latent_image
            self.model_k.noise = noise

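            #scale the initial noise for the first sigma; a full denoise needs the larger sqrt(1 + sigma^2) factor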
            if max_denoise:
                noise = noise * torch.sqrt(1.0 + sigmas[0] ** 2.0)
            else:
                noise = noise * sigmas[0]

            k_callback = None
            total_steps = len(sigmas) - 1
            if callback is not None:
                k_callback = lambda x: callback(x["i"], x["denoised"], x["x"], total_steps)

            if latent_image is not None:
                noise += latent_image
            if self.sampler == "dpm_fast":
                samples = k_diffusion_sampling.sample_dpm_fast(self.model_k, noise, sigma_min, sigmas[0], total_steps, extra_args=extra_args, callback=k_callback, disable=disable_pbar)
            elif self.sampler == "dpm_adaptive":
                samples = k_diffusion_sampling.sample_dpm_adaptive(self.model_k, noise, sigma_min, sigmas[0], extra_args=extra_args, callback=k_callback, disable=disable_pbar)
            else:
                samples = getattr(k_diffusion_sampling, "sample_{}".format(self.sampler))(self.model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar)

        return self.model.process_latent_out(samples.to(torch.float32))