import torch
import comfy.model_management
import comfy.samplers
import math


def prepare_noise(latent_image, seed, skip=0):
    """
    Create reproducible random noise shaped like a latent image.

    The RNG is seeded with `seed`; `skip` single-sample draws are made and
    discarded first, so a caller can reproduce the n-th noise tensor of a
    seeded sequence without keeping earlier ones.
    """
    generator = torch.manual_seed(seed)
    # One batch-of-one draw per skipped generation, matching the per-image
    # shape so the generator state advances exactly as it would have.
    single_shape = [1] + list(latent_image.size())[1:]
    for _ in range(skip):
        torch.randn(single_shape, dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
    return torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
def prepare_mask(noise_mask, shape, device):
    """Resize a noise mask to the latent's spatial size and broadcast it over channels and batch."""
    # Bilinearly resize to the latent (H, W), then binarize.
    mask = noise_mask.reshape((-1, 1, noise_mask.shape[-2], noise_mask.shape[-1]))
    mask = torch.nn.functional.interpolate(mask, size=(shape[2], shape[3]), mode="bilinear").round()
    # Duplicate across the channel dimension.
    mask = mask.repeat(1, shape[1], 1, 1)
    # Tile along the batch dimension until the requested batch size is covered.
    masks_available = mask.shape[0]
    if masks_available < shape[0]:
        reps = math.ceil(shape[0] / masks_available)
        mask = mask.repeat(reps, 1, 1, 1)[:shape[0]]
    return mask.to(device)
def broadcast_cond(cond, batch, device):
    """Repeat each conditioning tensor to cover the batch and move it to `device`."""
    result = []
    for entry in cond:
        tensor = entry[0]
        # Entries smaller than the batch are tiled `batch` times,
        # then everything is placed on the target device.
        if tensor.shape[0] < batch:
            tensor = torch.cat([tensor] * batch)
        result.append([tensor.to(device)] + entry[1:])
    return result
def get_models_from_cond(cond, model_type):
    """Collect every model stored under key `model_type` in the conditioning entries' option dicts."""
    return [c[1][model_type] for c in cond if model_type in c[1]]
def load_additional_models(positive, negative):
    """Gather control nets and GLIGEN models referenced by both conditionings and load them on the GPU."""
    control_nets = get_models_from_cond(positive, "control") + get_models_from_cond(negative, "control")
    gligen_entries = get_models_from_cond(positive, "gligen") + get_models_from_cond(negative, "gligen")
    # A "gligen" entry is a pair whose second element is the model itself.
    models = control_nets + [entry[1] for entry in gligen_entries]
    comfy.model_management.load_controlnet_gpu(models)
    return models
def cleanup_additional_models(models):
    """Invoke the cleanup hook on every additional model that was loaded."""
    for model in models:
        model.cleanup()
def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, noise_mask=None, sigmas=None, callback=None, disable_pbar=False):
    """
    Run a KSampler denoising pass over a latent image and return the result on CPU.

    Orchestration: pick the torch device, prepare the optional noise mask,
    load the model (and any control/GLIGEN models referenced by the
    conditioning) onto the GPU, broadcast conditioning to the noise batch
    size, sample, then clean up the additional models.

    NOTE(review): `disable_noise` is accepted but never read in this body —
    presumably consumed by callers or a newer KSampler API; confirm.
    """
    device = comfy.model_management.get_torch_device()

    if noise_mask is not None:
        # Resize/broadcast the mask to match the noise batch (see prepare_mask).
        noise_mask = prepare_mask(noise_mask, noise.shape, device)

    real_model = None
    # Must load the wrapper onto the GPU before grabbing the inner model.
    comfy.model_management.load_model_gpu(model)
    real_model = model.model

    noise = noise.to(device)
    latent_image = latent_image.to(device)

    # Conditioning tensors are tiled to the noise batch size and moved to device.
    positive_copy = broadcast_cond(positive, noise.shape[0], device)
    negative_copy = broadcast_cond(negative, noise.shape[0], device)

    # Control nets / GLIGEN models referenced by the conditioning; loaded here,
    # cleaned up after sampling.
    models = load_additional_models(positive, negative)

    sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options)

    samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar)
    # Results are returned on CPU so callers don't hold GPU memory.
    samples = samples.cpu()

    cleanup_additional_models(models)
    return samples