Commit 5489d5af authored by comfyanonymous

Add uni_pc sampler to KSampler* nodes.

parent 1a4edd19
 from .k_diffusion import sampling as k_diffusion_sampling
 from .k_diffusion import external as k_diffusion_external
+from .extra_samplers import uni_pc
 import torch
 import contextlib
 import model_management
@@ -20,12 +21,8 @@ class CFGDenoiser(torch.nn.Module):
         uncond = self.inner_model(x, sigma, cond=uncond)
         return uncond + (cond - uncond) * cond_scale
 
-class CFGDenoiserComplex(torch.nn.Module):
-    def __init__(self, model):
-        super().__init__()
-        self.inner_model = model
-    def forward(self, x, sigma, uncond, cond, cond_scale):
-        def get_area_and_mult(cond, x_in, sigma):
+def sampling_function(model_function, x, sigma, uncond, cond, cond_scale):
+        def get_area_and_mult(cond, x_in):
             area = (x_in.shape[2], x_in.shape[3], 0, 0)
             strength = 1.0
             min_sigma = 0.0
@@ -34,12 +31,7 @@ class CFGDenoiserComplex(torch.nn.Module):
                 area = cond[1]['area']
             if 'strength' in cond[1]:
                 strength = cond[1]['strength']
-            if 'min_sigma' in cond[1]:
-                min_sigma = cond[1]['min_sigma']
-            if 'max_sigma' in cond[1]:
-                max_sigma = cond[1]['max_sigma']
-            if sigma < min_sigma or sigma > max_sigma:
-                return None
 
             input_x = x_in[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]]
             mult = torch.ones_like(input_x) * strength
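A note for readers skimming the hunk above: an 'area' entry on a cond selects a rectangular region of the latent, and the dropped min_sigma/max_sigma branch means get_area_and_mult no longer gates conds by the current sigma (which is why its sigma parameter disappears). The tuple layout below is inferred from the slicing in the diff; shapes are illustrative, not from the commit.

import torch

x_in = torch.zeros(1, 4, 64, 64)   # batch, latent channels, h, w (illustrative)
area = (32, 32, 16, 16)            # (height, width, y_offset, x_offset), as sliced above
strength = 0.8

# Same slicing as in the diff: crop the region this cond applies to,
# and build a constant mask scaled by the cond's strength.
input_x = x_in[:, :, area[2]:area[0] + area[2], area[3]:area[1] + area[3]]
mult = torch.ones_like(input_x) * strength
print(input_x.shape)               # torch.Size([1, 4, 32, 32])

The loop visible at the top of the next hunk then multiplies border rows/columns of mult by (1.0/rr) * (t + 1), a linear ramp so adjacent areas blend instead of seaming.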
@@ -58,26 +50,25 @@ class CFGDenoiserComplex(torch.nn.Module):
                         mult[:,:,:,area[1] + area[3] - 1 - t:area[1] + area[3] - t] *= ((1.0/rr) * (t + 1))
             return (input_x, mult, cond[0], area)
 
-        def calc_cond_uncond_batch(cond, uncond, x_in, sigma, max_total_area):
+        def calc_cond_uncond_batch(model_function, cond, uncond, x_in, sigma, max_total_area):
             out_cond = torch.zeros_like(x_in)
             out_count = torch.ones_like(x_in)/100000.0
 
             out_uncond = torch.zeros_like(x_in)
             out_uncond_count = torch.ones_like(x_in)/100000.0
-            sigma_cmp = sigma[0]
 
             COND = 0
             UNCOND = 1
 
             to_run = []
             for x in cond:
-                p = get_area_and_mult(x, x_in, sigma_cmp)
+                p = get_area_and_mult(x, x_in)
                 if p is None:
                     continue
 
                 to_run += [(p, COND)]
             for x in uncond:
-                p = get_area_and_mult(x, x_in, sigma_cmp)
+                p = get_area_and_mult(x, x_in)
                 if p is None:
                     continue
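On the out_count tensors seeded with torch.ones_like(x_in)/100000.0 above: each area cond's prediction is accumulated together with its mask, and the final result is a mask-weighted average. The tiny initial count keeps the division safe where no cond covers a pixel. A minimal sketch of that pattern, with made-up shapes:

import torch

out_cond = torch.zeros(1, 4, 8, 8)
out_count = torch.ones(1, 4, 8, 8) / 100000.0   # epsilon seed, as in the diff

pred = torch.randn(1, 4, 4, 4)                  # model output for one area
mult = torch.ones_like(pred) * 0.8              # that area's mask
out_cond[:, :, 0:4, 0:4] += pred * mult         # weighted accumulation
out_count[:, :, 0:4, 0:4] += mult

blended = out_cond / out_count                  # ~pred where covered, ~0 elsewhere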
@@ -120,7 +111,7 @@ class CFGDenoiserComplex(torch.nn.Module):
                 c = torch.cat(c)
                 sigma_ = torch.cat([sigma] * batch_chunks)
 
-                output = self.inner_model(input_x, sigma_, cond=c).chunk(batch_chunks)
+                output = model_function(input_x, sigma_, cond=c).chunk(batch_chunks)
                 del input_x
 
                 for o in range(batch_chunks):
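The hunk above only swaps self.inner_model for the model_function argument, but the surrounding idiom is worth spelling out: entries queued in to_run are concatenated into one batch (bounded by max_total_area), denoised in a single model call, then split back apart with .chunk(). A toy version, with a stand-in model:

import torch

def model_function(x, sigma, cond=None):   # stand-in for the real model
    return x

pieces = [torch.randn(1, 4, 8, 8) for _ in range(3)]
batch_chunks = len(pieces)

input_x = torch.cat(pieces)                # one batched forward pass
sigma = torch.tensor([1.0])
sigma_ = torch.cat([sigma] * batch_chunks)

output = model_function(input_x, sigma_, cond=None).chunk(batch_chunks)
assert len(output) == batch_chunks
assert output[0].shape == pieces[0].shape  # each piece recovered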
@@ -141,9 +132,16 @@ class CFGDenoiserComplex(torch.nn.Module):
         max_total_area = model_management.maximum_batch_area()
-        cond, uncond = calc_cond_uncond_batch(cond, uncond, x, sigma, max_total_area)
+        cond, uncond = calc_cond_uncond_batch(model_function, cond, uncond, x, sigma, max_total_area)
         return uncond + (cond - uncond) * cond_scale
 
+class CFGDenoiserComplex(torch.nn.Module):
+    def __init__(self, model):
+        super().__init__()
+        self.inner_model = model
+    def forward(self, x, sigma, uncond, cond, cond_scale):
+        return sampling_function(self.inner_model, x, sigma, uncond, cond, cond_scale)
+
 def simple_scheduler(model, steps):
     sigs = []
     ss = len(model.sigmas) / steps
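The point of this refactor: the CFG-with-areas logic moves out of the nn.Module into a plain module-level sampling_function, and CFGDenoiserComplex becomes a thin shim over it. Code that is not built on k-diffusion's denoiser wrappers, such as the new uni_pc sampler, can now reuse the same logic directly. A hypothetical closure (not from the commit) showing how a sampler loop could consume it:

def make_cfg_model_fn(model_function, cond, uncond, cond_scale):
    # Hypothetical helper: adapts the diff's sampling_function to a plain
    # model_fn(x, sigma) interface; argument order matches the shim above.
    def model_fn(x, sigma):
        return sampling_function(model_function, x, sigma, uncond, cond, cond_scale)
    return model_fn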
@@ -186,7 +184,7 @@ class KSampler:
     SCHEDULERS = ["karras", "normal", "simple"]
     SAMPLERS = ["sample_euler", "sample_euler_ancestral", "sample_heun", "sample_dpm_2", "sample_dpm_2_ancestral",
                 "sample_lms", "sample_dpm_fast", "sample_dpm_adaptive", "sample_dpmpp_2s_ancestral", "sample_dpmpp_sde",
-                "sample_dpmpp_2m"]
+                "sample_dpmpp_2m", "uni_pc"]
 
     def __init__(self, model, steps, device, sampler=None, scheduler=None, denoise=None):
         self.model = model
@@ -256,10 +254,6 @@ class KSampler:
         else:
             return torch.zeros_like(noise)
 
-        noise *= sigmas[0]
-        if latent_image is not None:
-            noise += latent_image
 
         positive = positive[:]
         negative = negative[:]
         #make sure each cond area has an opposite one with the same area
@@ -274,10 +268,16 @@ class KSampler:
             precision_scope = contextlib.nullcontext
 
         with precision_scope(self.device):
-            if self.sampler == "sample_dpm_fast":
-                samples = k_diffusion_sampling.sample_dpm_fast(self.model_k, noise, sigma_min, sigmas[0], self.steps, extra_args={"cond":positive, "uncond":negative, "cond_scale": cfg})
-            elif self.sampler == "sample_dpm_adaptive":
-                samples = k_diffusion_sampling.sample_dpm_adaptive(self.model_k, noise, sigma_min, sigmas[0], extra_args={"cond":positive, "uncond":negative, "cond_scale": cfg})
-            else:
-                samples = getattr(k_diffusion_sampling, self.sampler)(self.model_k, noise, sigmas, extra_args={"cond":positive, "uncond":negative, "cond_scale": cfg})
+            if self.sampler == "uni_pc":
+                samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, extra_args={"cond":positive, "uncond":negative, "cond_scale": cfg})
+            else:
+                noise *= sigmas[0]
+                if latent_image is not None:
+                    noise += latent_image
+                if self.sampler == "sample_dpm_fast":
+                    samples = k_diffusion_sampling.sample_dpm_fast(self.model_k, noise, sigma_min, sigmas[0], self.steps, extra_args={"cond":positive, "uncond":negative, "cond_scale": cfg})
+                elif self.sampler == "sample_dpm_adaptive":
+                    samples = k_diffusion_sampling.sample_dpm_adaptive(self.model_k, noise, sigma_min, sigmas[0], extra_args={"cond":positive, "uncond":negative, "cond_scale": cfg})
+                else:
+                    samples = getattr(k_diffusion_sampling, self.sampler)(self.model_k, noise, sigmas, extra_args={"cond":positive, "uncond":negative, "cond_scale": cfg})
         return samples.to(torch.float32)
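Tying the last two hunks together: k-diffusion samplers expect the starting tensor pre-composed as noise * sigmas[0] (plus latent_image for img2img), which is why that composition, deleted from its old spot earlier in sample(), reappears inside the else branch. The uni_pc branch instead hands sample_unipc the raw noise and latent_image separately and, judging by the call, lets it do its own noising. A schematic of that split, illustrative only:

def start_latent(sampler, noise, latent_image, sigmas):
    # Illustrative control flow, not the commit's code.
    if sampler == "uni_pc":
        # sample_unipc receives noise and latent_image separately
        # and composes them internally.
        return noise, latent_image
    # k-diffusion samplers start from noise scaled to the first sigma,
    # shifted onto the input latent for img2img.
    x = noise * sigmas[0]
    if latent_image is not None:
        x = x + latent_image
    return x, None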