Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
chenpangpang
ComfyUI
Commits
1ffa8858
Commit
1ffa8858
authored
Nov 04, 2023
by
comfyanonymous
Browse files
Move model sampling code to comfy/model_sampling.py
parent
ae2acfc2
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
79 additions
and
76 deletions
+79
-76
comfy/model_base.py
comfy/model_base.py
+1
-76
comfy/model_sampling.py
comfy/model_sampling.py
+78
-0
No files found.
comfy/model_base.py
View file @
1ffa8858
import
torch
from
comfy.ldm.modules.diffusionmodules.openaimodel
import
UNetModel
from
comfy.ldm.modules.encoders.noise_aug_modules
import
CLIPEmbeddingNoiseAugmentation
from
comfy.ldm.modules.diffusionmodules.util
import
make_beta_schedule
from
comfy.ldm.modules.diffusionmodules.openaimodel
import
Timestep
import
comfy.model_management
import
comfy.conds
import
numpy
as
np
from
enum
import
Enum
from
.
import
utils
...
...
@@ -14,79 +12,7 @@ class ModelType(Enum):
V_PREDICTION
=
2
#NOTE: all this sampling stuff will be moved
class
EPS
:
def
calculate_input
(
self
,
sigma
,
noise
):
sigma
=
sigma
.
view
(
sigma
.
shape
[:
1
]
+
(
1
,)
*
(
noise
.
ndim
-
1
))
return
noise
/
(
sigma
**
2
+
self
.
sigma_data
**
2
)
**
0.5
def
calculate_denoised
(
self
,
sigma
,
model_output
,
model_input
):
sigma
=
sigma
.
view
(
sigma
.
shape
[:
1
]
+
(
1
,)
*
(
model_output
.
ndim
-
1
))
return
model_input
-
model_output
*
sigma
class V_PREDICTION(EPS):
    """v-prediction parameterization: inherits input scaling from EPS but
    reconstructs the denoised latent from a velocity-style prediction."""

    def calculate_denoised(self, sigma, model_output, model_input):
        """Recover the denoised latent from the v-prediction."""
        # Reshape sigma to (batch, 1, 1, ...) so it broadcasts over the model tensors.
        broadcast_shape = sigma.shape[:1] + (1,) * (model_output.ndim - 1)
        sigma = sigma.view(broadcast_shape)
        variance = sigma ** 2 + self.sigma_data ** 2
        return model_input * self.sigma_data ** 2 / variance \
            - model_output * sigma * self.sigma_data / variance ** 0.5
class ModelSamplingDiscrete(torch.nn.Module):
    """Discrete noise schedule stored as a table of sigmas.

    Builds a 1000-step beta schedule (via ``make_beta_schedule``) and exposes
    conversions between schedule indices ("timesteps") and continuous sigma
    values, interpolating in log-sigma space.
    """

    def __init__(self, model_config=None):
        super().__init__()
        # Default to the "linear" beta schedule unless the config names one.
        if model_config is None:
            beta_schedule = "linear"
        else:
            beta_schedule = model_config.beta_schedule
        self._register_schedule(given_betas=None, beta_schedule=beta_schedule,
                                timesteps=1000, linear_start=0.00085,
                                linear_end=0.012, cosine_s=8e-3)
        self.sigma_data = 1.0

    def _register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
                           linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
        """Derive the sigma table from the betas and register it as buffers."""
        if given_betas is None:
            betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start,
                                       linear_end=linear_end, cosine_s=cosine_s)
        else:
            betas = given_betas
        alphas = 1. - betas
        alphas_cumprod = torch.tensor(np.cumprod(alphas, axis=0), dtype=torch.float32)

        # The real step count comes from the betas themselves, not the argument.
        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)
        self.linear_start = linear_start
        self.linear_end = linear_end

        # sigma_t = sqrt((1 - abar_t) / abar_t)
        sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5
        # Buffers (not parameters): they follow .to()/state_dict but take no gradients.
        self.register_buffer('sigmas', sigmas)
        self.register_buffer('log_sigmas', sigmas.log())

    @property
    def sigma_min(self):
        # Smallest noise level: first entry of the schedule.
        return self.sigmas[0]

    @property
    def sigma_max(self):
        # Largest noise level: last entry of the schedule.
        return self.sigmas[-1]

    def timestep(self, sigma):
        """Map each sigma to the index of the nearest schedule entry (log space)."""
        log_sigma = sigma.log()
        distances = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None]
        nearest = distances.abs().argmin(dim=0)
        return nearest.view(sigma.shape)

    def sigma(self, timestep):
        """Map (possibly fractional) timesteps to sigmas by log-space interpolation."""
        t = torch.clamp(timestep.float(), min=0, max=(len(self.sigmas) - 1))
        low_idx = t.floor().long()
        high_idx = t.ceil().long()
        weight = t.frac()
        interpolated = (1 - weight) * self.log_sigmas[low_idx] + weight * self.log_sigmas[high_idx]
        return interpolated.exp()

    def percent_to_sigma(self, percent):
        """Map a 0..1 fraction of the schedule to its sigma."""
        return self.sigma(torch.tensor(percent * 999.0))
from
comfy.model_sampling
import
EPS
,
V_PREDICTION
,
ModelSamplingDiscrete
def
model_sampling
(
model_config
,
model_type
):
if
model_type
==
ModelType
.
EPS
:
...
...
@@ -102,7 +28,6 @@ def model_sampling(model_config, model_type):
return
ModelSampling
(
model_config
)
class
BaseModel
(
torch
.
nn
.
Module
):
def
__init__
(
self
,
model_config
,
model_type
=
ModelType
.
EPS
,
device
=
None
):
super
().
__init__
()
...
...
comfy/model_sampling.py
0 → 100644
View file @
1ffa8858
import
torch
import
numpy
as
np
from
comfy.ldm.modules.diffusionmodules.util
import
make_beta_schedule
class EPS:
    """eps-prediction parameterization: the network predicts the noise that
    was added to the latent; denoising subtracts the scaled prediction.

    `self.sigma_data` is supplied by the sampling object this class is
    combined with (e.g. ModelSamplingDiscrete sets it to 1.0).
    """

    def calculate_input(self, sigma, noise):
        """Scale the noised latent before it is fed to the network."""
        # Reshape sigma to (batch, 1, 1, ...) so it broadcasts over `noise`.
        broadcast_shape = sigma.shape[:1] + (1,) * (noise.ndim - 1)
        sigma = sigma.view(broadcast_shape)
        total_variance = sigma ** 2 + self.sigma_data ** 2
        return noise / total_variance ** 0.5

    def calculate_denoised(self, sigma, model_output, model_input):
        """Recover the denoised latent from the predicted noise."""
        broadcast_shape = sigma.shape[:1] + (1,) * (model_output.ndim - 1)
        sigma = sigma.view(broadcast_shape)
        return model_input - model_output * sigma
class V_PREDICTION(EPS):
    """v-prediction parameterization: inherits input scaling from EPS but
    reconstructs the denoised latent from a velocity-style prediction."""

    def calculate_denoised(self, sigma, model_output, model_input):
        """Recover the denoised latent from the v-prediction."""
        # Reshape sigma to (batch, 1, 1, ...) so it broadcasts over the model tensors.
        broadcast_shape = sigma.shape[:1] + (1,) * (model_output.ndim - 1)
        sigma = sigma.view(broadcast_shape)
        variance = sigma ** 2 + self.sigma_data ** 2
        return model_input * self.sigma_data ** 2 / variance \
            - model_output * sigma * self.sigma_data / variance ** 0.5
class ModelSamplingDiscrete(torch.nn.Module):
    """Discrete noise schedule stored as a table of sigmas.

    Builds a 1000-step beta schedule (via ``make_beta_schedule``) and exposes
    conversions between schedule indices ("timesteps") and continuous sigma
    values, interpolating in log-sigma space.
    """

    def __init__(self, model_config=None):
        super().__init__()
        # Default to the "linear" beta schedule unless the config names one.
        if model_config is None:
            beta_schedule = "linear"
        else:
            beta_schedule = model_config.beta_schedule
        self._register_schedule(given_betas=None, beta_schedule=beta_schedule,
                                timesteps=1000, linear_start=0.00085,
                                linear_end=0.012, cosine_s=8e-3)
        self.sigma_data = 1.0

    def _register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
                           linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
        """Derive the sigma table from the betas and register it as buffers."""
        if given_betas is None:
            betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start,
                                       linear_end=linear_end, cosine_s=cosine_s)
        else:
            betas = given_betas
        alphas = 1. - betas
        alphas_cumprod = torch.tensor(np.cumprod(alphas, axis=0), dtype=torch.float32)

        # The real step count comes from the betas themselves, not the argument.
        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)
        self.linear_start = linear_start
        self.linear_end = linear_end

        # sigma_t = sqrt((1 - abar_t) / abar_t)
        sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5
        # Buffers (not parameters): they follow .to()/state_dict but take no gradients.
        self.register_buffer('sigmas', sigmas)
        self.register_buffer('log_sigmas', sigmas.log())

    @property
    def sigma_min(self):
        # Smallest noise level: first entry of the schedule.
        return self.sigmas[0]

    @property
    def sigma_max(self):
        # Largest noise level: last entry of the schedule.
        return self.sigmas[-1]

    def timestep(self, sigma):
        """Map each sigma to the index of the nearest schedule entry (log space)."""
        log_sigma = sigma.log()
        distances = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None]
        nearest = distances.abs().argmin(dim=0)
        return nearest.view(sigma.shape)

    def sigma(self, timestep):
        """Map (possibly fractional) timesteps to sigmas by log-space interpolation."""
        t = torch.clamp(timestep.float(), min=0, max=(len(self.sigmas) - 1))
        low_idx = t.floor().long()
        high_idx = t.ceil().long()
        weight = t.frac()
        interpolated = (1 - weight) * self.log_sigmas[low_idx] + weight * self.log_sigmas[high_idx]
        return interpolated.exp()

    def percent_to_sigma(self, percent):
        """Map a 0..1 fraction of the schedule to its sigma."""
        return self.sigma(torch.tensor(percent * 999.0))
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment