ComfyUI · Commit 0ed72bef
Authored Mar 11, 2024 by comfyanonymous
Change log levels.
Logging level now defaults to info. --verbose sets it to debug.
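In effect, ComfyUI's info-level startup messages remain visible by default, and debug-level detail is hidden unless --verbose is passed. A minimal sketch of that filtering behaviour in plain Python (not ComfyUI code; the format string is an assumption):

    import logging

    # With the root logger at INFO, info() records are shown and debug() records
    # are filtered out; lowering the level to DEBUG (what --verbose does) shows both.
    logging.basicConfig(format="%(message)s", level=logging.INFO)
    logging.info("visible at the new default (INFO) level")
    logging.debug("only visible when the level is lowered to DEBUG")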
Parent: dc6d4151

Showing 9 changed files with 38 additions and 37 deletions
comfy/cli_args.py           +1 -1
comfy/controlnet.py         +2 -2
comfy/diffusers_convert.py  +1 -1
comfy/model_base.py         +2 -2
comfy/model_management.py   +14 -14
comfy/sd.py                 +6 -6
comfy/utils.py              +1 -1
nodes.py                    +3 -3
server.py                   +8 -7
comfy/cli_args.py
@@ -129,7 +129,7 @@ if args.disable_auto_launch:
     args.auto_launch = False
 
 import logging
-logging_level = logging.WARNING
+logging_level = logging.INFO
 if args.verbose:
     logging_level = logging.DEBUG
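The hunk above only switches the default level; for context, here is a self-contained sketch of the flag-to-level wiring it belongs to. The argparse setup and the basicConfig format string are illustrative assumptions, not lines from this commit; only the level selection mirrors the diff.

    import argparse
    import logging

    # Hypothetical stand-alone version of the pattern in comfy/cli_args.py.
    parser = argparse.ArgumentParser()
    parser.add_argument("--verbose", action="store_true", help="Enable debug-level logging.")
    args = parser.parse_args()

    logging_level = logging.INFO       # new default after this commit
    if args.verbose:
        logging_level = logging.DEBUG  # --verbose opts into debug output

    logging.basicConfig(format="%(message)s", level=logging_level)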
comfy/controlnet.py
@@ -432,7 +432,7 @@ def load_controlnet(ckpt_path, model=None):
         logging.warning("missing controlnet keys: {}".format(missing))
 
     if len(unexpected) > 0:
-        logging.info("unexpected controlnet keys: {}".format(unexpected))
+        logging.debug("unexpected controlnet keys: {}".format(unexpected))
 
     global_average_pooling = False
     filename = os.path.splitext(ckpt_path)[0]

@@ -545,6 +545,6 @@ def load_t2i_adapter(t2i_data):
         logging.warning("t2i missing {}".format(missing))
 
     if len(unexpected) > 0:
-        logging.info("t2i unexpected {}".format(unexpected))
+        logging.debug("t2i unexpected {}".format(unexpected))
 
     return T2IAdapter(model_ad, model_ad.input_channels, compression_ratio, upscale_algorithm)
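Both hunks apply the same convention when loading a state dict: keys that are missing stay at warning because they usually indicate a real problem, while keys that are merely unexpected drop to debug. A hedged sketch of that pattern with plain PyTorch (the helper name and checkpoint handling are placeholders, not ComfyUI code):

    import logging
    import torch

    def load_weights(model: torch.nn.Module, ckpt_path: str) -> torch.nn.Module:
        sd = torch.load(ckpt_path, map_location="cpu")
        # strict=False reports mismatched keys instead of raising.
        missing, unexpected = model.load_state_dict(sd, strict=False)
        if len(missing) > 0:
            logging.warning("missing keys: {}".format(missing))      # likely a real problem
        if len(unexpected) > 0:
            logging.debug("unexpected keys: {}".format(unexpected))  # usually harmless extras
        return model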
comfy/diffusers_convert.py
@@ -178,7 +178,7 @@ def convert_vae_state_dict(vae_state_dict):
     for k, v in new_state_dict.items():
         for weight_name in weights_to_convert:
             if f"mid.attn_1.{weight_name}.weight" in k:
-                logging.info(f"Reshaping {k} for SD format")
+                logging.debug(f"Reshaping {k} for SD format")
                 new_state_dict[k] = reshape_weight_for_sd(v)
     return new_state_dict
comfy/model_base.py
@@ -67,8 +67,8 @@ class BaseModel(torch.nn.Module):
         if self.adm_channels is None:
             self.adm_channels = 0
         self.inpaint_model = False
-        logging.warning("model_type {}".format(model_type.name))
-        logging.info("adm {}".format(self.adm_channels))
+        logging.info("model_type {}".format(model_type.name))
+        logging.debug("adm {}".format(self.adm_channels))
 
     def apply_model(self, x, t, c_concat=None, c_crossattn=None, control=None, transformer_options={}, **kwargs):
         sigma = t
comfy/model_management.py
@@ -30,7 +30,7 @@ lowvram_available = True
 xpu_available = False
 
 if args.deterministic:
-    logging.warning("Using deterministic algorithms for pytorch")
+    logging.info("Using deterministic algorithms for pytorch")
     torch.use_deterministic_algorithms(True, warn_only=True)
 
 directml_enabled = False

@@ -42,7 +42,7 @@ if args.directml is not None:
         directml_device = torch_directml.device()
     else:
         directml_device = torch_directml.device(device_index)
-    logging.warning("Using directml with device: {}".format(torch_directml.device_name(device_index)))
+    logging.info("Using directml with device: {}".format(torch_directml.device_name(device_index)))
     # torch_directml.disable_tiled_resources(True)
     lowvram_available = False #TODO: need to find a way to get free memory in directml before this can be enabled by default.

@@ -118,7 +118,7 @@ def get_total_memory(dev=None, torch_total_too=False):
 total_vram = get_total_memory(get_torch_device()) / (1024 * 1024)
 total_ram = psutil.virtual_memory().total / (1024 * 1024)
-logging.warning("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram))
+logging.info("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram))
 if not args.normalvram and not args.cpu:
     if lowvram_available and total_vram <= 4096:
         logging.warning("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram")

@@ -144,7 +144,7 @@ else:
         pass
     try:
         XFORMERS_VERSION = xformers.version.__version__
-        logging.warning("xformers version: {}".format(XFORMERS_VERSION))
+        logging.info("xformers version: {}".format(XFORMERS_VERSION))
         if XFORMERS_VERSION.startswith("0.0.18"):
             logging.warning("\nWARNING: This version of xformers has a major bug where you will get black images when generating high resolution images.")
             logging.warning("Please downgrade or upgrade xformers to a different version.\n")

@@ -212,11 +212,11 @@ elif args.highvram or args.gpu_only:
 FORCE_FP32 = False
 FORCE_FP16 = False
 if args.force_fp32:
-    logging.warning("Forcing FP32, if this improves things please report it.")
+    logging.info("Forcing FP32, if this improves things please report it.")
     FORCE_FP32 = True
 
 if args.force_fp16:
-    logging.warning("Forcing FP16.")
+    logging.info("Forcing FP16.")
     FORCE_FP16 = True
 
 if lowvram_available:

@@ -230,12 +230,12 @@ if cpu_state != CPUState.GPU:
 if cpu_state == CPUState.MPS:
     vram_state = VRAMState.SHARED
 
-logging.warning(f"Set vram state to: {vram_state.name}")
+logging.info(f"Set vram state to: {vram_state.name}")
 
 DISABLE_SMART_MEMORY = args.disable_smart_memory
 
 if DISABLE_SMART_MEMORY:
-    logging.warning("Disabling smart memory management")
+    logging.info("Disabling smart memory management")
 
 def get_torch_device_name(device):
     if hasattr(device, 'type'):

@@ -253,11 +253,11 @@ def get_torch_device_name(device):
         return "CUDA {}: {}".format(device, torch.cuda.get_device_name(device))
 
 try:
-    logging.warning("Device: {}".format(get_torch_device_name(get_torch_device())))
+    logging.info("Device: {}".format(get_torch_device_name(get_torch_device())))
 except:
     logging.warning("Could not pick default device.")
 
-logging.warning("VAE dtype: {}".format(VAE_DTYPE))
+logging.info("VAE dtype: {}".format(VAE_DTYPE))
 
 current_loaded_models = []

@@ -300,7 +300,7 @@ class LoadedModel:
                 raise e
 
         if lowvram_model_memory > 0:
-            logging.warning("loading in lowvram mode {}".format(lowvram_model_memory/(1024*1024)))
+            logging.info("loading in lowvram mode {}".format(lowvram_model_memory/(1024*1024)))
             mem_counter = 0
             for m in self.real_model.modules():
                 if hasattr(m, "comfy_cast_weights"):

@@ -347,7 +347,7 @@ def unload_model_clones(model):
             to_unload = [i] + to_unload
 
     for i in to_unload:
-        logging.warning("unload clone {}".format(i))
+        logging.debug("unload clone {}".format(i))
         current_loaded_models.pop(i).model_unload()
 
 def free_memory(memory_required, device, keep_loaded=[]):

@@ -389,7 +389,7 @@ def load_models_gpu(models, memory_required=0):
             models_already_loaded.append(loaded_model)
         else:
             if hasattr(x, "model"):
-                logging.warning(f"Requested to load {x.model.__class__.__name__}")
+                logging.info(f"Requested to load {x.model.__class__.__name__}")
             models_to_load.append(loaded_model)
 
     if len(models_to_load) == 0:

@@ -399,7 +399,7 @@ def load_models_gpu(models, memory_required=0):
             free_memory(extra_mem, d, models_already_loaded)
         return
 
-    logging.warning(f"Loading {len(models_to_load)} new model{'s' if len(models_to_load) > 1 else ''}")
+    logging.info(f"Loading {len(models_to_load)} new model{'s' if len(models_to_load) > 1 else ''}")
 
     total_memory_required = {}
     for loaded_model in models_to_load:
comfy/sd.py
@@ -229,7 +229,7 @@ class VAE:
                 logging.warning("Missing VAE keys {}".format(m))
 
             if len(u) > 0:
-                logging.info("Leftover VAE keys {}".format(u))
+                logging.debug("Leftover VAE keys {}".format(u))
 
         if device is None:
             device = model_management.vae_device()

@@ -397,7 +397,7 @@ def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DI
             logging.warning("clip missing: {}".format(m))
 
         if len(u) > 0:
-            logging.info("clip unexpected: {}".format(u))
+            logging.debug("clip unexpected: {}".format(u))
     return clip
 
 def load_gligen(ckpt_path):

@@ -538,18 +538,18 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
                     logging.warning("clip missing: {}".format(m))
 
                 if len(u) > 0:
-                    logging.info("clip unexpected {}:".format(u))
+                    logging.debug("clip unexpected {}:".format(u))
             else:
                 logging.warning("no CLIP/text encoder weights in checkpoint, the text encoder model will not be loaded.")
 
     left_over = sd.keys()
     if len(left_over) > 0:
-        logging.info("left over keys: {}".format(left_over))
+        logging.debug("left over keys: {}".format(left_over))
 
     if output_model:
         model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=model_management.unet_offload_device(), current_device=inital_load_device)
         if inital_load_device != torch.device("cpu"):
-            logging.warning("loaded straight to GPU")
+            logging.info("loaded straight to GPU")
             model_management.load_model_gpu(model_patcher)
 
     return (model_patcher, clip, vae, clipvision)

@@ -589,7 +589,7 @@ def load_unet_state_dict(sd): #load unet in diffusers format
     model.load_model_weights(new_sd, "")
     left_over = sd.keys()
     if len(left_over) > 0:
-        logging.warning("left over keys in unet: {}".format(left_over))
+        logging.info("left over keys in unet: {}".format(left_over))
     return comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=offload_device)
 
 def load_unet(unet_path):
comfy/utils.py
@@ -22,7 +22,7 @@ def load_torch_file(ckpt, safe_load=False, device=None):
     else:
         pl_sd = torch.load(ckpt, map_location=device, pickle_module=comfy.checkpoint_pickle)
         if "global_step" in pl_sd:
-            logging.info(f"Global Step: {pl_sd['global_step']}")
+            logging.debug(f"Global Step: {pl_sd['global_step']}")
         if "state_dict" in pl_sd:
             sd = pl_sd["state_dict"]
         else:
nodes.py
@@ -1925,14 +1925,14 @@ def load_custom_nodes():
         node_import_times.append((time.perf_counter() - time_before, module_path, success))
 
     if len(node_import_times) > 0:
-        logging.warning("\nImport times for custom nodes:")
+        logging.info("\nImport times for custom nodes:")
         for n in sorted(node_import_times):
            if n[2]:
                import_message = ""
            else:
                import_message = " (IMPORT FAILED)"
-            logging.warning("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1]))
-        logging.warning("")
+            logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1]))
+        logging.info("")
 
 def init_custom_nodes():
     extras_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras")
server.py
@@ -17,6 +17,7 @@ from io import BytesIO
 import aiohttp
 from aiohttp import web
+import logging
 
 import mimetypes
 from comfy.cli_args import args

@@ -33,7 +34,7 @@ async def send_socket_catch_exception(function, message):
     try:
         await function(message)
     except (aiohttp.ClientError, aiohttp.ClientPayloadError, ConnectionResetError) as err:
-        print("send error:", err)
+        logging.warning("send error: {}".format(err))
 
 @web.middleware
 async def cache_control(request: web.Request, handler):

@@ -111,7 +112,7 @@ class PromptServer():
                 async for msg in ws:
                     if msg.type == aiohttp.WSMsgType.ERROR:
-                        print('ws connection closed with exception %s' % ws.exception())
+                        logging.warning('ws connection closed with exception %s' % ws.exception())
             finally:
                 self.sockets.pop(sid, None)
             return ws

@@ -446,7 +447,7 @@ class PromptServer():
         @routes.post("/prompt")
         async def post_prompt(request):
-            print("got prompt")
+            logging.info("got prompt")
             resp_code = 200
             out_string = ""
             json_data = await request.json()

@@ -478,7 +479,7 @@ class PromptServer():
                     response = {"prompt_id": prompt_id, "number": number, "node_errors": valid[3]}
                     return web.json_response(response)
                 else:
-                    print("invalid prompt:", valid[1])
+                    logging.warning("invalid prompt: {}".format(valid[1]))
                     return web.json_response({"error": valid[1], "node_errors": valid[3]}, status=400)
             else:
                 return web.json_response({"error": "no prompt", "node_errors": []}, status=400)

@@ -626,8 +627,8 @@ class PromptServer():
         await site.start()
 
         if verbose:
-            print("Starting server\n")
-            print("To see the GUI go to: http://{}:{}".format(address, port))
+            logging.info("Starting server\n")
+            logging.info("To see the GUI go to: http://{}:{}".format(address, port))
         if call_on_start is not None:
             call_on_start(address, port)

@@ -639,7 +640,7 @@ class PromptServer():
             try:
                 json_data = handler(json_data)
             except Exception as e:
-                print(f"[ERROR] An error occurred during the on_prompt_handler processing")
+                logging.warning(f"[ERROR] An error occurred during the on_prompt_handler processing")
                 traceback.print_exc()
 
         return json_data
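These server.py hunks replace bare print() calls with logging calls, so the messages respect the level configured in comfy/cli_args.py instead of always going to stdout. A small sketch of the same swap in an aiohttp-style sender (the function name and message text are illustrative, not the ComfyUI code):

    import logging

    import aiohttp
    from aiohttp import web

    async def send_json_safe(ws: web.WebSocketResponse, payload: dict):
        try:
            await ws.send_json(payload)
        except (aiohttp.ClientError, ConnectionResetError) as err:
            # A print() here would always hit stdout; logging.warning() is still
            # visible at the INFO default but can be filtered or redirected.
            logging.warning("send error: {}".format(err))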