chenpangpang / ComfyUI · Commit a094b45c

Load clipvision model to GPU for faster performance.

Authored Aug 28, 2023 by comfyanonymous
Parent: 1300a1bb
Showing 1 changed file with 24 additions and 2 deletions.

comfy/clip_vision.py (+24, −2) · view file @ a094b45c
@@ -2,14 +2,27 @@ from transformers import CLIPVisionModelWithProjection, CLIPVisionConfig, CLIPIm
 from .utils import load_torch_file, transformers_convert
 import os
 import torch
+import contextlib
+
 import comfy.ops
+import comfy.model_patcher
+import comfy.model_management
 
 class ClipVisionModel():
     def __init__(self, json_config):
         config = CLIPVisionConfig.from_json_file(json_config)
-        with comfy.ops.use_comfy_ops():
+        self.load_device = comfy.model_management.text_encoder_device()
+        offload_device = comfy.model_management.text_encoder_offload_device()
+        self.dtype = torch.float32
+        if comfy.model_management.should_use_fp16(self.load_device, prioritize_performance=False):
+            self.dtype = torch.float16
+
+        with comfy.ops.use_comfy_ops(offload_device, self.dtype):
             with modeling_utils.no_init_weights():
                 self.model = CLIPVisionModelWithProjection(config)
+        self.model.to(self.dtype)
+
+        self.patcher = comfy.model_patcher.ModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device)
         self.processor = CLIPImageProcessor(crop_size=224,
                                             do_center_crop=True,
                                             do_convert_rgb=True,
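With this hunk, __init__ no longer builds the vision model for CPU/float32 unconditionally: the comfy.model_management heuristics pick the load and offload devices, should_use_fp16 decides the dtype, and a ModelPatcher records both devices so the weights can be moved to the GPU on demand and parked on the offload device otherwise. Below is a minimal sketch of the same selection logic in plain PyTorch; the helper name and the simplified fp16 test are illustrative, not ComfyUI API (the real should_use_fp16 also inspects GPU capability).

import torch

# Illustrative helper (not part of ComfyUI): mimic the device/dtype
# selection that __init__ now performs via comfy.model_management.
def pick_devices_and_dtype():
    load_device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    offload_device = torch.device("cpu")  # weights rest here between runs
    dtype = torch.float32
    # should_use_fp16(load_device, prioritize_performance=False) also checks
    # GPU capability; this sketch only checks that a CUDA device exists.
    if load_device.type == "cuda":
        dtype = torch.float16
    return load_device, offload_device, dtype

load_device, offload_device, dtype = pick_devices_and_dtype()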
@@ -27,7 +40,16 @@ class ClipVisionModel():
         img = torch.clip((255. * image), 0, 255).round().int()
         img = list(map(lambda a: a, img))
         inputs = self.processor(images=img, return_tensors="pt")
-        outputs = self.model(**inputs)
+        comfy.model_management.load_model_gpu(self.patcher)
+        pixel_values = inputs['pixel_values'].to(self.load_device)
+
+        if self.dtype != torch.float32:
+            precision_scope = torch.autocast
+        else:
+            precision_scope = lambda a, b: contextlib.nullcontext(a)
+
+        with precision_scope(comfy.model_management.get_autocast_device(self.load_device), torch.float32):
+            outputs = self.model(pixel_values=pixel_values)
         return outputs
 
     def convert_to_transformers(sd, prefix):
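The second hunk moves the actual inference: instead of calling the model on CPU tensors, encode_image asks model_management to load the patched model onto the GPU, moves pixel_values to the load device, and wraps the forward pass in a precision scope that is torch.autocast under fp16 and a no-op context under float32. Below is a small, self-contained sketch of that precision-scope idiom; the function name and the CPU/bfloat16 usage are illustrative, not ComfyUI API.

import contextlib
import torch

# Illustrative sketch of the precision_scope idiom above: under reduced
# precision the forward pass runs inside torch.autocast; under float32 it
# runs inside a no-op context with the same call shape, so the call site
# is a single `with` statement either way.
def make_precision_scope(dtype):
    if dtype != torch.float32:
        return torch.autocast
    # nullcontext stands in for autocast; the dtype argument is ignored.
    return lambda device_type, _dtype: contextlib.nullcontext(device_type)

model = torch.nn.Linear(8, 4)
x = torch.randn(2, 8)

scope = make_precision_scope(torch.bfloat16)  # pretend reduced precision was chosen
with scope("cpu", torch.bfloat16):            # becomes torch.autocast("cpu", torch.bfloat16)
    y = model(x)
print(y.dtype)  # torch.bfloat16 under CPU autocast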