# clip_vision.py
from transformers import CLIPVisionModelWithProjection, CLIPVisionConfig, modeling_utils
from .utils import load_torch_file, transformers_convert, common_upscale
import os
import torch
import contextlib

import comfy.ops
import comfy.model_patcher
import comfy.model_management
import comfy.utils

def clip_preprocess(image, size=224):
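    # Resize the shortest side to `size`, center crop to size x size, quantize to
    # 8-bit levels, then normalize with the CLIP mean/std.
    # Expects a [B, H, W, C] float tensor with values in 0..1.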
    mean = torch.tensor([0.48145466, 0.4578275, 0.40821073], device=image.device, dtype=image.dtype)
    std = torch.tensor([0.26862954, 0.26130258, 0.27577711], device=image.device, dtype=image.dtype)
    scale = (size / min(image.shape[1], image.shape[2]))
    image = torch.nn.functional.interpolate(image.movedim(-1, 1), size=(round(scale * image.shape[1]), round(scale * image.shape[2])), mode="bicubic", antialias=True)
    h = (image.shape[2] - size)//2
    w = (image.shape[3] - size)//2
    image = image[:,:,h:h+size,w:w+size]
    image = torch.clip((255. * image), 0, 255).round() / 255.0
    return (image - mean.view([3,1,1])) / std.view([3,1,1])

class ClipVisionModel():
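    # Wraps HF's CLIPVisionModelWithProjection using ComfyUI's device handling:
    # the model is built without weight init (fp16 when supported) and handed to a
    # ModelPatcher so it can move between the load and offload devices on demand.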
    def __init__(self, json_config):
        config = CLIPVisionConfig.from_json_file(json_config)
        self.load_device = comfy.model_management.text_encoder_device()
        offload_device = comfy.model_management.text_encoder_offload_device()
        self.dtype = torch.float32
        if comfy.model_management.should_use_fp16(self.load_device, prioritize_performance=False):
            self.dtype = torch.float16

        with comfy.ops.use_comfy_ops(offload_device, self.dtype):
            with modeling_utils.no_init_weights():
                self.model = CLIPVisionModelWithProjection(config)
        self.model.to(self.dtype)

        self.patcher = comfy.model_patcher.ModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device)

    def load_sd(self, sd):
        return self.model.load_state_dict(sd, strict=False)

    def encode_image(self, image):
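        # Encode a [B, H, W, C] image tensor and return the CLIP vision outputs
        # (image_embeds, last_hidden_state, penultimate_hidden_states) on the CPU.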
        comfy.model_management.load_model_gpu(self.patcher)
        pixel_values = clip_preprocess(image.to(self.load_device))

        # Run the vision tower under autocast when the weights are fp16;
        # otherwise use a no-op context manager.
        if self.dtype != torch.float32:
            precision_scope = torch.autocast
        else:
            precision_scope = lambda a, b: contextlib.nullcontext(a)

        with precision_scope(comfy.model_management.get_autocast_device(self.load_device), torch.float32):
            outputs = self.model(pixel_values=pixel_values, output_hidden_states=True)

        # Move outputs to the CPU; keep only the penultimate hidden state
        # instead of the full per-layer hidden_states list.
        for k in outputs:
            t = outputs[k]
            if t is not None:
                if k == 'hidden_states':
                    outputs["penultimate_hidden_states"] = t[-2].cpu()
                    outputs["hidden_states"] = None
                else:
                    outputs[k] = t.cpu()

        return outputs

def convert_to_transformers(sd, prefix):
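    # Remap keys from the original OpenAI/open_clip "visual." layout to the
    # transformers CLIPVisionModel layout so the weights can be loaded directly.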
    sd_k = sd.keys()
    if "{}transformer.resblocks.0.attn.in_proj_weight".format(prefix) in sd_k:
        keys_to_replace = {
            "{}class_embedding".format(prefix): "vision_model.embeddings.class_embedding",
            "{}conv1.weight".format(prefix): "vision_model.embeddings.patch_embedding.weight",
            "{}positional_embedding".format(prefix): "vision_model.embeddings.position_embedding.weight",
            "{}ln_post.bias".format(prefix): "vision_model.post_layernorm.bias",
            "{}ln_post.weight".format(prefix): "vision_model.post_layernorm.weight",
            "{}ln_pre.bias".format(prefix): "vision_model.pre_layrnorm.bias",
            "{}ln_pre.weight".format(prefix): "vision_model.pre_layrnorm.weight",
        }

        for x in keys_to_replace:
            if x in sd_k:
                sd[keys_to_replace[x]] = sd.pop(x)

        if "{}proj".format(prefix) in sd_k:
            sd['visual_projection.weight'] = sd.pop("{}proj".format(prefix)).transpose(0, 1)

        sd = transformers_convert(sd, prefix, "vision_model.", 48)
    return sd

def load_clipvision_from_sd(sd, prefix="", convert_keys=False):
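    # Pick the matching model config (ViT-L, ViT-H or ViT-bigG) based on how many
    # encoder layers are present in the state dict.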
    if convert_keys:
        sd = convert_to_transformers(sd, prefix)
    if "vision_model.encoder.layers.47.layer_norm1.weight" in sd:
        json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_g.json")
    elif "vision_model.encoder.layers.30.layer_norm1.weight" in sd:
        json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_h.json")
    elif "vision_model.encoder.layers.22.layer_norm1.weight" in sd:
        json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_vitl.json")
    else:
        return None

    clip = ClipVisionModel(json_config)
    m, u = clip.load_sd(sd)
    if len(m) > 0:
        print("missing clip vision:", m)
    # Release tensors that were loaded into the model; leave unexpected keys in sd.
    u = set(u)
    keys = list(sd.keys())
    for k in keys:
        if k not in u:
            t = sd.pop(k)
            del t
    return clip

def load(ckpt_path):
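    # Load a CLIP vision checkpoint from disk, converting original-format
    # ("visual." prefixed) checkpoints to the transformers layout when needed.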
    sd = load_torch_file(ckpt_path)
    if "visual.transformer.resblocks.0.attn.in_proj_weight" in sd:
        return load_clipvision_from_sd(sd, prefix="visual.", convert_keys=True)
    else:
        return load_clipvision_from_sd(sd)
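
# Example usage (illustrative; the checkpoint path below is hypothetical):
#   clip = load("models/clip_vision/clip_vision_g.safetensors")
#   out = clip.encode_image(image)  # image: [B, H, W, C] float tensor in 0..1
#   image_embeds = out["image_embeds"]
#   penultimate = out["penultimate_hidden_states"]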