clip_vision.py
from transformers import CLIPVisionModelWithProjection, CLIPVisionConfig, CLIPImageProcessor, modeling_utils
from .utils import load_torch_file, transformers_convert
import os
import torch
import contextlib

import comfy.ops
import comfy.model_patcher
import comfy.model_management

class ClipVisionModel():
    def __init__(self, json_config):
        config = CLIPVisionConfig.from_json_file(json_config)
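        # run on the text-encoder device; prefer fp16 when the device handles it well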
        self.load_device = comfy.model_management.text_encoder_device()
        offload_device = comfy.model_management.text_encoder_offload_device()
        self.dtype = torch.float32
        if comfy.model_management.should_use_fp16(self.load_device, prioritize_performance=False):
            self.dtype = torch.float16

        # skip transformers' random weight init; the real weights come from load_sd()
        with comfy.ops.use_comfy_ops(offload_device, self.dtype):
            with modeling_utils.no_init_weights():
                self.model = CLIPVisionModelWithProjection(config)
        self.model.to(self.dtype)

        # the patcher moves the model between the offload and load devices on demand
        self.patcher = comfy.model_patcher.ModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device)
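
        # standard CLIP preprocessing: 224px bicubic resize and center crop with
        # the OpenAI CLIP normalization constants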
        self.processor = CLIPImageProcessor(crop_size=224,
                                            do_center_crop=True,
                                            do_convert_rgb=True,
                                            do_normalize=True,
                                            do_resize=True,
                                            image_mean=[0.48145466, 0.4578275, 0.40821073],
                                            image_std=[0.26862954, 0.26130258, 0.27577711],
                                            resample=3,  # bicubic
                                            size=224)

    def load_sd(self, sd):
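        # strict=False: returns (missing, unexpected) keys instead of raising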
        return self.model.load_state_dict(sd, strict=False)

    def encode_image(self, image):
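        # ComfyUI images are BHWC float tensors in [0, 1]; scale to 0-255 integers
        # for the CLIPImageProcessor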
        img = torch.clip((255. * image), 0, 255).round().int()
        img = [i for i in img]  # split the batch into a list of HWC images
        inputs = self.processor(images=img, return_tensors="pt")
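        # make sure the vision model is resident on its load device before the forward pass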
        comfy.model_management.load_model_gpu(self.patcher)
        pixel_values = inputs['pixel_values'].to(self.load_device)

        # fp16 runs under autocast; the fp32 branch substitutes a no-op context
        # manager with the same (device, dtype) call signature
        if self.dtype != torch.float32:
            precision_scope = torch.autocast
        else:
            precision_scope = lambda a, b: contextlib.nullcontext(a)

        with precision_scope(comfy.model_management.get_autocast_device(self.load_device), torch.float32):
            outputs = self.model(pixel_values=pixel_values)

        # hand results back on the CPU so callers don't hold on to GPU memory
        for k in outputs:
            t = outputs[k]
            if t is not None:
                outputs[k] = t.cpu()
        return outputs

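# Remap an OpenAI CLIP-style visual state dict (class_embedding, conv1,
# transformer.resblocks.*, ...) to the transformers CLIPVision naming scheme.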
def convert_to_transformers(sd, prefix):
    sd_k = sd.keys()
    if "{}transformer.resblocks.0.attn.in_proj_weight".format(prefix) in sd_k:
        keys_to_replace = {
            "{}class_embedding".format(prefix): "vision_model.embeddings.class_embedding",
            "{}conv1.weight".format(prefix): "vision_model.embeddings.patch_embedding.weight",
            "{}positional_embedding".format(prefix): "vision_model.embeddings.position_embedding.weight",
            "{}ln_post.bias".format(prefix): "vision_model.post_layernorm.bias",
            "{}ln_post.weight".format(prefix): "vision_model.post_layernorm.weight",
            "{}ln_pre.bias".format(prefix): "vision_model.pre_layrnorm.bias",
            "{}ln_pre.weight".format(prefix): "vision_model.pre_layrnorm.weight",
        }

        for x in keys_to_replace:
            if x in sd_k:
                sd[keys_to_replace[x]] = sd.pop(x)

        if "{}proj".format(prefix) in sd_k:
            sd['visual_projection.weight'] = sd.pop("{}proj".format(prefix)).transpose(0, 1)

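        # convert the per-layer resblock weights; 48 covers the deepest supported
        # tower (ViT-bigG)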
        sd = transformers_convert(sd, prefix, "vision_model.", 48)
    return sd

def load_clipvision_from_sd(sd, prefix="", convert_keys=False):
    if convert_keys:
        sd = convert_to_transformers(sd, prefix)
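    # pick a config by probing for the deepest encoder layer present:
    # layer 47 -> ViT-bigG (48 layers), layer 30 -> ViT-H (32 layers), else ViT-L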
    if "vision_model.encoder.layers.47.layer_norm1.weight" in sd:
        json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_g.json")
    elif "vision_model.encoder.layers.30.layer_norm1.weight" in sd:
        json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_h.json")
    else:
        json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_vitl.json")
    clip = ClipVisionModel(json_config)
    m, u = clip.load_sd(sd)
    if len(m) > 0:
        print("missing clip vision:", m)
    # free the state-dict copies of every tensor that was loaded into the model
    u = set(u)
    for k in list(sd.keys()):
        if k not in u:
            sd.pop(k)
    return clip

# Load a checkpoint from disk, converting OpenAI-style keys (prefix "visual.")
# to transformers naming when detected.
def load(ckpt_path):
    sd = load_torch_file(ckpt_path)
    if "visual.transformer.resblocks.0.attn.in_proj_weight" in sd:
        return load_clipvision_from_sd(sd, prefix="visual.", convert_keys=True)
    else:
        return load_clipvision_from_sd(sd)
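
# Minimal usage sketch (assumes a ComfyUI runtime; the checkpoint path below is
# hypothetical):
#
#   clip = load("models/clip_vision/clip_vit_l.safetensors")
#   image = torch.rand(1, 512, 512, 3)  # ComfyUI-style BHWC float image in [0, 1]
#   out = clip.encode_image(image)
#   embeds = out["image_embeds"]        # pooled, projected CLIP embedding (on CPU)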