import os

from transformers import CLIPTokenizer, CLIPTextModel, CLIPTextConfig
import torch
import traceback

class ClipTokenWeightEncoder:
    def encode_token_weights(self, token_weight_pairs):
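        # Encode each section of (token, weight) pairs, then move every token's
        # embedding relative to the empty-prompt embedding by its weight:
        # 1.0 leaves it unchanged, >1.0 emphasizes it, <1.0 pulls it toward the
        # empty-prompt result.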
        z_empty = self.encode(self.empty_tokens)
        output = []
        for x in token_weight_pairs:
            tokens = [list(map(lambda a: a[0], x))]
            z = self.encode(tokens)
            for i in range(len(z)):
                for j in range(len(z[i])):
                    weight = x[j][1]
                    z[i][j] = (z[i][j] - z_empty[0][j]) * weight + z_empty[0][j]
            output += [z]
        if len(output) == 0:
            return self.encode(self.empty_tokens)
        return torch.cat(output, dim=-2)

class SD1ClipModel(torch.nn.Module, ClipTokenWeightEncoder):
    """Uses the CLIP transformer encoder for text (from huggingface)"""
    LAYERS = [
        "last",
        "pooled",
        "hidden"
    ]
    def __init__(self, version="openai/clip-vit-large-patch14", device="cpu", max_length=77,
                 freeze=True, layer="last", layer_idx=None, textmodel_json_config=None, textmodel_path=None):  # clip-vit-base-patch32
        super().__init__()
        assert layer in self.LAYERS
        if textmodel_path is not None:
            self.transformer = CLIPTextModel.from_pretrained(textmodel_path)
        else:
            if textmodel_json_config is None:
                textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_clip_config.json")
            config = CLIPTextConfig.from_json_file(textmodel_json_config)
            self.transformer = CLIPTextModel(config)

        self.device = device
        self.max_length = max_length
        if freeze:
            self.freeze()
        self.layer = layer
        self.layer_idx = None
        self.empty_tokens = [[49406] + [49407] * 76]
        if layer == "hidden":
            assert layer_idx is not None
            assert abs(layer_idx) <= 12
            self.clip_layer(layer_idx)

    def freeze(self):
        self.transformer = self.transformer.eval()
        #self.train = disabled_train
        for param in self.parameters():
            param.requires_grad = False

    def clip_layer(self, layer_idx):
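        # Select which hidden layer forward() returns ("CLIP skip");
        # indices with |layer_idx| >= 12 fall back to the final layer output.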
        if abs(layer_idx) >= 12:
            self.layer = "last"
        else:
            self.layer = "hidden"
            self.layer_idx = layer_idx


    def set_up_textual_embeddings(self, tokens, current_embeds):
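        # Tokens may be plain ids or raw embedding tensors (textual inversion).
        # Tensors whose width matches the embedding dimension are appended as new
        # rows of a temporary embedding matrix and replaced by fresh token ids;
        # mismatched tensors are skipped with a warning.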
        out_tokens = []
        next_new_token = token_dict_size = current_embeds.weight.shape[0]
        embedding_weights = []

        for x in tokens:
            tokens_temp = []
            for y in x:
                if isinstance(y, int):
                    tokens_temp += [y]
                else:
                    if y.shape[0] == current_embeds.weight.shape[1]:
                        embedding_weights += [y]
                        tokens_temp += [next_new_token]
                        next_new_token += 1
                    else:
                        print("WARNING: shape mismatch when trying to apply embedding, embedding will be ignored", y.shape[0], current_embeds.weight.shape[1])
            out_tokens += [tokens_temp]

        if len(embedding_weights) > 0:
            new_embedding = torch.nn.Embedding(next_new_token, current_embeds.weight.shape[1])
            new_embedding.weight[:token_dict_size] = current_embeds.weight[:]
            n = token_dict_size
            for x in embedding_weights:
                new_embedding.weight[n] = x
                n += 1
            self.transformer.set_input_embeddings(new_embedding)
        return out_tokens

    def forward(self, tokens):
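        # Temporarily swap in the expanded embedding matrix (if any custom
        # embedding tensors were supplied), run the text model, then restore
        # the original input embeddings.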
        backup_embeds = self.transformer.get_input_embeddings()
        tokens = self.set_up_textual_embeddings(tokens, backup_embeds)
        tokens = torch.LongTensor(tokens).to(self.device)
        outputs = self.transformer(input_ids=tokens, output_hidden_states=self.layer=="hidden")
        self.transformer.set_input_embeddings(backup_embeds)

        if self.layer == "last":
            z = outputs.last_hidden_state
        elif self.layer == "pooled":
            z = outputs.pooler_output[:, None, :]
        else:
            z = outputs.hidden_states[self.layer_idx]
            z = self.transformer.text_model.final_layer_norm(z)

        return z

    def encode(self, tokens):
        return self(tokens)

def parse_parentheses(string):
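    # Split a string into top-level chunks: plain text runs and "(...)" groups,
    # keeping nested parentheses inside their enclosing group.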
    result = []
    current_item = ""
    nesting_level = 0
    for char in string:
        if char == "(":
            if nesting_level == 0:
                if current_item:
                    result.append(current_item)
                current_item = "("
            else:
                current_item += char
            nesting_level += 1
        elif char == ")":
            nesting_level -= 1
            if nesting_level == 0:
                result.append(current_item + ")")
                current_item = ""
            else:
                current_item += char
        else:
            current_item += char
    if current_item:
        result.append(current_item)
    return result

def token_weights(string, current_weight):
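    # Recursively expand "(...)" groups into (text, weight) pairs: each nesting
    # level multiplies the current weight by 1.1, unless the group ends with an
    # explicit ":number", which sets the weight directly.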
    a = parse_parentheses(string)
    out = []
    for x in a:
        weight = current_weight
        if len(x) >= 2 and x[-1] == ')' and x[0] == '(':
            x = x[1:-1]
            xx = x.rfind(":")
            weight *= 1.1
            if xx > 0:
                try:
                    weight = float(x[xx+1:])
                    x = x[:xx]
                except ValueError:
                    pass
            out += token_weights(x, weight)
        else:
            out += [(x, current_weight)]
    return out

def escape_important(text):
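    # Swap escaped parentheses ("\(" and "\)") for sentinel characters so the
    # weight parser ignores them; unescape_important restores them before tokenizing.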
    text = text.replace("\\)", "\0\1")
    text = text.replace("\\(", "\0\2")
    return text

def unescape_important(text):
    text = text.replace("\0\1", ")")
    text = text.replace("\0\2", "(")
    return text

def load_embed(embedding_name, embedding_directory):
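    # Search each embedding directory for the named file, trying the known
    # extensions when there is no exact match; return the embedding tensor,
    # or None if nothing could be loaded.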
    if isinstance(embedding_directory, str):
        embedding_directory = [embedding_directory]

    valid_file = None
    for embed_dir in embedding_directory:
        embed_path = os.path.join(embed_dir, embedding_name)
        if not os.path.isfile(embed_path):
            extensions = ['.safetensors', '.pt', '.bin']
            for x in extensions:
                t = embed_path + x
                if os.path.isfile(t):
                    valid_file = t
                    break
        else:
            valid_file = embed_path
        if valid_file is not None:
            break

    if valid_file is None:
        return None

    embed_path = valid_file

    try:
        if embed_path.lower().endswith(".safetensors"):
            import safetensors.torch
            embed = safetensors.torch.load_file(embed_path, device="cpu")
        else:
            if 'weights_only' in torch.load.__code__.co_varnames:
                embed = torch.load(embed_path, weights_only=True, map_location="cpu")
            else:
                embed = torch.load(embed_path, map_location="cpu")
    except Exception as e:
        print(traceback.format_exc())
        print()
        print("error loading embedding, skipping loading:", embedding_name)
        return None

    if 'string_to_param' in embed:
        values = embed['string_to_param'].values()
    else:
        values = embed.values()
    return next(iter(values))

class SD1Tokenizer:
    def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None):
        if tokenizer_path is None:
            tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer")
        self.tokenizer = CLIPTokenizer.from_pretrained(tokenizer_path)
        self.max_length = max_length
        self.max_tokens_per_section = self.max_length - 2

        empty = self.tokenizer('')["input_ids"]
        self.start_token = empty[0]
        self.end_token = empty[1]
        self.pad_with_end = pad_with_end
        vocab = self.tokenizer.get_vocab()
        self.inv_vocab = {v: k for k, v in vocab.items()}
        self.embedding_directory = embedding_directory
        self.max_word_length = 8

    def tokenize_with_weights(self, text):
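        # Convert a prompt into fixed-length sections of (token, weight) pairs.
        # Each section is wrapped with start/end tokens and padded to max_length;
        # "embedding:name" words are replaced by textual-inversion tensors.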
        text = escape_important(text)
        parsed_weights = token_weights(text, 1.0)

        tokens = []
        for t in parsed_weights:
            to_tokenize = unescape_important(t[0]).replace("\n", " ").split(' ')
            while len(to_tokenize) > 0:
                word = to_tokenize.pop(0)
                temp_tokens = []
                embedding_identifier = "embedding:"
                if word.startswith(embedding_identifier) and self.embedding_directory is not None:
                    embedding_name = word[len(embedding_identifier):].strip('\n')
                    embed = load_embed(embedding_name, self.embedding_directory)
                    if embed is None:
                        stripped = embedding_name.strip(',')
                        if len(stripped) < len(embedding_name):
                            embed = load_embed(stripped, self.embedding_directory)
                            if embed is not None:
                                to_tokenize.insert(0, embedding_name[len(stripped):])

                    if embed is not None:
                        if len(embed.shape) == 1:
                            temp_tokens += [(embed, t[1])]
                        else:
                            for x in range(embed.shape[0]):
                                temp_tokens += [(embed[x], t[1])]
                    else:
                        print("warning, embedding:{} does not exist, ignoring".format(embedding_name))
                elif len(word) > 0:
                    tt = self.tokenizer(word)["input_ids"][1:-1]
                    for x in tt:
                        temp_tokens += [(x, t[1])]
                tokens_left = self.max_tokens_per_section - (len(tokens) % self.max_tokens_per_section)

                # try not to split words in different sections
                if tokens_left < len(temp_tokens) and len(temp_tokens) < (self.max_word_length):
                    for x in range(tokens_left):
                        tokens += [(self.end_token, 1.0)]
                tokens += temp_tokens

        out_tokens = []
        for x in range(0, len(tokens), self.max_tokens_per_section):
            o_token = [(self.start_token, 1.0)] + tokens[x:min(self.max_tokens_per_section + x, len(tokens))]
            o_token += [(self.end_token, 1.0)]
            if self.pad_with_end:
                o_token += [(self.end_token, 1.0)] * (self.max_length - len(o_token))
            else:
                o_token += [(0, 1.0)] * (self.max_length - len(o_token))

            out_tokens += [o_token]

        return out_tokens

    def untokenize(self, token_weight_pair):
        return list(map(lambda a: (a, self.inv_vocab[a[0]]), token_weight_pair))
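
# A minimal usage sketch (illustrative only, not used by the module itself).
# It assumes the bundled "sd1_tokenizer" directory and "sd1_clip_config.json"
# sit next to this file, and that real weights are loaded into the model
# separately (SD1ClipModel() alone builds a randomly initialized text model);
# the "embeddings" directory name is a placeholder.
#
#   tokenizer = SD1Tokenizer(embedding_directory="embeddings")
#   clip = SD1ClipModel()
#   sections = tokenizer.tokenize_with_weights("a (detailed:1.2) photo of a cat")
#   with torch.no_grad():
#       cond = clip.encode_token_weights(sections)  # -> [1, num_sections * 77, 768]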