sd1_clip.py

import os

from transformers import CLIPTokenizer, CLIPTextModel, CLIPTextConfig, modeling_utils
import comfy.ops
import torch
import traceback
import zipfile


class ClipTokenWeightEncoder:
    def encode_token_weights(self, token_weight_pairs):
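        # token_weight_pairs: batches of (token, weight) pairs as produced by
        # SD1Tokenizer.tokenize_with_weights(..., return_word_ids=False);
        # tokens may be integer ids or precomputed embedding tensors.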
        z_empty, _ = self.encode(self.empty_tokens)
        output = []
        first_pooled = None
        for x in token_weight_pairs:
            tokens = [[a[0] for a in x]]
            z, pooled = self.encode(tokens)
            if first_pooled is None:
                first_pooled = pooled
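            # Blend each token's embedding with the empty-prompt embedding:
            # z = (z - z_empty) * weight + z_empty, so a weight of 1.0 leaves
            # the embedding unchanged.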
            for i in range(len(z)):
                for j in range(len(z[i])):
                    weight = x[j][1]
                    z[i][j] = (z[i][j] - z_empty[0][j]) * weight + z_empty[0][j]
            output += [z]
        if len(output) == 0:
            return self.encode(self.empty_tokens)
        return torch.cat(output, dim=-2).cpu(), first_pooled.cpu()

class SD1ClipModel(torch.nn.Module, ClipTokenWeightEncoder):
    """Uses the CLIP transformer encoder for text (from huggingface)"""
    LAYERS = [
        "last",
        "pooled",
        "hidden"
    ]
    def __init__(self, version="openai/clip-vit-large-patch14", device="cpu", max_length=77,
                 freeze=True, layer="last", layer_idx=None, textmodel_json_config=None, textmodel_path=None):  # clip-vit-base-patch32
        super().__init__()
        assert layer in self.LAYERS
        if textmodel_path is not None:
            self.transformer = CLIPTextModel.from_pretrained(textmodel_path)
        else:
            if textmodel_json_config is None:
                textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_clip_config.json")
            config = CLIPTextConfig.from_json_file(textmodel_json_config)
            with comfy.ops.use_comfy_ops():
                with modeling_utils.no_init_weights():
                    self.transformer = CLIPTextModel(config)

        self.device = device
        self.max_length = max_length
        if freeze:
            self.freeze()
        self.layer = layer
        self.layer_idx = None
        self.empty_tokens = [[49406] + [49407] * 76]
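        # 49406 is CLIP's start-of-text token id and 49407 its end-of-text id,
        # so the empty prompt above is a start token followed by 76 end tokens.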
        self.text_projection = None
        self.layer_norm_hidden_state = True
        if layer == "hidden":
            assert layer_idx is not None
            assert abs(layer_idx) <= 12
            self.clip_layer(layer_idx)

    def freeze(self):
        self.transformer = self.transformer.eval()
        #self.train = disabled_train
        for param in self.parameters():
            param.requires_grad = False

    def clip_layer(self, layer_idx):
        if abs(layer_idx) >= 12:
            self.layer = "last"
        else:
            self.layer = "hidden"
            self.layer_idx = layer_idx

    def set_up_textual_embeddings(self, tokens, current_embeds):
        out_tokens = []
        next_new_token = token_dict_size = current_embeds.weight.shape[0]
        embedding_weights = []

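        # Integer entries are ordinary token ids; tensor entries are custom
        # embeddings that are assigned fresh token ids past the vocabulary end.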
        for x in tokens:
            tokens_temp = []
            for y in x:
                if isinstance(y, int):
                    tokens_temp += [y]
                else:
                    if y.shape[0] == current_embeds.weight.shape[1]:
                        embedding_weights += [y]
                        tokens_temp += [next_new_token]
                        next_new_token += 1
                    else:
                        print("WARNING: shape mismatch when trying to apply embedding, embedding will be ignored", y.shape[0], current_embeds.weight.shape[1])
            while len(tokens_temp) < len(x):
                tokens_temp += [self.empty_tokens[0][-1]]
            out_tokens += [tokens_temp]

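        # Build an enlarged embedding table with the custom embeddings appended
        # after the original vocabulary and swap it into the transformer.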
        if len(embedding_weights) > 0:
            new_embedding = torch.nn.Embedding(next_new_token, current_embeds.weight.shape[1])
            new_embedding.weight[:token_dict_size] = current_embeds.weight[:]
            n = token_dict_size
            for x in embedding_weights:
                new_embedding.weight[n] = x
                n += 1
            self.transformer.set_input_embeddings(new_embedding)
        return out_tokens

    def forward(self, tokens):
        backup_embeds = self.transformer.get_input_embeddings()
        tokens = self.set_up_textual_embeddings(tokens, backup_embeds)
        tokens = torch.LongTensor(tokens).to(self.device)
        outputs = self.transformer(input_ids=tokens, output_hidden_states=self.layer=="hidden")
        self.transformer.set_input_embeddings(backup_embeds)

        if self.layer == "last":
            z = outputs.last_hidden_state
        elif self.layer == "pooled":
            z = outputs.pooler_output[:, None, :]
        else:
            z = outputs.hidden_states[self.layer_idx]
            if self.layer_norm_hidden_state:
                z = self.transformer.text_model.final_layer_norm(z)

        pooled_output = outputs.pooler_output
        if self.text_projection is not None:
            pooled_output = pooled_output @ self.text_projection
        return z, pooled_output

    def encode(self, tokens):
        return self(tokens)

def parse_parentheses(string):
    result = []
    current_item = ""
    nesting_level = 0
    for char in string:
        if char == "(":
            if nesting_level == 0:
                if current_item:
                    result.append(current_item)
                current_item = "("
            else:
                current_item += char
            nesting_level += 1
        elif char == ")":
            nesting_level -= 1
            if nesting_level == 0:
                result.append(current_item + ")")
                current_item = ""
            else:
                current_item += char
        else:
            current_item += char
    if current_item:
        result.append(current_item)
    return result

def token_weights(string, current_weight):
    a = parse_parentheses(string)
    out = []
    for x in a:
        weight = current_weight
        if len(x) >= 2 and x[-1] == ')' and x[0] == '(':
            x = x[1:-1]
            xx = x.rfind(":")
            weight *= 1.1
            if xx > 0:
                try:
                    weight = float(x[xx+1:])
                    x = x[:xx]
                except ValueError:
                    pass
            out += token_weights(x, weight)
        else:
            out += [(x, current_weight)]
    return out
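
# Illustrative behavior: each nesting level multiplies the weight by 1.1 unless
# an explicit ":number" overrides it, e.g.
#   token_weights("a (red:1.2) cat", 1.0) -> [("a ", 1.0), ("red", 1.2), (" cat", 1.0)]
#   token_weights("((big)) dog", 1.0)     -> [("big", 1.1 * 1.1), (" dog", 1.0)]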

def escape_important(text):
    text = text.replace("\\)", "\0\1")
    text = text.replace("\\(", "\0\2")
    return text

def unescape_important(text):
    text = text.replace("\0\1", ")")
    text = text.replace("\0\2", "(")
    return text
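
# Example: in "a \\(literal\\) paren" the escaped parentheses are replaced with
# sentinel bytes by escape_important() so token_weights() treats them as plain
# text; unescape_important() restores them before tokenization.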

def safe_load_embed_zip(embed_path):
    with zipfile.ZipFile(embed_path) as myzip:
        names = list(filter(lambda a: "data/" in a, myzip.namelist()))
        names.reverse()
        for n in names:
            with myzip.open(n) as myfile:
                data = myfile.read()
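                # tensors inside the checkpoint zip are assumed to be float32
                # (4 bytes per element)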
                number = len(data) // 4
                length_embed = 1024 #sd2.x
                if number < 768:
                    continue
                if number % 768 == 0:
                    length_embed = 768 #sd1.x
                num_embeds = number // length_embed
                embed = torch.frombuffer(data, dtype=torch.float)
                out = embed.reshape((num_embeds, length_embed)).clone()
                del embed
                return out

def expand_directory_list(directories):
    dirs = set()
    for x in directories:
        dirs.add(x)
        for root, subdir, file in os.walk(x, followlinks=True):
            dirs.add(root)
    return list(dirs)

def load_embed(embedding_name, embedding_directory, embedding_size):
    if isinstance(embedding_directory, str):
        embedding_directory = [embedding_directory]

    embedding_directory = expand_directory_list(embedding_directory)

    valid_file = None
    for embed_dir in embedding_directory:
        embed_path = os.path.join(embed_dir, embedding_name)
        if not os.path.isfile(embed_path):
            extensions = ['.safetensors', '.pt', '.bin']
            for x in extensions:
                t = embed_path + x
                if os.path.isfile(t):
                    valid_file = t
                    break
        else:
            valid_file = embed_path
        if valid_file is not None:
            break

    if valid_file is None:
        return None

    embed_path = valid_file

    embed_out = None

    try:
        if embed_path.lower().endswith(".safetensors"):
            import safetensors.torch
            embed = safetensors.torch.load_file(embed_path, device="cpu")
        else:
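            # torch.load on older torch versions has no weights_only argument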
            if 'weights_only' in torch.load.__code__.co_varnames:
                try:
                    embed = torch.load(embed_path, weights_only=True, map_location="cpu")
                except Exception:
                    embed_out = safe_load_embed_zip(embed_path)
            else:
                embed = torch.load(embed_path, map_location="cpu")
    except Exception as e:
        print(traceback.format_exc())
        print()
        print("error loading embedding, skipping loading:", embedding_name)
        return None

    if embed_out is None:
        if 'string_to_param' in embed:
            values = embed['string_to_param'].values()
            embed_out = next(iter(values))
        elif isinstance(embed, list):
            out_list = []
            for x in range(len(embed)):
                for k in embed[x]:
                    t = embed[x][k]
                    if t.shape[-1] != embedding_size:
                        continue
                    out_list.append(t.reshape(-1, t.shape[-1]))
            embed_out = torch.cat(out_list, dim=0)
        else:
            values = embed.values()
            embed_out = next(iter(values))
    return embed_out

class SD1Tokenizer:
    def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768):
        if tokenizer_path is None:
            tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer")
        self.tokenizer = CLIPTokenizer.from_pretrained(tokenizer_path)
        self.max_length = max_length
        self.max_tokens_per_section = self.max_length - 2

        empty = self.tokenizer('')["input_ids"]
        self.start_token = empty[0]
        self.end_token = empty[1]
        self.pad_with_end = pad_with_end
        vocab = self.tokenizer.get_vocab()
        self.inv_vocab = {v: k for k, v in vocab.items()}
        self.embedding_directory = embedding_directory
        self.max_word_length = 8
        self.embedding_identifier = "embedding:"
        self.embedding_size = embedding_size

    def _try_get_embedding(self, embedding_name:str):
        '''
        Takes a potential embedding name and tries to retrieve it.
        Returns a tuple of (embedding, leftover string); the embedding may be None.
        '''
        embed = load_embed(embedding_name, self.embedding_directory, self.embedding_size)
        if embed is None:
            stripped = embedding_name.strip(',')
            if len(stripped) < len(embedding_name):
                embed = load_embed(stripped, self.embedding_directory, self.embedding_size)
                return (embed, embedding_name[len(stripped):])
        return (embed, "")


    def tokenize_with_weights(self, text:str, return_word_ids=False):
        '''
        Takes a prompt and converts it to a list of (token, weight, word id) elements.
        Tokens can be either integer token ids or precomputed CLIP tensors.
        Word ids are unique per word and embedding; id 0 is reserved for non-word tokens.
        The returned list has dimensions NxM, where M is the input size of CLIP.
        '''
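        # Illustrative example: with max_length=77, "a (red:1.2) cat" yields a
        # single batch of 77 (token, weight) pairs: a start token, the word
        # tokens with weights 1.0/1.2/1.0, an end token, then padding.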
        if self.pad_with_end:
            pad_token = self.end_token
        else:
            pad_token = 0

        text = escape_important(text)
        parsed_weights = token_weights(text, 1.0)

        #tokenize words
        tokens = []
        for weighted_segment, weight in parsed_weights:
            to_tokenize = unescape_important(weighted_segment).replace("\n", " ").split(' ')
            to_tokenize = [x for x in to_tokenize if x != ""]
            for word in to_tokenize:
                #if we find an embedding, deal with the embedding
                if word.startswith(self.embedding_identifier) and self.embedding_directory is not None:
                    embedding_name = word[len(self.embedding_identifier):].strip('\n')
                    embed, leftover = self._try_get_embedding(embedding_name)
                    if embed is None:
                        print(f"warning, embedding:{embedding_name} does not exist, ignoring")
                    else:
                        if len(embed.shape) == 1:
                            tokens.append([(embed, weight)])
                        else:
                            tokens.append([(embed[x], weight) for x in range(embed.shape[0])])
                    #if we accidentally have leftover text, continue parsing using leftover, else move on to next word
                    if leftover != "":
                        word = leftover
                    else:
                        continue
                #parse word
                tokens.append([(t, weight) for t in self.tokenizer(word)["input_ids"][1:-1]])

        #reshape token array to CLIP input size
        batched_tokens = []
        batch = [(self.start_token, 1.0, 0)]
        batched_tokens.append(batch)
        for i, t_group in enumerate(tokens):
            #determine if we're going to try and keep the tokens in a single batch
            is_large = len(t_group) >= self.max_word_length

            while len(t_group) > 0:
                if len(t_group) + len(batch) > self.max_length - 1:
                    remaining_length = self.max_length - len(batch) - 1
                    #break word in two and add end token
                    if is_large:
                        batch.extend([(t,w,i+1) for t,w in t_group[:remaining_length]])
                        batch.append((self.end_token, 1.0, 0))
                        t_group = t_group[remaining_length:]
                    #add end token and pad
                    else:
                        batch.append((self.end_token, 1.0, 0))
                        batch.extend([(pad_token, 1.0, 0)] * (remaining_length))
                    #start new batch
                    batch = [(self.start_token, 1.0, 0)]
                    batched_tokens.append(batch)
                else:
                    batch.extend([(t,w,i+1) for t,w in t_group])
                    t_group = []

        #fill last batch
        batch.extend([(self.end_token, 1.0, 0)] + [(pad_token, 1.0, 0)] * (self.max_length - len(batch) - 1))

        if not return_word_ids:
            batched_tokens = [[(t, w) for t, w,_ in x] for x in batched_tokens]

        return batched_tokens


    def untokenize(self, token_weight_pair):
        return list(map(lambda a: (a, self.inv_vocab[a[0]]), token_weight_pair))
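
# Minimal usage sketch (assumptions: the ComfyUI package layout is importable
# and sd1_clip_config.json plus the sd1_tokenizer files sit next to this
# module; weights are left uninitialized here, so only shapes are meaningful):
if __name__ == "__main__":
    tokenizer = SD1Tokenizer()
    clip = SD1ClipModel(layer="hidden", layer_idx=-2)
    batches = tokenizer.tokenize_with_weights("a (red:1.2) cat")
    with torch.no_grad():
        cond, pooled = clip.encode_token_weights(batches)
    print(cond.shape, pooled.shape)  # expected: (1, 77, 768) and (1, 768)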