import os

from transformers import CLIPTokenizer
import comfy.ops
import torch
import traceback
import zipfile
from . import model_management
import comfy.clip_model
import json
import logging
import numbers

def gen_empty_tokens(special_tokens, length):
    start_token = special_tokens.get("start", None)
    end_token = special_tokens.get("end", None)
    pad_token = special_tokens.get("pad")
    output = []
    if start_token is not None:
        output.append(start_token)
    if end_token is not None:
        output.append(end_token)
    output += [pad_token] * (length - len(output))
    return output
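
# ClipTokenWeightEncoder applies per-token prompt weights by interpolating each token's
# hidden state toward the encoding of an empty prompt of the same length:
#     z = (z - z_empty) * weight + z_empty
# so a weight of 1.0 leaves the embedding unchanged and 0.0 collapses it to the
# empty-prompt embedding. The empty prompt is only encoded (as one extra batch entry)
# when some weight differs from 1.0, or when there is nothing to encode at all.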

class ClipTokenWeightEncoder:
    def encode_token_weights(self, token_weight_pairs):
        to_encode = list()
        max_token_len = 0
        has_weights = False
        for x in token_weight_pairs:
            tokens = list(map(lambda a: a[0], x))
            max_token_len = max(len(tokens), max_token_len)
            has_weights = has_weights or not all(map(lambda a: a[1] == 1.0, x))
            to_encode.append(tokens)

        sections = len(to_encode)
        if has_weights or sections == 0:
            to_encode.append(gen_empty_tokens(self.special_tokens, max_token_len))

        out, pooled = self.encode(to_encode)
        if pooled is not None:
            first_pooled = pooled[0:1].to(model_management.intermediate_device())
        else:
            first_pooled = pooled

        output = []
        for k in range(0, sections):
            z = out[k:k+1]
            if has_weights:
                z_empty = out[-1]
                for i in range(len(z)):
                    for j in range(len(z[i])):
                        weight = token_weight_pairs[k][j][1]
                        if weight != 1.0:
                            z[i][j] = (z[i][j] - z_empty[j]) * weight + z_empty[j]
            output.append(z)

        if (len(output) == 0):
            return out[-1:].to(model_management.intermediate_device()), first_pooled
        return torch.cat(output, dim=-2).to(model_management.intermediate_device()), first_pooled

class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder):
    """Uses the CLIP transformer encoder for text (from huggingface)"""
    LAYERS = [
        "last",
        "pooled",
        "hidden"
    ]
    def __init__(self, version="openai/clip-vit-large-patch14", device="cpu", max_length=77,
                 freeze=True, layer="last", layer_idx=None, textmodel_json_config=None, dtype=None, model_class=comfy.clip_model.CLIPTextModel,
                 special_tokens={"start": 49406, "end": 49407, "pad": 49407}, layer_norm_hidden_state=True, enable_attention_masks=False, zero_out_masked=False,
                 return_projected_pooled=True):  # clip-vit-base-patch32
        super().__init__()
        assert layer in self.LAYERS

        if textmodel_json_config is None:
            textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_clip_config.json")

        with open(textmodel_json_config) as f:
            config = json.load(f)

        self.transformer = model_class(config, dtype, device, comfy.ops.manual_cast)
        self.num_layers = self.transformer.num_layers

        self.max_length = max_length
        if freeze:
            self.freeze()
        self.layer = layer
        self.layer_idx = None
        self.special_tokens = special_tokens

        self.logit_scale = torch.nn.Parameter(torch.tensor(4.6055))
        self.enable_attention_masks = enable_attention_masks
        self.zero_out_masked = zero_out_masked

        self.layer_norm_hidden_state = layer_norm_hidden_state
        self.return_projected_pooled = return_projected_pooled

        if layer == "hidden":
            assert layer_idx is not None
            assert abs(layer_idx) < self.num_layers
            self.set_clip_options({"layer": layer_idx})
        self.options_default = (self.layer, self.layer_idx, self.return_projected_pooled)

    def freeze(self):
        self.transformer = self.transformer.eval()
        #self.train = disabled_train
        for param in self.parameters():
            param.requires_grad = False

    def set_clip_options(self, options):
        layer_idx = options.get("layer", self.layer_idx)
        self.return_projected_pooled = options.get("projected_pooled", self.return_projected_pooled)
        if layer_idx is None or abs(layer_idx) > self.num_layers:
            self.layer = "last"
        else:
            self.layer = "hidden"
            self.layer_idx = layer_idx

    def reset_clip_options(self):
        self.layer = self.options_default[0]
        self.layer_idx = self.options_default[1]
        self.return_projected_pooled = self.options_default[2]

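    # set_up_textual_embeddings handles textual-inversion embeddings: integer tokens pass
    # through unchanged, while tensor "tokens" are appended as extra rows to a copy of the
    # embedding matrix and replaced by newly allocated token ids. The EOS token is remapped
    # so it stays the largest id and keeps pointing at the last row of the matrix.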
    def set_up_textual_embeddings(self, tokens, current_embeds):
        out_tokens = []
        next_new_token = token_dict_size = current_embeds.weight.shape[0] - 1
        embedding_weights = []

        for x in tokens:
            tokens_temp = []
            for y in x:
                if isinstance(y, numbers.Integral):
                    if y == token_dict_size: #EOS token
                        y = -1
                    tokens_temp += [int(y)]
                else:
                    if y.shape[0] == current_embeds.weight.shape[1]:
                        embedding_weights += [y]
                        tokens_temp += [next_new_token]
                        next_new_token += 1
                    else:
                        logging.warning("WARNING: shape mismatch when trying to apply embedding, embedding will be ignored {} != {}".format(y.shape[0], current_embeds.weight.shape[1]))
            while len(tokens_temp) < len(x):
                tokens_temp += [self.special_tokens["pad"]]
            out_tokens += [tokens_temp]

        n = token_dict_size
        if len(embedding_weights) > 0:
            new_embedding = torch.nn.Embedding(next_new_token + 1, current_embeds.weight.shape[1], device=current_embeds.weight.device, dtype=current_embeds.weight.dtype)
            new_embedding.weight[:token_dict_size] = current_embeds.weight[:-1]
            for x in embedding_weights:
                new_embedding.weight[n] = x
                n += 1
            new_embedding.weight[n] = current_embeds.weight[-1] #EOS embedding
            self.transformer.set_input_embeddings(new_embedding)

        processed_tokens = []
        for x in out_tokens:
            processed_tokens += [list(map(lambda a: n if a == -1 else a, x))] #The EOS token should always be the largest one

        return processed_tokens

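    # forward() temporarily swaps in the (possibly extended) embedding matrix, optionally
    # builds a 0/1 attention mask covering each sequence up to and including its first end
    # token, runs the transformer, then restores the original embeddings. The mask is only
    # passed to the transformer when enable_attention_masks is set; zero_out_masked
    # additionally zeroes the returned hidden states at masked positions.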
    def forward(self, tokens):
        backup_embeds = self.transformer.get_input_embeddings()
        device = backup_embeds.weight.device
        tokens = self.set_up_textual_embeddings(tokens, backup_embeds)
        tokens = torch.LongTensor(tokens).to(device)

        attention_mask = None
        if self.enable_attention_masks or self.zero_out_masked:
            attention_mask = torch.zeros_like(tokens)
            end_token = self.special_tokens.get("end", -1)
            for x in range(attention_mask.shape[0]):
                for y in range(attention_mask.shape[1]):
                    attention_mask[x, y] = 1
                    if tokens[x, y] == end_token:
                        break

        attention_mask_model = None
        if self.enable_attention_masks:
            attention_mask_model = attention_mask

        outputs = self.transformer(tokens, attention_mask_model, intermediate_output=self.layer_idx, final_layer_norm_intermediate=self.layer_norm_hidden_state)
        self.transformer.set_input_embeddings(backup_embeds)

        if self.layer == "last":
            z = outputs[0].float()
        else:
            z = outputs[1].float()

        if self.zero_out_masked:
            z *= attention_mask.unsqueeze(-1).float()

        pooled_output = None
        if len(outputs) >= 3:
            if not self.return_projected_pooled and len(outputs) >= 4 and outputs[3] is not None:
                pooled_output = outputs[3].float()
            elif outputs[2] is not None:
                pooled_output = outputs[2].float()

        return z, pooled_output

    def encode(self, tokens):
        return self(tokens)

    def load_sd(self, sd):
        return self.transformer.load_state_dict(sd, strict=False)

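# Prompt weighting syntax: parse_parentheses / token_weights split a prompt into
# (text, weight) segments. Each bare level of parentheses multiplies the current weight
# by 1.1, and an explicit "(text:1.5)" form overrides it, for example:
#     token_weights("a (red:1.5) cat", 1.0) -> [("a ", 1.0), ("red", 1.5), (" cat", 1.0)]
#     token_weights("((red))", 1.0)         -> [("red", 1.21)]   (approximately, 1.1 * 1.1)
# escape_important / unescape_important protect literal "\(" and "\)" from the parser.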
def parse_parentheses(string):
    result = []
    current_item = ""
    nesting_level = 0
    for char in string:
        if char == "(":
            if nesting_level == 0:
                if current_item:
                    result.append(current_item)
                    current_item = "("
                else:
                    current_item = "("
            else:
                current_item += char
            nesting_level += 1
        elif char == ")":
            nesting_level -= 1
            if nesting_level == 0:
                result.append(current_item + ")")
                current_item = ""
            else:
                current_item += char
        else:
            current_item += char
    if current_item:
        result.append(current_item)
    return result

def token_weights(string, current_weight):
    a = parse_parentheses(string)
    out = []
    for x in a:
        weight = current_weight
        if len(x) >= 2 and x[-1] == ')' and x[0] == '(':
            x = x[1:-1]
            xx = x.rfind(":")
            weight *= 1.1
            if xx > 0:
                try:
                    weight = float(x[xx+1:])
                    x = x[:xx]
                except:
                    pass
            out += token_weights(x, weight)
        else:
            out += [(x, current_weight)]
    return out

def escape_important(text):
    text = text.replace("\\)", "\0\1")
    text = text.replace("\\(", "\0\2")
    return text

def unescape_important(text):
    text = text.replace("\0\1", ")")
    text = text.replace("\0\2", "(")
    return text

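# safe_load_embed_zip is a fallback for legacy .pt embeddings that fail
# torch.load(weights_only=True): it pulls the raw tensor storage straight out of the
# checkpoint's zip archive (no unpickling) and guesses the embedding width, assuming
# 768-wide vectors for SD1.x and 1024-wide vectors for SD2.x.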
def safe_load_embed_zip(embed_path):
    with zipfile.ZipFile(embed_path) as myzip:
        names = list(filter(lambda a: "data/" in a, myzip.namelist()))
        names.reverse()
        for n in names:
            with myzip.open(n) as myfile:
                data = myfile.read()
                number = len(data) // 4
                length_embed = 1024 #sd2.x
                if number < 768:
                    continue
                if number % 768 == 0:
                    length_embed = 768 #sd1.x
                num_embeds = number // length_embed
                embed = torch.frombuffer(data, dtype=torch.float)
                out = embed.reshape((num_embeds, length_embed)).clone()
                del embed
                return out

def expand_directory_list(directories):
    dirs = set()
    for x in directories:
        dirs.add(x)
        for root, subdir, file in os.walk(x, followlinks=True):
            dirs.add(root)
    return list(dirs)

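# load_embed resolves an embedding name inside the configured embedding directories
# (rejecting paths that escape them), trying the name as-is and with the .safetensors,
# .pt and .bin extensions, then pulls the embedding tensor out of whichever checkpoint
# layout the file uses ('string_to_param', a list of dicts, a named key such as
# 'clip_l', or simply the first value in the dict).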
def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=None):
    if isinstance(embedding_directory, str):
        embedding_directory = [embedding_directory]

    embedding_directory = expand_directory_list(embedding_directory)

    valid_file = None
    for embed_dir in embedding_directory:
        embed_path = os.path.abspath(os.path.join(embed_dir, embedding_name))
        embed_dir = os.path.abspath(embed_dir)
        try:
            if os.path.commonpath((embed_dir, embed_path)) != embed_dir:
                continue
        except:
            continue
        if not os.path.isfile(embed_path):
            extensions = ['.safetensors', '.pt', '.bin']
            for x in extensions:
                t = embed_path + x
                if os.path.isfile(t):
                    valid_file = t
                    break
        else:
            valid_file = embed_path
        if valid_file is not None:
            break

    if valid_file is None:
        return None

    embed_path = valid_file

    embed_out = None

    try:
        if embed_path.lower().endswith(".safetensors"):
            import safetensors.torch
            embed = safetensors.torch.load_file(embed_path, device="cpu")
        else:
            if 'weights_only' in torch.load.__code__.co_varnames:
                try:
                    embed = torch.load(embed_path, weights_only=True, map_location="cpu")
                except:
                    embed_out = safe_load_embed_zip(embed_path)
            else:
                embed = torch.load(embed_path, map_location="cpu")
    except Exception as e:
        logging.warning("{}\n\nerror loading embedding, skipping loading: {}".format(traceback.format_exc(), embedding_name))
        return None

    if embed_out is None:
        if 'string_to_param' in embed:
            values = embed['string_to_param'].values()
            embed_out = next(iter(values))
        elif isinstance(embed, list):
            out_list = []
            for x in range(len(embed)):
                for k in embed[x]:
                    t = embed[x][k]
                    if t.shape[-1] != embedding_size:
                        continue
                    out_list.append(t.reshape(-1, t.shape[-1]))
            embed_out = torch.cat(out_list, dim=0)
        elif embed_key is not None and embed_key in embed:
            embed_out = embed[embed_key]
        else:
            values = embed.values()
            embed_out = next(iter(values))
    return embed_out

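# SDTokenizer turns a weighted prompt into fixed-size CLIP batches: each batch is a list
# of (token, weight) or (token, weight, word_id) tuples framed by the start/end tokens and
# padded out to max_length (77 by default). Words of the form "embedding:<name>" are
# replaced by the raw vectors returned from load_embed, so a "token" can be either an int
# or a torch.Tensor.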
class SDTokenizer:
    def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, pad_to_max_length=True, min_length=None):
        if tokenizer_path is None:
            tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer")
        self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path)
        self.max_length = max_length
        self.min_length = min_length

        empty = self.tokenizer('')["input_ids"]
        if has_start_token:
            self.tokens_start = 1
            self.start_token = empty[0]
            self.end_token = empty[1]
        else:
            self.tokens_start = 0
            self.start_token = None
            self.end_token = empty[0]
        self.pad_with_end = pad_with_end
        self.pad_to_max_length = pad_to_max_length

        vocab = self.tokenizer.get_vocab()
        self.inv_vocab = {v: k for k, v in vocab.items()}
        self.embedding_directory = embedding_directory
        self.max_word_length = 8
        self.embedding_identifier = "embedding:"
        self.embedding_size = embedding_size
        self.embedding_key = embedding_key

    def _try_get_embedding(self, embedding_name:str):
        '''
        Takes a potential embedding name and tries to retrieve it.
        Returns a Tuple consisting of the embedding and any leftover string, embedding can be None.
        '''
        embed = load_embed(embedding_name, self.embedding_directory, self.embedding_size, self.embedding_key)
        if embed is None:
            stripped = embedding_name.strip(',')
            if len(stripped) < len(embedding_name):
                embed = load_embed(stripped, self.embedding_directory, self.embedding_size, self.embedding_key)
                return (embed, embedding_name[len(stripped):])
        return (embed, "")


    def tokenize_with_weights(self, text:str, return_word_ids=False):
        '''
        Takes a prompt and converts it to a list of (token, weight, word id) elements.
        Tokens can both be integer tokens and pre computed CLIP tensors.
        Word id values are unique per word and embedding, where the id 0 is reserved for non word tokens.
        Returned list has the dimensions NxM where M is the input size of CLIP
        '''
        if self.pad_with_end:
            pad_token = self.end_token
        else:
            pad_token = 0

        text = escape_important(text)
        parsed_weights = token_weights(text, 1.0)

        #tokenize words
        tokens = []
        for weighted_segment, weight in parsed_weights:
            to_tokenize = unescape_important(weighted_segment).replace("\n", " ").split(' ')
            to_tokenize = [x for x in to_tokenize if x != ""]
            for word in to_tokenize:
                #if we find an embedding, deal with the embedding
                if word.startswith(self.embedding_identifier) and self.embedding_directory is not None:
                    embedding_name = word[len(self.embedding_identifier):].strip('\n')
                    embed, leftover = self._try_get_embedding(embedding_name)
                    if embed is None:
                        logging.warning(f"warning, embedding:{embedding_name} does not exist, ignoring")
                    else:
                        if len(embed.shape) == 1:
                            tokens.append([(embed, weight)])
                        else:
                            tokens.append([(embed[x], weight) for x in range(embed.shape[0])])
                    #if we accidentally have leftover text, continue parsing using leftover, else move on to next word
                    if leftover != "":
                        word = leftover
                    else:
                        continue
                #parse word
                tokens.append([(t, weight) for t in self.tokenizer(word)["input_ids"][self.tokens_start:-1]])

        #reshape token array to CLIP input size
        batched_tokens = []
        batch = []
        if self.start_token is not None:
            batch.append((self.start_token, 1.0, 0))
        batched_tokens.append(batch)
        for i, t_group in enumerate(tokens):
            #determine if we're going to try and keep the tokens in a single batch
            is_large = len(t_group) >= self.max_word_length

            while len(t_group) > 0:
                if len(t_group) + len(batch) > self.max_length - 1:
                    remaining_length = self.max_length - len(batch) - 1
                    #break word in two and add end token
                    if is_large:
                        batch.extend([(t,w,i+1) for t,w in t_group[:remaining_length]])
                        batch.append((self.end_token, 1.0, 0))
                        t_group = t_group[remaining_length:]
                    #add end token and pad
                    else:
                        batch.append((self.end_token, 1.0, 0))
                        if self.pad_to_max_length:
                            batch.extend([(pad_token, 1.0, 0)] * (remaining_length))
                    #start new batch
                    batch = []
                    if self.start_token is not None:
                        batch.append((self.start_token, 1.0, 0))
                    batched_tokens.append(batch)
                else:
                    batch.extend([(t,w,i+1) for t,w in t_group])
                    t_group = []

        #fill last batch
        batch.append((self.end_token, 1.0, 0))
        if self.pad_to_max_length:
            batch.extend([(pad_token, 1.0, 0)] * (self.max_length - len(batch)))
        if self.min_length is not None and len(batch) < self.min_length:
            batch.extend([(pad_token, 1.0, 0)] * (self.min_length - len(batch)))

        if not return_word_ids:
            batched_tokens = [[(t, w) for t, w,_ in x] for x in batched_tokens]

        return batched_tokens


    def untokenize(self, token_weight_pair):
        return list(map(lambda a: (a, self.inv_vocab[a[0]]), token_weight_pair))
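
# SD1Tokenizer and SD1ClipModel wrap a single tokenizer/encoder pair under a clip_name key
# (default "l", stored as the clip_l attribute), so checkpoints with one or more text
# encoders can share the same tokenize_with_weights / encode_token_weights interface,
# keyed by encoder name.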

class SD1Tokenizer:
    def __init__(self, embedding_directory=None, clip_name="l", tokenizer=SDTokenizer):
        self.clip_name = clip_name
        self.clip = "clip_{}".format(self.clip_name)
        setattr(self, self.clip, tokenizer(embedding_directory=embedding_directory))

    def tokenize_with_weights(self, text:str, return_word_ids=False):
        out = {}
        out[self.clip_name] = getattr(self, self.clip).tokenize_with_weights(text, return_word_ids)
        return out

    def untokenize(self, token_weight_pair):
        return getattr(self, self.clip).untokenize(token_weight_pair)


class SD1ClipModel(torch.nn.Module):
    def __init__(self, device="cpu", dtype=None, clip_name="l", clip_model=SDClipModel, **kwargs):
        super().__init__()
        self.clip_name = clip_name
        self.clip = "clip_{}".format(self.clip_name)
        setattr(self, self.clip, clip_model(device=device, dtype=dtype, **kwargs))

        self.dtypes = set()
        if dtype is not None:
            self.dtypes.add(dtype)

    def set_clip_options(self, options):
        getattr(self, self.clip).set_clip_options(options)

    def reset_clip_options(self):
        getattr(self, self.clip).reset_clip_options()

    def encode_token_weights(self, token_weight_pairs):
        token_weight_pairs = token_weight_pairs[self.clip_name]
        out, pooled = getattr(self, self.clip).encode_token_weights(token_weight_pairs)
        return out, pooled

    def load_sd(self, sd):
        return getattr(self, self.clip).load_sd(sd)
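
# Illustrative usage sketch (not executed here; the embedding directory and prompt are
# assumptions, and the transformer weights still have to be loaded via load_sd()):
#
#   tokenizer = SD1Tokenizer(embedding_directory="models/embeddings")
#   clip = SD1ClipModel()
#   tokens = tokenizer.tokenize_with_weights("a (red:1.2) cat")
#   cond, pooled = clip.encode_token_weights(tokens)
#
# `cond` holds the per-token conditioning (77-token chunks concatenated along the
# sequence axis) and `pooled` is the pooled output used by models that need it.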