import os

from transformers import CLIPTokenizer, CLIPTextModel, CLIPTextConfig, modeling_utils
import comfy.ops
import torch
import traceback
import zipfile
from . import model_management
import contextlib

class ClipTokenWeightEncoder:
    def encode_token_weights(self, token_weight_pairs):
        z_empty, _ = self.encode(self.empty_tokens)
        output = []
        first_pooled = None
        for x in token_weight_pairs:
            tokens = [list(map(lambda a: a[0], x))]
            z, pooled = self.encode(tokens)
            if first_pooled is None:
                first_pooled = pooled
            for i in range(len(z)):
                for j in range(len(z[i])):
                    weight = x[j][1]
                    z[i][j] = (z[i][j] - z_empty[0][j]) * weight + z_empty[0][j]
            output += [z]
        if len(output) == 0:
            return self.encode(self.empty_tokens)
        return torch.cat(output, dim=-2).cpu(), first_pooled.cpu()
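
# encode_token_weights above applies each prompt weight as an interpolation
# against the empty-prompt embedding: z' = z_empty + weight * (z - z_empty),
# so weight 1.0 is a no-op and weight 0.0 collapses the token to the empty
# embedding. A minimal sketch of the same arithmetic on dummy tensors
# (illustrative only, not part of the original module):
#
#   z = torch.full((1, 77, 768), 2.0)     # encoded weighted prompt
#   z_empty = torch.zeros(1, 77, 768)     # encoded empty prompt
#   out = (z - z_empty) * 0.5 + z_empty   # halves the deviation from empty
#   assert torch.allclose(out, torch.full((1, 77, 768), 1.0))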

class SD1ClipModel(torch.nn.Module, ClipTokenWeightEncoder):
    """Uses the CLIP transformer encoder for text (from huggingface)"""
    LAYERS = [
        "last",
        "pooled",
        "hidden"
    ]
    def __init__(self, version="openai/clip-vit-large-patch14", device="cpu", max_length=77,
                 freeze=True, layer="last", layer_idx=None, textmodel_json_config=None, textmodel_path=None):  # clip-vit-base-patch32
        super().__init__()
        assert layer in self.LAYERS
        if textmodel_path is not None:
            self.transformer = CLIPTextModel.from_pretrained(textmodel_path)
        else:
            if textmodel_json_config is None:
                textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_clip_config.json")
            config = CLIPTextConfig.from_json_file(textmodel_json_config)
            with comfy.ops.use_comfy_ops():
                with modeling_utils.no_init_weights():
                    self.transformer = CLIPTextModel(config)

        self.max_length = max_length
        if freeze:
            self.freeze()
        self.layer = layer
        self.layer_idx = None
        self.empty_tokens = [[49406] + [49407] * 76]
        self.text_projection = None
        self.layer_norm_hidden_state = True
        if layer == "hidden":
            assert layer_idx is not None
            assert abs(layer_idx) <= 12
            self.clip_layer(layer_idx)

    def freeze(self):
        self.transformer = self.transformer.eval()
        #self.train = disabled_train
        for param in self.parameters():
            param.requires_grad = False

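    # "CLIP skip" style layer selection: a negative layer_idx indexes
    # outputs.hidden_states from the end (-1 = last layer, -2 = second to
    # last); indices at or beyond the 12 transformer layers fall back to the
    # final hidden state.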
    def clip_layer(self, layer_idx):
        if abs(layer_idx) >= 12:
            self.layer = "last"
        else:
            self.layer = "hidden"
            self.layer_idx = layer_idx

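    # Textual-inversion support: tokens may be embedding tensors instead of
    # integer ids. Each tensor is appended as a new row of a temporarily
    # enlarged embedding matrix and replaced by its freshly minted token id,
    # so the transformer can look it up like any ordinary token.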
    def set_up_textual_embeddings(self, tokens, current_embeds):
        out_tokens = []
        next_new_token = token_dict_size = current_embeds.weight.shape[0]
        embedding_weights = []

        for x in tokens:
            tokens_temp = []
            for y in x:
                if isinstance(y, int):
                    tokens_temp += [y]
                else:
                    if y.shape[0] == current_embeds.weight.shape[1]:
                        embedding_weights += [y]
                        tokens_temp += [next_new_token]
                        next_new_token += 1
                    else:
                        print("WARNING: shape mismatch when trying to apply embedding, embedding will be ignored", y.shape[0], current_embeds.weight.shape[1])
            while len(tokens_temp) < len(x):
                tokens_temp += [self.empty_tokens[0][-1]]
            out_tokens += [tokens_temp]

        if len(embedding_weights) > 0:
            new_embedding = torch.nn.Embedding(next_new_token, current_embeds.weight.shape[1], device=current_embeds.weight.device, dtype=current_embeds.weight.dtype)
            new_embedding.weight[:token_dict_size] = current_embeds.weight[:]
            n = token_dict_size
            for x in embedding_weights:
                new_embedding.weight[n] = x
                n += 1
            self.transformer.set_input_embeddings(new_embedding)
        return out_tokens

    def forward(self, tokens):
        backup_embeds = self.transformer.get_input_embeddings()
        device = backup_embeds.weight.device
        tokens = self.set_up_textual_embeddings(tokens, backup_embeds)
        tokens = torch.LongTensor(tokens).to(device)

        if backup_embeds.weight.dtype != torch.float32:
            precision_scope = torch.autocast
        else:
            precision_scope = contextlib.nullcontext

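        # Encode under autocast when the weights are not fp32, then restore
        # the original embedding table (set_up_textual_embeddings may have
        # swapped in an enlarged one). layer == "hidden" selects an
        # intermediate hidden state (CLIP skip), optionally re-normalized.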
        with precision_scope(model_management.get_autocast_device(device)):
            outputs = self.transformer(input_ids=tokens, output_hidden_states=self.layer=="hidden")
            self.transformer.set_input_embeddings(backup_embeds)

            if self.layer == "last":
                z = outputs.last_hidden_state
            elif self.layer == "pooled":
                z = outputs.pooler_output[:, None, :]
            else:
                z = outputs.hidden_states[self.layer_idx]
                if self.layer_norm_hidden_state:
                    z = self.transformer.text_model.final_layer_norm(z)

            pooled_output = outputs.pooler_output
            if self.text_projection is not None:
                pooled_output = pooled_output @ self.text_projection
        return z.float(), pooled_output.float()

    def encode(self, tokens):
        return self(tokens)

    def load_sd(self, sd):
        return self.transformer.load_state_dict(sd, strict=False)

def parse_parentheses(string):
    result = []
    current_item = ""
    nesting_level = 0
    for char in string:
        if char == "(":
            if nesting_level == 0:
                if current_item:
                    result.append(current_item)
                current_item = "("
            else:
                current_item += char
            nesting_level += 1
        elif char == ")":
            nesting_level -= 1
            if nesting_level == 0:
                result.append(current_item + ")")
                current_item = ""
            else:
                current_item += char
        else:
            current_item += char
    if current_item:
        result.append(current_item)
    return result
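
# For example (illustrative, not part of the original file):
#   parse_parentheses("a (b (c)) d")  ->  ['a ', '(b (c))', ' d']
# Top-level parenthesized groups are returned intact, nesting preserved.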

def token_weights(string, current_weight):
    a = parse_parentheses(string)
    out = []
    for x in a:
        weight = current_weight
        if len(x) >= 2 and x[-1] == ')' and x[0] == '(':
            x = x[1:-1]
            xx = x.rfind(":")
            weight *= 1.1
            if xx > 0:
                try:
                    weight = float(x[xx+1:])
                    x = x[:xx]
                except ValueError:
                    pass
            out += token_weights(x, weight)
        else:
            out += [(x, current_weight)]
    return out
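
# Each "(" multiplies the running weight by 1.1 unless an explicit ":number"
# suffix overrides it. For example (illustrative only; weights shown rounded):
#   token_weights("an (important:1.3) ((word))", 1.0)
#   -> [('an ', 1.0), ('important', 1.3), (' ', 1.0), ('word', 1.21)]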

def escape_important(text):
    text = text.replace("\\)", "\0\1")
    text = text.replace("\\(", "\0\2")
    return text

def unescape_important(text):
    text = text.replace("\0\1", ")")
    text = text.replace("\0\2", "(")
    return text
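
# Escaped parentheses "\(" and "\)" are swapped for unprintable sentinel pairs
# before weight parsing so they read as literal text, then restored afterwards.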

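# Fallback loader for embeddings that torch.load rejects with weights_only=True:
# read the raw tensor buffers straight out of the checkpoint zip and infer the
# embedding width (768 for SD1.x, 1024 for SD2.x) from the element count.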
def safe_load_embed_zip(embed_path):
    with zipfile.ZipFile(embed_path) as myzip:
        names = list(filter(lambda a: "data/" in a, myzip.namelist()))
        names.reverse()
        for n in names:
            with myzip.open(n) as myfile:
                data = myfile.read()
                number = len(data) // 4
                length_embed = 1024 #sd2.x
                if number < 768:
                    continue
                if number % 768 == 0:
                    length_embed = 768 #sd1.x
                num_embeds = number // length_embed
                embed = torch.frombuffer(data, dtype=torch.float)
                out = embed.reshape((num_embeds, length_embed)).clone()
                del embed
                return out

def expand_directory_list(directories):
    dirs = set()
    for x in directories:
        dirs.add(x)
        for root, subdir, file in os.walk(x, followlinks=True):
            dirs.add(root)
    return list(dirs)

def load_embed(embedding_name, embedding_directory, embedding_size):
    if isinstance(embedding_directory, str):
        embedding_directory = [embedding_directory]

    embedding_directory = expand_directory_list(embedding_directory)

    valid_file = None
    for embed_dir in embedding_directory:
        embed_path = os.path.join(embed_dir, embedding_name)
        if not os.path.isfile(embed_path):
            extensions = ['.safetensors', '.pt', '.bin']
            for x in extensions:
                t = embed_path + x
                if os.path.isfile(t):
                    valid_file = t
                    break
        else:
            valid_file = embed_path
        if valid_file is not None:
            break

    if valid_file is None:
        return None

    embed_path = valid_file

    embed_out = None

    try:
        if embed_path.lower().endswith(".safetensors"):
            import safetensors.torch
            embed = safetensors.torch.load_file(embed_path, device="cpu")
        else:
            if 'weights_only' in torch.load.__code__.co_varnames:
                try:
                    embed = torch.load(embed_path, weights_only=True, map_location="cpu")
                except Exception:
                    embed_out = safe_load_embed_zip(embed_path)
            else:
                embed = torch.load(embed_path, map_location="cpu")
    except Exception as e:
        print(traceback.format_exc())
        print()
        print("error loading embedding, skipping loading:", embedding_name)
        return None

    if embed_out is None:
        if 'string_to_param' in embed:
            values = embed['string_to_param'].values()
            embed_out = next(iter(values))
        elif isinstance(embed, list):
            out_list = []
            for x in range(len(embed)):
                for k in embed[x]:
                    t = embed[x][k]
                    if t.shape[-1] != embedding_size:
                        continue
                    out_list.append(t.reshape(-1, t.shape[-1]))
            embed_out = torch.cat(out_list, dim=0)
        else:
            values = embed.values()
            embed_out = next(iter(values))
    return embed_out
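
# Hypothetical usage sketch (path and name are placeholders, not from the
# original file); the result is typically (num_vectors, embedding_size):
#
#   embed = load_embed("myembed.pt", ["models/embeddings"], 768)
#   if embed is not None:
#       print(embed.shape)   # e.g. torch.Size([4, 768])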

class SD1Tokenizer:
    def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768):
        if tokenizer_path is None:
            tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer")
        self.tokenizer = CLIPTokenizer.from_pretrained(tokenizer_path)
        self.max_length = max_length
        self.max_tokens_per_section = self.max_length - 2

        empty = self.tokenizer('')["input_ids"]
        self.start_token = empty[0]
        self.end_token = empty[1]
        self.pad_with_end = pad_with_end
        vocab = self.tokenizer.get_vocab()
        self.inv_vocab = {v: k for k, v in vocab.items()}
        self.embedding_directory = embedding_directory
        self.max_word_length = 8
        self.embedding_identifier = "embedding:"
        self.embedding_size = embedding_size

    def _try_get_embedding(self, embedding_name:str):
        '''
        Takes a potential embedding name and tries to retrieve it.
        Returns a tuple of (embedding, leftover string); the embedding can be None.
        '''
        embed = load_embed(embedding_name, self.embedding_directory, self.embedding_size)
        if embed is None:
            stripped = embedding_name.strip(',')
            if len(stripped) < len(embedding_name):
                embed = load_embed(stripped, self.embedding_directory, self.embedding_size)
                return (embed, embedding_name[len(stripped):])
        return (embed, "")


    def tokenize_with_weights(self, text:str, return_word_ids=False):
        '''
        Takes a prompt and converts it to a list of (token, weight, word id) elements.
        Tokens can be either integer token ids or precomputed CLIP tensors.
        Word ids are unique per word and embedding; id 0 is reserved for non-word tokens.
        The returned list has dimensions NxM, where M is the input size of CLIP.
        '''
        if self.pad_with_end:
            pad_token = self.end_token
        else:
            pad_token = 0

        text = escape_important(text)
        parsed_weights = token_weights(text, 1.0)

        #tokenize words
        tokens = []
        for weighted_segment, weight in parsed_weights:
            to_tokenize = unescape_important(weighted_segment).replace("\n", " ").split(' ')
            to_tokenize = [x for x in to_tokenize if x != ""]
            for word in to_tokenize:
                #if we find an embedding, deal with the embedding
                if word.startswith(self.embedding_identifier) and self.embedding_directory is not None:
                    embedding_name = word[len(self.embedding_identifier):].strip('\n')
                    embed, leftover = self._try_get_embedding(embedding_name)
                    if embed is None:
                        print(f"warning, embedding:{embedding_name} does not exist, ignoring")
                    else:
                        if len(embed.shape) == 1:
                            tokens.append([(embed, weight)])
                        else:
                            tokens.append([(embed[x], weight) for x in range(embed.shape[0])])
                    #if we accidentally have leftover text, continue parsing using leftover, else move on to next word
                    if leftover != "":
                        word = leftover
                    else:
                        continue
                #parse word
                tokens.append([(t, weight) for t in self.tokenizer(word)["input_ids"][1:-1]])

        #reshape token array to CLIP input size
        batched_tokens = []
        batch = [(self.start_token, 1.0, 0)]
        batched_tokens.append(batch)
        for i, t_group in enumerate(tokens):
            #words with at least max_word_length tokens are split across batches instead of moved whole to the next one
            is_large = len(t_group) >= self.max_word_length

            while len(t_group) > 0:
                if len(t_group) + len(batch) > self.max_length - 1:
                    remaining_length = self.max_length - len(batch) - 1
                    #break word in two and add end token
                    if is_large:
                        batch.extend([(t,w,i+1) for t,w in t_group[:remaining_length]])
                        batch.append((self.end_token, 1.0, 0))
                        t_group = t_group[remaining_length:]
                    #add end token and pad
                    else:
                        batch.append((self.end_token, 1.0, 0))
                        batch.extend([(pad_token, 1.0, 0)] * (remaining_length))
                    #start new batch
                    batch = [(self.start_token, 1.0, 0)]
                    batched_tokens.append(batch)
                else:
                    batch.extend([(t,w,i+1) for t,w in t_group])
                    t_group = []

        #fill last batch
        batch.extend([(self.end_token, 1.0, 0)] + [(pad_token, 1.0, 0)] * (self.max_length - len(batch) - 1))

        if not return_word_ids:
            batched_tokens = [[(t, w) for t, w,_ in x] for x in batched_tokens]

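        # Illustrative result shape (token ids elided; not from the original
        # file), with return_word_ids=True:
        #   [[(49406, 1.0, 0), (id_a, 1.0, 1), (id_red, 1.3, 2), ...,
        #     (49407, 1.0, 0), (49407, 1.0, 0), ...]]
        # where 49406/49407 are CLIP's start/end tokens.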
        return batched_tokens


    def untokenize(self, token_weight_pair):
        return list(map(lambda a: (a, self.inv_vocab[a[0]]), token_weight_pair))