import os

from transformers import CLIPTokenizer, CLIPTextModel, CLIPTextConfig
import torch
import traceback
import zipfile

class ClipTokenWeightEncoder:
    def encode_token_weights(self, token_weight_pairs):
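        # Encode each group of (token, weight) pairs, then push every token's embedding
        # away from the empty-prompt embedding by its weight:
        #   z = (z - z_empty) * weight + z_empty
        # so a weight of 1.0 leaves the embedding unchanged.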
        z_empty = self.encode(self.empty_tokens)
        output = []
        for x in token_weight_pairs:
            tokens = [list(map(lambda a: a[0], x))]
            z = self.encode(tokens)
            for i in range(len(z)):
                for j in range(len(z[i])):
                    weight = x[j][1]
                    z[i][j] = (z[i][j] - z_empty[0][j]) * weight + z_empty[0][j]
            output += [z]
        if (len(output) == 0):
            return self.encode(self.empty_tokens)
        return torch.cat(output, dim=-2)

class SD1ClipModel(torch.nn.Module, ClipTokenWeightEncoder):
    """Uses the CLIP transformer encoder for text (from huggingface)"""
    LAYERS = [
        "last",
        "pooled",
        "hidden"
    ]
    def __init__(self, version="openai/clip-vit-large-patch14", device="cpu", max_length=77,
                 freeze=True, layer="last", layer_idx=None, textmodel_json_config=None, textmodel_path=None):  # clip-vit-base-patch32
        super().__init__()
        assert layer in self.LAYERS
        if textmodel_path is not None:
            self.transformer = CLIPTextModel.from_pretrained(textmodel_path)
        else:
            if textmodel_json_config is None:
                textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_clip_config.json")
            config = CLIPTextConfig.from_json_file(textmodel_json_config)
            self.transformer = CLIPTextModel(config)

        self.device = device
        self.max_length = max_length
        if freeze:
            self.freeze()
        self.layer = layer
        self.layer_idx = None
        self.empty_tokens = [[49406] + [49407] * 76]
        if layer == "hidden":
            assert layer_idx is not None
            assert abs(layer_idx) <= 12
            self.clip_layer(layer_idx)

    def freeze(self):
        self.transformer = self.transformer.eval()
        #self.train = disabled_train
        for param in self.parameters():
            param.requires_grad = False

    def clip_layer(self, layer_idx):
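        # Select an intermediate hidden layer (a.k.a. "clip skip"); indices with
        # magnitude >= 12 fall back to the final layer output.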
        if abs(layer_idx) >= 12:
            self.layer = "last"
        else:
            self.layer = "hidden"
            self.layer_idx = layer_idx

    def set_up_textual_embeddings(self, tokens, current_embeds):
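        # Tokens may be raw ids (ints) or embedding tensors (textual inversion). Tensor entries
        # are appended to a copy of the embedding matrix and replaced with their new token ids.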
        out_tokens = []
        next_new_token = token_dict_size = current_embeds.weight.shape[0]
        embedding_weights = []

        for x in tokens:
            tokens_temp = []
            for y in x:
                if isinstance(y, int):
                    tokens_temp += [y]
                else:
                    if y.shape[0] == current_embeds.weight.shape[1]:
                        embedding_weights += [y]
                        tokens_temp += [next_new_token]
                        next_new_token += 1
                    else:
                        print("WARNING: shape mismatch when trying to apply embedding, embedding will be ignored", y.shape[0], current_embeds.weight.shape[1])
            while len(tokens_temp) < len(x):
                tokens_temp += [self.empty_tokens[0][-1]]
            out_tokens += [tokens_temp]

        if len(embedding_weights) > 0:
            new_embedding = torch.nn.Embedding(next_new_token, current_embeds.weight.shape[1])
            new_embedding.weight[:token_dict_size] = current_embeds.weight[:]
            n = token_dict_size
            for x in embedding_weights:
                new_embedding.weight[n] = x
                n += 1
            self.transformer.set_input_embeddings(new_embedding)
        return out_tokens

    def forward(self, tokens):
        backup_embeds = self.transformer.get_input_embeddings()
        tokens = self.set_up_textual_embeddings(tokens, backup_embeds)
        tokens = torch.LongTensor(tokens).to(self.device)
        outputs = self.transformer(input_ids=tokens, output_hidden_states=self.layer=="hidden")
        self.transformer.set_input_embeddings(backup_embeds)

        if self.layer == "last":
            z = outputs.last_hidden_state
        elif self.layer == "pooled":
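            # pooler_output has no sequence dimension; insert one so the shape matches the other branches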
            z = outputs.pooler_output[:, None, :]
        else:
            z = outputs.hidden_states[self.layer_idx]
            z = self.transformer.text_model.final_layer_norm(z)

        return z

    def encode(self, tokens):
        return self(tokens)

def parse_parentheses(string):
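    # Split a string into top-level chunks, keeping each "(...)" group (parentheses included) intact.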
    result = []
    current_item = ""
    nesting_level = 0
    for char in string:
        if char == "(":
            if nesting_level == 0:
                if current_item:
                    result.append(current_item)
                current_item = "("
            else:
                current_item += char
            nesting_level += 1
        elif char == ")":
            nesting_level -= 1
            if nesting_level == 0:
                result.append(current_item + ")")
                current_item = ""
            else:
                current_item += char
        else:
            current_item += char
    if current_item:
        result.append(current_item)
    return result

def token_weights(string, current_weight):
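    # Recursively assign weights: each "(...)" level multiplies the weight by 1.1,
    # unless the group ends in ":number", which sets the weight explicitly.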
    a = parse_parentheses(string)
    out = []
    for x in a:
        weight = current_weight
        if len(x) >= 2 and x[-1] == ')' and x[0] == '(':
            x = x[1:-1]
            xx = x.rfind(":")
            weight *= 1.1
            if xx > 0:
                try:
                    weight = float(x[xx+1:])
                    x = x[:xx]
                except:
                    pass
            out += token_weights(x, weight)
        else:
            out += [(x, current_weight)]
    return out

def escape_important(text):
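    # Hide escaped parentheses ("\(" / "\)") behind control characters so the weight parser ignores them.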
    text = text.replace("\\)", "\0\1")
    text = text.replace("\\(", "\0\2")
    return text

def unescape_important(text):
    text = text.replace("\0\1", ")")
    text = text.replace("\0\2", "(")
    return text

def safe_load_embed_zip(embed_path):
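    # Last-resort loader: read the raw tensor bytes straight out of the zip-format checkpoint
    # and infer the embedding width (768 for SD1.x, 1024 for SD2.x) from the float count.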
    with zipfile.ZipFile(embed_path) as myzip:
        names = list(filter(lambda a: "data/" in a, myzip.namelist()))
        names.reverse()
        for n in names:
            with myzip.open(n) as myfile:
                data = myfile.read()
                number = len(data) // 4
                length_embed = 1024 #sd2.x
                if number < 768:
                    continue
                if number % 768 == 0:
                    length_embed = 768 #sd1.x
                num_embeds = number // length_embed
                embed = torch.frombuffer(data, dtype=torch.float)
                out = embed.reshape((num_embeds, length_embed)).clone()
                del embed
                return out

def expand_directory_list(directories):
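    # Walk each directory (following symlinks) so embeddings in subfolders are found too.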
    dirs = set()
    for x in directories:
        dirs.add(x)
        for root, subdir, file in os.walk(x, followlinks=True):
            dirs.add(root)
    return list(dirs)

def load_embed(embedding_name, embedding_directory):
    if isinstance(embedding_directory, str):
        embedding_directory = [embedding_directory]

    embedding_directory = expand_directory_list(embedding_directory)

    # try the name as given first, then with the common embedding file extensions
    valid_file = None
    for embed_dir in embedding_directory:
        embed_path = os.path.join(embed_dir, embedding_name)
        if not os.path.isfile(embed_path):
            extensions = ['.safetensors', '.pt', '.bin']
            for x in extensions:
                t = embed_path + x
                if os.path.isfile(t):
                    valid_file = t
                    break
        else:
            valid_file = embed_path
        if valid_file is not None:
            break

    if valid_file is None:
        return None

    embed_path = valid_file

    embed_out = None

    try:
        if embed_path.lower().endswith(".safetensors"):
            import safetensors.torch
            embed = safetensors.torch.load_file(embed_path, device="cpu")
        else:
            if 'weights_only' in torch.load.__code__.co_varnames:
                try:
                    embed = torch.load(embed_path, weights_only=True, map_location="cpu")
                except:
                    # weights_only refused the file; fall back to reading the raw tensors from the zip
                    embed_out = safe_load_embed_zip(embed_path)
            else:
                embed = torch.load(embed_path, map_location="cpu")
    except Exception as e:
        print(traceback.format_exc())
        print()
        print("error loading embedding, skipping loading:", embedding_name)
        return None

    if embed_out is None:
        # textual inversion embeddings keep their tensor under 'string_to_param';
        # other formats are a flat dict, so just take the first value
        if 'string_to_param' in embed:
            values = embed['string_to_param'].values()
        else:
            values = embed.values()
        embed_out = next(iter(values))
    return embed_out

class SD1Tokenizer:
    def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None):
        if tokenizer_path is None:
            tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer")
        self.tokenizer = CLIPTokenizer.from_pretrained(tokenizer_path)
        self.max_length = max_length
        self.max_tokens_per_section = self.max_length - 2

        empty = self.tokenizer('')["input_ids"]
        self.start_token = empty[0]
        self.end_token = empty[1]
        self.pad_with_end = pad_with_end
        vocab = self.tokenizer.get_vocab()
        self.inv_vocab = {v: k for k, v in vocab.items()}
        self.embedding_directory = embedding_directory
        self.max_word_length = 8
        self.embedding_identifier = "embedding:"

    def _try_get_embedding(self, embedding_name:str):
        '''
        Takes a potential embedding name and tries to retrieve it.
        Returns a tuple of (embedding, leftover string); the embedding can be None.
        '''
        embed = load_embed(embedding_name, self.embedding_directory)
        if embed is None:
            stripped = embedding_name.strip(',')
            if len(stripped) < len(embedding_name):
                embed = load_embed(stripped, self.embedding_directory)
                return (embed, embedding_name[len(stripped):])
        return (embed, "")


    def tokenize_with_weights(self, text:str, return_word_ids=False):
        '''
        Takes a prompt and converts it to a list of (token, weight, word id) elements.
        Tokens can be either integer token ids or precomputed CLIP embedding tensors.
        Word id values are unique per word and embedding, with id 0 reserved for non-word tokens.
        The returned list has dimensions NxM, where M is the input size of CLIP.
        '''
        if self.pad_with_end:
            pad_token = self.end_token
        else:
            pad_token = 0

        text = escape_important(text)
        parsed_weights = token_weights(text, 1.0)

        #tokenize words
        tokens = []
        for weighted_segment, weight in parsed_weights:
            to_tokenize = unescape_important(weighted_segment).replace("\n", " ").split(' ')
            to_tokenize = [x for x in to_tokenize if x != ""]
            for word in to_tokenize:
                #if we find an embedding, deal with the embedding
                if word.startswith(self.embedding_identifier) and self.embedding_directory is not None:
                    embedding_name = word[len(self.embedding_identifier):].strip('\n')
                    embed, leftover = self._try_get_embedding(embedding_name)
                    if embed is None:
                        print(f"warning, embedding:{embedding_name} does not exist, ignoring")
                    else:
                        if len(embed.shape) == 1:
                            tokens.append([(embed, weight)])
                        else:
                            tokens.append([(embed[x], weight) for x in range(embed.shape[0])])
                    #if we accidentally have leftover text, continue parsing using leftover, else move on to next word
                    if leftover != "":
                        word = leftover
                    else:
                        continue
                #parse word
                tokens.append([(t, weight) for t in self.tokenizer(word)["input_ids"][1:-1]])

        #reshape token array to CLIP input size
        batched_tokens = []
        batch = [(self.start_token, 1.0, 0)]
        batched_tokens.append(batch)
        for i, t_group in enumerate(tokens):
            #determine if we're going to try and keep the tokens in a single batch
            is_large = len(t_group) >= self.max_word_length

            while len(t_group) > 0:
                if len(t_group) + len(batch) > self.max_length - 1:
                    remaining_length = self.max_length - len(batch) - 1
                    #break word in two and add end token
                    if is_large:
                        batch.extend([(t,w,i+1) for t,w in t_group[:remaining_length]])
                        batch.append((self.end_token, 1.0, 0))
                        t_group = t_group[remaining_length:]
                    #add end token and pad
                    else:
                        batch.append((self.end_token, 1.0, 0))
                        batch.extend([(pad_token, 1.0, 0)] * (remaining_length))
                    #start new batch
                    batch = [(self.start_token, 1.0, 0)]
                    batched_tokens.append(batch)
                else:
                    batch.extend([(t,w,i+1) for t,w in t_group])
                    t_group = []

        #fill last batch
        batch.extend([(self.end_token, 1.0, 0)] + [(pad_token, 1.0, 0)] * (self.max_length - len(batch) - 1))

        if not return_word_ids:
            batched_tokens = [[(t, w) for t, w,_ in x] for x in batched_tokens]

        return batched_tokens

    def untokenize(self, token_weight_pair):
        return list(map(lambda a: (a, self.inv_vocab[a[0]]), token_weight_pair))