sd2_clip.py
import sd1_clip
import torch
import os

class SD2ClipModel(sd1_clip.SD1ClipModel):
    def __init__(self, arch="ViT-H-14", device="cpu", max_length=77, freeze=True, layer="penultimate", layer_idx=None):
        # Load the SD2 text-encoder configuration that ships alongside this file.
        textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd2_clip_config.json")
        super().__init__(device=device, freeze=freeze, textmodel_json_config=textmodel_json_config)
        # Tokens for an empty prompt: start token, end token, then zero-padding
        # (SD2 pads with 0 rather than repeating the end token).
        self.empty_tokens = [[49406] + [49407] + [0] * 75]
        # Map the requested layer name to an index into the hidden states.
        if layer == "last":
            layer_idx = -1
        elif layer == "penultimate":
            layer_idx = -2
        elif layer == "hidden":
            # An explicit hidden-layer index must be supplied and stay within
            # the text model's 24 transformer layers.
            assert layer_idx is not None
            assert abs(layer_idx) < 24
        else:
            raise NotImplementedError()
        self.clip_layer(layer_idx)

class SD2Tokenizer(sd1_clip.SD1Tokenizer):
    def __init__(self, tokenizer_path=None):
        # SD2 pads prompts with zeros instead of the end token,
        # matching empty_tokens in SD2ClipModel above.
        super().__init__(tokenizer_path, pad_with_end=False)
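

# Minimal usage sketch (not part of the original file): it assumes
# sd2_clip_config.json sits next to this module and that the SD1Tokenizer
# defaults in sd1_clip resolve a tokenizer on their own.
if __name__ == "__main__":
    tokenizer = SD2Tokenizer()
    # Select the third-from-last hidden layer instead of the default penultimate one.
    clip = SD2ClipModel(layer="hidden", layer_idx=-3)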