import torch
from transformers import AutoModel, AutoTokenizer


class TextEncoderHFLlamaModel:
    """Llama-based text encoder (HunyuanVideo-style) wrapped around HF transformers.

    Encodes a prompt with a fixed system template and returns intermediate
    hidden states (skipping the last few layers) with the template prefix
    cropped off, plus the matching attention mask.
    """

    def __init__(self, model_path, device):
        """Load the encoder.

        Args:
            model_path: Path of the pretrained HF checkpoint directory.
            device: torch.device (or device string) the model runs on.
        """
        self.device = device
        self.model_path = model_path
        self.init()
        self.load()

    def init(self):
        """Set encoding hyper-parameters and the prompt template."""
        # Max token length of template + user prompt (template ~95 tokens + 256 for the prompt).
        self.max_length = 351
        # Take hidden states this many layers before the last one.
        self.hidden_state_skip_layer = 2
        # Number of leading template tokens to crop from the output.
        self.crop_start = 95
        self.prompt_template = (
            "<|start_header_id|>system<|end_header_id|>\n\nDescribe the video by detailing the following aspects: "
            "1. The main content and theme of the video."
            "2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects."
            "3. Actions, events, behaviors temporal relationships, physical movement changes of the objects."
            "4. background environment, light, style and atmosphere."
            "5. camera angles, movements, and transitions used in the video:<|eot_id|>"
            "<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|>"
        )

    def load(self):
        """Instantiate the fp16 model on self.device and its tokenizer."""
        self.model = AutoModel.from_pretrained(self.model_path, low_cpu_mem_usage=True).to(torch.float16).to(self.device)
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_path, padding_side="right")

    def to_cpu(self):
        """Offload the model to CPU (used with cpu_offload between calls)."""
        self.model = self.model.to("cpu")

    def to_cuda(self):
        # Move the model back to the configured compute device for inference.
        # Fix: use self.device instead of a hard-coded "cuda" so the wrapper
        # honors the device passed to the constructor.
        self.model = self.model.to(self.device)

    @torch.no_grad()
    def infer(self, text, args=None):
        """Encode a prompt.

        Args:
            text: User prompt inserted into the system template.
            args: Optional namespace with a `cpu_offload` bool; when true the
                model is moved to the device for this call and back to CPU
                afterwards. Defaults to None (no offloading) so the method can
                be called as `infer(text)`.

        Returns:
            (last_hidden_state, attention_mask), both cropped past the
            template prefix.
        """
        cpu_offload = args is not None and args.cpu_offload
        if cpu_offload:
            self.to_cuda()
        text = self.prompt_template.format(text)
        tokens = self.tokenizer(
            text,
            return_length=False,
            return_overflowing_tokens=False,
            return_attention_mask=True,
            truncation=True,
            max_length=self.max_length,
            padding="max_length",
            return_tensors="pt",
        ).to(self.device)  # fix: was hard-coded "cuda", inconsistent with self.device

        outputs = self.model(
            input_ids=tokens["input_ids"],
            attention_mask=tokens["attention_mask"],
            output_hidden_states=True,
        )

        # Pick the hidden states `hidden_state_skip_layer` layers before the
        # final one, then drop the template-prefix positions.
        last_hidden_state = outputs.hidden_states[-(self.hidden_state_skip_layer + 1)][:, self.crop_start :]
        attention_mask = tokens["attention_mask"][:, self.crop_start :]

        if cpu_offload:
            self.to_cpu()
        return last_hidden_state, attention_mask


if __name__ == "__main__":
    # Smoke test: requires the local HunyuanVideo checkpoint and a CUDA device.
    from types import SimpleNamespace

    model = TextEncoderHFLlamaModel(
        "/mnt/nvme0/yongyang/projects/hy/HunyuanVideo/ckpts/text_encoder",
        torch.device("cuda"),
    )
    text = "A cat walks on the grass, realistic style."
    # Fix: infer() reads args.cpu_offload, but was called without an args
    # object, which raised TypeError. Pass an explicit no-offload namespace.
    outputs = model.infer(text, SimpleNamespace(cpu_offload=False))
    print(outputs)