import torch
from loguru import logger
from transformers import AutoModel, AutoTokenizer


class TextEncoderHFLlamaModel:
    """Llama-based text encoder (HuggingFace) for embedding video prompts.

    Wraps an ``AutoModel`` loaded from ``model_path``, expands the user text
    into a fixed Llama chat template, and returns hidden states from an
    intermediate layer with the template-prefix tokens cropped off.
    """

    def __init__(self, model_path, device):
        """
        Args:
            model_path: HuggingFace model directory or hub id.
            device: torch.device (or device string) the encoder runs on.
        """
        self.device = device
        self.model_path = model_path
        self.init()
        self.load()

    def init(self):
        # Fixed encoding hyper-parameters.
        self.max_length = 351              # token budget after template expansion
        self.hidden_state_skip_layer = 2   # take hidden states 2 layers before the last
        self.crop_start = 95               # number of template-prefix tokens to drop
        self.prompt_template = (
            "<|start_header_id|>system<|end_header_id|>\n\nDescribe the video by detailing the following aspects: "
            "1. The main content and theme of the video."
            "2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects."
            "3. Actions, events, behaviors temporal relationships, physical movement changes of the objects."
            "4. background environment, light, style and atmosphere."
            "5. camera angles, movements, and transitions used in the video:<|eot_id|>"
            "<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|>"
        )

    def load(self):
        # fp16 weights; low_cpu_mem_usage avoids materializing a full fp32 copy on load.
        self.model = AutoModel.from_pretrained(self.model_path, low_cpu_mem_usage=True).to(torch.float16).to(self.device)
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_path, padding_side="right")

    def to_cpu(self):
        # Offload the model to host memory (used between calls when cpu_offload is on).
        self.model = self.model.to("cpu")

    def to_cuda(self):
        # Fix: move back to the configured device instead of a hard-coded
        # "cuda", which broke non-default devices such as "cuda:1".
        self.model = self.model.to(self.device)

    @torch.no_grad()
    def infer(self, text, config):
        """Encode ``text``; return ``(hidden_states, attention_mask)``.

        Args:
            text: raw user prompt, inserted into the chat template.
            config: object with a boolean ``cpu_offload`` attribute; when True
                the model lives on ``self.device`` only for the duration of
                this call and is returned to the CPU afterwards.

        Returns:
            Tuple of the intermediate-layer hidden states and the matching
            attention mask, both with the first ``crop_start`` (template)
            token positions removed.
        """
        if config.cpu_offload:
            self.to_cuda()
        text = self.prompt_template.format(text)
        tokens = self.tokenizer(
            text,
            return_length=False,
            return_overflowing_tokens=False,
            return_attention_mask=True,
            truncation=True,
            max_length=self.max_length,
            padding="max_length",
            return_tensors="pt",
        ).to(self.device)  # fix: inputs must follow self.device, not a hard-coded "cuda"

        outputs = self.model(
            input_ids=tokens["input_ids"],
            attention_mask=tokens["attention_mask"],
            output_hidden_states=True,
        )

        # Select hidden states `hidden_state_skip_layer` layers before the
        # final one, then drop the template-prefix token positions.
        last_hidden_state = outputs.hidden_states[-(self.hidden_state_skip_layer + 1)][:, self.crop_start :]
        attention_mask = tokens["attention_mask"][:, self.crop_start :]
        if config.cpu_offload:
            self.to_cpu()
        return last_hidden_state, attention_mask


if __name__ == "__main__":
    from types import SimpleNamespace

    model_path = ""  # set to a local Llama checkpoint path before running
    model = TextEncoderHFLlamaModel(model_path, torch.device("cuda"))
    text = "A cat walks on the grass, realistic style."
    # Fix: infer() requires a config exposing `cpu_offload`; it was previously
    # called as model.infer(text), which raised a TypeError.
    outputs = model.infer(text, SimpleNamespace(cpu_offload=False))
    logger.info(outputs)