tensor.py 1.58 KB
Newer Older
Xinchi Huang's avatar
Xinchi Huang committed
1
import torch
2
from lightx2v.utils.registry_factory import TENSOR_REGISTER
3
from safetensors import safe_open
4
5
6
7


@TENSOR_REGISTER("Default")
class DefaultTensor:
    """Named-tensor wrapper supporting eager loading from a weight dict or
    lazy loading from an open safetensors file, plus movement between host
    memory (optionally staged through a pinned buffer) and CUDA.

    The loaded tensor is always converted to ``torch.bfloat16``.
    """

    def __init__(self, tensor_name, lazy_load=False, lazy_load_file=None):
        """
        Args:
            tensor_name: Key identifying this tensor in the weight dict or
                the safetensors file.
            lazy_load: If True, the tensor is fetched on demand via
                ``load_from_disk()`` and ``load()`` becomes a no-op.
            lazy_load_file: Open safetensors handle (presumably from
                ``safe_open`` — confirm against caller) used when
                ``lazy_load`` is True.
        """
        self.tensor_name = tensor_name
        self.lazy_load = lazy_load
        self.lazy_load_file = lazy_load_file

    def load_from_disk(self):
        """Read the tensor from ``lazy_load_file`` as bfloat16.

        Pins the host memory for faster async host-to-device copies, except
        while torch.compile/dynamo is tracing (``pin_memory`` is skipped
        there, matching the original compile-safe branch).

        Raises:
            ValueError: if no lazy-load file handle was supplied.
        """
        if self.lazy_load_file is None:
            raise ValueError(f"lazy_load_file is not set for tensor '{self.tensor_name}'")
        tensor = self.lazy_load_file.get_tensor(self.tensor_name).to(torch.bfloat16)
        if not torch._dynamo.is_compiling():
            tensor = tensor.pin_memory()
        self.tensor = tensor

    def load(self, weight_dict):
        """Eagerly load the tensor from ``weight_dict`` (no-op when lazy).

        Also pre-allocates a pinned staging buffer so later ``to_cpu()``
        offloads can run asynchronously.
        """
        if not self.lazy_load:
            self.tensor = weight_dict[self.tensor_name].to(torch.bfloat16)
            self.pinned_tensor = torch.empty(self.tensor.shape, pin_memory=True, dtype=self.tensor.dtype)

    def clear(self):
        """Release the reference to the loaded tensor. Safe to call even if
        no tensor is currently loaded (idempotent)."""
        if hasattr(self, "tensor"):
            del self.tensor

    def _calculate_size(self):
        """Return the loaded tensor's payload size in bytes."""
        return self.tensor.numel() * self.tensor.element_size()

    def to_cpu(self, non_blocking=False):
        """Move the tensor to host memory.

        Stages through the pinned buffer when ``load()`` allocated one;
        ``copy_`` returns the (already host-resident) pinned buffer, so no
        further ``.cpu()`` call is needed.
        """
        if hasattr(self, "pinned_tensor"):
            self.tensor = self.pinned_tensor.copy_(self.tensor, non_blocking=non_blocking)
        else:
            self.tensor = self.tensor.to("cpu", non_blocking=non_blocking)

    def to_cuda(self, non_blocking=False):
        """Move the tensor to the current CUDA device."""
        self.tensor = self.tensor.cuda(non_blocking=non_blocking)

    def state_dict(self, destination=None):
        """Insert an owned (detached, cloned) CPU copy of the tensor into
        ``destination`` under ``tensor_name`` and return the mapping.

        Args:
            destination: Optional dict to populate; a new one is created
                when omitted.
        """
        if destination is None:
            destination = {}
        destination[self.tensor_name] = self.tensor.cpu().detach().clone()
        return destination