test_tensor.py

import torch
from colossalai.tensor import ColoTensor


def test_tensor_indexing():
    torch_t = torch.randn(2, 3)
    colo_t = ColoTensor.init_from_torch_tensor(torch_t)
    assert torch.allclose(torch_t[:, 1], colo_t[:, 1].torch_tensor())


def test_lazy_init_tensor():
    # lazy construction: the underlying payload stays empty until torch_tensor() is called
    lazy_t = ColoTensor(2, 3, dtype=torch.float32, requires_grad=True)
    assert lazy_t._torch_tensor.numel() == 0
    assert lazy_t.numel() == 6 == lazy_t.torch_tensor().numel()


def test_wrapped_tensor_func():
    t_ref = torch.randn(4, 5)
    t = ColoTensor.init_from_torch_tensor(t_ref.clone())

    # non-callable attribute: forwarded to the wrapped tensor
    assert t.is_cuda == t_ref.is_cuda

    # TODO: could not find a tensor function that returns None.

    # a call returning a single torch.Tensor should come back wrapped as a ColoTensor
    t_abs = t.abs()
    assert isinstance(t_abs, ColoTensor) and torch.equal(t_abs.torch_tensor(), t_ref.abs())

    # a call returning a non-tensor value should pass through unchanged
    assert t.dim() == t_ref.dim()

    # a call returning several torch.Tensors should wrap each of them
    t_split1, t_split2 = t.split(2)
    assert isinstance(t_split1, ColoTensor) and isinstance(t_split2, ColoTensor)
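    # extra sanity check (assumes torch_tensor() exposes the underlying data):
    # concatenating the splits along dim 0 should reproduce the reference tensor
    assert torch.equal(torch.cat([t_split1.torch_tensor(), t_split2.torch_tensor()]), t_ref)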


def test_operand():
    t_ref = torch.randn(4, 5)
    t = ColoTensor.init_from_torch_tensor(t_ref.clone())

    # operator overloads on the wrapper should match plain torch arithmetic
    t_ref_res = t_ref + t_ref
    t_res = t + t
    assert torch.allclose(t_ref_res, t_res)
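

# convenience runner so the file can be executed directly with
# `python test_tensor.py`; under pytest these tests are collected automatically
if __name__ == '__main__':
    test_tensor_indexing()
    test_lazy_init_tensor()
    test_wrapped_tensor_func()
    test_operand()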