import pytest
import torch
import torch.distributed as dist
import transformers
from packaging import version

import colossalai
from colossalai.inference.pipeline import PPInferEngine
from colossalai.inference.pipeline.policies import LlamaModelInferPolicy
from colossalai.testing import clear_cache_before_run, parameterize, rerun_if_address_is_in_use, spawn

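# The kv-cache manager used by the pipeline inference engine requires a CUDA version above 11.5
# (see the skipif marker on test_pipeline_inference below).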
CUDA_SUPPORT = version.parse(torch.version.cuda) > version.parse("11.5")


def data_gen():
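    # A single dummy prompt of 8 token ids with a matching all-ones attention mask.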
    input_ids = torch.tensor([[15496, 11, 616, 3290, 318, 13779, 318, 13779]], dtype=torch.int64)
    attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1]], dtype=torch.int64)
    return dict(input_ids=input_ids, attention_mask=attention_mask)


inputs = data_gen()
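# Repeat the single sample 16 times along the batch dimension and move it to the GPU.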
for k, v in inputs.items():
    if torch.is_tensor(v) or "Tensor" in v.__class__.__name__:
        new_shape = [1] * v.dim()
        new_shape[0] = 16
        inputs[k] = v.to("cuda").repeat(*new_shape)


def pipeline_inference_test(pp_size, new_length, micro_batch_size):
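    # Build a tiny 4-layer Llama model to keep the test lightweight.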
    model = transformers.LlamaForCausalLM(transformers.LlamaConfig(num_hidden_layers=4))

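    # Build the pipeline-parallel inference engine with the Llama inference policy.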
    engine = PPInferEngine(
        pp_size=pp_size,
        model=model,
        model_policy=LlamaModelInferPolicy(),
        new_length=new_length,
        micro_batch_size=micro_batch_size,
    )
    output = engine.inference(inputs)
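    # Rank 0 checks that the generated sequence has the requested length.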
    if dist.get_rank() == 0:
        assert len(output[0]) == new_length, f"{len(output[0])}, {new_length}"


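# Sweep two pipeline stages across several generation lengths and micro-batch sizes.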
@parameterize("pp_size", [2])
@parameterize("new_length", [4, 8, 16])
@parameterize("micro_batch_size", [1, 4])
@clear_cache_before_run()
def run_pipeline_inference_test(pp_size, new_length, micro_batch_size):
    pipeline_inference_test(pp_size, new_length, micro_batch_size)
    torch.cuda.empty_cache()


def check_pipeline_inference(rank, world_size, port):
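    # Initialize the distributed environment for this rank, then run the parameterized tests.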
    colossalai.launch(config={}, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    run_pipeline_inference_test()


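# Distributed test entry point: spawns two worker processes and is skipped when the CUDA
# version is too old for the kv-cache manager.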
@pytest.mark.skipif(not CUDA_SUPPORT, reason="kv-cache manager engine requires cuda version to be higher than 11.5")
@pytest.mark.dist
@rerun_if_address_is_in_use()
@clear_cache_before_run()
def test_pipeline_inference():
    spawn(check_pipeline_inference, nprocs=2)


if __name__ == "__main__":
    test_pipeline_inference()