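"""Smoke test: tensor-parallel LLaMA inference through ColossalAI's TPInferEngine."""
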
import os

import pytest
import torch
from packaging import version
from transformers import LlamaForCausalLM
from transformers.models.llama.configuration_llama import LlamaConfig

import colossalai
from colossalai.inference.tensor_parallel.engine import TPInferEngine

# NOTE: assumed module path for init_to_get_rotary (used below); verify against your ColossalAI version.
from colossalai.inference.tensor_parallel.modeling.llama import init_to_get_rotary
from colossalai.logging import disable_existing_loggers
from colossalai.shardformer import ShardConfig
from colossalai.testing import clear_cache_before_run, parameterize, rerun_if_address_is_in_use, spawn

os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
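
# Test hyperparameters: tensor-parallel world size, batch shape, and generation length.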
TPSIZE = 2
BATCH_SIZE = 8
MAX_INPUT_LEN = 12
MAX_OUTPUT_LEN = 100

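# The kv-cache inference engine requires a CUDA version above 11.5 (see the skip marker below).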
CUDA_SUPPORT = version.parse(torch.version.cuda) > version.parse("11.5")


@parameterize(
    "test_config",
    [
        {
            "tp_size": TPSIZE,
        }
    ],
)
def run_llama_test(test_config):
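    # Build a deliberately tiny half-precision LLaMA model so the test stays fast.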
    llama_config = LlamaConfig(num_hidden_layers=2, bos_token_id=0, eos_token_id=1, vocab_size=1200, hidden_size=1024)
    model = LlamaForCausalLM(llama_config)
    model = model.half()

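    # Enable tensor parallelism in the shard config and wrap the model in the
    # tensor-parallel inference engine, which shards it and manages the kv-cache.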
    shard_config = ShardConfig(
        enable_tensor_parallelism=test_config["tp_size"] > 1, inference_only=True
    )
    infer_engine = TPInferEngine(model, shard_config, BATCH_SIZE, MAX_INPUT_LEN, MAX_OUTPUT_LEN)
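    # Precompute the rotary position embedding (RoPE) cos/sin cache on the wrapped LLaMA model.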
    init_to_get_rotary(model.model, base=10000)
    generate_kwargs = dict(max_new_tokens=MAX_OUTPUT_LEN, do_sample=False)

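    # Generate from a random dummy batch; do_sample=False keeps decoding greedy and deterministic.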
    input_tokens = {
        "input_ids": torch.randint(1, 1000, (BATCH_SIZE, MAX_INPUT_LEN), device="cuda"),
        "attention_mask": torch.ones((BATCH_SIZE, MAX_INPUT_LEN), device="cuda"),
    }
    outputs = infer_engine.generate(input_tokens, **generate_kwargs)

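    # Only check that generation ran end-to-end; output content is not validated.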
    assert outputs is not None


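# Per-process worker: initialize the distributed environment, then run the parameterized test.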
def check_llama(rank, world_size, port):
    disable_existing_loggers()
    colossalai.launch(config={}, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    run_llama_test()


@pytest.mark.skipif(not CUDA_SUPPORT, reason="kv-cache manager engine requires cuda version to be higher than 11.5")
@pytest.mark.dist
@rerun_if_address_is_in_use()
@clear_cache_before_run()
def test_llama():
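    # Spawn TPSIZE processes, one per tensor-parallel rank.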
    spawn(check_llama, TPSIZE)


if __name__ == "__main__":
    test_llama()