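"""Benchmark inference of a GPTQ-quantized LLaMA model with ColossalAI tensor parallelism.

Loads an AutoGPTQ checkpoint, shards it with ShardFormer, and measures
per-token generation latency on synthetic inputs.
"""
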
import argparse
import os
import time

import torch
from _utils import print_perf_stats
from auto_gptq import AutoGPTQForCausalLM
from transformers import LlamaTokenizer

import colossalai
from colossalai.inference.tensor_parallel.engine import TPInferEngine
from colossalai.logging import disable_existing_loggers
from colossalai.shardformer import ShardConfig
from colossalai.testing import clear_cache_before_run, rerun_if_address_is_in_use, spawn

os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def run_llama_test(args):
    pretrained_model_dir = args.path
    quantized_model_dir = args.quantized_path
    max_batch_size = args.batch_size
    max_input_len = args.input_len
    max_output_len = args.output_len

    # LLaMA checkpoints ship without a pad token; reuse the EOS token for padding.
    # (LlamaTokenizer is the slow tokenizer, so the original use_fast=True flag had no effect.)
    tokenizer = LlamaTokenizer.from_pretrained(pretrained_model_dir)
    tokenizer.pad_token_id = tokenizer.eos_token_id

    # Load the GPTQ-quantized model onto the current GPU. Fused attention is left
    # uninjected, presumably so ShardFormer can shard the attention modules itself.
    model = AutoGPTQForCausalLM.from_quantized(
        quantized_model_dir, device=torch.cuda.current_device(), inject_fused_attention=False
    )

    model_config = model.config
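
    # ShardFormer config: enable tensor parallelism when tp_size > 1 and use the
    # inference-only GPTQ sharding path.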
    shard_config = ShardConfig(
        enable_tensor_parallelism=(args.tp_size > 1), inference_only=True, inference_gptq=True
    )
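
    # TPInferEngine shards the model across tensor-parallel ranks and bounds the
    # batch size and input/output lengths used during generation.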
    infer_engine = TPInferEngine(model, shard_config, max_batch_size, max_input_len, max_output_len)

    generate_kwargs = dict(max_new_tokens=max_output_len, do_sample=False)

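    # Synthetic benchmark input: random token IDs plus an all-ones attention mask.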
    input_tokens = {
        "input_ids": torch.randint(1, 1000, (max_batch_size, max_input_len), device="cuda"),
        "attention_mask": torch.ones((max_batch_size, max_input_len), device="cuda"),
    }

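    # Time a few generation rounds and record seconds per generated token;
    # the first iteration may also absorb one-off CUDA warm-up costs.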
    iters = 10
    times = []

    for i in range(iters):
        torch.cuda.synchronize()
        start = time.time()
        outputs = infer_engine.generate(input_tokens, **generate_kwargs)
        torch.cuda.synchronize()
        end = time.time()
        out_len = outputs.shape[1]
        print(f" iter {i}: out len {str(out_len)}, generation time {str(end - start)} s")
        times.append((end - start) / (out_len - max_input_len))

    print_perf_stats(times, model_config, max_batch_size)


def check_llama(rank, world_size, port, args):
    disable_existing_loggers()
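    # Set up the per-process distributed environment (one NCCL rank per GPU).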
    colossalai.launch(config={}, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    run_llama_test(args)


@rerun_if_address_is_in_use()
@clear_cache_before_run()
def test_llama(args):
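    # Spawn tp_size worker processes, one per tensor-parallel rank.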
    spawn(check_llama, args.tp_size, args=args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--path", type=str, help="Model path", required=True)
    parser.add_argument("-q", "--quantized_path", type=str, help="Model path", required=True)
    parser.add_argument("-tp", "--tp_size", type=int, default=1, help="Tensor parallel size")
    parser.add_argument("-b", "--batch_size", type=int, default=16, help="Maximum batch size")
    parser.add_argument("--input_len", type=int, default=1024, help="Maximum input length")
    parser.add_argument("--output_len", type=int, default=128, help="Maximum output length")

    args = parser.parse_args()

    test_llama(args)
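

# Example invocation (paths are hypothetical):
#   python gptq_llama.py -p /path/to/llama-7b -q /path/to/llama-7b-4bit-gptq -tp 2 -b 8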