# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.

"""An example script to run the tensorrt_llm engine."""

import argparse

import torch
from modelopt.deploy.llm import LLM
from transformers import AutoTokenizer


def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tokenizer", type=str, default="")
    parser.add_argument("--engine-dir", type=str, default="/tmp/trtllm_engine")
    parser.add_argument(
        "--input-texts",
        type=str,
        default=(
            "Born in north-east France, Soyer trained as a|Born in California, Soyer trained as a"
        ),
        help="Input texts. Please use | to separate different batches.",
    )
    return parser.parse_args()


def run(args):
    try:
        tokenizer = AutoTokenizer.from_pretrained(args.tokenizer, trust_remote_code=True)
    except Exception as e:
        raise RuntimeError(f"Failed to load tokenizer '{args.tokenizer}': {e}") from e

    print(f"Loaded tokenizer (vocab size: {tokenizer.vocab_size}):\n{tokenizer}")

    input_texts = args.input_texts.split("|")
    assert any(input_texts), "--input-texts not specified"
    print(input_texts)

    # torch.cuda.mem_get_info() returns (free, total) device memory in bytes.
    free_memory_before = torch.cuda.mem_get_info()

    # This is a ModelOpt wrapper on top of tensorrt_llm.hlapi.llm.LLM
    llm_engine = LLM(args.engine_dir, tokenizer)

    # The cudaProfilerStart/Stop calls delimit the region captured when the
    # script runs under Nsight Systems with --capture-range=cudaProfilerApi.
    torch.cuda.cudart().cudaProfilerStart()
    outputs = llm_engine.generate(input_texts)
    torch.cuda.cudart().cudaProfilerStop()

    free_memory_after = torch.cuda.mem_get_info()
    used_gib = (free_memory_before[0] - free_memory_after[0]) / (1024**3)
    print(f"Used GPU memory: {used_gib:.2f} GiB")
    print(outputs)


if __name__ == "__main__":
    args = parse_arguments()
    run(args)
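
# A minimal profiling sketch for the instrumented region above (assumes Nsight
# Systems is installed; the report name "trtllm_gen" and the tokenizer value
# are placeholders):
#
#   nsys profile --capture-range=cudaProfilerApi -o trtllm_gen \
#       python trtllm_text_generation.py --tokenizer <tok> --engine-dir /tmp/trtllm_engine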