"git@developer.sourcefind.cn:zhaoyu6/sglang.git" did not exist on "3b878863f7bb96726c8573efd1b8a6ba90de65a8"
benchmark.py 4.36 KB
Newer Older
Casper Hansen's avatar
Casper Hansen committed
1
2
3
4
5
6
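"""Benchmark prefill and decode throughput of an AWQ-quantized causal LM.

Each round pairs a prompt length with a decode length, times the two phases
separately, and records tokens/second plus peak VRAM; results are printed as
a markdown table.
"""
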
import time
import torch
import argparse
import pandas as pd
from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer
from torch.cuda import OutOfMemoryError

def warmup(model):
    # one large matmul on the model's device to initialize CUDA before timing
    warm_up = torch.randn((4096, 4096)).to(next(model.parameters()).device)
    torch.mm(warm_up, warm_up)

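# One greedy-decoding pass: iteration 0 runs the full prompt through the model
# (prefill); every later iteration feeds back the previously generated token
# (decode). The two phases are timed separately across synchronized boundaries.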
def generate(model, input_ids, n_generate):
    context_time = 0
    generate_time = 0

    with torch.inference_mode():
        for i in range(n_generate):
            torch.cuda.synchronize()
            start = time.time()

            if i == 0:
                # prefill context
                inputs = torch.as_tensor(input_ids, device=next(model.parameters()).device)
            else:
                # decode tokens
                inputs = torch.as_tensor(token, device=next(model.parameters()).device)
            
            out = model(inputs, use_cache=True)

            torch.cuda.synchronize()
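            # greedy next token: argmax over the vocab at the last position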
            token = out[0][:, -1].max(1)[1].unsqueeze(1)

            if i == 0:
                context_time += time.time() - start
            else:
                generate_time += time.time() - start
    
    return context_time, generate_time

def run_round(model_path, quant_file, n_generate, input_ids, batch_size):
    print(f" -- Loading model...")
    model = AutoAWQForCausalLM.from_quantized(
        model_path, quant_file, fuse_layers=True,
        max_new_tokens=n_generate, batch_size=batch_size
    )

    print(f" -- Warming up...")
    warmup(model)

    print(f" -- Generating {n_generate} tokens, {input_ids.shape[1]} in context...")
    
    try:
        context_time, generate_time = generate(model, input_ids, n_generate)
        successful_generate = True
    except OutOfMemoryError:
        # mark the round as OOM so it is reported instead of crashing the sweep
        successful_generate = False
    
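    # peak VRAM allocated so far in this process, measured on the model's device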
    device = next(model.parameters()).device
    memory_used = torch.cuda.max_memory_allocated(device) / (1024 ** 3)
    memory_pct = memory_used / (torch.cuda.get_device_properties(device).total_memory / (1024 ** 3)) * 100

    if successful_generate:
        # prefill processes the whole prompt at once, so rate it by prompt length
        prefill_tokens_per_second = input_ids.shape[1] / context_time * batch_size
        decode_tokens_per_second = n_generate / generate_time * batch_size

        print(f" ** Speed (Prefill): {prefill_tokens_per_second:.2f} tokens/second")
        print(f" ** Speed (Decode): {decode_tokens_per_second:.2f} tokens/second")
        print(f" ** Max Memory (VRAM): {memory_used:.2f} GB ({memory_pct:.2f}%)")
    else:
        prefill_tokens_per_second = 'OOM'
        decode_tokens_per_second = 'OOM'

    return {
        "Batch Size": batch_size,
        "Prefill Length": input_ids.shape[1],
        "Decode Length": n_generate,
        "Prefill tokens/s": prefill_tokens_per_second,
        "Decode tokens/s": decode_tokens_per_second,
        "Memory (VRAM)": f"{memory_used:.2f} GB ({memory_pct:.2f}%)"
    }

def main(args):
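    # each round pairs a prompt length with a decode length; rounds grow until
    # the GPU runs out of memory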
    rounds = [
        {"context": 4, "n_generate": 200},
        {"context": 32, "n_generate": 32},
        {"context": 64, "n_generate": 64},
        {"context": 128, "n_generate": 128},
        {"context": 256, "n_generate": 256},
        {"context": 512, "n_generate": 512},
        {"context": 1024, "n_generate": 1024},
        {"context": 2048, "n_generate": 2048},
    ]

    all_stats = []
    tokenizer = AutoTokenizer.from_pretrained(args.model_path, trust_remote_code=True)

    for settings in rounds:
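        # synthetic prompt: random token ids are enough for a throughput benchmark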
        input_ids = torch.randint(0, tokenizer.vocab_size, (args.batch_size, settings["context"])).cuda()

        stats = run_round(
            args.model_path,
            args.quant_file,
            settings["n_generate"],
            input_ids,
            args.batch_size
        )
        
        all_stats.append(stats)

        # every later round is larger, so once one round OOMs, stop the sweep
        if stats["Prefill tokens/s"] == 'OOM':
            break
    
    df = pd.DataFrame(all_stats)
    print('GPU:', torch.cuda.get_device_name())
    print('Model:', args.model_path)
    print(df.to_markdown(index=False))

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", type=str, default="casperhansen/vicuna-7b-v1.5-awq", help="path to the model")
    parser.add_argument("--quant_file", type=str, default="awq_model_w4_g128.pt", help="weights filename")
    parser.add_argument("--batch_size", type=int, default=1, help="weights filename")
    args = parser.parse_args()

    main(args)
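
# Example invocation (values shown are the argparse defaults):
#   python benchmark.py --model_path casperhansen/vicuna-7b-v1.5-awq \
#       --quant_file awq_model_w4_g128.pt --batch_size 1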