# benchmark.py
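"""Benchmark prefill and decode throughput (tokens/s) and peak VRAM usage for
an AWQ-quantized causal LM loaded with AutoAWQ, over doubling context and
generation lengths, and print the results as a markdown table."""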
import time
import torch
import argparse
import numpy as np
import pandas as pd
from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer
from torch.cuda import OutOfMemoryError

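# a single large matmul triggers CUDA context and kernel initialization so the
# first timed forward pass is not penalized by one-time setup costs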
def warmup(model):
    warm_up = torch.randn((4096, 4096)).to(next(model.parameters()).device)
    torch.mm(warm_up, warm_up)

def generate(model, input_ids, n_generate):
    context_time = 0
    generate_time = []

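    # synchronize before and after each forward pass so the wall-clock timings
    # measure completed GPU work rather than asynchronous kernel launches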
    with torch.inference_mode():
        for i in range(n_generate):
            torch.cuda.synchronize()
            start = time.time()

            if i == 0:
                # prefill: run the full prompt through the model once to
                # populate the KV cache
                inputs = torch.as_tensor(input_ids, device=next(model.parameters()).device)
            else:
                # decode: feed back only the most recently generated token
                inputs = torch.as_tensor(token, device=next(model.parameters()).device)
            
            out = model(inputs, use_cache=True)

            torch.cuda.synchronize()
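            # greedy decoding: argmax over the vocab at the last position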
            token = out[0][:, -1].max(1)[1].unsqueeze(1)

            if i == 0:
                context_time += time.time() - start
            else:
                generate_time.append(time.time() - start)
    
    return context_time, generate_time

def run_round(model_path, quant_file, n_generate, input_ids, batch_size, no_safetensors):
    print(f" -- Loading model...")
    model = AutoAWQForCausalLM.from_quantized(
        model_path, quant_file, fuse_layers=True,
        max_new_tokens=n_generate, batch_size=batch_size,
        safetensors=not no_safetensors
    )

    print(f" -- Warming up...")
    warmup(model)

    print(f" -- Generating {n_generate} tokens, {input_ids.shape[1]} in context...")
    
    try:
        context_time, generate_time = generate(model, input_ids, n_generate)
        successful_generate = True
    except RuntimeError as ex:
        # torch.cuda.OutOfMemoryError subclasses RuntimeError; keep the string
        # check as a fallback for OOMs reported as plain RuntimeErrors
        if isinstance(ex, OutOfMemoryError) or 'cuda out of memory' in str(ex).lower():
            successful_generate = False
        else:
            raise  # re-raise unrelated errors with the original traceback
    
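    # note: torch.cuda.max_memory_allocated is a process-wide high-water mark,
    # so later rounds report the peak across all rounds run so far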
    device = next(model.parameters()).device
    memory_used = torch.cuda.max_memory_allocated(device) / (1024 ** 3)
    memory_pct = memory_used / (torch.cuda.get_device_properties(device).total_memory / (1024 ** 3)) * 100

    if successful_generate:
        # number of tokens in context / time for processing context * batch size
        prefill_tokens_per_second = input_ids.shape[1] / context_time * batch_size
        # 1 second / median time per token in seconds * batch size
        decode_tokens_per_second = 1 / np.median(generate_time) * batch_size

        print(f" ** Speed (Prefill): {prefill_tokens_per_second:.2f} tokens/second")
        print(f" ** Speed (Decode): {decode_tokens_per_second:.2f} tokens/second")
        print(f" ** Max Memory (VRAM): {memory_used:.2f} GB ({memory_pct:.2f}%)")
    else:
        prefill_tokens_per_second = 'OOM'
        decode_tokens_per_second = 'OOM'

    return {
        "Batch Size": batch_size,
        "Prefill Length": input_ids.shape[1],
        "Decode Length": n_generate,
        "Prefill tokens/s": prefill_tokens_per_second,
        "Decode tokens/s": decode_tokens_per_second,
        "Memory (VRAM)": f"{memory_used:.2f} GB ({memory_pct:.2f}%)"
    }, model.quant_config.version

def main(args):
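    # context length and number of generated tokens double each round, 32 -> 2048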
    rounds = [
        {"context": 32, "n_generate": 32},
        {"context": 64, "n_generate": 64},
        {"context": 128, "n_generate": 128},
        {"context": 256, "n_generate": 256},
        {"context": 512, "n_generate": 512},
        {"context": 1024, "n_generate": 1024},
        {"context": 2048, "n_generate": 2048},
    ]

    all_stats = []
    tokenizer = AutoTokenizer.from_pretrained(args.model_path, trust_remote_code=True)

    for settings in rounds:
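        # random token ids are sufficient here: throughput does not depend on
        # the semantic quality of the prompt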
        input_ids = torch.randint(0, tokenizer.vocab_size, (args.batch_size, settings["context"])).cuda()

        stats, model_version = run_round(
            args.model_path,
            args.quant_file,
            settings["n_generate"],
            input_ids,
            args.batch_size,
            args.no_safetensors
        )
        
        all_stats.append(stats)

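        # a longer round would only need more memory, so stop after the first OOM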
        if stats["Prefill tokens/s"] == 'OOM':
            break
    
    df = pd.DataFrame(all_stats)
    print('GPU:', torch.cuda.get_device_name())
    print('Model:', args.model_path)
    print('Version:', model_version)
    print(df.to_markdown(index=False))

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", type=str, default="casperhansen/mistral-7b-instruct-v0.1-awq", help="path to the model")
    parser.add_argument("--quant_file", type=str, default="", help="weights filename")
    parser.add_argument("--batch_size", type=int, default=1, help="Batch size for cache and generation")
    parser.add_argument("--no_safetensors", default=False, action="store_true", help="Use for disabling safetensors")
    args = parser.parse_args()

    main(args)
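
# Example invocation (illustrative; --quant_file is model-specific, so pass the
# weights filename from your AWQ checkpoint, or leave it at the empty default
# if the loader can locate the weights on its own):
#   python benchmark.py --model_path casperhansen/mistral-7b-instruct-v0.1-awq --batch_size 1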