# Adapted from https://github.com/tloen/alpaca-lora/blob/main/generate.py

import argparse
from time import time

import torch
from coati.quant import llama_load_quant, low_resource_init
from transformers import AutoTokenizer, GenerationConfig, LlamaConfig, LlamaForCausalLM


def generate_prompt(instruction, input=None):
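    """Build an Alpaca-style prompt; the "### Response:" marker is later used to strip the prompt from the output."""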
    if input:
        return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{instruction}

### Input:
{input}

### Response:"""
    else:
        return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
{instruction}

### Response:"""


@torch.no_grad()
def evaluate(
    model,
    tokenizer,
    instruction,
    input=None,
    temperature=0.1,
    top_p=0.75,
    top_k=40,
    num_beams=4,
    max_new_tokens=128,
    **kwargs,
):
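    """Run a single generation for `instruction` and return (response_text, num_new_tokens)."""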
    prompt = generate_prompt(instruction, input)
    inputs = tokenizer(prompt, return_tensors="pt")
    input_ids = inputs["input_ids"].cuda()
    generation_config = GenerationConfig(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        num_beams=num_beams,
        **kwargs,
    )
    generation_output = model.generate(
        input_ids=input_ids,
        generation_config=generation_config,
        return_dict_in_generate=True,
        output_scores=True,
        max_new_tokens=max_new_tokens,
        do_sample=True,
    )
    s = generation_output.sequences[0]
    output = tokenizer.decode(s)
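    # Count only the newly generated tokens; prompt tokens are excluded from the throughput statistic.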
    n_new_tokens = s.size(0) - input_ids.size(1)
    return output.split("### Response:")[1].strip(), n_new_tokens


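# Instruction prompts used to measure generation throughput.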
instructions = [
    "Tell me about alpacas.",
    "Tell me about the president of Mexico in 2019.",
    "Tell me about the king of France in 2019.",
    "List all Canadian provinces in alphabetical order.",
    "Write a Python program that prints the first 10 Fibonacci numbers.",
    "Write a program that prints the numbers from 1 to 100. But for multiples of three print 'Fizz' instead of the number and for the multiples of five print 'Buzz'. For numbers which are multiples of both three and five print 'FizzBuzz'.",
    "Tell me five words that rhyme with 'shock'.",
    "Translate the sentence 'I have no mouth but I must scream' into Spanish.",
    "Count up from 1 to 500.",
    # ===
    "How to play support in legends of league",
    "Write a Python program that calculate Fibonacci numbers.",
]
inst = [instructions[0]] * 4

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "pretrained",
        help="Path to pretrained model. Can be a local path or a model name from the HuggingFace model hub.",
    )
    parser.add_argument(
        "--quant",
        choices=["8bit", "4bit"],
        default=None,
        help="Quantization mode. Default: None (no quantization, fp16).",
    )
    parser.add_argument(
        "--gptq_checkpoint",
        default=None,
        help="Path to GPTQ checkpoint. This is only useful when quantization mode is 4bit. Default: None.",
    )
    parser.add_argument(
        "--gptq_group_size",
        type=int,
        default=128,
        help="Group size for GPTQ. This is only useful when quantization mode is 4bit. Default: 128.",
    )
    args = parser.parse_args()

    if args.quant == "4bit":
        assert args.gptq_checkpoint is not None, "Please specify a GPTQ checkpoint."

    tokenizer = AutoTokenizer.from_pretrained(args.pretrained)

    if args.quant == "4bit":
        with low_resource_init():
            config = LlamaConfig.from_pretrained(args.pretrained)
            model = LlamaForCausalLM(config)
        model = llama_load_quant(model, args.gptq_checkpoint, 4, args.gptq_group_size)
        model.cuda()
    else:
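        # Load with HF transformers in fp16, or in 8-bit via bitsandbytes when --quant 8bit is given.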
        model = LlamaForCausalLM.from_pretrained(
            args.pretrained,
            load_in_8bit=(args.quant == "8bit"),
            torch_dtype=torch.float16,
            device_map="auto",
        )
        if args.quant != "8bit":
            model.half()  # seems to fix bugs for some users.

    model.eval()

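    # Generate once per prompt, then report aggregate decoding throughput and peak CUDA memory.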
    total_tokens = 0
    start = time()
    for instruction in instructions:
        print(f"Instruction: {instruction}")
        resp, tokens = evaluate(model, tokenizer, instruction, temperature=0.2, num_beams=1)
        total_tokens += tokens
        print(f"Response: {resp}")
        print("\n----------------------------\n")
    duration = time() - start
    print(f"Total time: {duration:.3f} s, {total_tokens/duration:.3f} tokens/s")
    print(f"Peak CUDA mem: {torch.cuda.max_memory_allocated()/1024**3:.3f} GB")