build_smoothquant_weight.py 1.74 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
import argparse
import os

import torch
from datasets import load_dataset
from transformers import LlamaTokenizer

from colossalai.inference.quant.smoothquant.models.llama import SmoothLlamaForCausalLM


def build_model_and_tokenizer(model_name):
    """Load a SmoothQuant-capable Llama model together with its tokenizer.

    Args:
        model_name: HuggingFace hub name or local checkpoint path.

    Returns:
        ``(model, tokenizer)`` — the model is loaded in float16 (spread
        across devices with ``device_map="sequential"``) and then cast to
        float32 for calibration.
    """
    tokenizer = LlamaTokenizer.from_pretrained(model_name, model_max_length=512)
    load_kwargs = {"torch_dtype": torch.float16, "device_map": "sequential"}
    model = SmoothLlamaForCausalLM.from_pretrained(model_name, **load_kwargs)
    return model.to(torch.float32), tokenizer


def parse_args():
    """Parse command-line arguments for building a SmoothQuant checkpoint.

    Returns:
        argparse.Namespace with ``model_name``, ``output_path``,
        ``dataset_path``, ``num_samples``, and ``seq_len`` attributes.
    """
    parser = argparse.ArgumentParser()
    # The three paths are mandatory: mark them required so argparse fails
    # fast with a clear usage message instead of passing None into main()
    # and crashing later with an opaque TypeError/FileNotFoundError.
    parser.add_argument("--model-name", type=str, required=True, help="model name")
    parser.add_argument(
        "--output-path",
        type=str,
        required=True,
        help="where to save the checkpoint",
    )
    parser.add_argument(
        "--dataset-path",
        type=str,
        required=True,
        help="location of the calibration dataset",
    )
    parser.add_argument("--num-samples", type=int, default=10, help="number of calibration samples")
    parser.add_argument("--seq-len", type=int, default=512, help="sequence length used during calibration")
    args = parser.parse_args()
    return args


@torch.no_grad()
def main():
    """Build and save a SmoothQuant-quantized Llama checkpoint.

    Loads the model/tokenizer, runs calibration on a JSON dataset, and
    writes the quantized weights to the output path.
    """
    args = parse_args()
    model_path = args.model_name
    dataset_path = args.dataset_path
    output_path = args.output_path
    num_samples = args.num_samples
    seq_len = args.seq_len

    # Validate the dataset path BEFORE the expensive model load, so a bad
    # path fails in milliseconds instead of after minutes of weight loading.
    if not os.path.exists(dataset_path):
        raise FileNotFoundError(f"Cannot find the dataset at {dataset_path}")

    model, tokenizer = build_model_and_tokenizer(model_path)

    dataset = load_dataset("json", data_files=dataset_path, split="train")

    # Calibrate activation scales on the dataset and quantize weights in place.
    model.quantized(tokenizer, dataset, num_samples=num_samples, seq_len=seq_len)
    model = model.cuda()

    # NOTE(review): the basename is hard-coded to "llama-7b" regardless of the
    # model actually loaded; kept as-is for backward compatibility with
    # existing consumers of the saved checkpoint — confirm before changing.
    model.save_quantized(output_path, model_basename="llama-7b")


# Script entry point: parse CLI args, calibrate, quantize, and save weights.
if __name__ == "__main__":
    main()