# calib_data.py
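"""Helpers for building tokenized calibration data.

Converts a Hugging Face dataset, a list of raw strings, or pre-tokenized
token-id lists into fixed-size blocks of token ids.
"""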
import torch
import logging
from typing import List, Union

from datasets import load_dataset


def get_calib_dataset(
    data: Union[str, List[str], List[List[int]]] = "pileval",
    tokenizer=None,
    n_samples=512,
    block_size=512,
    split="train",
    text_column="text",
):
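    """Build a list of calibration blocks of token ids.

    ``data`` may be the name of a Hugging Face dataset ("pileval" maps to
    mit-han-lab/pile-val-backup), a list of raw strings, or a list of
    pre-tokenized token-id lists. Up to ``n_samples`` samples are collected,
    concatenated, and split into ``[1, block_size]`` tensors, which are
    returned as a list. ``tokenizer`` is required unless ``data`` is already
    tokenized.
    """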
    if isinstance(data, str):
        if data == "pileval":
            dataset = load_dataset("mit-han-lab/pile-val-backup", split="validation")
        else:
            dataset = load_dataset(data, split=split)

        dataset = dataset.shuffle(seed=42)

    elif isinstance(data, list):
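        # Only the first element is inspected to distinguish raw strings from
        # pre-tokenized ids; mixed lists are not validated.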
        if isinstance(data[0], str):
            dataset = [{text_column: text} for text in data]
        elif isinstance(data[0][0], int):
            dataset = data
        else:
            raise NotImplementedError(
                "Either pass a string naming a Hugging Face dataset, a list "
                "of strings with one sample of text per element, or a list "
                "of lists of ints for pre-tokenized samples."
            )
    else:
        raise NotImplementedError(
            "Either pass a string naming a Hugging Face dataset, a list "
            "of strings with one sample of text per element, or a list "
            "of lists of ints for pre-tokenized samples."
        )

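    # Tokenize each sample (unless already tokenized) and collect up to
    # n_samples of them.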
    samples = []
    n_run = 0
    for item in dataset:
        if isinstance(item, list):
            line_encoded = item  # already tokenized token ids
        else:
            line = item[text_column]
            line = line.strip()
            line_encoded = tokenizer.encode(line)
        # Skip samples longer than one calibration block.
        if len(line_encoded) > block_size:
            continue
        sample = torch.tensor([line_encoded])
        # Skip empty samples, e.g. whitespace-only lines.
        if sample.numel() == 0:
            continue
        samples.append(sample)
        n_run += 1
        if n_run == n_samples:
            break

    # Concatenate all samples along the sequence dimension and split into
    # contiguous blocks of block_size tokens; a trailing partial block is dropped.
    cat_samples = torch.cat(samples, dim=1)
    n_split = cat_samples.shape[1] // block_size
    logging.debug(f" * Split into {n_split} blocks")
    return [
        cat_samples[:, i * block_size : (i + 1) * block_size] for i in range(n_split)
    ]
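

# Minimal usage sketch: assumes `transformers` is installed; "gpt2" is an
# arbitrary stand-in tokenizer (any tokenizer with an `encode` method works).
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("gpt2")
    blocks = get_calib_dataset(tokenizer=tok, n_samples=16, block_size=128)
    # Each block is a [1, block_size] tensor of token ids.
    print(f"collected {len(blocks)} blocks of shape {tuple(blocks[0].shape)}")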