from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import SWCELossInferencer
from opencompass.openicl.icl_evaluator import BPCEvaluator
from opencompass.datasets import LLMCompressionDataset


# The three corpora for llm_compression used in the original paper
# See configs/datasets/llm_compression/README.md for more details
subset_mapping = {
    'arxiv_math': ['arxiv_math'],
    'commoncraw': ['cc'],
    'python': ['python'],
}


# Build LLM Compression datasets
llm_compression_datasets = []
for _name in subset_mapping.keys():
    llm_cmp_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template='{content}',
        ),
        # No in-context example, using ZeroRetriever
        retriever=dict(type=ZeroRetriever),
        # Calculates cross entropy loss for each batch based on a sliding context window
        # Setting block_size=1900 and stride=512 according to the original paper
        inferencer=dict(type=SWCELossInferencer, block_size=1900, stride=512),
    )
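
    # A minimal sketch of the sliding-window loss assumed above (illustrative
    # only; `score_window` is a hypothetical helper, not the actual
    # SWCELossInferencer implementation):
    #
    #   prev_end, total_loss = 0, 0.0
    #   for end in range(min(block_size, len(tokens)), len(tokens) + stride, stride):
    #       end = min(end, len(tokens))
    #       begin = max(0, end - block_size)
    #       n_new = end - prev_end  # tokens not yet scored in earlier windows
    #       # score only the trailing n_new tokens, each seeing up to block_size
    #       # tokens of left context within the current window
    #       total_loss += score_window(tokens[begin:end], n_targets=n_new)
    #       prev_end = end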

    # Calculates Bits per Character (BPC) based on the CE loss from the inference stage
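    # The BPC formula is assumed to follow the original paper: the summed
    # token-level cross-entropy loss (in nats) over a corpus, divided by
    # ln(2) times the number of characters in that corpus, i.e.
    #   BPC = total_loss / (num_characters * log(2))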
    llm_cmp_eval_cfg = dict(evaluator=dict(type=BPCEvaluator))

    llm_compression_datasets.append(
        dict(
            abbr=f'llm_compression-{_name}',
            type=LLMCompressionDataset,
            path='./data/llm-compression',
            name=_name,
            samples=None,  # None uses the full corpus; set a small number for quick testing
            reader_cfg=dict(
                input_columns=['content'],
                output_column=None,
            ),
            infer_cfg=llm_cmp_infer_cfg,
            eval_cfg=llm_cmp_eval_cfg,
        ))

del _name
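
# Example of pulling these datasets into a full OpenCompass eval config (a
# minimal sketch; the relative import paths and the model config below are
# assumptions, not part of this file):
#
#   from mmengine.config import read_base
#
#   with read_base():
#       from .datasets.llm_compression.llm_compression import \
#           llm_compression_datasets
#       from .models.hf_llama.hf_llama2_7b import models
#
#   datasets = llm_compression_datasets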