from omegaconf import OmegaConf

from libai.config import LazyCall, get_config
from libai.data.build import build_nlp_test_loader, build_nlp_train_loader
from libai.tokenizer import BertTokenizer
from projects.text_classification.modeling.model import ModelForSequenceClassification
from projects.text_classification.dataset import ClueDataset

# Pull default tokenization, optimizer, model, graph, and training configs
# from LiBai's common config files, then override them below.
tokenization = get_config("common/data/bert_dataset.py").tokenization
optim = get_config("common/optim.py").optim
model_cfg = get_config("common/models/bert.py").cfg
graph = get_config("common/models/graph.py").graph
train = get_config("common/train.py").train

# Override the tokenizer with a Chinese BERT vocabulary.
# NOTE: vocab_file is a machine-specific absolute path; point it at your
# local copy of the bert-base-chinese vocab file.
tokenization.tokenizer = LazyCall(BertTokenizer)(
    vocab_file="/DATA/disk1/liuchi/work/bert-base-chinese-vocab.txt",
    do_lower_case=True,
    do_chinese_wwm=False,
)
tokenization.append_eod = False
# Pad the vocabulary so its size is a multiple of 128 (keeps GEMMs and
# tensor-parallel splits efficient).
tokenization.make_vocab_size_divisible_by = 128

# Dataloaders for the CLUE AFQMC sentence-pair classification task.
# Training truncates pairs to 128 tokens; dev evaluation allows up to 512.
dataloader = OmegaConf.create()
dataloader.train = LazyCall(build_nlp_train_loader)(
    dataset=[
        LazyCall(ClueDataset)(
            task_name="afqmc",
            data_dir="./projects/text_classification/dataset/clue_data/afqmc",
            tokenizer=tokenization.tokenizer,
            max_seq_length=128,
            mode="train",
        ),
    ],
    num_workers=4,
)
dataloader.test = [
    LazyCall(build_nlp_test_loader)(
        dataset=LazyCall(ClueDataset)(
            task_name="afqmc",
            data_dir="./projects/text_classification/dataset/clue_data/afqmc",
            tokenizer=tokenization.tokenizer,
            max_seq_length=512,
            mode="dev",
        ),
        num_workers=4,
    ),
]

model_cfg.update(
    dict(
        # keys that already exist in the base BERT config
        vocab_size=21248,  # bert-base-chinese's 21128 tokens padded to a multiple of 128
        hidden_size=1024,
        hidden_layers=24,
        num_attention_heads=16,
        # keys added for sequence classification
        num_classes=2,
        pretrain_megatron_weight=None,
    )
)
model = LazyCall(ModelForSequenceClassification)(cfg=model_cfg)
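
# How this lazy config is consumed (a sketch; LiBai follows detectron2-style
# LazyConfig semantics, and `instantiate` is exposed from libai.config --
# verify against your LiBai version):
#
#   from libai.config import instantiate
#   net = instantiate(model)  # builds ModelForSequenceClassification(cfg=model_cfg)
#
# LazyCall only records the target callable and its kwargs; nothing is
# constructed until the trainer calls instantiate().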

# Training schedule: one epoch over AFQMC, evaluating every 500 iterations,
# with activation checkpointing on and no model parallelism.
train.update(
    dict(
        activation_checkpoint=dict(enabled=True),
        output_dir="output/benchmark/",
        train_micro_batch_size=4,
        test_micro_batch_size=4,
        train_epoch=1,
        train_iter=0,
        evaluation=dict(
            enabled=True,
            eval_period=500,
        ),
        log_period=50,
        dist=dict(
            data_parallel_size=1,
            tensor_parallel_size=1,
            pipeline_parallel_size=1,
        ),
    )
)
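
# Usage sketch, assuming LiBai's standard distributed launcher (the command
# mirrors the LiBai README; adjust the config path and GPU count as needed):
#
#   bash tools/train.sh tools/train_net.py \
#       projects/text_classification/configs/config.py 1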