#!/usr/bin/env bash
# CI smoke test: runs short SFT, reward-model, and PPO prompt-training jobs
# across several model families (bloom / gpt2 / opt / deberta / roberta) and
# strategies (colossalai_zero2 / ddp), then removes the produced checkpoints.
#
# Required environment variables:
#   SFT_DATASET      - path to the sft dataset
#   PROMPT_PATH      - path to the prompts csv
#   PRETRAIN_DATASET - path to the alpaca data

set -xueo pipefail

# Use ${VAR:-} so the emptiness test itself does not trip `set -u`
# when the variable is entirely unset (we want the friendly message,
# not an "unbound variable" abort).
if [ -z "${SFT_DATASET:-}" ]; then
    echo "Please set \$SFT_DATASET to the path to sft dataset."
    exit 1
fi

if [ -z "${PROMPT_PATH:-}" ]; then
    echo "Please set \$PROMPT_PATH to the path to prompts csv."
    exit 1
fi

if [ -z "${PRETRAIN_DATASET:-}" ]; then
    echo "Please set \$PRETRAIN_DATASET to the path to alpaca data."
    exit 1
fi

# Absolute directory containing this script and the training entry points.
BASE=$(realpath "$(dirname "$0")")

export OMP_NUM_THREADS=8

# install requirements
pip install -r "${BASE}/requirements.txt"

# run wandb offline so CI needs no credentials or network access
wandb init -m offline

# train sft
torchrun --standalone --nproc_per_node=4 "${BASE}/train_sft.py" --pretrain 'bigscience/bloom-560m' \
        --model 'bloom' --strategy colossalai_zero2 --lora_rank 4 \
        --dataset "$SFT_DATASET" --max_datasets_size 512 --max_epochs 1 \
        --save_path "${BASE}/output"

torchrun --standalone --nproc_per_node=4 "${BASE}/train_sft.py" --pretrain 'gpt2' \
        --model 'gpt2' --strategy colossalai_zero2 \
        --dataset "$SFT_DATASET" --max_datasets_size 512 --max_epochs 1 \
        --save_path "${BASE}/output"

torchrun --standalone --nproc_per_node=4 "${BASE}/train_sft.py" --pretrain 'facebook/opt-350m' \
        --model 'opt' --strategy colossalai_zero2 --lora_rank 4 \
        --dataset "$SFT_DATASET" --max_datasets_size 512 --max_epochs 1 \
        --save_path "${BASE}/output"

torchrun --standalone --nproc_per_node=4 "${BASE}/train_sft.py" --pretrain 'gpt2' \
        --model 'gpt2' --strategy ddp --lora_rank 4 \
        --dataset "$SFT_DATASET" --max_datasets_size 512 --max_epochs 1 \
        --save_path "${BASE}/output"

# NOTE(review): the 'naive' strategy run was already disabled upstream;
# kept here, still disabled, for reference.
#torchrun --standalone --nproc_per_node=4 ${BASE}/train_sft.py --pretrain 'facebook/opt-350m' \
#        --model 'opt' --strategy naive \
#        --dataset $SFT_DATASET --max_datasets_size 512 --max_epochs 1 \
#        --save_path ${BASE}/output

rm -rf "${BASE}/output"

# train rm
torchrun --standalone --nproc_per_node=2 "${BASE}/train_reward_model.py" \
                            --pretrain 'facebook/opt-350m' --model 'opt' \
                            --strategy colossalai_zero2 --loss_fn 'log_sig' \
                            --dataset 'Anthropic/hh-rlhf' --subset 'harmless-base' \
                            --test True --lora_rank 4 \
                            --save_path "${BASE}/rm_ckpt_opt.pt"

torchrun --standalone --nproc_per_node=2 "${BASE}/train_reward_model.py" \
                            --pretrain 'gpt2' --model 'gpt2' \
                            --strategy colossalai_zero2 --loss_fn 'log_exp' \
                            --dataset 'Dahoas/rm-static' \
                            --test True --lora_rank 4 \
                            --save_path "${BASE}/rm_ckpt_gpt.pt"

torchrun --standalone --nproc_per_node=2 "${BASE}/train_reward_model.py" \
                            --pretrain 'gpt2' --model 'gpt2' \
                            --strategy ddp --loss_fn 'log_exp' \
                            --dataset 'Dahoas/rm-static' \
                            --test True --lora_rank 4 \
                            --save_path "${BASE}/rm_ckpt.pt"

torchrun --standalone --nproc_per_node=2 "${BASE}/train_reward_model.py" \
                            --pretrain 'bigscience/bloom-560m' --model 'bloom' \
                            --strategy colossalai_zero2 --loss_fn 'log_sig' \
                            --dataset 'Anthropic/hh-rlhf' --subset 'harmless-base' \
                            --test True --lora_rank 4 \
                            --save_path "${BASE}/rm_ckpt.pt"

torchrun --standalone --nproc_per_node=2 "${BASE}/train_reward_model.py" \
                            --pretrain 'microsoft/deberta-v3-large' --model 'deberta' \
                            --strategy colossalai_zero2 --loss_fn 'log_sig' \
                            --dataset 'Anthropic/hh-rlhf' --subset 'harmless-base' \
                            --test True --lora_rank 4 \
                            --save_path "${BASE}/rm_ckpt.pt"

torchrun --standalone --nproc_per_node=2 "${BASE}/train_reward_model.py" \
                            --pretrain 'roberta-base' --model 'roberta' \
                            --strategy colossalai_zero2 --loss_fn 'log_exp' \
                            --dataset 'Anthropic/hh-rlhf' --subset 'harmless-base' \
                            --test True --lora_rank 4 \
                            --save_path "${BASE}/rm_ckpt.pt"

rm -rf "${BASE}/rm_ckpt.pt"

# train prompts (PPO) — each run consumes the matching reward-model
# checkpoint saved above, which is deleted once the run finishes.
torchrun --standalone --nproc_per_node=2 "${BASE}/train_prompts.py" --prompt_path "$PROMPT_PATH" --pretrain_dataset "$PRETRAIN_DATASET" \
        --strategy colossalai_zero2 --num_episodes 1 --max_timesteps 2 \
        --update_timesteps 2 --max_epochs 1 --train_batch_size 2 \
        --pretrain 'facebook/opt-350m' --model opt \
        --rm_pretrain 'facebook/opt-350m' \
        --rm_path "${BASE}/rm_ckpt_opt.pt" \
        --save_path "${BASE}/actor_checkpoint_prompts.pt"
rm -rf "${BASE}/rm_ckpt_opt.pt"

torchrun --standalone --nproc_per_node=2 "${BASE}/train_prompts.py" --prompt_path "$PROMPT_PATH" --pretrain_dataset "$PRETRAIN_DATASET" \
        --strategy colossalai_zero2 --num_episodes 1 --max_timesteps 2 \
        --update_timesteps 2 --max_epochs 1 --train_batch_size 2 \
        --pretrain 'gpt2' --model gpt2 \
        --rm_pretrain 'gpt2' \
        --rm_path "${BASE}/rm_ckpt_gpt.pt" \
        --save_path "${BASE}/actor_checkpoint_prompts.pt"
rm -rf "${BASE}/rm_ckpt_gpt.pt"

rm -rf "${BASE}/actor_checkpoint_prompts.pt"