conver-model.sh
MODEL_SIZE=$1                 # model size: 8B/70B
SOURCE_CKPT_PATH=$2           # source checkpoint path
TARGET_CKPT_PATH=$3           # target checkpoint path
TP=$4                         # tensor (model) parallelism degree
PP=$5                         # pipeline parallelism degree
mg2hf=$6                      # whether to run the mcore-to-HF conversion
CHECK=$7                      # verify that per-layer model outputs match before and after conversion
CHECK_ONLY=$8                 # only check model outputs, do not convert
PR=$9                         # precision: fp16/bf16/fp32
HF_CKPT_PATH=${10}            # path to the HF checkpoint (optional; required when mg2hf=true)
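
# A minimal sketch (not part of the original script) of how these positional
# arguments could be validated before dispatching the conversion, assuming the
# argument order documented above.
if [ "$#" -lt 9 ]; then
    echo "Usage: $0 MODEL_SIZE SOURCE_CKPT_PATH TARGET_CKPT_PATH TP PP mg2hf CHECK CHECK_ONLY PR [HF_CKPT_PATH]" >&2
    exit 1
fi
if [ "$mg2hf" = "true" ] && [ -z "$HF_CKPT_PATH" ]; then
    echo "HF_CKPT_PATH (argument 10) is required when mg2hf=true" >&2
    exit 1
fi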

cd /public/new-pai/Pai-Megatron-Patch/toolkits/model_checkpoints_convertor/llama
bash hf2mcore_convertor_llama3_1.sh \
    8B \
    /public/new-pai/model/llama3-ckpts/Meta-Llama-3.1-8B    \
    /public/new-pai/Pai-Megatron-Patch/examples/llama3_1/mcore-tp2-pp1  \
    2  \
    1  \
    false \
    true \
    false \
    bf16
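
# Illustrative example only (the output path below is a placeholder, not from the
# original script): to convert back from mcore to HF, set mg2hf=true and supply the
# original HF checkpoint path as the tenth argument, per the parameter list above.
# bash hf2mcore_convertor_llama3_1.sh \
#     8B \
#     /public/new-pai/Pai-Megatron-Patch/examples/llama3_1/mcore-tp2-pp1 \
#     /path/to/output-hf-ckpt \
#     2 \
#     1 \
#     true \
#     true \
#     false \
#     bf16 \
#     /public/new-pai/model/llama3-ckpts/Meta-Llama-3.1-8B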