OpenDAS / Megatron-LM · Commits

Commit 40ea1bd3, authored Jan 10, 2025 by wxj
Update Llama_pretraining.sh
Parent: 18c8cf95
Pipeline #2205 passed
Showing 1 changed file with 63 additions and 69 deletions.
Llama_pretraining.sh @ 40ea1bd3
@@ -21,32 +21,30 @@ export NCCL_NET_GDR_LEVEL=SYS
 export NCCL_NET_GDR_READ=0
 export GLOG_minloglevel=3 # print only error-level NCCL logs
 source /opt/dtk/env.sh
-# TE's GEMM calls need the hipblaslt library imported
+# import the hipblaslt library
 # export LD_LIBRARY_PATH=/data/hipblaslt-install-0904/lib:$LD_LIBRARY_PATH
 # use the updated rocblas
 export LD_LIBRARY_PATH=/data/rocblas-install/lib:$LD_LIBRARY_PATH
 # export LD_LIBRARY_PATH=/data/rocblas-install_qwen1211/lib:$LD_LIBRARY_PATH
 # export LD_LIBRARY_PATH=/data/rocblas-install_qwen1228/lib:$LD_LIBRARY_PATH
-# # add synchronization for profiler capture
 # torch: collapse multiple streams into a single stream
 # export ALLREDUCE_STREAM_WITH_COMPUTE=1
+# add synchronization for profiler capture, avoids stalls
 # export GPU_FLUSH_ON_EXECUTION=1
 # export HIP_DIRECT_DISPATCH=0
 # log rocblas GEMM sizes
 # export ROCBLAS_LAYER=3
 # log flash-attention sizes
 # export FLASH_ATTENTION_PRINT_PARAM=1
 CHECKPOINT_PATH=./tmp_7b #$1 #<Specify path>
 TENSORBOARD_LOGS_PATH=./tmp_7b #$2 #<Specify path>
-DATA_PATH="/public/home/wangxj3/Downloads/datasets/nemo_pretrain/oscar-1GB/oscar-1GB-llama_text_document" #<Specify path and file prefix>_text_document
-# GPT_MODEL_ARGS=(
-#     --num-layers 32
-#     --hidden-size 5120
-#     --ffn-hidden-size 13824
-#     --num-attention-heads 40
-#     --seq-length 4096 #4096
-#     --max-position-embeddings 32768 #4096
-#     --num-query-groups 40
-#     --group-query-attention
-# )
+DATA_PATH="/data/datasets/nemo_pretrain/oscar-1GB/oscar-1GB-llama_text_document" #<Specify path and file prefix>_text_document
 GPT_MODEL_ARGS=(
-    --num-layers 6
+    --num-layers 32
     --hidden-size 4096
     --ffn-hidden-size 11008
     --num-attention-heads 32
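Editor's note: this hunk swaps a locally built rocblas onto LD_LIBRARY_PATH, so it is worth confirming the override actually wins before a long run. A minimal sketch, assuming a ROCm build of PyTorch that ships libtorch_hip.so (the install path is taken from the diff; nothing else here is from the commit):

export LD_LIBRARY_PATH=/data/rocblas-install/lib:$LD_LIBRARY_PATH
# Ask the dynamic linker which librocblas PyTorch's HIP backend will resolve.
TORCH_LIB=$(python -c 'import torch, os; print(os.path.join(os.path.dirname(torch.__file__), "lib", "libtorch_hip.so"))')
ldd "${TORCH_LIB}" | grep -i rocblas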
@@ -56,15 +54,15 @@ GPT_MODEL_ARGS=(
 # export NVTE_FLASH_ATTN=1 # use the cutlass path
 export NVTE_FLASH_ATTN_TRITON=1 # use the triton flash-attention path
 # --transformer-impl transformer_engine
 # use these two args for the mcore path
 # --use-mcore-models
 # --transformer-impl local
 # use these two args for the legacy path
 # --use-legacy-models
 TRAINING_ARGS=(
-    --transformer-impl transformer_engine
-    --use-mcore-models
+    --transformer-impl local
+    # use these two args for the legacy path
+    --use-legacy-models
     --micro-batch-size 1
-    --global-batch-size 6 #240 #60 #512 #64
+    --global-batch-size 60 #240 #60 #512 #64
     --train-iters 10
     --weight-decay 0.1
     --adam-beta1 0.9
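Editor's note: with the values in this hunk (micro-batch 1, global-batch 60) and the parallel sizes set further down (TP=2, PP=4 on 8 GPUs), the data-parallel size works out to 1, so the global batch is reached entirely through gradient accumulation. A minimal sanity-check sketch, assuming Megatron-LM's usual requirement that the global batch be divisible by micro-batch times data-parallel size:

TP=2; PP=4; WORLD_SIZE=8
MICRO_BATCH=1; GLOBAL_BATCH=60
DP=$(( WORLD_SIZE / (TP * PP) ))   # data-parallel size: 1
if (( GLOBAL_BATCH % (MICRO_BATCH * DP) != 0 )); then
    echo "global-batch-size must be divisible by micro-batch-size * DP" >&2
    exit 1
fi
echo "gradient accumulation steps per iteration: $(( GLOBAL_BATCH / (MICRO_BATCH * DP) ))"   # 60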
@@ -72,12 +70,13 @@ TRAINING_ARGS=(
     --init-method-std 0.006
     --clip-grad 1.0
     --bf16
     # --fp16 # enabling fp16 requires specifying a loss scale
     # --loss-scale 1024
     --use-distributed-optimizer
     --disable-bias-linear
     --attention-dropout 0
     --hidden-dropout 0
     --no-gradient-accumulation-fusion
+    --add-qkv-bias
     --swiglu
     --lr 3.0e-5
     --lr-decay-style cosine
@@ -85,26 +84,19 @@ TRAINING_ARGS=(
     --lr-warmup-iters 1
     --ckpt-format torch
     --ddp-average-in-collective
-    --recompute-granularity full
-    --recompute-num-layers 5 #0 #
-    --recompute-method block
+    # --recompute-granularity full
+    # enabling recomputation reduces memory use but adds runtime
+    # --recompute-num-layers 5 #0 #
+    # --recompute-method block
     --overlap-grad-reduce
     --use-flash-attn-triton
 )
-# --add-qkv-bias # qwen
-# --ckpt-format torch
-# --ddp-average-in-collective
-# --recompute-granularity full
-# --recompute-num-layers 5
-# --recompute-method block
-# --overlap-grad-reduce
-# --use-flash-attn-cutlass
-# --use-flash-attn-triton
+# --use-flash-attn-cutlass # cutlass fa
+# --use-flash-attn-triton # triton fa
 MODEL_PARALLEL_ARGS=(
     --sequence-parallel
     --tensor-model-parallel-size 2
-    --pipeline-model-parallel-size 2
+    --pipeline-model-parallel-size 4
 )
 DATA_ARGS=(
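Editor's note: this hunk disables activation recomputation and raises the pipeline-parallel size from 2 to 4. Megatron-LM by default requires --num-layers to divide evenly across pipeline stages; with the 32 layers set above and PP=4, each stage holds 8 layers. A minimal check sketch (values taken from this diff):

NUM_LAYERS=32; PP=4
if (( NUM_LAYERS % PP != 0 )); then
    echo "num-layers (${NUM_LAYERS}) must be divisible by pipeline-model-parallel-size (${PP})" >&2
    exit 1
fi
echo "layers per pipeline stage: $(( NUM_LAYERS / PP ))"   # 8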
@@ -115,7 +107,7 @@ DATA_ARGS=(
     --normalization RMSNorm
     --no-position-embedding
     --tokenizer-type Llama2Tokenizer
-    --tokenizer-model /public/home/wangxj3/Downloads/model_weights/llama2_7b_hf/tokenizer.model
+    --tokenizer-model /data/model_weights/llama2_7b_hf/tokenizer.model
 )
 EVAL_AND_LOGGING_ARGS=(
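Editor's note: Llama2Tokenizer wraps a SentencePiece model, so the relocated tokenizer path can be verified without launching Megatron at all. A minimal sketch (the path is taken from the diff; sentencepiece must be installed):

python - <<'EOF'
import sentencepiece as spm
# Load the tokenizer.model referenced by --tokenizer-model and report its size.
sp = spm.SentencePieceProcessor(model_file="/data/model_weights/llama2_7b_hf/tokenizer.model")
print("vocab size:", sp.vocab_size())   # expect 32000 for Llama-2
EOF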
@@ -134,7 +126,7 @@ PROFILE_ARGS=(
     --profile-step-start 4
     --profile-step-end 5
     --use-pytorch-profiler
-    --profile-ranks 0 3
+    --profile-ranks 0 1 2 3 4 5 6 7
     --profile-dir prof_data
 )
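Editor's note: profiling now covers all eight local ranks instead of ranks 0 and 3. Assuming --use-pytorch-profiler emits TensorBoard-format traces into --profile-dir (the behavior of torch.profiler's tensorboard_trace_handler), they can be inspected with:

pip install torch-tb-profiler   # TensorBoard plugin for PyTorch profiler traces
tensorboard --logdir prof_data --port 6006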
@@ -142,7 +134,7 @@ RANK=$OMPI_COMM_WORLD_RANK
 LOCAL_RANK=$OMPI_COMM_WORLD_LOCAL_RANK
 WORLD_SIZE=$OMPI_COMM_WORLD_SIZE
 DIST_URL=${1}
-DIST_PORT=34566
+DIST_PORT=34567
 DISTRIBUTED_ARGS=(
     --rank ${RANK}
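Editor's note: the OMPI_COMM_WORLD_* variables read here only exist when the script is started by an Open MPI launcher, and DIST_URL arrives as the first positional argument. A minimal single-node launch sketch (the rank count matches this commit's 8 GPUs; the address is a placeholder, not from the commit):

# 8 ranks on one node; "$1" inside the script becomes the rendezvous address.
# --bind-to none leaves CPU pinning to the script's own numactl calls below.
mpirun -np 8 --bind-to none bash Llama_pretraining.sh 127.0.0.1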
@@ -158,51 +150,53 @@ APP="python -u pretrain_gpt.py \
     ${DATA_ARGS[@]} \
     ${EVAL_AND_LOGGING_ARGS[@]} \
     ${DISTRIBUTED_ARGS[@]} \
     ${PROFILE_ARGS[@]} \
     "
 # enable profiling
 # ${PROFILE_ARGS[@]} \
-export HIP_VISIBLE_DEVICES=4,5,6,7 # 0,1,2,3 # 4,5,6,7 #,
+export HIP_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 # # 4,5,6,7 #,
 # export CUDA_VISIBLE_DEVICES=4,5,6,7 # 0,1,2,3,
 # ${APP}
 case ${LOCAL_RANK} in
 [0])
     # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
-    ${APP}
-    # numactl --cpunodebind=0 --membind=0 ${APP}
+    # ${APP}
+    numactl --cpunodebind=0 --membind=0 ${APP}
     ;;
 [1])
     # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
-    ${APP}
-    # numactl --cpunodebind=0 --membind=0 ${APP}
+    # ${APP}
+    numactl --cpunodebind=0 --membind=0 ${APP}
     ;;
 [2])
     # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
-    ${APP}
-    # numactl --cpunodebind=0 --membind=0 ${APP}
+    # ${APP}
+    numactl --cpunodebind=0 --membind=0 ${APP}
     ;;
 [3])
     # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
-    ${APP}
-    # numactl --cpunodebind=0 --membind=0 ${APP}
+    # ${APP}
+    numactl --cpunodebind=0 --membind=0 ${APP}
     ;;
+[4])
+    # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+    # ${APP}
+    numactl --cpunodebind=0 --membind=0 ${APP}
+    ;;
+[5])
+    # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+    # ${APP}
+    numactl --cpunodebind=0 --membind=0 ${APP}
+    ;;
+[6])
+    # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+    # ${APP}
+    numactl --cpunodebind=0 --membind=0 ${APP}
+    ;;
+[7])
+    # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+    # ${APP}
+    numactl --cpunodebind=0 --membind=0 ${APP}
+    ;;
-# [4])
-# export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
-# ${APP}
-# # numactl --cpunodebind=0 --membind=0 ${APP}
-# ;;
-# [5])
-# export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
-# ${APP}
-# # numactl --cpunodebind=0 --membind=0 ${APP}
-# ;;
-# [6])
-# export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
-# ${APP}
-# # numactl --cpunodebind=0 --membind=0 ${APP}
-# ;;
-# [7])
-# export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
-# ${APP}
-# # numactl --cpunodebind=0 --membind=0 ${APP}
-# ;;
 esac
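Editor's note: every branch above binds its rank to NUMA node 0, which funnels all host memory traffic through a single node. If the machine actually has one NUMA node per GPU pair (check with numactl --hardware; this topology is an assumption, not something the commit states), the whole case block could collapse to a single derived binding:

# Derive the NUMA node from the local rank instead of hard-coding node 0.
NODE=$(( LOCAL_RANK / 2 ))
numactl --cpunodebind=${NODE} --membind=${NODE} ${APP}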