OpenDAS / Megatron-LM · Commits

Commit c271aaae, authored Jan 17, 2025 by wxj

Update Llama_pretraining.sh

Parent: c5369391 · Pipeline #2447 passed

Showing 1 changed file, Llama_pretraining.sh (view file @ c271aaae), with 86 additions and 73 deletions (+86 / -73).
@@ -8,7 +8,7 @@ export OMP_NUM_THREADS=1
 export NCCL_P2P_LEVEL=PXB # SYS
 #export HIP_ALLOC_INITIALIZE=0
-#export GPU_MAX_HW_QUEUES=20
+#export GPU_MAX_HW_QUEUES=20 # sglang bubble
 export NCCL_ALGO=Ring
 export NCCL_NCHANNELS_PER_PEER=16
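A quick way to confirm that the NCCL settings in this hunk actually take effect is NCCL's own logging. The sketch below is not part of the commit and only uses standard NCCL environment variables:

export NCCL_DEBUG=INFO             # NCCL reports the transports, algorithm and channel count it selects
export NCCL_DEBUG_SUBSYS=INIT,ENV  # limit the output to initialization and env-variable parsing
# the selections then show up in the training log as "NCCL INFO ..." lines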
@@ -17,54 +17,72 @@ export NCCL_IB_TIMEOUT=22
 export CUDA_DEVICE_MAX_CONNECTIONS=1
 export NCCL_IB_HCA=mlx5_1,mlx5_2
 # export NCCL_IB_HCA=mlx5_0,mlx5_1,mlx5_2,mlx5_3,,mlx5_4,,mlx5_5,,mlx5_6,,mlx5_7
 export NCCL_NET_GDR_LEVEL=SYS
 export NCCL_NET_GDR_READ=0
 export GLOG_minloglevel=3 # print error-level NCCL logs
 # export TORCH_COMPILE_DEBUG=1 # inspect the compiled graph
 source /opt/dtk/env.sh
-# TE's GEMM calls require loading the hipblaslt library
+# load the hipblaslt library
 # export LD_LIBRARY_PATH=/data/hipblaslt-install-0904/lib:$LD_LIBRARY_PATH
 # updated rocblas
 export LD_LIBRARY_PATH=/data/rocblas-install/lib:$LD_LIBRARY_PATH
 # export LD_LIBRARY_PATH=/data/rocblas-install_qwen1211/lib:$LD_LIBRARY_PATH
 # export LD_LIBRARY_PATH=/data/rocblas-install_qwen1228/lib:$LD_LIBRARY_PATH
 # export LD_LIBRARY_PATH=/data/rocblas-install_0107_trans/lib:$LD_LIBRARY_PATH
 # torch: force multi-stream execution onto a single stream
 # export ALLREDUCE_STREAM_WITH_COMPUTE=1
 #
-# add synchronization for profiler capture
+# add synchronization for profiler capture, to avoid stalls
 # export GPU_FLUSH_ON_EXECUTION=1
 # export HIP_DIRECT_DISPATCH=0
 # capture rocblas sizes
 export ROCBLAS_LAYER=3
 # capture flash-attention sizes
 # export FLASH_ATTENTION_PRINT_PARAM=1
 CHECKPOINT_PATH=./tmp_7b #$1 #<Specify path>
 TENSORBOARD_LOGS_PATH=./tmp_7b #$2 #<Specify path>
-DATA_PATH="/public/home/wangxj3/Downloads/datasets/nemo_pretrain/oscar-1GB/oscar-1GB-llama_text_document" #<Specify path and file prefix>_text_document
+DATA_PATH="/data/datasets/nemo_pretrain/oscar-1GB/oscar-1GB-llama_text_document"
 # GPT_MODEL_ARGS=(
 # --num-layers 32
 # --hidden-size 5120
 # --ffn-hidden-size 13824
 # --num-attention-heads 40
 # --seq-length 4096 #4096
 # --max-position-embeddings 32768 #4096
 # --num-query-groups 40
 # --group-query-attention
 # )
 GPT_MODEL_ARGS=(
-    --num-layers 6
+    --num-layers 32
     --hidden-size 4096
     --ffn-hidden-size 11008
     --num-attention-heads 32
     --seq-length 4096 #4096
     --max-position-embeddings 4096
     --normalization RMSNorm
     --position-embedding-type rope
     --untie-embeddings-and-output-weights # handle the embedding and output weights separately, for more flexibility
 )
 # GPT_MODEL_ARGS=(
 # --num-layers 40
 # --hidden-size 5120
 # --ffn-hidden-size 13824
 # --num-attention-heads 40
 # --max-position-embeddings 4096
 # --normalization RMSNorm
 # --position-embedding-type rope
 # --untie-embeddings-and-output-weights # handle the embedding and output weights separately, for more flexibility
 # )
 # export NVTE_FLASH_ATTN=1 # use the cutlass path
 export NVTE_FLASH_ATTN_TRITON=1 # use the triton flash-attention path
-# --transformer-impl transformer_engine
+# --transformer-impl transformer_engine # use these two options for the mcore path
 # --use-mcore-models
-# --transformer-impl local
+# --transformer-impl local # use these two options for the legacy path
 # --use-legacy-models
 TRAINING_ARGS=(
-    --transformer-impl transformer_engine
-    --use-mcore-models
+    --transformer-impl local # use these two options for the legacy path
+    --use-legacy-models
     --micro-batch-size 1
-    --global-batch-size 6 #240 #60 #512 #64
+    --global-batch-size 64 #240 #60 #512 #64
     --train-iters 10
     --weight-decay 0.1
     --adam-beta1 0.9
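The hunk above raises --num-layers from 6 to 32 (a LLaMA-7B-sized network, matching the tmp_7b paths) and --global-batch-size from 6 to 64. Megatron requires the global batch size to be a multiple of micro-batch-size times data-parallel-size, so a minimal sanity check looks like the sketch below; WORLD, TP_SIZE and PP_SIZE are assumed placeholders for values that live outside this hunk:

WORLD=8; TP_SIZE=1; PP_SIZE=1     # assumptions: 8 visible GPUs, TP/PP sizes are set in MODEL_PARALLEL_ARGS
MICRO_BATCH=1; GLOBAL_BATCH=64    # values from the hunk above
DP_SIZE=$(( WORLD / (TP_SIZE * PP_SIZE) ))
(( GLOBAL_BATCH % (MICRO_BATCH * DP_SIZE) == 0 )) || echo "global batch not divisible by micro-batch * dp-size"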
@@ -72,34 +90,30 @@ TRAINING_ARGS=(
     --init-method-std 0.006
     --clip-grad 1.0
     --bf16
     # --fp16 # enabling fp16 requires setting loss-scale
     # --loss-scale 1024
     --use-distributed-optimizer
     --disable-bias-linear
     --attention-dropout 0
     --hidden-dropout 0
-    --no-gradient-accumulation-fusion
-    --add-qkv-bias
+    --no-gradient-accumulation-fusion # the fusion gives wrong accuracy when enabled; it can be turned on once apex is updated
     --swiglu
     --lr 3.0e-5
     --lr-decay-style cosine
     --min-lr 3.0e-6
     --lr-warmup-iters 1
-    --ckpt-format torch
-    --ddp-average-in-collective
-    --recompute-granularity full
-    --recompute-num-layers 5 #0 #
-    --recompute-method block
-    --overlap-grad-reduce
-    --use-flash-attn-triton
+    --ddp-average-in-collective # in DP communication, gradients/parameters are averaged directly instead of being summed (onto one device) and then averaged
+    # --recompute-granularity full # recomputation lowers GPU memory use at the cost of extra time
+    # --recompute-num-layers 5 #0 #
+    # --recompute-method block
+    --overlap-grad-reduce # overlap the DDP grad reduce
+    # --tp-comm-overlap # overlap tensor-parallel communication with GEMM; this optimization is not adapted yet
+    # --tp-comm-overlap-rs-dgrad # overlap reduce-scatter with the dgrad GEMM; this optimization is not adapted yet
+    --use-flash-attn-cutlass
 )
 # --add-qkv-bias # qwen
 # --ckpt-format torch
 # --ddp-average-in-collective
 # --recompute-granularity full
 # --recompute-num-layers 5
 # --recompute-method block
 # --overlap-grad-reduce
-# --use-flash-attn-cutlass
-# --use-flash-attn-triton
+# --use-flash-attn-cutlass # cutlass flash-attention
+# --use-flash-attn-triton # triton flash-attention
 MODEL_PARALLEL_ARGS=(
     --sequence-parallel
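The recompute flags are commented out in the new version of TRAINING_ARGS. If they need to stay switchable without editing the array, one option (not part of the commit; ENABLE_RECOMPUTE is a made-up variable, the flags are the same ones shown above) is to append them conditionally after TRAINING_ARGS is defined and before APP is assembled:

if [[ "${ENABLE_RECOMPUTE:-0}" == "1" ]]; then
    TRAINING_ARGS+=(
        --recompute-granularity full   # trades extra compute time for lower activation memory
        --recompute-method block
        --recompute-num-layers 5
    )
fi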
@@ -109,13 +123,10 @@ MODEL_PARALLEL_ARGS=(
 DATA_ARGS=(
     --data-path $DATA_PATH
     --seq-length 4096 #4096
     --split 949,50,1
     --untie-embeddings-and-output-weights
     --use-rotary-position-embeddings
     --normalization RMSNorm
     --no-position-embedding
     --tokenizer-type Llama2Tokenizer
-    --tokenizer-model /public/home/wangxj3/Downloads/model_weights/llama2_7b_hf/tokenizer.model
+    --tokenizer-model /data/model_weights/llama2_7b_hf/tokenizer.model
 )
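Both DATA_PATH and the tokenizer path move from /public/home/... to /data/... in this commit, so a small pre-flight check (not part of the script) avoids a late crash inside the data loader. Megatron's indexed dataset is addressed by prefix and stored as a .bin/.idx pair:

for f in "${DATA_PATH}.bin" "${DATA_PATH}.idx" /data/model_weights/llama2_7b_hf/tokenizer.model; do
    [[ -e "$f" ]] || { echo "missing: $f" >&2; exit 1; }
done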
 EVAL_AND_LOGGING_ARGS=(
@@ -134,7 +145,7 @@ PROFILE_ARGS=(
     --profile-step-start 4
     --profile-step-end 5
     --use-pytorch-profiler
-    --profile-ranks 0 3
+    --profile-ranks 0 1 2 3 4 5 6 7
     --profile-dir prof_data
 )
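--profile-ranks now covers all eight local ranks, and the traces go to --profile-dir (prof_data). Assuming --use-pytorch-profiler writes them in torch.profiler's TensorBoard trace format, they can be browsed with the standard plugin; the commands below are not part of the script:

pip install torch_tb_profiler   # one-time: TensorBoard plugin for PyTorch profiler traces
tensorboard --logdir prof_data  # then open the PyTorch Profiler view in the browser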
@@ -142,7 +153,7 @@ RANK=$OMPI_COMM_WORLD_RANK
 LOCAL_RANK=$OMPI_COMM_WORLD_LOCAL_RANK
 WORLD_SIZE=$OMPI_COMM_WORLD_SIZE
 DIST_URL=${1}
-DIST_PORT=34566
+DIST_PORT=34567
 DISTRIBUTED_ARGS=(
     --rank ${RANK}
@@ -158,33 +169,35 @@ APP="python -u pretrain_gpt.py \
     ${DATA_ARGS[@]} \
     ${EVAL_AND_LOGGING_ARGS[@]} \
     ${DISTRIBUTED_ARGS[@]} \
     ${PROFILE_ARGS[@]} \
     "
 # enable profiling
 # ${PROFILE_ARGS[@]} \
-export HIP_VISIBLE_DEVICES=4,5,6,7 # 0,1,2,3 # 4,5,6,7 #,
+export HIP_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 # # 4,5,6,7 #,
 # export CUDA_VISIBLE_DEVICES=4,5,6,7 # 0,1,2,3,
 ${APP}
 # case ${LOCAL_RANK} in
 # [0])
 # # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
 # ${APP}
 case ${LOCAL_RANK} in
 [0])
 # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
 ${APP}
 # numactl --cpunodebind=0 --membind=0 ${APP}
 ;;
 [1])
 # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
 ${APP}
 # numactl --cpunodebind=0 --membind=0 ${APP}
 ;;
 [2])
 # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
 ${APP}
 # numactl --cpunodebind=0 --membind=0 ${APP}
 ;;
 [3])
 # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
 ${APP}
 # numactl --cpunodebind=0 --membind=0 ${APP}
 ;;
 # # numactl --cpunodebind=0 --membind=0 ${APP}
 # ;;
 # [1])
 # # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
 # ${APP}
 # # numactl --cpunodebind=0 --membind=0 ${APP}
 # ;;
 # [2])
 # # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
 # ${APP}
 # # numactl --cpunodebind=0 --membind=0 ${APP}
 # ;;
 # [3])
 # # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
 # ${APP}
 # # numactl --cpunodebind=0 --membind=0 ${APP}
 # ;;
 # [4])
 # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
 # ${APP}
@@ -205,4 +218,4 @@ case ${LOCAL_RANK} in
 # ${APP}
 # # numactl --cpunodebind=0 --membind=0 ${APP}
 # ;;
-esac
+# esac
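The commented numactl lines in the case block hint at pinning each local rank to a NUMA node. A compact sketch of that idea, not part of the commit; the rank-to-node mapping and the node count are assumptions that depend on the host topology:

NUMA_NODE=$(( LOCAL_RANK % 4 ))   # assumption: 4 NUMA nodes per host
numactl --cpunodebind=${NUMA_NODE} --membind=${NUMA_NODE} ${APP}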