single_ddp.sh 1.57 KB
Newer Older
hepj987's avatar
hepj987 committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
#!/bin/bash
# Per-rank launch wrapper for distributed (DDP) training under Open MPI on ROCm.
# Usage (one instance per rank, via mpirun): single_ddp.sh <master_node_address>

export MIOPEN_FIND_MODE=3     # MIOpen fast-find: skip exhaustive conv kernel search
export GPU_MAX_HW_QUEUES=16

# Map Open MPI's rank environment onto the variables torch.distributed expects.
lrank=$OMPI_COMM_WORLD_LOCAL_RANK
comm_rank=$OMPI_COMM_WORLD_RANK
comm_size=$OMPI_COMM_WORLD_SIZE
export LOCAL_RANK=$OMPI_COMM_WORLD_LOCAL_RANK
export RANK=$comm_rank
export WORLD_SIZE=$comm_size

# Rendezvous endpoint: first CLI argument is the master node's address.
# Warn (but do not abort, preserving prior behavior) when it is missing,
# since an empty MASTER_ADDR makes torch.distributed fail much later.
if [[ -z "${1:-}" ]]; then
  echo "warning: no master address argument given; MASTER_ADDR will be empty" >&2
fi
export MASTER_ADDR=${1:-}
export MASTER_PORT=29500

# export NCCL_DEBUG=info   # print NCCL communication logs
export NCCL_IB_HCA=mlx5
export NCCL_SOCKET_IFNAME=ib0
export HIP_DIRECT_DISPATCH=0

# Reference snippet (currently disabled): DeepSpeed ZeRO CPU-offload config.
# Paste into deepspeed.json to offload optimizer state and parameters to host
# memory when GPU memory is insufficient:
    #   "offload_optimizer": {
    #     "device": "cpu",
    #     "pin_memory": true
    #   },
    #   "offload_param": {
    #     "device": "cpu",
    #     "pin_memory": true
    #   }

# Training command (LoRA fine-tune of Qwen-7B-Chat on alpaca_gpt4_zh through
# DeepSpeed). Held as a single string and expanded word-by-word by the
# unquoted ${APP} in the dispatch below, so one flag per line is safe.
APP="python ./src/train_bash.py \
    --deepspeed deepspeed.json \
    --model_name_or_path /work/home/hepj/model/Qwen-7B-Chat \
    --do_train \
    --dataset alpaca_gpt4_zh \
    --template chatml \
    --finetuning_type lora \
    --lora_target c_attn \
    --output_dir ./output/ft_qwen \
    --per_device_train_batch_size 1 \
    --gradient_accumulation_steps 1 \
    --lr_scheduler_type cosine \
    --logging_steps 10 \
    --save_steps 1000 \
    --learning_rate 5e-5 \
    --num_train_epochs 3.0 \
    --fp16"
# Add --overwrite_cache to force the tokenized dataset cache to be rebuilt.

# Bind each local MPI rank (0-3) to its own NUMA node and InfiniBand HCA.
# The original four copy-pasted case arms differed only in the index digit,
# so the binding is computed from ${lrank} instead. All four GPUs stay
# visible to every rank; the framework picks its device via LOCAL_RANK.
# Ranks outside 0-3 launch nothing, exactly as the original case fell through.
if [[ "${lrank}" =~ ^[0-3]$ ]]; then
  export HIP_VISIBLE_DEVICES=0,1,2,3
  export UCX_NET_DEVICES="mlx5_${lrank}:1"
  # ${APP} is intentionally unquoted: it is a flat command string that must
  # undergo word splitting to become argv for numactl.
  numactl --cpunodebind="${lrank}" --membind="${lrank}" ${APP}
fi