#!/bin/bash

# ROCm / MIOpen settings
export HSA_FORCE_FINE_GRAIN_PCIE=1
export MIOPEN_FIND_MODE=3
export MIOPEN_COMPILE_PARALLEL_LEVEL=1

# NCCL / RCCL / UCX communication settings
export NCCL_PLUGIN_P2P=ucx
export RCCL_NCHANNELS=2
export NCCL_SOCKET_IFNAME=ib0
export NCCL_P2P_LEVEL=5
export NCCL_IB_HCA=mlx5_0  # NIC 0

# Distributed environment derived from the Open MPI rank variables
export MASTER_ADDR=${1}
lrank=$OMPI_COMM_WORLD_LOCAL_RANK
export LOCAL_RANK=$OMPI_COMM_WORLD_LOCAL_RANK
export RANK=$OMPI_COMM_WORLD_RANK
export WORLD_SIZE=$OMPI_COMM_WORLD_SIZE
export MASTER_PORT=12365

APP="python3 ../src/train_bash.py --stage sft \
    --model_name_or_path ../../baichuan-13b-base \
    --do_train \
    --template default \
    --dataset alpaca_gpt4_en \
    --finetuning_type lora \
    --lora_rank 16 \
    --lora_target W_pack,o_proj,gate_proj,down_proj,up_proj \
    --output_dir out/baichuan-7b-lora-test7 \
    --per_device_train_batch_size 1 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 1 \
    --preprocessing_num_workers 8 \
    --lr_scheduler_type cosine \
    --logging_steps 10 \
    --save_steps 2 \
    --eval_steps 2 \
    --learning_rate 1e-4 \
    --max_grad_norm 0.5 \
    --num_train_epochs 1.0 \
    --val_size 0.001 \
    --evaluation_strategy steps \
    --load_best_model_at_end \
    --plot_loss \
    --fp16 \
    --deepspeed deepspeed.json"

# Bind each local rank to its own NUMA node and InfiniBand HCA
case ${lrank} in
[0])
    export HIP_VISIBLE_DEVICES=0,1,2,3
    export UCX_NET_DEVICES=mlx5_0:1
    export UCX_IB_PCI_BW=mlx5_0:50Gbs
    numactl --cpunodebind=0 --membind=0 ${APP}
    ;;
[1])
    export HIP_VISIBLE_DEVICES=0,1,2,3
    export UCX_NET_DEVICES=mlx5_1:1
    export UCX_IB_PCI_BW=mlx5_1:50Gbs
    numactl --cpunodebind=1 --membind=1 ${APP}
    ;;
[2])
    export HIP_VISIBLE_DEVICES=0,1,2,3
    export UCX_NET_DEVICES=mlx5_2:1
    export UCX_IB_PCI_BW=mlx5_2:50Gbs
    numactl --cpunodebind=2 --membind=2 ${APP}
    ;;
[3])
    export HIP_VISIBLE_DEVICES=0,1,2,3
    export UCX_NET_DEVICES=mlx5_3:1
    export UCX_IB_PCI_BW=mlx5_3:50Gbs
    numactl --cpunodebind=3 --membind=3 ${APP}
    ;;
esac
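
# --- Usage sketch (not part of the original script; names below are assumptions) ---
# The script reads OMPI_COMM_WORLD_* variables, so it is meant to be started once per
# local rank by Open MPI, with the master node's address passed as the first argument.
# Assuming the file is saved as run_sft.sh and a hostfile lists 2 nodes with 4 slots
# each, a launch could look like:
#
#   mpirun -np 8 --hostfile hostfile \
#          -x PATH -x LD_LIBRARY_PATH \
#          bash run_sft.sh <master-node-ip>
#
# Adjust -np to (number of nodes) x 4 so every local rank 0-3 hits one branch of the
# case statement above; the hostfile layout and script name are illustrative only.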