#!/bin/bash
# Per-rank launch script: started once per MPI local rank (the OMPI_COMM_WORLD_*
# variables come from mpirun). Each local rank is bound to its own NUMA node and
# InfiniBand HCA in the case block below, then runs fine-tune.py with DeepSpeed.

# ROCm / MIOpen settings
export HSA_FORCE_FINE_GRAIN_PCIE=1
export MIOPEN_FIND_MODE=3
export MIOPEN_COMPILE_PARALLEL_LEVEL=1

# RCCL / NCCL communication settings
export NCCL_PLUGIN_P2P=ucx
export RCCL_NCHANNELS=2
export NCCL_SOCKET_IFNAME=ib0
export NCCL_P2P_LEVEL=5
export NCCL_IB_HCA=mlx5_0
export NCCL_DEBUG=INFO
export NCCL_NET_GDR_LEVEL=SYS
export NCCL_NET_PLUGIN=none
# Roll back two of the experimental settings above
unset RCCL_NCHANNELS
unset NCCL_NET_GDR_LEVEL

# Rank information provided by OpenMPI
lrank=$OMPI_COMM_WORLD_LOCAL_RANK
echo "LRANK===============================$lrank"
RANK=$OMPI_COMM_WORLD_RANK
WORLD_SIZE=$OMPI_COMM_WORLD_SIZE
echo "WORLD_SIZE*************$WORLD_SIZE"

export HIP_VISIBLE_DEVICES=0,1,2,3

# Training command shared by all ranks
APP="python3 ../fine-tune.py \
    --deepspeed ../ds_config.json \
    --report_to none \
    --data_path ../data/belle_chat_ramdon_10k.json \
    --model_name_or_path ../../baichuan2-7b-base \
    --output_dir output \
    --model_max_length 64 \
    --num_train_epochs 4 \
    --per_device_train_batch_size 1 \
    --gradient_accumulation_steps 1 \
    --save_strategy epoch \
    --learning_rate 2e-5 \
    --lr_scheduler_type constant \
    --adam_beta1 0.9 \
    --adam_beta2 0.98 \
    --adam_epsilon 1e-8 \
    --max_grad_norm 1.0 \
    --weight_decay 1e-4 \
    --warmup_ratio 0.0 \
    --logging_steps 1 \
    --gradient_checkpointing False \
    --fp16 \
    --local_rank $lrank"

# Bind each local rank to its own NUMA node and IB device
case ${lrank} in
[0])
    export HIP_VISIBLE_DEVICES=0,1,2,3
    export UCX_NET_DEVICES=mlx5_0:1
    export UCX_IB_PCI_BW=mlx5_0:50Gbs
    numactl --cpunodebind=0 --membind=0 ${APP}
    ;;
[1])
    export HIP_VISIBLE_DEVICES=0,1,2,3
    export UCX_NET_DEVICES=mlx5_1:1
    export UCX_IB_PCI_BW=mlx5_1:50Gbs
    numactl --cpunodebind=1 --membind=1 ${APP}
    ;;
[2])
    export HIP_VISIBLE_DEVICES=0,1,2,3
    export UCX_NET_DEVICES=mlx5_2:1
    export UCX_IB_PCI_BW=mlx5_2:50Gbs
    numactl --cpunodebind=2 --membind=2 ${APP}
    ;;
[3])
    export HIP_VISIBLE_DEVICES=0,1,2,3
    export UCX_NET_DEVICES=mlx5_3:1
    export UCX_IB_PCI_BW=mlx5_3:50Gbs
    numactl --cpunodebind=3 --membind=3 ${APP}
    ;;
esac