#!/bin/bash
# Per-rank launcher for BERT phase-1 pretraining (PyTorch on ROCm).
# Intended to be started under mpirun/OpenMPI: each local rank i (0-3) is
# pinned to GPU i, NUMA node i, and InfiniBand HCA mlx5_i.

export HSA_FORCE_FINE_GRAIN_PCIE=1
export MIOPEN_FIND_MODE=1

# 'module' may be unavailable in non-interactive batch shells; a failed
# unload must not abort the launch.
module unload compiler/rocm/2.9 || true

echo "MIOPEN_FIND_MODE=$MIOPEN_FIND_MODE"

# OpenMPI rank environment. Default to a single-rank layout so the script
# can also be smoke-tested outside mpirun without unbound-variable errors.
lrank=${OMPI_COMM_WORLD_LOCAL_RANK:-0}
comm_rank=${OMPI_COMM_WORLD_RANK:-0}
comm_size=${OMPI_COMM_WORLD_SIZE:-1}   # currently unused here; kept for child processes

# --- modified section below (comment translated from Chinese) ---
# Default device list; narrowed to a single GPU per rank further down.
export HIP_VISIBLE_DEVICES=0,1,2,3
export PATH_PHRASE1=/public/software/apps/DeepLearning/Data/wikicorpus_en/lower_case_1_seq_len_128_max_pred_20_masked_lm_prob_0.15_random_seed_12345_dupe_factor_5_shard_1472_test_split_10/wikicorpus_en/training

# NOTE(review): --local_rank is passed the GLOBAL rank (${comm_rank}) while
# --gpus_per_node is 1 — presumably run_pretraining_v4.py expects the global
# rank here; confirm against its argument parser.
APP="python3 run_pretraining_v4.py \
  --input_dir=${PATH_PHRASE1} \
  --output_dir=/public/home/hepj/outdir/torch/pre_wiki4/phrase1/fp32 \
  --config_file=/public/home/hepj/model_source/pytorch_bert/bert_config.json \
  --bert_model=bert-large-uncased \
  --train_batch_size=16 \
  --max_seq_length=128 \
  --max_predictions_per_seq=20 \
  --max_steps=100000 \
  --warmup_proportion=0.0 \
  --num_steps_per_checkpoint=20000 \
  --learning_rate=4.0e-4 \
  --seed=12439 \
  --gradient_accumulation_steps=1 \
  --allreduce_post_accumulation \
  --do_train \
  --use_env \
  --local_rank ${comm_rank} \
  --world_size 4 \
  --gpus_per_node 1 \
  --dist_url tcp://localhost:34567 \
  --json-summary /public/home/hepj/outdir/torch/pre_wiki4/phrase1/fp32/dllogger.json "

# The original four case arms were identical except for the index; derive the
# GPU / NUMA node / HCA binding directly from the local rank instead, so the
# three bindings can never drift out of sync.
if [[ "${lrank}" =~ ^[0-3]$ ]]; then
  export HIP_VISIBLE_DEVICES=${lrank}
  export UCX_NET_DEVICES=mlx5_${lrank}:1
  export UCX_IB_PCI_BW=mlx5_${lrank}:50Gbs
  # Log the exact command line, then run it. APP is intentionally unquoted:
  # it is a flat command string that must word-split into argv.
  echo numactl --cpunodebind="${lrank}" --membind="${lrank}" ${APP}
  numactl --cpunodebind="${lrank}" --membind="${lrank}" ${APP}
else
  # Previously an unmatched rank fell through the case silently; keep the
  # zero exit status but say why nothing was launched.
  echo "warning: local rank '${lrank}' outside 0-3; nothing launched" >&2
fi