#!/bin/bash

# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Per-process rank information supplied by Open MPI.
lrank=$OMPI_COMM_WORLD_LOCAL_RANK
comm_rank=$OMPI_COMM_WORLD_RANK
comm_size=$OMPI_COMM_WORLD_SIZE

WORKSPACE="/workspace"

# Parameters. $1 is reserved for the master node address (consumed by
# --dist_url below); $2..$23 override the defaults.
train_batch_size=24
learning_rate=${2:-"6e-3"}
precision=${3:-"fp16"}
num_gpus=${4:-4}
warmup_proportion=${5:-"0.2843"}
train_steps=${6:-7038}
save_checkpoint_steps=${7:-200}
resume_training=${8:-"false"}
create_logfile=${9:-"true"}
accumulate_gradients=${10:-"true"}
gradient_accumulation_steps=${11:-4}
seed=${12:-12439}
job_name=${13:-"bert_lamb_pretraining"}
allreduce_post_accumulation=${14:-"true"}
allreduce_post_accumulation_fp16=${15:-"true"}
train_batch_size_phase2=${16:-4096}
learning_rate_phase2=${17:-"4e-3"}
warmup_proportion_phase2=${18:-"0.128"}
train_steps_phase2=${19:-1563}
gradient_accumulation_steps_phase2=${20:-512}

# Change this for other datasets. Note that DATASET is already an absolute
# path under ${WORKSPACE}, so BERT_PREP_WORKING_DIR is normally left empty.
DATASET=${WORKSPACE}/lower_case_1_seq_len_128_max_pred_20_masked_lm_prob_0.15_random_seed_12345_dupe_factor_5_shard_1472_test_split_10/wikicorpus_en/training
DATA_DIR_PHASE1=${21:-$BERT_PREP_WORKING_DIR/${DATASET}/}
BERT_CONFIG=${WORKSPACE}/uncased_L-12_H-768_A-12/bert_config.json
CODEDIR=${22:-"$(pwd)"}
init_checkpoint=${23:-"None"}
RESULTS_DIR=$CODEDIR/results
CHECKPOINTS_DIR=$RESULTS_DIR/checkpoints

mkdir -p $CHECKPOINTS_DIR

if [ ! -d "$DATA_DIR_PHASE1" ] ; then
   echo "Warning! $DATA_DIR_PHASE1 directory missing. Training cannot start."
fi
if [ ! -d "$RESULTS_DIR" ] ; then
   echo "Error! $RESULTS_DIR directory missing."
   exit 1
fi
if [ ! -d "$CHECKPOINTS_DIR" ] ; then
   echo "Warning! $CHECKPOINTS_DIR directory missing."
   echo "Checkpoints will be written to $RESULTS_DIR instead."
   CHECKPOINTS_DIR=$RESULTS_DIR
fi
if [ ! -f "$BERT_CONFIG" ] ; then
   echo "Error! BERT configuration file not found at $BERT_CONFIG"
   exit 1
fi
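# For reference, the positional interface assumed above. The script must run
# under mpirun so the OMPI_* variables are set; <script> is a placeholder,
# not the original filename:
#   <script> <master_addr> [lr] [precision] [num_gpus] [warmup_proportion] \
#            [train_steps] [save_ckpt_steps] [resume] [create_logfile] ...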
PREC=""
if [ "$precision" = "fp16" ] ; then
   PREC="--fp16"
elif [ "$precision" = "fp32" ] ; then
   PREC=""   # fp32 needs no flag
elif [ "$precision" = "tf32" ] ; then
   PREC=""   # tf32 needs no flag
else
   echo "Unknown <precision> argument: $precision"
   exit 2
fi

ACCUMULATE_GRADIENTS=""
if [ "$accumulate_gradients" == "true" ] ; then
   ACCUMULATE_GRADIENTS="--gradient_accumulation_steps=$gradient_accumulation_steps"
fi

CHECKPOINT=""
if [ "$resume_training" == "true" ] ; then
   CHECKPOINT="--resume_from_checkpoint"
fi

ALL_REDUCE_POST_ACCUMULATION=""
if [ "$allreduce_post_accumulation" == "true" ] ; then
   ALL_REDUCE_POST_ACCUMULATION="--allreduce_post_accumulation"
fi

ALL_REDUCE_POST_ACCUMULATION_FP16=""
if [ "$allreduce_post_accumulation_fp16" == "true" ] ; then
   ALL_REDUCE_POST_ACCUMULATION_FP16="--allreduce_post_accumulation_fp16"
fi

INIT_CHECKPOINT=""
if [ "$init_checkpoint" != "None" ] ; then
   INIT_CHECKPOINT="--init_checkpoint=$init_checkpoint"
fi

echo $DATA_DIR_PHASE1
INPUT_DIR=$DATA_DIR_PHASE1

# Assemble the phase-1 pretraining command.
CMD=" $CODEDIR/run_pretraining_v1.py"
CMD+=" --input_dir=$DATA_DIR_PHASE1"
CMD+=" --output_dir=$CHECKPOINTS_DIR"
CMD+=" --config_file=$BERT_CONFIG"
CMD+=" --bert_model=bert-large-uncased"
CMD+=" --train_batch_size=$train_batch_size"
CMD+=" --max_seq_length=128"
CMD+=" --max_predictions_per_seq=20"
CMD+=" --max_steps=$train_steps"
CMD+=" --warmup_proportion=$warmup_proportion"
CMD+=" --num_steps_per_checkpoint=$save_checkpoint_steps"
CMD+=" --learning_rate=$learning_rate"
CMD+=" --seed=$seed"
CMD+=" $PREC"
CMD+=" $ACCUMULATE_GRADIENTS"
CMD+=" $CHECKPOINT"
CMD+=" $ALL_REDUCE_POST_ACCUMULATION"
CMD+=" $ALL_REDUCE_POST_ACCUMULATION_FP16"
CMD+=" $INIT_CHECKPOINT"
CMD+=" --do_train"
CMD+=" --json-summary ${RESULTS_DIR}/dllogger.json "
CMD+=" --local_rank ${comm_rank} "
CMD+=" --dist_url tcp://${1}:45679 "
CMD+=" --world_size ${comm_size} "

# Ranks are launched directly by MPI, so torch.distributed.launch is unused.
#CMD="python3 -m torch.distributed.launch --nproc_per_node=$num_gpus $CMD"
APP="python3 $CMD"

if [ "$create_logfile" = "true" ] ; then
   export GBS=$(expr $train_batch_size \* $num_gpus)
   printf -v TAG "pyt_bert_pretraining_phase1_%s_gbs%d" "$precision" $GBS
   DATESTAMP=$(date +'%y%m%d%H%M%S')
   LOGFILE=$RESULTS_DIR/$job_name.$TAG.$DATESTAMP.log
   # Note: the log path is only announced; output is not redirected to it here.
   printf "Logs written to %s\n" "$LOGFILE"
fi

echo ${CMD}

# Bind each local rank to its own NUMA domain, InfiniBand HCA, and Gloo
# interface. All four GPUs remain visible to every rank.
case ${lrank} in
0)
   export HIP_VISIBLE_DEVICES=0,1,2,3
   export UCX_NET_DEVICES=mlx5_0:1
   export UCX_IB_PCI_BW=mlx5_0:50Gbs
   export GLOO_SOCKET_IFNAME=ib0
   numactl --cpunodebind=0 --membind=0 ${APP}
   ;;
1)
   export HIP_VISIBLE_DEVICES=0,1,2,3
   export UCX_NET_DEVICES=mlx5_1:1
   export UCX_IB_PCI_BW=mlx5_1:50Gbs
   export GLOO_SOCKET_IFNAME=ib1
   numactl --cpunodebind=1 --membind=1 ${APP}
   ;;
2)
   export HIP_VISIBLE_DEVICES=0,1,2,3
   export UCX_NET_DEVICES=mlx5_2:1
   export UCX_IB_PCI_BW=mlx5_2:50Gbs
   export GLOO_SOCKET_IFNAME=ib2
   numactl --cpunodebind=2 --membind=2 ${APP}
   ;;
3)
   export HIP_VISIBLE_DEVICES=0,1,2,3
   export UCX_NET_DEVICES=mlx5_3:1
   export UCX_IB_PCI_BW=mlx5_3:50Gbs
   export GLOO_SOCKET_IFNAME=ib3
   numactl --cpunodebind=3 --membind=3 ${APP}
   ;;
esac
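# Example launch, sketched under the assumption of 4 NUMA domains and 4 mlx5
# HCAs per node, as encoded in the case statement above (the hostfile name,
# script name, and master address are hypothetical):
#   mpirun -np 8 --map-by ppr:4:node --hostfile hosts \
#       ./run_pretraining.sh 10.0.0.1
# Each of the 4 local ranks then binds to its matching NUMA node and NIC, and
# all ranks rendezvous at tcp://10.0.0.1:45679.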