#!/usr/bin/bash
# Launch one ASR inference job per HIP GPU (4 total), each pinned to the
# matching NUMA node with numactl. Per-job predictions and logs are written
# under ./logs/; each job's combined stdout/stderr is tee'd to result_<id>.log.
# Jobs are started with nohup + '&' (fire-and-forget): the script returns
# immediately and the jobs survive the launching shell.

set -u  # treat unset variables as errors (the old script silently expanded an unset $gpu_id)

# Previous experiment paths, kept commented out for reference:
# asr_train_config="/home/sunzhq/workspace/yidong-infer/conformer/34e9cabc2c29fd0e3a2917ffa525d98b/exp/asr_train_asr_conformer3_raw_char_batch_bins4000000_accum_grad4_sp/config.yaml"
# asr_model_file="/home/sunzhq/workspace/yidong-infer/conformer/34e9cabc2c29fd0e3a2917ffa525d98b/exp/asr_train_asr_conformer3_raw_char_batch_bins4000000_accum_grad4_sp/valid.acc.ave_10best.pth"
# lm_train_config=/home/sunzhq/workspace/yidong-infer/conformer/34e9cabc2c29fd0e3a2917ffa525d98b/exp/lm_train_lm_transformer_char_batch_bins2000000/config.yaml
# lm_path=/home/sunzhq/workspace/yidong-infer/conformer/34e9cabc2c29fd0e3a2917ffa525d98b/exp/lm_train_lm_transformer_char_batch_bins2000000/valid.loss.ave_10best.pth
# manifest="/home/sunzhq/workspace/yidong-infer/conformer/torch-infer/test"

# Model / LM configuration and the test manifest (wav.scp + text) to decode.
asr_train_config="/home/sunzhq/workspace/yidong-infer/conformer/torch-infer/exp/asr_train_asr_conformer3_raw_char_batch_bins4000000_accum_grad4_sp/config.yaml"
asr_model_file="/home/sunzhq/workspace/yidong-infer/conformer/torch-infer/exp/asr_train_asr_conformer3_raw_char_batch_bins4000000_accum_grad4_sp/valid.acc.ave_10best.pth"
lm_train_config="/home/sunzhq/workspace/yidong-infer/conformer/torch-infer/exp/lm_train_lm_transformer_char_batch_bins2000000/config.yaml"
lm_path="/home/sunzhq/workspace/yidong-infer/conformer/torch-infer/exp/lm_train_lm_transformer_char_batch_bins2000000/valid.loss.ave_10best.pth"
manifest="/home/sunzhq/workspace/yidong-infer/conformer/torch-infer/test"

mkdir -p logs

# Decoding mode passed to infer.py.
# mode='attention_rescoring'
mode='lm_rescoring'
# num_gpus=$(echo "$CUDA_VISIBLE_DEVICES" | awk -F "," '{print NF}')

# BUG FIX: the original four copy-pasted launch blocks referenced $gpu_id
# without ever setting it, so all four concurrent jobs wrote to the SAME
# predictions/log files and clobbered each other. A loop over device ids
# gives each job unique output paths; the device id doubles as the NUMA
# node id (-N/-m) so each job's memory stays local to its GPU's socket.
for gpu_id in 0 1 2 3; do
  # Per-command env assignment scopes the visible GPU to this job only
  # (equivalent to the original 'export HIP_VISIBLE_DEVICES=<id>' before
  # each launch, without mutating the launcher's own environment).
  HIP_VISIBLE_DEVICES="${gpu_id}" nohup numactl -N "${gpu_id}" -m "${gpu_id}" python3 infer.py \
    --config "${asr_train_config}" \
    --model_path "${asr_model_file}" \
    --lm_config "${lm_train_config}" \
    --lm_path "${lm_path}" \
    --gpu 0 \
    --wav_scp "${manifest}/wav.scp" --text "${manifest}/text" \
    --result_file "./logs/predictions_${mode}_${gpu_id}.txt" \
    --log_file "./logs/log_${mode}_${gpu_id}.txt" \
    --batch_size 24 --beam_size 10 \
    --mode "${mode}" 2>&1 | tee "result_${gpu_id}.log" &
done