Commit 0941998c authored by sunzhq2's avatar sunzhq2 Committed by xuxo
Browse files

conformer add post and ana

parent fde49a28
This diff is collapsed.
...@@ -158,8 +158,14 @@ if __name__ == '__main__': ...@@ -158,8 +158,14 @@ if __name__ == '__main__':
# b. Forward Encoder # b. Forward Encoder
# enc: [N, T, C] # enc: [N, T, C]
feats, feats_lengths = speech2text.asr_model.pre_data(**batch)
feats_lengths_1 = torch.ceil(feats_lengths.float() / 4).long()
print("feats_lengths_1:",feats_lengths_1)
# print("feats_lengths:",feats_lengths)
ll_time = time.time() ll_time = time.time()
encoder_out, encoder_out_lens = speech2text.asr_model.encode(**batch) encoder_out, encoder_out_lens = speech2text.asr_model.encode(feats, feats_lengths)
print("encoder_out_lens:",encoder_out_lens)
# ctc_log_probs: [N, T, C] # ctc_log_probs: [N, T, C]
ctc_log_probs = torch.nn.functional.log_softmax( ctc_log_probs = torch.nn.functional.log_softmax(
speech2text.asr_model.ctc.ctc_lo(encoder_out), dim=2 speech2text.asr_model.ctc.ctc_lo(encoder_out), dim=2
......
This diff is collapsed.
#!/usr/bin/bash
# Launch one conformer inference worker per HIP GPU (devices 0-3), each pinned
# to the matching NUMA node with numactl, decoding the wav.scp/text manifest
# with infer.py and writing per-worker prediction/log files under ./logs.
#
# Bug fixed: $gpu_id was never assigned, so every worker expanded it to the
# empty string and all four processes wrote to the SAME
# predictions_${mode}_.txt / log_${mode}_.txt files, clobbering each other.
# The four copy-pasted invocations are now a single loop that sets gpu_id.

# Previously-used experiment paths kept for reference:
# asr_train_config="/home/sunzhq/workspace/yidong-infer/conformer/34e9cabc2c29fd0e3a2917ffa525d98b/exp/asr_train_asr_conformer3_raw_char_batch_bins4000000_accum_grad4_sp/config.yaml"
# asr_model_file="/home/sunzhq/workspace/yidong-infer/conformer/34e9cabc2c29fd0e3a2917ffa525d98b/exp/asr_train_asr_conformer3_raw_char_batch_bins4000000_accum_grad4_sp/valid.acc.ave_10best.pth"
# lm_train_config=/home/sunzhq/workspace/yidong-infer/conformer/34e9cabc2c29fd0e3a2917ffa525d98b/exp/lm_train_lm_transformer_char_batch_bins2000000/config.yaml
# lm_path=/home/sunzhq/workspace/yidong-infer/conformer/34e9cabc2c29fd0e3a2917ffa525d98b/exp/lm_train_lm_transformer_char_batch_bins2000000/valid.loss.ave_10best.pth
# manifest="/home/sunzhq/workspace/yidong-infer/conformer/torch-infer/test"

asr_train_config="/home/sunzhq/workspace/yidong-infer/conformer/torch-infer/exp/asr_train_asr_conformer3_raw_char_batch_bins4000000_accum_grad4_sp/config.yaml"
asr_model_file="/home/sunzhq/workspace/yidong-infer/conformer/torch-infer/exp/asr_train_asr_conformer3_raw_char_batch_bins4000000_accum_grad4_sp/valid.acc.ave_10best.pth"
lm_train_config="/home/sunzhq/workspace/yidong-infer/conformer/torch-infer/exp/lm_train_lm_transformer_char_batch_bins2000000/config.yaml"
lm_path="/home/sunzhq/workspace/yidong-infer/conformer/torch-infer/exp/lm_train_lm_transformer_char_batch_bins2000000/valid.loss.ave_10best.pth"
manifest="/home/sunzhq/workspace/yidong-infer/conformer/torch-infer/test"

mkdir -p logs

# mode='attention_rescoring'
mode='lm_rescoring'

# One worker per device; NUMA node index matches the GPU index on this host.
# HIP_VISIBLE_DEVICES is set per-command (not exported globally) so each
# worker sees exactly its own device as device 0, hence "--gpu 0".
for gpu_id in 0 1 2 3; do
    HIP_VISIBLE_DEVICES=$gpu_id nohup numactl -N "$gpu_id" -m "$gpu_id" python3 infer.py \
        --config "$asr_train_config" \
        --model_path "$asr_model_file" \
        --lm_config "$lm_train_config" \
        --lm_path "$lm_path" \
        --gpu 0 \
        --wav_scp "$manifest/wav.scp" --text "$manifest/text" \
        --result_file "./logs/predictions_${mode}_${gpu_id}.txt" \
        --log_file "./logs/log_${mode}_${gpu_id}.txt" \
        --batch_size 24 --beam_size 10 \
        --mode "$mode" 2>&1 | tee "result_${gpu_id}.log" &
done
This diff is collapsed.
espnet: 0.9.0
files:
asr_model_file: exp/asr_train_asr_conformer3_raw_char_batch_bins4000000_accum_grad4_sp/valid.acc.ave_10best.pth
lm_file: exp/lm_train_lm_transformer_char_batch_bins2000000/valid.loss.ave_10best.pth
python: "3.7.3 (default, Mar 27 2019, 22:11:17) \n[GCC 7.3.0]"
timestamp: 1603088092.704853
torch: 1.6.0
yaml_files:
asr_train_config: exp/asr_train_asr_conformer3_raw_char_batch_bins4000000_accum_grad4_sp/config.yaml
lm_train_config: exp/lm_train_lm_transformer_char_batch_bins2000000/config.yaml
python3 conformer-compute-wer.py ./logs/ref.trn ./logs/hyp.trn
\ No newline at end of file
This diff is collapsed.
espnet: 0.9.0
files:
asr_model_file: exp/asr_train_asr_conformer3_raw_char_batch_bins4000000_accum_grad4_sp/valid.acc.ave_10best.pth
lm_file: exp/lm_train_lm_transformer_char_batch_bins2000000/valid.loss.ave_10best.pth
python: "3.7.3 (default, Mar 27 2019, 22:11:17) \n[GCC 7.3.0]"
timestamp: 1603088092.704853
torch: 1.6.0
yaml_files:
asr_train_config: exp/asr_train_asr_conformer3_raw_char_batch_bins4000000_accum_grad4_sp/config.yaml
lm_train_config: exp/lm_train_lm_transformer_char_batch_bins2000000/config.yaml
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment