#!/usr/bin/env bash
# Launch LLaMA-7B batch inference (llama_infer.py) on 4 GPUs.
#
# Reads prompts from ./prompts.txt and writes predictions to ./result.txt.
# Requires the converted model weights and SentencePiece tokenizer under
# /public/LLAMA/model/ (adjust LOAD_MODEL / SPM_PATH below as needed).
#
# NOTE: the original one-liner passed the `python …` invocation as operands
# to `export`, so the inference never actually ran; assignments and the
# command are now separated.
set -euo pipefail

# Restrict the job to the first four ROCm devices.
export HIP_VISIBLE_DEVICES=0,1,2,3

readonly LOAD_MODEL=/public/LLAMA/model/chatflow_7b.bin
readonly SPM_PATH=/public/LLAMA/model/tokenizer.model

python llama_infer.py \
  --test_path ./prompts.txt \
  --prediction_path ./result.txt \
  --load_model_path "$LOAD_MODEL" \
  --config_path config/llama_7b_config.json \
  --spm_model_path "$SPM_PATH" \
  --world_size 4 \
  --seq_length 1024