# LoRA SFT of LLaMA-2-13B on a single ROCm GPU (device 2) via DeepSpeed.
# MASTER_PORT must be assigned on its own line: a prefix assignment on the
# same command would not be visible when the shell expands $MASTER_PORT.
MASTER_PORT=$(shuf -n 1 -i 10000-65535)
HIP_VISIBLE_DEVICES=2 deepspeed --num_gpus=1 --master_port $MASTER_PORT src/train_bash.py \
    --stage sft \
    --do_train True \
    --flash_attn \
    --template llama2 \
    --dataset alpaca_gpt4_zh \
    --finetuning_type lora \
    --model_name_or_path /home/llama-2-13B \
    --lora_target q_proj,v_proj \
    --output_dir ./checkpoint_13b \
    --overwrite_output_dir \
    --per_device_train_batch_size 16 \
    --lr_scheduler_type cosine \
    --logging_steps 1 \
    --save_steps 1000 \
    --learning_rate 5e-5 \
    --gradient_accumulation_steps 1 \
    --bf16 \
    --deepspeed bf16_deepspeed.json
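The command passes --deepspeed bf16_deepspeed.json, but the file itself is not shown. A minimal sketch of what such a config might contain, assuming ZeRO stage 2 and deferring the batch-size fields to the HuggingFace Trainer via "auto" (the stage choice and exact fields are assumptions, not taken from the original):

    {
      "train_batch_size": "auto",
      "train_micro_batch_size_per_gpu": "auto",
      "gradient_accumulation_steps": "auto",
      "gradient_clipping": "auto",
      "bf16": {
        "enabled": true
      },
      "zero_optimization": {
        "stage": 2,
        "overlap_comm": true,
        "contiguous_gradients": true
      }
    }

Setting "bf16": {"enabled": true} keeps the config consistent with the --bf16 flag on the command line; the "auto" values let the Trainer's --per_device_train_batch_size and --gradient_accumulation_steps take effect without being duplicated in the JSON.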