MASTER_PORT=$(shuf -n 1 -i 10000-65535)
deepspeed --include localhost:0 --master_port $MASTER_PORT src/train_bash.py \
    --stage sft \
    --do_train True \
    --flash_attn \
    --template llama2 \
    --dataset alpaca_gpt4_zh \
    --finetuning_type lora \
    --model_name_or_path /home/Llama-2-7b-hf \
    --lora_target q_proj,v_proj \
    --output_dir ./checkpoint_7b \
    --overwrite_output_dir \
    --per_device_train_batch_size 32 \
    --lr_scheduler_type cosine \
    --logging_steps 1 \
    --save_steps 1000 \
    --learning_rate 5e-5 \
    --gradient_accumulation_steps 1 \
    --max_steps 3000 \
    --bf16 \
    --deepspeed bf16_deepspeed.json
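
The command points DeepSpeed at a config file named bf16_deepspeed.json, whose contents are not shown here. Below is a minimal sketch of what such a config might look like, assuming ZeRO stage 2 with bf16 enabled; the "auto" values are placeholders that let the HuggingFace Trainer integration fill in batch-size, gradient-accumulation, and clipping settings from the command-line flags above.

cat > bf16_deepspeed.json <<'EOF'
{
  "bf16": {
    "enabled": true
  },
  "zero_optimization": {
    "stage": 2,
    "overlap_comm": true,
    "contiguous_gradients": true
  },
  "train_micro_batch_size_per_gpu": "auto",
  "train_batch_size": "auto",
  "gradient_accumulation_steps": "auto",
  "gradient_clipping": "auto"
}
EOF

Any values duplicated between this file and the command line (for example batch size or gradient accumulation) must agree, otherwise the Trainer will raise a configuration mismatch error; using "auto" sidesteps that by deferring to the command-line flags.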