deepspeed --hostfile hosts --num_gpus=4 src/train_bash.py \
    --stage sft \
    --do_train \
    --template llama2 \
    --dataset alpaca_gpt4_en,alpaca_gpt4_zh \
    --finetuning_type full \
    --model_name_or_path /work/home/liangjing/.cache/modelscope/hub/skyline2006/llama-7b \
    --output_dir /work/share/huchen1/liangjj/llama_factory \
    --per_device_train_batch_size 1 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 1 \
    --preprocessing_num_workers 2 \
    --lr_scheduler_type cosine \
    --logging_steps 10 \
    --save_steps 100 \
    --eval_steps 100 \
    --learning_rate 5e-5 \
    --max_grad_norm 0.5 \
    --num_train_epochs 4.0 \
    --val_size 0.01 \
    --evaluation_strategy steps \
    --load_best_model_at_end \
    --weight_decay 0. \
    --warmup_ratio 0.03 \
    --plot_loss \
    --fp16 \
    --save_on_each_node \
    --deepspeed deepspeed.json
    # --resume_from_checkpoint /work/share/huchen1/liangjj/round6/round7/checkpoint_2000
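The --hostfile hosts argument points to a DeepSpeed hostfile that lists the nodes participating in the run and how many GPU slots each contributes. The file itself is not shown above; the sketch below uses placeholder hostnames (node1, node2) and assumes two nodes with 4 GPUs each, matching --num_gpus=4 per node:

    node1 slots=4
    node2 slots=4

Passwordless SSH between the nodes is required for the deepspeed launcher to start workers on each host listed here.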
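The command also references a deepspeed.json whose contents are not shown. Below is a minimal sketch of what such a file might look like, assuming ZeRO stage 2; the "auto" values defer batch size, gradient accumulation, gradient clipping, and fp16 settings to the corresponding command-line flags via the HuggingFace Trainer integration, and the bucket sizes are illustrative defaults rather than values taken from the original setup:

    {
      "train_micro_batch_size_per_gpu": "auto",
      "gradient_accumulation_steps": "auto",
      "gradient_clipping": "auto",
      "zero_allow_untested_optimizer": true,
      "fp16": {
        "enabled": "auto",
        "loss_scale": 0,
        "initial_scale_power": 16,
        "loss_scale_window": 1000,
        "hysteresis": 2,
        "min_loss_scale": 1
      },
      "zero_optimization": {
        "stage": 2,
        "allgather_partitions": true,
        "allgather_bucket_size": 5e8,
        "overlap_comm": true,
        "reduce_scatter": true,
        "reduce_bucket_size": 5e8,
        "contiguous_gradients": true
      }
    }

For full-parameter fine-tuning of a 7B model, ZeRO-2 shards optimizer states and gradients across the GPUs; if memory is still insufficient, switching "stage" to 3 (optionally with parameter offload) is the usual next step.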