Commit 2de27772 authored by zhaoying1

Update lora_7B.sh

parent d767e5f5
@@ -11,14 +11,11 @@ deepspeed --include localhost:0 --master_port $MASTER_PORT src/train_bash.py \
--output_dir ./checkpoint_7b \
--overwrite_output_dir \
--per_device_train_batch_size 32 \
--per_device_eval_batch_size 1 \
--lr_scheduler_type cosine \
--logging_steps 1 \
--save_steps 1000 \
--eval_steps 1000 \
--learning_rate 5e-5 \
--gradient_accumulation_steps 1 \
--max_steps 3000 \
--evaluation_strategy steps \
--bf16 \
--deepspeed bf16_deepspeed.json \
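
The script passes --deepspeed bf16_deepspeed.json, but the contents of that config file are not part of this commit. A minimal sketch of a DeepSpeed config consistent with the flags above (bf16 enabled, batch size and accumulation delegated to the HF Trainer via "auto") is shown below; the ZeRO stage and other details are assumptions, not the repository's actual file.

# Assumed sketch only -- the real bf16_deepspeed.json in this repo may differ
# (e.g. a different ZeRO stage or explicit batch sizes).
cat > bf16_deepspeed.json <<'EOF'
{
  "bf16": {
    "enabled": true
  },
  "zero_optimization": {
    "stage": 2
  },
  "train_micro_batch_size_per_gpu": "auto",
  "gradient_accumulation_steps": "auto",
  "train_batch_size": "auto"
}
EOF

With "auto" values, DeepSpeed inherits per-device batch size and gradient accumulation from the Trainer arguments in the launch command, so the JSON does not need to be kept in sync with lora_7B.sh by hand.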