#!/bin/bash
# Launch multi-GPU LoRA supervised fine-tuning (SFT) of ChatGLM2-6B on the
# FinGPT sentiment dataset via LLaMA-Factory + DeepSpeed.
# To train on specific GPUs, replace --num_gpus 4 with --include="localhost:0,3".
set -euo pipefail

# NOTE(review): fixed two inconsistencies from the original script:
#   - template was "chatglm3" while the model is chatglm2-6b; the conversation
#     template must match the model family, so it is now "chatglm2".
#     (If the chatglm3 *model* was intended instead, change --model_name_or_path.)
#   - the DeepSpeed config path was absolute ("/LLaMA-Factory-main/...") while
#     every other path is relative; made it relative for consistency.
deepspeed --master_port 25678 --num_gpus 4 LLaMA-Factory-main/src/train_bash.py \
  --deepspeed LLaMA-Factory-main/deepspeed.json \
  --stage sft \
  --do_train \
  --model_name_or_path THUDM/chatglm2-6b \
  --dataset fingpt_sentiment \
  --dataset_dir LLaMA-Factory-main/data \
  --template chatglm2 \
  --finetuning_type lora \
  --lora_target all \
  --output_dir saves/FinGPT/lora_multi_dcu/sft \
  --overwrite_output_dir \
  --cutoff_len 1024 \
  --preprocessing_num_workers 4 \
  --per_device_train_batch_size 4 \
  --per_device_eval_batch_size 4 \
  --gradient_accumulation_steps 1 \
  --lr_scheduler_type cosine \
  --logging_steps 10 \
  --warmup_steps 20 \
  --save_steps 100 \
  --eval_steps 100 \
  --evaluation_strategy steps \
  --load_best_model_at_end \
  --learning_rate 5e-5 \
  --num_train_epochs 3.0 \
  --max_samples 3000 \
  --val_size 0.1 \
  --ddp_timeout 180000000 \
  --plot_loss \
  --fp16