finetune.sh 1.09 KB
Newer Older
Rayyyyy's avatar
Rayyyyy committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
#!/bin/bash
# Fine-tune a BGE general-embedding model via FlagEmbedding's finetune runner.
#
# Required environment variables (or edit the defaults below):
#   NUM_GPUS    - number of processes for torchrun; must match HIP_VISIBLE_DEVICES
#   OUTPUT_DIR  - directory where the fine-tuned model is saved
#   MODEL_PATH  - model name or path to fine-tune (e.g. BAAI/bge-base-en-v1.5)
#   TRAIN_DATA  - path to the training dataset (jsonl)
# Optional:
#   BATCH_SIZE  - per-device train batch size (use a large value; 1 for toy data)
set -euo pipefail

echo "Export params ..."
export HIP_VISIBLE_DEVICES=0,1,2,3  # edit to the GPU IDs/count you train on

# Fail fast with a clear message if a required setting is missing, instead of
# passing a mangled placeholder to torchrun.
: "${NUM_GPUS:?set NUM_GPUS to the number of GPUs (must match HIP_VISIBLE_DEVICES)}"
: "${OUTPUT_DIR:?set OUTPUT_DIR to the path to save the model}"
: "${MODEL_PATH:?set MODEL_PATH to the model to fine-tune}"
: "${TRAIN_DATA:?set TRAIN_DATA to the path of the train dataset}"
BATCH_SIZE="${BATCH_SIZE:-1}"

# NOTE: '--normlized' (sic) is the actual flag name in FlagEmbedding's
# argument parser — do not "correct" the spelling.
torchrun --nproc_per_node "$NUM_GPUS" \
    -m FlagEmbedding.baai_general_embedding.finetune.run \
    --output_dir "$OUTPUT_DIR" \
    --model_name_or_path "$MODEL_PATH" \
    --train_data "$TRAIN_DATA" \
    --learning_rate 1e-5 \
    --fp16 \
    --num_train_epochs 5 \
    --per_device_train_batch_size "$BATCH_SIZE" \
    --dataloader_drop_last True \
    --normlized True \
    --temperature 0.02 \
    --query_max_len 64 \
    --passage_max_len 256 \
    --train_group_size 2 \
    --negatives_cross_device \
    --logging_steps 10 \
    --save_steps 1000 \
    --query_instruction_for_retrieval ""
24
25
26
27
28
29
30
31
32
33


### Hard Negatives
# python -m FlagEmbedding.baai_general_embedding.finetune.hn_mine \
#     --model_name_or_path BAAI/bge-base-en-v1.5 \
#     --input_file toy_finetune_data.jsonl \
#     --output_file toy_finetune_data_minedHN.jsonl \
#     --range_for_sampling 2-200 \
#     --negative_number 15 \
#     --use_gpu_for_searching