# finetune.py
import os
import math
import json
import logging
import argparse
import torch

from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, LoggingHandler, losses, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator

#### Just some code to print debug information to stdout
logging.basicConfig(
    format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)

parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, default='./datasets/tmp.txt', help='Path to the input data file (one JSON object per line)')
parser.add_argument('--train_batch_size', type=int, default=16)
parser.add_argument('--num_epochs', type=int, default=10)
parser.add_argument('--model_name_or_path', type=str, default="all-MiniLM-L6-v2")
parser.add_argument('--model_save_path', type=str, default="output/training_sbert_" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S"), help='Output folder')
parser.add_argument('--lr', type=float, default=2e-05)
args = parser.parse_args()


if __name__ == "__main__":

    sts_dataset_path = args.data_path
    # Check that the dataset exists; abort otherwise
    if not os.path.exists(sts_dataset_path):
        print("Dataset does not exist:", sts_dataset_path)
        exit(1)

    model_name_or_path = args.model_name_or_path
    train_batch_size = args.train_batch_size
    num_epochs = args.num_epochs
    model_save_path = args.model_save_path

    # Load a pre-trained sentence transformer model
    model = SentenceTransformer(model_name_or_path, device='cuda' if torch.cuda.is_available() else 'cpu')


    # Convert the dataset to a DataLoader ready for training
    logging.info("Read STSbenchmark train dataset")
    # Read the dataset
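    # Each line is expected to be a JSON object of the form
    # {"sentence1": "...", "sentence2": "...", "score": 0.8}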
    train_samples = []
    dev_samples = []
    with open(sts_dataset_path, "r", encoding="utf8") as fIn:
        for count, lineinfo in enumerate(fIn):
            row = json.loads(lineinfo)
            score = float(row["score"])  # Score is assumed to already lie in the range 0 ... 1
            inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)

            # Put every fifth example into the dev set, the rest into the train set
            if (count + 1) % 5 == 0:
                dev_samples.append(inp_example)
            else:
                train_samples.append(inp_example)

    logging.info("Dealing data end.")
    train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
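    # CosineSimilarityLoss trains the model so that the cosine similarity between the
    # two sentence embeddings matches the gold score label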
    train_loss = losses.CosineSimilarityLoss(model=model)

    # Development set: Measure correlation between cosine score and gold labels
    logging.info("Read STSbenchmark dev dataset")
    evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")

    # Configure the training; the evaluator runs on the dev set every `evaluation_steps`
    warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1)  # 10% of the total training steps for warm-up
    logging.info("Warmup-steps: {}".format(warmup_steps))

    print("Start training ...")
    # Train the model
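    # model.fit tokenizes the InputExample batches internally and, since an evaluator is
    # given, keeps the checkpoint that scores best on the dev set in output_path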
    model.fit(
        train_objectives=[(train_dataloader, train_loss)],
        evaluator=evaluator,
        epochs=num_epochs,
        evaluation_steps=1000,
        warmup_steps=warmup_steps,
        optimizer_params={'lr': args.lr},
        output_path=model_save_path,
    )
    logging.info("Finetune end")

    ##############################################################################
    #
    # Load the stored model and evaluate it on the held-out dev samples
    #
    ##############################################################################

    model = SentenceTransformer(model_save_path)
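    # No separate test split exists, so the dev samples are reused here under the name "sts-test"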
    test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-test")
    test_evaluator(model, output_path=model_save_path)
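
# Example invocation (hypothetical paths):
#   python finetune.py --data_path ./datasets/sts.jsonl --num_epochs 10 --lr 2e-05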