"""
This is an example how to train SentenceTransformers in a multi-task setup.

The system trains BERT on the AllNLI and on the STSbenchmark dataset.
"""

import logging
import traceback
from datetime import datetime

from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.losses import CosineSimilarityLoss, SoftmaxLoss
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import MultiDatasetBatchSamplers, SentenceTransformerTrainingArguments

# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)

# Training configuration
model_name = "bert-base-uncased"
num_train_epochs = 1
batch_size = 16
output_dir = "output/training_multi-task_" + model_name + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
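# The one-liner above is shorthand; a minimal sketch of the equivalent explicit construction
# (using the `models` submodule) would be:
#
#   from sentence_transformers import models
#   transformer = models.Transformer(model_name)
#   pooling = models.Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
#   model = SentenceTransformer(modules=[transformer, pooling])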

# 2a. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
nli_train_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="train")
nli_eval_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="dev").select(range(1000))
logging.info(nli_train_dataset)
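# The "pair-class" subset yields (premise, hypothesis, label) rows, where the label is one of
# 3 classes (entailment / neutral / contradiction), which is why SoftmaxLoss below uses num_labels=3.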

# 2b. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
stsb_train_dataset = load_dataset("sentence-transformers/stsb", split="train")
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
stsb_test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(stsb_train_dataset)
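# Each STSB row contains (sentence1, sentence2, score) with the score normalized to [0, 1],
# matching what CosineSimilarityLoss and the evaluators below expect.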

# 3. Define our training losses
# 3a. SoftmaxLoss for the NLI data (sentence_A, sentence_B, class), see also https://sbert.net/docs/training/loss_overview.html
train_loss_nli = SoftmaxLoss(
    model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=3
)
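# SoftmaxLoss implements the original SBERT classification objective: it concatenates the two
# sentence embeddings u and v with |u - v| and feeds the result through a softmax classifier.
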
# 3b. CosineSimilarityLoss for the STSB data (sentence_A, sentence_B, similarity score between 0 and 1)
train_loss_sts = CosineSimilarityLoss(model=model)
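# CosineSimilarityLoss computes the cosine similarity between the two sentence embeddings and
# minimizes the mean squared error against the gold similarity score.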

# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
    sentences1=stsb_eval_dataset["sentence1"],
    sentences2=stsb_eval_dataset["sentence2"],
    scores=stsb_eval_dataset["score"],
    main_similarity=SimilarityFunction.COSINE,
    name="sts-dev",
)
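# The evaluator can also be invoked directly, e.g. `dev_evaluator(model)` before training,
# to log an untrained baseline to compare the training curve against.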

# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
    # Required parameter:
    output_dir=output_dir,
    # Optional training parameters:
    num_train_epochs=num_train_epochs,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    warmup_ratio=0.1,
    fp16=True,  # Set to False if you get an error that your GPU can't run on FP16
    bf16=False,  # Set to True if you have a GPU that supports BF16
    # With ROUND_ROBIN, batches are sampled evenly from each dataset until one of the datasets is exhausted.
    # The alternative is PROPORTIONAL, which samples from each dataset in proportion to its size,
    # but that leads to many more samples from the larger dataset (AllNLI in this case)
    multi_dataset_batch_sampler=MultiDatasetBatchSamplers.ROUND_ROBIN,
    # Optional tracking/debugging parameters:
    eval_strategy="steps",
    eval_steps=100,
    save_strategy="steps",
    save_steps=100,
    save_total_limit=2,
    logging_steps=100,
    run_name="multi-task",  # Will be used in W&B if `wandb` is installed
)
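# Note: with ROUND_ROBIN and STSB being much smaller than AllNLI, an "epoch" here effectively ends once
# the STSB batches are exhausted, i.e. roughly 2 * len(stsb_train_dataset) / batch_size batches in total.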

# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset={
        "all-nli": nli_train_dataset,
        "sts": stsb_train_dataset,
    },
    eval_dataset={
        "all-nli": nli_eval_dataset,
        "sts": stsb_eval_dataset,
    },
    loss={
        "all-nli": train_loss_nli,
        "sts": train_loss_sts,
    },
    evaluator=dev_evaluator,
)
trainer.train()
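
# (Optional) A quick sanity check after training; a minimal sketch assuming sentence-transformers v3+,
# where `SentenceTransformer.similarity` is available:
#
#   embeddings = model.encode(["A man is eating food.", "A man is eating a piece of bread."])
#   print(model.similarity(embeddings, embeddings))  # 2x2 cosine similarity matrix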


# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
    sentences1=stsb_test_dataset["sentence1"],
    sentences2=stsb_test_dataset["sentence2"],
    scores=stsb_test_dataset["score"],
    main_similarity=SimilarityFunction.COSINE,
    name="sts-test",
)
test_evaluator(model)
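# The evaluator logs its metrics (e.g. Pearson/Spearman correlation of the cosine similarities
# with the gold scores) and also returns them, if you want to inspect the results programmatically.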

# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
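# The saved model can be reloaded later with `SentenceTransformer(final_output_dir)`.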

# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
    model.push_to_hub(f"{model_name}-multi-task")
except Exception:
    logging.error(
        f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
        f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
        f"and saving it using `model.push_to_hub('{model_name}-multi-task')`."
    )