import csv
import logging
import os
from contextlib import nullcontext
from typing import TYPE_CHECKING, Dict, List, Optional

import numpy as np
from sklearn.metrics import average_precision_score
from sklearn.metrics.pairwise import paired_cosine_distances, paired_euclidean_distances, paired_manhattan_distances

from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
from sentence_transformers.readers import InputExample
from sentence_transformers.similarity_functions import SimilarityFunction

if TYPE_CHECKING:
    from sentence_transformers.SentenceTransformer import SentenceTransformer

logger = logging.getLogger(__name__)


class BinaryClassificationEvaluator(SentenceEvaluator):
    """
    Evaluate a model based on the similarity of the embeddings by calculating the accuracy of identifying similar and
    dissimilar sentences.
    The metrics are the cosine similarity, dot product, Euclidean distance, and Manhattan distance.
    The primary score reported is the maximum average precision (``max_ap``) across all similarity functions.

    The results are written in a CSV. If a CSV already exists, then values are appended.

    The labels need to be 0 for dissimilar pairs and 1 for similar pairs.

    Args:
        sentences1 (List[str]): The first column of sentences.
        sentences2 (List[str]): The second column of sentences.
        labels (List[int]): labels[i] is the label for the pair (sentences1[i], sentences2[i]). Must be 0 or 1.
        name (str, optional): Name for the output. Defaults to "".
        batch_size (int, optional): Batch size used to compute embeddings. Defaults to 32.
        show_progress_bar (bool, optional): If True, shows a progress bar when computing embeddings. Defaults to False.
        write_csv (bool, optional): Write results to a CSV file. Defaults to True.
        truncate_dim (Optional[int], optional): The dimension to truncate sentence embeddings to. `None` uses the model's current truncation dimension. Defaults to None.

    Example:
        ::

            from sentence_transformers import SentenceTransformer
            from sentence_transformers.evaluation import BinaryClassificationEvaluator
            from datasets import load_dataset

            # Load a model
            model = SentenceTransformer('all-mpnet-base-v2')

            # Load a dataset with two text columns and a class label column (https://huggingface.co/datasets/sentence-transformers/quora-duplicates)
            eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")

            # Initialize the evaluator
            binary_acc_evaluator = BinaryClassificationEvaluator(
                sentences1=eval_dataset["sentence1"],
                sentences2=eval_dataset["sentence2"],
                labels=eval_dataset["label"],
                name="quora-duplicates-dev",
            )
            results = binary_acc_evaluator(model)
            '''
            Binary Accuracy Evaluation of the model on the quora-duplicates-dev dataset:
            Accuracy with Cosine-Similarity:           81.60    (Threshold: 0.8352)
            F1 with Cosine-Similarity:                 75.27    (Threshold: 0.7715)
            Precision with Cosine-Similarity:          65.81
            Recall with Cosine-Similarity:             87.89
            Average Precision with Cosine-Similarity:  76.03

            Accuracy with Dot-Product:           81.60  (Threshold: 0.8352)
            F1 with Dot-Product:                 75.27  (Threshold: 0.7715)
            Precision with Dot-Product:          65.81
            Recall with Dot-Product:             87.89
            Average Precision with Dot-Product:  76.03

            Accuracy with Manhattan-Distance:           81.50   (Threshold: 12.0727)
            F1 with Manhattan-Distance:                 74.97   (Threshold: 15.2269)
            Precision with Manhattan-Distance:          63.89
            Recall with Manhattan-Distance:             90.68
            Average Precision with Manhattan-Distance:  75.66

            Accuracy with Euclidean-Distance:           81.60   (Threshold: 0.5741)
            F1 with Euclidean-Distance:                 75.27   (Threshold: 0.6760)
            Precision with Euclidean-Distance:          65.81
            Recall with Euclidean-Distance:             87.89
            Average Precision with Euclidean-Distance:  76.03
            '''
            print(binary_acc_evaluator.primary_metric)
            # => "quora-duplicates-dev_max_ap"
            print(results[binary_acc_evaluator.primary_metric])
            # => 0.760277070888393
    """

    def __init__(
        self,
        sentences1: List[str],
        sentences2: List[str],
        labels: List[int],
        name: str = "",
        batch_size: int = 32,
        show_progress_bar: bool = False,
        write_csv: bool = True,
        truncate_dim: Optional[int] = None,
    ):
        self.sentences1 = sentences1
        self.sentences2 = sentences2
        self.labels = labels
        self.truncate_dim = truncate_dim

        self.primary_metric = "max_ap"

        assert len(self.sentences1) == len(self.sentences2)
        assert len(self.sentences1) == len(self.labels)
        for label in labels:
            assert label == 0 or label == 1

        self.write_csv = write_csv
        self.name = name
        self.batch_size = batch_size
        if show_progress_bar is None:
            show_progress_bar = (
                logger.getEffectiveLevel() == logging.INFO or logger.getEffectiveLevel() == logging.DEBUG
            )
        self.show_progress_bar = show_progress_bar

        self.csv_file = "binary_classification_evaluation" + ("_" + name if name else "") + "_results.csv"
        self.csv_headers = ["epoch", "steps"]
        metrics = [
            "accuracy",
            "accuracy_threshold",
            "f1",
            "precision",
            "recall",
            "f1_threshold",
            "ap",
        ]
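        # One CSV column per (similarity function, metric) combination, e.g. "cosine_accuracy"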
        for v in SimilarityFunction.possible_values():
            for m in metrics:
                self.csv_headers.append(f"{v}_{m}")

    @classmethod
    def from_input_examples(cls, examples: List[InputExample], **kwargs):
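        """
        Build the evaluator from a list of :class:`InputExample` objects, where ``texts`` holds the sentence
        pair and ``label`` the 0/1 similarity label. Any additional keyword arguments are forwarded to
        ``__init__``.

        A minimal sketch of the expected usage (the sentences and name below are illustrative only)::

            examples = [InputExample(texts=["A man is eating", "A person eats"], label=1)]
            evaluator = BinaryClassificationEvaluator.from_input_examples(examples, name="dev")
        """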
        sentences1 = []
        sentences2 = []
        scores = []

        for example in examples:
            sentences1.append(example.texts[0])
            sentences2.append(example.texts[1])
            scores.append(example.label)
        return cls(sentences1, sentences2, scores, **kwargs)

    def __call__(
        self, model: "SentenceTransformer", output_path: Optional[str] = None, epoch: int = -1, steps: int = -1
    ) -> Dict[str, float]:
        """
        Compute the evaluation metrics for the given model.

        Args:
            model (SentenceTransformer): The model to evaluate.
            output_path (str, optional): Path to save the evaluation results CSV file. Defaults to None.
            epoch (int, optional): The epoch number. Defaults to -1.
            steps (int, optional): The number of steps. Defaults to -1.

        Returns:
            Dict[str, float]: A dictionary containing the evaluation metrics.
        """
        if epoch != -1:
            if steps == -1:
                out_txt = f" after epoch {epoch}"
            else:
                out_txt = f" in epoch {epoch} after {steps} steps"
        else:
            out_txt = ""
        if self.truncate_dim is not None:
            out_txt += f" (truncated to {self.truncate_dim})"

        logger.info(f"Binary Accuracy Evaluation of the model on the {self.name} dataset{out_txt}:")

        scores = self.compute_metrices(model)

        file_output_data = [epoch, steps]

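        # Collect the remaining CSV columns in header order (the epoch and steps columns were added above)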
        for header_name in self.csv_headers:
            if "_" in header_name:
                sim_fct, metric = header_name.split("_", maxsplit=1)
                file_output_data.append(scores[sim_fct][metric])

        if output_path is not None and self.write_csv:
            csv_path = os.path.join(output_path, self.csv_file)
            if not os.path.isfile(csv_path):
                with open(csv_path, newline="", mode="w", encoding="utf-8") as f:
                    writer = csv.writer(f)
                    writer.writerow(self.csv_headers)
                    writer.writerow(file_output_data)
            else:
                with open(csv_path, newline="", mode="a", encoding="utf-8") as f:
                    writer = csv.writer(f)
                    writer.writerow(file_output_data)

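        # Flatten the nested scores into "{similarity_fn}_{metric}" keys and add "max_{metric}" aggregates
        # across all similarity functions (the primary metric is "max_ap")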
        metrics = {
            f"{short_name}_{metric}": value
            for short_name, values in scores.items()
            for metric, value in values.items()
        }
        metrics.update(
            {f"max_{metric}": max(scores[short_name][metric] for short_name in scores) for metric in scores["cosine"]}
        )
        metrics = self.prefix_name_to_metrics(metrics, self.name)
        self.store_metrics_in_model_card_data(model, metrics)
        return metrics

    def compute_metrices(self, model):
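        """
        Encode both sentence columns and compute accuracy, F1, precision, recall, average precision, and the
        corresponding best thresholds for every similarity function (cosine, dot product, Manhattan, Euclidean).

        Returns:
            Dict[str, Dict[str, float]]: A nested dict mapping each similarity function name to its metrics.
        """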
        with nullcontext() if self.truncate_dim is None else model.truncate_sentence_embeddings(self.truncate_dim):
            try:
                # If the sentences are hashable, then we can use a set to avoid embedding the same sentences multiple
                # times
                sentences = list(set(self.sentences1 + self.sentences2))
            except TypeError:
                # Otherwise we just embed everything, e.g. if the sentences are images for evaluating a CLIP model
                embeddings = model.encode(
                    self.sentences1 + self.sentences2,
                    batch_size=self.batch_size,
                    show_progress_bar=self.show_progress_bar,
                    convert_to_numpy=True,
                )
                embeddings1 = embeddings[: len(self.sentences1)]
                embeddings2 = embeddings[len(self.sentences1) :]
            else:
                embeddings = model.encode(
                    sentences,
                    batch_size=self.batch_size,
                    show_progress_bar=self.show_progress_bar,
                    convert_to_numpy=True,
                )
                emb_dict = {sent: emb for sent, emb in zip(sentences, embeddings)}
                embeddings1 = [emb_dict[sent] for sent in self.sentences1]
                embeddings2 = [emb_dict[sent] for sent in self.sentences2]

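        # Paired scores: one similarity/distance value per (sentences1[i], sentences2[i]) pair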
        cosine_scores = 1 - paired_cosine_distances(embeddings1, embeddings2)
        manhattan_distances = paired_manhattan_distances(embeddings1, embeddings2)
        euclidean_distances = paired_euclidean_distances(embeddings1, embeddings2)

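        # Dot-product scores computed as the row-wise sum of elementwise products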
        embeddings1_np = np.asarray(embeddings1)
        embeddings2_np = np.asarray(embeddings2)
        dot_scores = np.sum(embeddings1_np * embeddings2_np, axis=-1)

        labels = np.asarray(self.labels)
        output_scores = {}
        for short_name, name, scores, reverse in [
            [SimilarityFunction.COSINE.value, "Cosine-Similarity", cosine_scores, True],
            [SimilarityFunction.DOT_PRODUCT.value, "Dot-Product", dot_scores, True],
            [SimilarityFunction.MANHATTAN.value, "Manhattan-Distance", manhattan_distances, False],
            [SimilarityFunction.EUCLIDEAN.value, "Euclidean-Distance", euclidean_distances, False],
        ]:
            acc, acc_threshold = self.find_best_acc_and_threshold(scores, labels, reverse)
            f1, precision, recall, f1_threshold = self.find_best_f1_and_threshold(scores, labels, reverse)
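            # average_precision_score expects higher scores for the positive class, so distance-based scores are negated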
            ap = average_precision_score(labels, scores * (1 if reverse else -1))

            logger.info(
                "Accuracy with {}:           {:.2f}\t(Threshold: {:.4f})".format(name, acc * 100, acc_threshold)
            )
            logger.info("F1 with {}:                 {:.2f}\t(Threshold: {:.4f})".format(name, f1 * 100, f1_threshold))
            logger.info("Precision with {}:          {:.2f}".format(name, precision * 100))
            logger.info("Recall with {}:             {:.2f}".format(name, recall * 100))
            logger.info("Average Precision with {}:  {:.2f}\n".format(name, ap * 100))

            output_scores[short_name] = {
                "accuracy": acc,
                "accuracy_threshold": acc_threshold,
                "f1": f1,
                "f1_threshold": f1_threshold,
                "precision": precision,
                "recall": recall,
                "ap": ap,
            }

        return output_scores

    @staticmethod
    def find_best_acc_and_threshold(scores, labels, high_score_more_similar: bool):
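        """
        Find the threshold that maximizes accuracy: sort the pairs by score, then sweep candidate thresholds
        placed midway between consecutive scores while incrementally tracking how many positives fall on the
        "similar" side and how many negatives remain on the "dissimilar" side. ``high_score_more_similar``
        indicates whether larger scores mean more similar pairs (similarities) or less similar pairs (distances).
        """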
        assert len(scores) == len(labels)
        rows = list(zip(scores, labels))

        rows = sorted(rows, key=lambda x: x[0], reverse=high_score_more_similar)

        max_acc = 0
        best_threshold = -1

        positive_so_far = 0
        remaining_negatives = sum(labels == 0)

        for i in range(len(rows) - 1):
            score, label = rows[i]
            if label == 1:
                positive_so_far += 1
            else:
                remaining_negatives -= 1

            acc = (positive_so_far + remaining_negatives) / len(labels)
            if acc > max_acc:
                max_acc = acc
                best_threshold = (rows[i][0] + rows[i + 1][0]) / 2

        return max_acc, best_threshold

    @staticmethod
    def find_best_f1_and_threshold(scores, labels, high_score_more_similar: bool):
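        """
        Find the threshold that maximizes F1 using the same sorted sweep as ``find_best_acc_and_threshold``:
        pairs are processed from most to least similar, each processed pair counts as a predicted positive, and
        precision, recall, and F1 are updated after every step.
        """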
        assert len(scores) == len(labels)

        scores = np.asarray(scores)
        labels = np.asarray(labels)

        rows = list(zip(scores, labels))

        rows = sorted(rows, key=lambda x: x[0], reverse=high_score_more_similar)

        best_f1 = best_precision = best_recall = 0
        threshold = 0
        nextract = 0
        ncorrect = 0
        total_num_duplicates = sum(labels)

        for i in range(len(rows) - 1):
            score, label = rows[i]
            nextract += 1

            if label == 1:
                ncorrect += 1

            if ncorrect > 0:
                precision = ncorrect / nextract
                recall = ncorrect / total_num_duplicates
                f1 = 2 * precision * recall / (precision + recall)
                if f1 > best_f1:
                    best_f1 = f1
                    best_precision = precision
                    best_recall = recall
                    threshold = (rows[i][0] + rows[i + 1][0]) / 2

        return best_f1, best_precision, best_recall, threshold