"git@developer.sourcefind.cn:zhaoyu6/sglang.git" did not exist on "c913ed4046ddaae410b9e94f0186a825fa17e69a"
Commit 0fccd232 authored by Rayyyyy's avatar Rayyyyy
Browse files

First add

parents
Pipeline #1027 failed with stages
in 0 seconds
from contextlib import nullcontext
from typing import List
import pytest
from sentence_transformers import SentenceTransformer, InputExample, losses
import tqdm
from transformers import set_seed
import torch
from torch.optim import Adam
@pytest.mark.parametrize(
["train_samples_mnrl", "train_samples_cmnrl", "same_grad", "scaler", "precision"],
[
(
[
InputExample(texts=[q, p, n])
for q, p, n in zip(
["aaa", "bbb", "ccc", "ddd", "eee"],
["aas", "bbs", "ccs", "dds", "ees"],
["xxx", "yyy", "zzz", "kkk", "fff"],
)
],
[
InputExample(texts=[q, p, n])
for q, p, n in zip(
["aaa", "bbb", "ccc", "ddd", "eee"],
["aas", "bbs", "ccs", "dds", "ees"],
["xxx", "yyy", "zzz", "kkk", "fff"],
)
],
True,
1.0,
1e-6,
),
(
[
InputExample(texts=[q, p, n])
for q, p, n in zip(
["adsa", "czx", "dsada"],
["b", "fas", "xcz"],
["c", "yyy", "asdas"],
)
],
[
InputExample(texts=[q, p, n])
for q, p, n in zip(
["aaa", "bbb", "ccc", "ddd", "eee"],
["aas", "bbs", "ccs", "dds", "ees"],
["xxx", "yyy", "zzz", "kkk", "fff"],
)
],
False,
1.0,
1e-6,
),
(
[
InputExample(texts=[q, p, n])
for q, p, n in zip(
["aaa", "bbb", "ccc", "ddd", "eee"],
["aas", "bbs", "ccs", "dds", "ees"],
["xxx", "yyy", "zzz", "kkk", "fff"],
)
],
[
InputExample(texts=[q, p, n])
for q, p, n in zip(
["aaa", "bbb", "ccc", "ddd", "eee"],
["aas", "bbs", "ccs", "dds", "ees"],
["xxx", "yyy", "zzz", "kkk", "fff"],
)
],
True,
1000.0,
1e-3,
),
],
)
def test_cmnrl_same_grad(
train_samples_mnrl: List[InputExample],
train_samples_cmnrl: List[InputExample],
same_grad: bool,
scaler: float,
precision: float,
):
# Given:
sbert = SentenceTransformer("distilbert-base-uncased")
sbert.to("cpu")
optimizer = Adam(sbert.parameters())
# train_samples_mnrl
# train_samples_cmnrl
# same_grad
# scaler # This simulates AMP scenarios
# precision
# When:
# First run with MNRL
set_seed(42)
optimizer.zero_grad()
loss_mnrl = losses.MultipleNegativesRankingLoss(sbert)
loss_mnrl_value: torch.Tensor = loss_mnrl.forward(*sbert.smart_batching_collate(train_samples_mnrl)) * scaler
loss_mnrl_value.backward()
grad_expected = {name: p.grad.clone() for name, p in loss_mnrl.named_parameters() if p.grad is not None}
# Then run with this cached version:
set_seed(42)
optimizer.zero_grad()
loss_cmnrl = losses.CachedMultipleNegativesRankingLoss(sbert, mini_batch_size=2)
loss_cmnrl_value = loss_cmnrl.forward(*sbert.smart_batching_collate(train_samples_cmnrl)) * scaler
loss_cmnrl_value.backward()
grad = {name: p.grad.clone() for name, p in loss_cmnrl.named_parameters() if p.grad is not None}
# Then:
if same_grad:
assert pytest.approx(loss_mnrl_value.item()) == loss_cmnrl_value.item()
else:
assert pytest.approx(loss_mnrl_value.item()) != loss_cmnrl_value.item()
nclose = 0
for name in tqdm.tqdm(grad_expected):
nclose += torch.allclose(grad[name], grad_expected[name], precision, precision)
if same_grad:
assert nclose == len(grad_expected)
else:
assert nclose != len(grad_expected)
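# A minimal training sketch for the cached loss exercised above (not part of the test):
# CachedMultipleNegativesRankingLoss is a drop-in replacement for MultipleNegativesRankingLoss
# that trades extra compute for lower memory by embedding in mini-batches and replaying the
# forward pass. The model name, example texts, and batch sizes below are illustrative assumptions.
def example_train_with_cached_mnrl():
    from torch.utils.data import DataLoader
    from sentence_transformers import SentenceTransformer, InputExample, losses

    model = SentenceTransformer("distilbert-base-uncased")
    train_examples = [
        InputExample(texts=["query one", "relevant passage one", "hard negative one"]),
        InputExample(texts=["query two", "relevant passage two", "hard negative two"]),
    ]
    train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=64)
    # mini_batch_size bounds peak memory; gradients should match the non-cached loss up to numerics.
    train_loss = losses.CachedMultipleNegativesRankingLoss(model, mini_batch_size=16)
    model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=1, warmup_steps=10)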
@pytest.mark.parametrize("use_rand_context", [True, False])
def test_rand_context_working(use_rand_context: bool):
# Given:
from sentence_transformers.losses.CachedMultipleNegativesRankingLoss import (
RandContext,
)
a = torch.Tensor(1)
b = torch.Tensor(1)
random_state = RandContext(a, b) if use_rand_context else nullcontext()
expected = torch.rand(1000)
precision = 1e-6
# When:
with random_state:
# Then:
if use_rand_context:
assert torch.allclose(torch.rand(1000), expected, precision, precision)
else:
assert not torch.allclose(torch.rand(1000), expected, precision, precision)
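# A standalone sketch of the RandContext behaviour verified above (an illustrative assumption,
# not part of the test): constructing RandContext snapshots the RNG state, and re-entering the
# context replays the same random draws.
def example_rand_context_replay():
    from sentence_transformers.losses.CachedMultipleNegativesRankingLoss import RandContext

    state = RandContext(torch.zeros(1))  # snapshot of the current RNG state
    first_draw = torch.rand(5)           # advances the global RNG
    with state:                          # restores the snapshot
        replay = torch.rand(5)           # reproduces first_draw
    assert torch.allclose(first_draw, replay)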
"""
Computes embeddings
"""
import numpy as np
from sentence_transformers import SentenceTransformer
from sentence_transformers.util import get_device_name
def test_encode_token_embeddings(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""
Test that encode(output_value='token_embeddings') works
:return:
"""
model = paraphrase_distilroberta_base_v1_model
sent = [
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
"Sentences",
"Sentence five five five five five five five",
]
emb = model.encode(sent, output_value="token_embeddings", batch_size=2)
assert len(emb) == len(sent)
device = get_device_name()
if device == "hpu":
for s, e in zip(sent, emb):
assert len(model.tokenize([s])["input_ids"][0]) == model.get_max_seq_length()
else:
for s, e in zip(sent, emb):
assert len(model.tokenize([s])["input_ids"][0]) == e.shape[0]
def test_encode_single_sentences(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
# Single sentence
emb = model.encode("Hello Word, a test sentence")
assert emb.shape == (768,)
assert abs(np.sum(emb) - 7.9811716) < 0.002
# Single sentence as list
emb = model.encode(["Hello Word, a test sentence"])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 7.9811716) < 0.002
# Sentence list
emb = model.encode(
[
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
]
)
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 22.968266) < 0.007
def test_encode_normalize(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
emb = model.encode(
[
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
],
normalize_embeddings=True,
)
assert emb.shape == (3, 768)
for norm in np.linalg.norm(emb, axis=1):
assert abs(norm - 1) < 0.001
def test_encode_tuple_sentences(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
# Input a sentence tuple
emb = model.encode([("Hello Word, a test sentence", "Second input for model")])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 9.503508) < 0.002
# List of sentence tuples
emb = model.encode(
[
("Hello Word, a test sentence", "Second input for model"),
("My second tuple", "With two inputs"),
("Final tuple", "final test"),
]
)
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 32.14627) < 0.002
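# A short follow-up sketch (not part of the tests above): embeddings returned by encode()
# are typically compared with cosine similarity; util is imported locally because this file
# only imports numpy and SentenceTransformer.
def example_compare_embeddings(model: SentenceTransformer) -> None:
    from sentence_transformers import util

    emb = model.encode(["Hello Word, a test sentence", "Here comes another sentence"])
    print(util.cos_sim(emb[0], emb[1]))  # a 1x1 tensor with the cosine similarity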
"""
Tests that the pretrained models produce the correct scores on the STSbenchmark dataset
"""
import csv
import gzip
import os
from pathlib import Path
import tempfile
import pytest
import torch
from torch.utils.data import DataLoader
from sentence_transformers import CrossEncoder, util
from sentence_transformers.cross_encoder.evaluation import CECorrelationEvaluator
from sentence_transformers.readers import InputExample
from typing import Generator, List, Tuple
@pytest.fixture()
def sts_resource() -> Generator[Tuple[List[InputExample], List[InputExample]], None, None]:
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
stsb_train_samples = []
stsb_test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "test":
stsb_test_samples.append(inp_example)
elif row["split"] == "train":
stsb_train_samples.append(inp_example)
yield stsb_train_samples, stsb_test_samples
def evaluate_stsb_test(
distilroberta_base_ce_model: CrossEncoder,
expected_score: float,
test_samples: List[InputExample],
num_test_samples: int = -1,
) -> None:
model = distilroberta_base_ce_model
evaluator = CECorrelationEvaluator.from_input_examples(test_samples[:num_test_samples], name="sts-test")
score = evaluator(model) * 100
print("STS-Test Performance: {:.2f} vs. exp: {:.2f}".format(score, expected_score))
assert score > expected_score or abs(score - expected_score) < 0.1
def test_pretrained_stsb(sts_resource: Tuple[List[InputExample], List[InputExample]]):
_, sts_test_samples = sts_resource
model = CrossEncoder("cross-encoder/stsb-distilroberta-base")
evaluate_stsb_test(model, 87.92, sts_test_samples)
@pytest.mark.slow
def test_train_stsb_slow(
distilroberta_base_ce_model: CrossEncoder, sts_resource: Tuple[List[InputExample], List[InputExample]]
) -> None:
model = distilroberta_base_ce_model
sts_train_samples, sts_test_samples = sts_resource
train_dataloader = DataLoader(sts_train_samples, shuffle=True, batch_size=16)
model.fit(
train_dataloader=train_dataloader,
epochs=1,
warmup_steps=int(len(train_dataloader) * 0.1),
)
evaluate_stsb_test(model, 75, sts_test_samples)
def test_train_stsb(
distilroberta_base_ce_model: CrossEncoder, sts_resource: Tuple[List[InputExample], List[InputExample]]
) -> None:
model = distilroberta_base_ce_model
sts_train_samples, sts_test_samples = sts_resource
train_dataloader = DataLoader(sts_train_samples[:500], shuffle=True, batch_size=16)
model.fit(
train_dataloader=train_dataloader,
epochs=1,
warmup_steps=int(len(train_dataloader) * 0.1),
)
evaluate_stsb_test(model, 50, sts_test_samples, num_test_samples=100)
def test_classifier_dropout_is_set() -> None:
model = CrossEncoder("cross-encoder/stsb-distilroberta-base", classifier_dropout=0.1234)
assert model.config.classifier_dropout == 0.1234
assert model.model.config.classifier_dropout == 0.1234
def test_classifier_dropout_default_value() -> None:
model = CrossEncoder("cross-encoder/stsb-distilroberta-base")
assert model.config.classifier_dropout is None
assert model.model.config.classifier_dropout is None
def test_load_with_revision() -> None:
model_name = "sentence-transformers-testing/stsb-bert-tiny-safetensors"
main_model = CrossEncoder(model_name, num_labels=1, revision="main")
latest_model = CrossEncoder(
model_name,
num_labels=1,
revision="f3cb857cba53019a20df283396bcca179cf051a4",
)
older_model = CrossEncoder(
model_name,
num_labels=1,
revision="ba33022fdf0b0fc2643263f0726f44d0a07d0e24",
)
# Set the classifier.bias and classifier.weight equal among models. This
# is needed because the AutoModelForSequenceClassification randomly initializes
# the classifier.bias and classifier.weight for each (model) initialization.
# The test is only possible if all models have the same classifier.bias
# and classifier.weight parameters.
latest_model.model.classifier.bias = main_model.model.classifier.bias
latest_model.model.classifier.weight = main_model.model.classifier.weight
older_model.model.classifier.bias = main_model.model.classifier.bias
older_model.model.classifier.weight = main_model.model.classifier.weight
test_sentences = [["Hello there!", "Hello, World!"]]
main_prob = main_model.predict(test_sentences, convert_to_tensor=True)
assert torch.equal(main_prob, latest_model.predict(test_sentences, convert_to_tensor=True))
assert not torch.equal(main_prob, older_model.predict(test_sentences, convert_to_tensor=True))
def test_rank() -> None:
model = CrossEncoder("cross-encoder/stsb-distilroberta-base")
# We want to compute the similarity between the query sentence
query = "A man is eating pasta."
# With all sentences in the corpus
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"The girl is carrying a baby.",
"A man is riding a horse.",
"A woman is playing violin.",
"Two men pushed carts through the woods.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"A cheetah is running behind its prey.",
]
expected_ranking = [0, 1, 3, 6, 2, 5, 7, 4, 8]
# 1. We rank all sentences in the corpus for the query
ranks = model.rank(query, corpus)
pred_ranking = [rank["corpus_id"] for rank in ranks]
assert pred_ranking == expected_ranking
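# A minimal sketch of consuming CrossEncoder.rank output (illustrative, not asserted by the
# test above): each hit is a dict with "corpus_id" and "score", sorted from most to least
# relevant. The optional top_k argument is an assumption about the public API.
def example_print_ranking() -> None:
    model = CrossEncoder("cross-encoder/stsb-distilroberta-base")
    query = "A man is eating pasta."
    corpus = ["A man is eating food.", "A monkey is playing drums."]
    for hit in model.rank(query, corpus):
        print("{:.4f}\t{}".format(hit["score"], corpus[hit["corpus_id"]]))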
@pytest.mark.parametrize("safe_serialization", [True, False, None])
def test_safe_serialization(safe_serialization: bool) -> None:
with tempfile.TemporaryDirectory() as cache_folder:
model = CrossEncoder("cross-encoder/stsb-distilroberta-base")
if safe_serialization:
model.save(cache_folder, safe_serialization=safe_serialization)
model_files = list(Path(cache_folder).glob("**/model.safetensors"))
assert 1 == len(model_files)
elif safe_serialization is None:
model.save(cache_folder)
model_files = list(Path(cache_folder).glob("**/model.safetensors"))
assert 1 == len(model_files)
else:
model.save(cache_folder, safe_serialization=safe_serialization)
model_files = list(Path(cache_folder).glob("**/pytorch_model.bin"))
assert 1 == len(model_files)
"""
Tests the correct computation of evaluation scores from BinaryClassificationEvaluator
"""
import csv
import gzip
import os
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from torch.utils.data import DataLoader
from sentence_transformers import (
InputExample,
SentenceTransformer,
evaluation,
losses,
util,
)
def test_BinaryClassificationEvaluator_find_best_f1_and_threshold() -> None:
"""Tests that the F1 score for the computed threshold is correct"""
y_true = np.random.randint(0, 2, 1000)
y_pred_cosine = np.random.randn(1000)
(
best_f1,
best_precision,
best_recall,
threshold,
) = evaluation.BinaryClassificationEvaluator.find_best_f1_and_threshold(
y_pred_cosine, y_true, high_score_more_similar=True
)
y_pred_labels = [1 if pred >= threshold else 0 for pred in y_pred_cosine]
sklearn_f1score = f1_score(y_true, y_pred_labels)
assert np.abs(best_f1 - sklearn_f1score) < 1e-6
def test_BinaryClassificationEvaluator_find_best_accuracy_and_threshold() -> None:
"""Tests that the Acc score for the computed threshold is correct"""
y_true = np.random.randint(0, 2, 1000)
y_pred_cosine = np.random.randn(1000)
(
max_acc,
threshold,
) = evaluation.BinaryClassificationEvaluator.find_best_acc_and_threshold(
y_pred_cosine, y_true, high_score_more_similar=True
)
y_pred_labels = [1 if pred >= threshold else 0 for pred in y_pred_cosine]
sklearn_acc = accuracy_score(y_true, y_pred_labels)
assert np.abs(max_acc - sklearn_acc) < 1e-6
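# A minimal end-to-end sketch of BinaryClassificationEvaluator itself, as opposed to the
# static threshold helpers tested above (the model name and toy pairs are illustrative
# assumptions): labelled pairs are scored with a bi-encoder and the evaluator returns the
# best achievable classification metric.
def example_binary_classification_evaluator() -> None:
    model = SentenceTransformer("paraphrase-distilroberta-base-v1")
    examples = [
        InputExample(texts=["Hello World", "Hello World!"], label=1),
        InputExample(texts=["Hello World", "A cat sits on the mat"], label=0),
    ]
    evaluator = evaluation.BinaryClassificationEvaluator.from_input_examples(examples, name="toy")
    print(evaluator(model))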
def test_LabelAccuracyEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""Tests that the LabelAccuracyEvaluator can be loaded correctly"""
model = paraphrase_distilroberta_base_v1_model
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "train":
label_id = label2int[row["label"]]
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
if len(dev_samples) >= 100:
break
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=len(label2int),
)
dev_dataloader = DataLoader(dev_samples, shuffle=False, batch_size=16)
evaluator = evaluation.LabelAccuracyEvaluator(dev_dataloader, softmax_model=train_loss)
acc = evaluator(model)
assert acc > 0.2
def test_ParaphraseMiningEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""Tests that the ParaphraseMiningEvaluator can be loaded"""
model = paraphrase_distilroberta_base_v1_model
sentences = {
0: "Hello World",
1: "Hello World!",
2: "The cat is on the table",
3: "On the table the cat is",
}
data_eval = evaluation.ParaphraseMiningEvaluator(sentences, [(0, 1), (2, 3)])
score = data_eval(model)
assert score > 0.99
"""
Compute image embeddings
"""
import os
from PIL import Image
from sentence_transformers import util, SentenceTransformer
def test_simple_encode(clip_vit_b_32_model: SentenceTransformer) -> None:
model = clip_vit_b_32_model
# Encode an image:
image_filepath = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"../examples/applications/image-search/two_dogs_in_snow.jpg",
)
img_emb = model.encode(Image.open(image_filepath))
# Encode text descriptions
text_emb = model.encode(["Two dogs in the snow", "A cat on a table", "A picture of London at night"])
# Compute cosine similarities
cos_scores = util.cos_sim(img_emb, text_emb)[0]
assert abs(cos_scores[0] - 0.3069) < 0.01
assert abs(cos_scores[1] - 0.1010) < 0.01
assert abs(cos_scores[2] - 0.1086) < 0.01
"""
Computes embeddings
"""
import numpy as np
import pytest
from typing import Optional
from sentence_transformers import SentenceTransformer
@pytest.mark.parametrize("normalize_embeddings", (False, True))
@pytest.mark.parametrize("prompt_name", (None, "retrieval"))
def test_encode_multi_process(
stsb_bert_tiny_model: SentenceTransformer, normalize_embeddings: bool, prompt_name: Optional[str]
) -> None:
model = stsb_bert_tiny_model
model.prompts = {"retrieval": "Represent this sentence for searching relevant passages: "}
sentences = ["This is sentence {}".format(i) for i in range(40)]
# Start the multi-process pool on e.g. two CPU devices & compute the embeddings using the pool
pool = model.start_multi_process_pool(["cpu", "cpu"])
emb = model.encode_multi_process(
sentences, pool, chunk_size=10, normalize_embeddings=normalize_embeddings, prompt_name=prompt_name
)
model.stop_multi_process_pool(pool)
assert emb.shape == (len(sentences), 128)
# Make sure the embeddings aren't just all 0
assert emb.sum() != 0.0
# Compare against normal embeddings
emb_normal = model.encode(sentences, normalize_embeddings=normalize_embeddings, prompt_name=prompt_name)
diff = np.max(np.abs(emb - emb_normal))
assert diff < 0.001
# Ensure that after normalizing, the means are all almost 0, and otherwise not
assert np.all(np.abs(emb.mean(1)) < 0.01) == normalize_embeddings
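# Outside of pytest, multi-process encoding should be launched from a main guard so spawned
# workers do not re-execute the pool setup on import. A minimal sketch; the model name is an
# illustrative assumption.
def example_encode_multi_process_main() -> None:
    model = SentenceTransformer("paraphrase-MiniLM-L6-v2")
    sentences = ["Sentence {}".format(i) for i in range(1000)]
    pool = model.start_multi_process_pool()  # all GPUs by default, otherwise CPU workers
    embeddings = model.encode_multi_process(sentences, pool, batch_size=32)
    model.stop_multi_process_pool(pool)
    print(embeddings.shape)

if __name__ == "__main__":
    example_encode_multi_process_main()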
"""
Tests that the pretrained models produce the correct scores on the STSbenchmark dataset
"""
import csv
import gzip
import os
from functools import partial
from typing import Optional
import pytest
from sentence_transformers import InputExample, SentenceTransformer, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
def pretrained_model_score(
model_name, expected_score: float, max_test_samples: int = 100, cache_dir: Optional[str] = None
) -> None:
model = SentenceTransformer(model_name, cache_folder=cache_dir)
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "test":
test_samples.append(inp_example)
if max_test_samples != -1 and len(test_samples) >= max_test_samples:
break
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
score = model.evaluate(evaluator) * 100
print(model_name, "{:.2f} vs. exp: {:.2f}".format(score, expected_score))
assert score > expected_score or abs(score - expected_score) < 0.1
pretrained_model_score = partial(pretrained_model_score, max_test_samples=100)
pretrained_model_score_slow = partial(pretrained_model_score, max_test_samples=-1)
@pytest.mark.slow
def test_bert_base_slow() -> None:
pretrained_model_score_slow("bert-base-nli-mean-tokens", 77.12)
pretrained_model_score_slow("bert-base-nli-max-tokens", 77.21)
pretrained_model_score_slow("bert-base-nli-cls-token", 76.30)
pretrained_model_score_slow("bert-base-nli-stsb-mean-tokens", 85.14)
@pytest.mark.slow
def test_bert_large_slow() -> None:
pretrained_model_score_slow("bert-large-nli-mean-tokens", 79.19)
pretrained_model_score_slow("bert-large-nli-max-tokens", 78.41)
pretrained_model_score_slow("bert-large-nli-cls-token", 78.29)
pretrained_model_score_slow("bert-large-nli-stsb-mean-tokens", 85.29)
@pytest.mark.slow
def test_roberta_slow() -> None:
pretrained_model_score_slow("roberta-base-nli-mean-tokens", 77.49)
pretrained_model_score_slow("roberta-large-nli-mean-tokens", 78.69)
pretrained_model_score_slow("roberta-base-nli-stsb-mean-tokens", 85.30)
pretrained_model_score_slow("roberta-large-nli-stsb-mean-tokens", 86.39)
@pytest.mark.slow
def test_distilbert_slow() -> None:
pretrained_model_score_slow("distilbert-base-nli-mean-tokens", 78.69)
pretrained_model_score_slow("distilbert-base-nli-stsb-mean-tokens", 85.16)
pretrained_model_score_slow("paraphrase-distilroberta-base-v1", 81.81)
@pytest.mark.slow
def test_multiling_slow() -> None:
pretrained_model_score_slow("distiluse-base-multilingual-cased", 80.75)
pretrained_model_score_slow("paraphrase-xlm-r-multilingual-v1", 83.50)
pretrained_model_score_slow("paraphrase-multilingual-MiniLM-L12-v2", 84.42)
@pytest.mark.slow
def test_mpnet_slow() -> None:
pretrained_model_score_slow("paraphrase-mpnet-base-v2", 86.99)
@pytest.mark.slow
def test_other_models_slow() -> None:
pretrained_model_score_slow("average_word_embeddings_komninos", 61.56)
@pytest.mark.slow
def test_msmarco_slow() -> None:
pretrained_model_score_slow("msmarco-roberta-base-ance-firstp", 77.0)
pretrained_model_score_slow("msmarco-distilbert-base-v3", 78.85)
@pytest.mark.slow
def test_sentence_t5_slow() -> None:
pretrained_model_score_slow("sentence-t5-base", 85.52)
def test_bert_base(cache_dir) -> None:
pretrained_model_score("bert-base-nli-mean-tokens", 86.53, cache_dir=cache_dir)
pretrained_model_score("bert-base-nli-max-tokens", 87.00, cache_dir=cache_dir)
pretrained_model_score("bert-base-nli-cls-token", 85.93, cache_dir=cache_dir)
pretrained_model_score("bert-base-nli-stsb-mean-tokens", 89.26, cache_dir=cache_dir)
def test_bert_large(cache_dir) -> None:
pretrained_model_score("bert-large-nli-mean-tokens", 90.06, cache_dir=cache_dir)
pretrained_model_score("bert-large-nli-max-tokens", 90.15, cache_dir=cache_dir)
pretrained_model_score("bert-large-nli-cls-token", 89.51, cache_dir=cache_dir)
pretrained_model_score("bert-large-nli-stsb-mean-tokens", 92.27, cache_dir=cache_dir)
def test_roberta(cache_dir) -> None:
pretrained_model_score("roberta-base-nli-mean-tokens", 87.91, cache_dir=cache_dir)
pretrained_model_score("roberta-large-nli-mean-tokens", 89.41, cache_dir=cache_dir)
pretrained_model_score("roberta-base-nli-stsb-mean-tokens", 93.39, cache_dir=cache_dir)
pretrained_model_score("roberta-large-nli-stsb-mean-tokens", 91.26, cache_dir=cache_dir)
def test_distilbert(cache_dir) -> None:
pretrained_model_score("distilbert-base-nli-mean-tokens", 88.83, cache_dir=cache_dir)
pretrained_model_score("distilbert-base-nli-stsb-mean-tokens", 91.01, cache_dir=cache_dir)
pretrained_model_score("paraphrase-distilroberta-base-v1", 90.89, cache_dir=cache_dir)
def test_multiling(cache_dir) -> None:
pretrained_model_score("distiluse-base-multilingual-cased", 88.79, cache_dir=cache_dir)
pretrained_model_score("paraphrase-xlm-r-multilingual-v1", 92.76, cache_dir=cache_dir)
pretrained_model_score("paraphrase-multilingual-MiniLM-L12-v2", 92.64, cache_dir=cache_dir)
def test_mpnet(cache_dir) -> None:
pretrained_model_score("paraphrase-mpnet-base-v2", 92.83, cache_dir=cache_dir)
def test_other_models(cache_dir) -> None:
pretrained_model_score("average_word_embeddings_komninos", 68.97, cache_dir=cache_dir)
def test_msmarco(cache_dir) -> None:
pretrained_model_score("msmarco-roberta-base-ance-firstp", 83.61, cache_dir=cache_dir)
pretrained_model_score("msmarco-distilbert-base-v3", 87.96, cache_dir=cache_dir)
def test_sentence_t5(cache_dir) -> None:
pretrained_model_score("sentence-t5-base", 92.75, cache_dir=cache_dir)
"""
Tests general behaviour of the SentenceTransformer class
"""
from functools import partial
import json
import logging
import os
from pathlib import Path
import re
import tempfile
from typing import Dict, List, Literal, Optional, Union, cast
import numpy as np
import pytest
from huggingface_hub import HfApi, RepoUrl, GitRefs, GitRefInfo
import torch
from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Normalize, Transformer, Pooling
from sentence_transformers import util
def test_load_with_safetensors() -> None:
with tempfile.TemporaryDirectory() as cache_folder:
safetensors_model = SentenceTransformer(
"sentence-transformers-testing/stsb-bert-tiny-safetensors",
cache_folder=cache_folder,
)
# Only the safetensors file must be loaded
pytorch_files = list(Path(cache_folder).glob("**/pytorch_model.bin"))
assert 0 == len(pytorch_files), "PyTorch model file must not be downloaded."
safetensors_files = list(Path(cache_folder).glob("**/model.safetensors"))
assert 1 == len(safetensors_files), "Safetensors model file must be downloaded."
with tempfile.TemporaryDirectory() as cache_folder:
transformer = Transformer(
"sentence-transformers-testing/stsb-bert-tiny-safetensors",
cache_dir=cache_folder,
model_args={"use_safetensors": False},
)
pooling = Pooling(transformer.get_word_embedding_dimension())
pytorch_model = SentenceTransformer(modules=[transformer, pooling])
# Only the pytorch file must be loaded
pytorch_files = list(Path(cache_folder).glob("**/pytorch_model.bin"))
assert 1 == len(pytorch_files), "PyTorch model file must be downloaded."
safetensors_files = list(Path(cache_folder).glob("**/model.safetensors"))
assert 0 == len(safetensors_files), "Safetensors model file must not be downloaded."
sentences = ["This is a test sentence", "This is another test sentence"]
assert torch.equal(
safetensors_model.encode(sentences, convert_to_tensor=True),
pytorch_model.encode(sentences, convert_to_tensor=True),
), "Ensure that Safetensors and PyTorch loaded models result in identical embeddings"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA must be available to test moving devices effectively.")
def test_to() -> None:
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors", device="cpu")
test_device = torch.device("cuda")
assert model.device.type == "cpu"
assert test_device.type == "cuda"
model.to(test_device)
assert model.device.type == "cuda", "The model device should have updated"
model.encode("Test sentence")
assert model.device.type == "cuda", "Encoding shouldn't change the device"
assert model._target_device == model.device, "Prevent backwards compatibility failure for _target_device"
model._target_device = "cpu"
assert model.device.type == "cpu", "Ensure that setting `_target_device` doesn't crash."
def test_push_to_hub(monkeypatch: pytest.MonkeyPatch, caplog: pytest.LogCaptureFixture) -> None:
def mock_create_repo(self, repo_id, **kwargs):
return RepoUrl(f"https://huggingface.co/{repo_id}")
mock_upload_folder_kwargs = {}
def mock_upload_folder(self, **kwargs):
nonlocal mock_upload_folder_kwargs
mock_upload_folder_kwargs = kwargs
def mock_list_repo_refs(self, repo_id=None, **kwargs):
try:
git_ref_info = GitRefInfo(name="main", ref="refs/heads/main", target_commit="123456")
except TypeError:
git_ref_info = GitRefInfo(dict(name="main", ref="refs/heads/main", targetCommit="123456"))
# workaround for https://github.com/huggingface/huggingface_hub/issues/1956
git_ref_kwargs = {"branches": [git_ref_info], "converts": [], "tags": [], "pull_requests": None}
try:
return GitRefs(**git_ref_kwargs)
except TypeError:
git_ref_kwargs.pop("pull_requests")
return GitRefs(**git_ref_kwargs)
monkeypatch.setattr(HfApi, "create_repo", mock_create_repo)
monkeypatch.setattr(HfApi, "upload_folder", mock_upload_folder)
monkeypatch.setattr(HfApi, "list_repo_refs", mock_list_repo_refs)
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
url = model.push_to_hub("sentence-transformers-testing/stsb-bert-tiny-safetensors")
assert mock_upload_folder_kwargs["repo_id"] == "sentence-transformers-testing/stsb-bert-tiny-safetensors"
assert url == "https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors/commit/123456"
mock_upload_folder_kwargs.clear()
caplog.clear()
with caplog.at_level(logging.WARNING):
url = model.save_to_hub("sentence-transformers-testing/stsb-bert-tiny-safetensors")
assert mock_upload_folder_kwargs["repo_id"] == "sentence-transformers-testing/stsb-bert-tiny-safetensors"
assert url == "https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors/commit/123456"
mock_upload_folder_kwargs.clear()
assert len(caplog.record_tuples) == 1
assert (
caplog.record_tuples[0][2]
== "The `save_to_hub` method is deprecated and will be removed in a future version of SentenceTransformers. Please use `push_to_hub` instead for future model uploads."
)
with pytest.raises(
ValueError, match="Providing an `organization` to `save_to_hub` is deprecated, please only use `repo_id`."
):
model.save_to_hub("sentence-transformers-testing/stsb-bert-tiny-safetensors", organization="unrelated")
caplog.clear()
with caplog.at_level(logging.WARNING):
url = model.save_to_hub(
"sentence-transformers-testing/stsb-bert-tiny-safetensors", organization="sentence-transformers-testing"
)
assert mock_upload_folder_kwargs["repo_id"] == "sentence-transformers-testing/stsb-bert-tiny-safetensors"
assert url == "https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors/commit/123456"
assert len(caplog.record_tuples) == 2
assert (
caplog.record_tuples[0][2]
== "The `save_to_hub` method is deprecated and will be removed in a future version of SentenceTransformers. Please use `push_to_hub` instead for future model uploads."
)
assert (
caplog.record_tuples[1][2]
== 'Providing an `organization` to `save_to_hub` is deprecated, please only use `repo_id="sentence-transformers-testing/stsb-bert-tiny-safetensors"` instead.'
)
mock_upload_folder_kwargs.clear()
caplog.clear()
with caplog.at_level(logging.WARNING):
url = model.save_to_hub("stsb-bert-tiny-safetensors", organization="sentence-transformers-testing")
assert mock_upload_folder_kwargs["repo_id"] == "sentence-transformers-testing/stsb-bert-tiny-safetensors"
assert url == "https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors/commit/123456"
assert len(caplog.record_tuples) == 2
assert (
caplog.record_tuples[0][2]
== "The `save_to_hub` method is deprecated and will be removed in a future version of SentenceTransformers. Please use `push_to_hub` instead for future model uploads."
)
assert (
caplog.record_tuples[1][2]
== 'Providing an `organization` to `save_to_hub` is deprecated, please use `repo_id="sentence-transformers-testing/stsb-bert-tiny-safetensors"` instead.'
)
mock_upload_folder_kwargs.clear()
caplog.clear()
with caplog.at_level(logging.WARNING):
url = model.save_to_hub(
"sentence-transformers-testing/stsb-bert-tiny-safetensors", local_model_path="my_fake_local_model_path"
)
assert mock_upload_folder_kwargs["repo_id"] == "sentence-transformers-testing/stsb-bert-tiny-safetensors"
assert mock_upload_folder_kwargs["folder_path"] == "my_fake_local_model_path"
assert url == "https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors/commit/123456"
assert len(caplog.record_tuples) == 1
assert (
caplog.record_tuples[0][2]
== "The `save_to_hub` method is deprecated and will be removed in a future version of SentenceTransformers. Please use `push_to_hub` instead for future model uploads."
)
mock_upload_folder_kwargs.clear()
# Incorrect usage: Using deprecated "repo_name" positional argument
caplog.clear()
with caplog.at_level(logging.WARNING):
url = model.save_to_hub(repo_name="sentence-transformers-testing/stsb-bert-tiny-safetensors")
assert mock_upload_folder_kwargs["repo_id"] == "sentence-transformers-testing/stsb-bert-tiny-safetensors"
assert url == "https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors/commit/123456"
assert len(caplog.record_tuples) == 2
assert (
caplog.record_tuples[0][2]
== "Providing a `repo_name` keyword argument to `save_to_hub` is deprecated, please use `repo_id` instead."
)
assert (
caplog.record_tuples[1][2]
== "The `save_to_hub` method is deprecated and will be removed in a future version of SentenceTransformers. Please use `push_to_hub` instead for future model uploads."
)
mock_upload_folder_kwargs.clear()
# Incorrect usage: Use positional arguments from before "token" was introduced
caplog.clear()
with caplog.at_level(logging.WARNING):
url = model.save_to_hub(
"stsb-bert-tiny-safetensors", # repo_name
"sentence-transformers-testing", # organization
True, # private
commit_message="Adding new awesome Model!",
exist_ok=True,
)
assert mock_upload_folder_kwargs["repo_id"] == "sentence-transformers-testing/stsb-bert-tiny-safetensors"
assert mock_upload_folder_kwargs["commit_message"] == "Adding new awesome Model!"
assert url == "https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors/commit/123456"
assert len(caplog.record_tuples) == 2
assert (
caplog.record_tuples[0][2]
== "The `save_to_hub` method is deprecated and will be removed in a future version of SentenceTransformers. Please use `push_to_hub` instead for future model uploads."
)
assert (
caplog.record_tuples[1][2]
== 'Providing an `organization` to `save_to_hub` is deprecated, please use `repo_id="sentence-transformers-testing/stsb-bert-tiny-safetensors"` instead.'
)
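# A minimal sketch of the non-deprecated upload path covered by the mocks above. Only
# repo_id and commit_message, both of which appear in the mocked upload kwargs, are shown;
# the repo id is an illustrative assumption and requires write access plus a configured
# Hugging Face token.
def example_push_to_hub() -> None:
    model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
    model.push_to_hub("my-user/stsb-bert-tiny-copy", commit_message="Initial upload")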
@pytest.mark.parametrize("safe_serialization", [True, False, None])
def test_safe_serialization(safe_serialization: bool) -> None:
with tempfile.TemporaryDirectory() as cache_folder:
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
if safe_serialization:
model.save(cache_folder, safe_serialization=safe_serialization)
model_files = list(Path(cache_folder).glob("**/model.safetensors"))
assert 1 == len(model_files)
elif safe_serialization is None:
model.save(cache_folder)
model_files = list(Path(cache_folder).glob("**/model.safetensors"))
assert 1 == len(model_files)
else:
model.save(cache_folder, safe_serialization=safe_serialization)
model_files = list(Path(cache_folder).glob("**/pytorch_model.bin"))
assert 1 == len(model_files)
def test_load_with_revision() -> None:
main_model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors", revision="main")
latest_model = SentenceTransformer(
"sentence-transformers-testing/stsb-bert-tiny-safetensors", revision="f3cb857cba53019a20df283396bcca179cf051a4"
)
older_model = SentenceTransformer(
"sentence-transformers-testing/stsb-bert-tiny-safetensors", revision="ba33022fdf0b0fc2643263f0726f44d0a07d0e24"
)
test_sentence = ["Hello there!"]
main_embeddings = main_model.encode(test_sentence, convert_to_tensor=True)
assert torch.equal(main_embeddings, latest_model.encode(test_sentence, convert_to_tensor=True))
assert not torch.equal(main_embeddings, older_model.encode(test_sentence, convert_to_tensor=True))
def test_load_local_without_normalize_directory() -> None:
tiny_model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
tiny_model.add_module("Normalize", Normalize())
with tempfile.TemporaryDirectory() as tmp_folder:
model_path = Path(tmp_folder) / "tiny_model_local"
tiny_model.save(str(model_path))
assert (model_path / "2_Normalize").exists()
os.rmdir(model_path / "2_Normalize")
assert not (model_path / "2_Normalize").exists()
# Loading without the Normalize directory fails in v2.3.0 (regression check)
fresh_tiny_model = SentenceTransformer(str(model_path))
assert isinstance(fresh_tiny_model, SentenceTransformer)
def test_prompts(caplog: pytest.LogCaptureFixture) -> None:
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
assert model.prompts == {}
assert model.default_prompt_name is None
texts = ["How to bake a chocolate cake", "Symptoms of the flu"]
no_prompt_embedding = model.encode(texts)
prompt_embedding = model.encode([f"query: {text}" for text in texts])
assert not np.array_equal(no_prompt_embedding, prompt_embedding)
for query in ["query: ", "query:", "query: "]:
# Test prompt="... {}"
model.prompts = {}
assert np.array_equal(model.encode(texts, prompt=query), prompt_embedding)
# Test prompt_name="..."
model.prompts = {"query": query}
assert np.array_equal(model.encode(texts, prompt_name="query"), prompt_embedding)
caplog.clear()
# Test prompt_name="..." & prompt="..."
with caplog.at_level(logging.WARNING):
assert np.array_equal(model.encode(texts, prompt=query, prompt_name="query"), prompt_embedding)
assert len(caplog.record_tuples) == 1
assert (
caplog.record_tuples[0][2]
== "Encode with either a `prompt`, a `prompt_name`, or neither, but not both. "
"Ignoring the `prompt_name` in favor of `prompt`."
)
with pytest.raises(
ValueError,
match=re.escape(
"Prompt name 'invalid_prompt_name' not found in the configured prompts dictionary with keys ['query']."
),
):
model.encode(texts, prompt_name="invalid_prompt_name")
def test_save_load_prompts() -> None:
with pytest.raises(
ValueError,
match=re.escape(
"Default prompt name 'invalid_prompt_name' not found in the configured prompts dictionary with keys ['query']."
),
):
model = SentenceTransformer(
"sentence-transformers-testing/stsb-bert-tiny-safetensors",
prompts={"query": "query: "},
default_prompt_name="invalid_prompt_name",
)
model = SentenceTransformer(
"sentence-transformers-testing/stsb-bert-tiny-safetensors",
prompts={"query": "query: "},
default_prompt_name="query",
)
assert model.prompts == {"query": "query: "}
assert model.default_prompt_name == "query"
with tempfile.TemporaryDirectory() as tmp_folder:
model_path = Path(tmp_folder) / "tiny_model_local"
model.save(str(model_path))
config_path = model_path / "config_sentence_transformers.json"
assert config_path.exists()
with open(config_path, "r", encoding="utf8") as f:
saved_config = json.load(f)
assert saved_config["prompts"] == {"query": "query: "}
assert saved_config["default_prompt_name"] == "query"
fresh_model = SentenceTransformer(str(model_path))
assert fresh_model.prompts == {"query": "query: "}
assert fresh_model.default_prompt_name == "query"
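# A minimal usage sketch of the prompt configuration verified above: a named prompt is
# prepended at encode time, either implicitly via default_prompt_name or explicitly via
# prompt_name, and both paths should produce identical embeddings.
def example_prompt_usage() -> None:
    model = SentenceTransformer(
        "sentence-transformers-testing/stsb-bert-tiny-safetensors",
        prompts={"query": "query: "},
        default_prompt_name="query",
    )
    emb_default = model.encode("How to bake a chocolate cake")  # uses the default "query" prompt
    emb_explicit = model.encode("How to bake a chocolate cake", prompt_name="query")
    assert np.array_equal(emb_default, emb_explicit)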
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA must be available to test float16 support.")
def test_encode_fp16() -> None:
tiny_model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
tiny_model.half()
embeddings = tiny_model.encode(["Hello there!"], convert_to_tensor=True)
assert embeddings.dtype == torch.float16
@pytest.mark.parametrize("convert_to_tensor", [True, False])
@pytest.mark.parametrize("convert_to_numpy", [True, False])
@pytest.mark.parametrize(
("precision", "expected_torch_dtype", "expected_numpy_dtype"),
[
(None, torch.float32, np.float32),
("float32", torch.float32, np.float32),
("int8", torch.int8, np.int8),
("uint8", torch.uint8, np.uint8),
("binary", torch.int8, np.int8),
("ubinary", torch.uint8, np.uint8),
],
)
def test_encode_quantization(
stsb_bert_tiny_model_reused: SentenceTransformer,
convert_to_tensor: bool,
convert_to_numpy: bool,
precision: str,
expected_torch_dtype,
expected_numpy_dtype,
) -> None:
tiny_model = stsb_bert_tiny_model_reused
embeddings = tiny_model.encode(
["One sentence", "Another sentence"],
convert_to_tensor=convert_to_tensor,
convert_to_numpy=convert_to_numpy,
precision=precision,
)
if convert_to_tensor:
assert embeddings[0].dtype == expected_torch_dtype
assert isinstance(embeddings, torch.Tensor)
elif convert_to_numpy:
assert embeddings[0].dtype == expected_numpy_dtype
assert isinstance(embeddings, np.ndarray)
else:
assert embeddings[0].dtype == expected_torch_dtype
assert isinstance(embeddings, list)
@pytest.mark.parametrize("sentences", ("Single sentence", ["One sentence", "Another sentence"]))
@pytest.mark.parametrize("convert_to_tensor", [True, False])
@pytest.mark.parametrize("convert_to_numpy", [True, False])
@pytest.mark.parametrize("normalize_embeddings", [True, False])
@pytest.mark.parametrize("output_value", ["sentence_embedding", None])
def test_encode_truncate(
sentences: Union[str, List[str]],
convert_to_tensor: bool,
convert_to_numpy: bool,
normalize_embeddings: bool,
output_value: Optional[Literal["sentence_embedding"]],
) -> None:
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
embeddings_full_unnormalized: torch.Tensor = model.encode(
sentences, convert_to_numpy=False, convert_to_tensor=True
) # These are raw embeddings which serve as the reference to test against
def test(model: SentenceTransformer, expected_dim: int):
outputs = model.encode(
sentences,
output_value=output_value,
convert_to_tensor=convert_to_tensor,
convert_to_numpy=convert_to_numpy,
normalize_embeddings=normalize_embeddings,
)
# Extract the sentence embeddings out of outputs
if output_value is None:
# output_value=None returns the full feature dict(s); extract the sentence embeddings
if not isinstance(outputs, List):
embeddings = outputs["sentence_embedding"]
else:
outputs = cast(List[Dict[str, torch.Tensor]], outputs)
# TODO: can overload model.encode if ppl want type checker compatibility
embeddings = [out_features["sentence_embedding"] for out_features in outputs]
else:
embeddings = outputs
# Test shape
if isinstance(embeddings, list): # list of tensors
embeddings_shape = (len(embeddings), embeddings[0].shape[-1])
else:
embeddings_shape = embeddings.shape
expected_shape = (expected_dim,) if isinstance(sentences, str) else (len(sentences), expected_dim)
assert embeddings_shape == expected_shape
assert model.get_sentence_embedding_dimension() == expected_dim
# Convert embeddings to a torch Tensor for ease of testing
if isinstance(embeddings, list):
embeddings = torch.stack(embeddings)
elif isinstance(embeddings, np.ndarray):
embeddings = torch.from_numpy(embeddings).to(embeddings_full_unnormalized.device)
# On a non-cpu device, the device of torch.from_numpy(embeddings) is always CPU
# Test content
if normalize_embeddings:
if output_value is None:
# Currently, normalization is not performed; it's the raw output of the forward pass
pass
else:
normalize = partial(torch.nn.functional.normalize, p=2, dim=-1)
assert torch.allclose(
embeddings,
normalize(util.truncate_embeddings(embeddings_full_unnormalized, expected_dim)),
)
else:
assert torch.allclose(embeddings, util.truncate_embeddings(embeddings_full_unnormalized, expected_dim))
# Test init w/o setting truncate_dim (it's None)
original_output_dim: int = model.get_sentence_embedding_dimension()
test(model, expected_dim=original_output_dim)
# Test init w/ a set truncate_dim
truncate_dim = int(original_output_dim / 4)
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors", truncate_dim=truncate_dim)
test(model, expected_dim=truncate_dim)
# Test setting the attribute after init to a greater dimension
new_truncate_dim = 2 * truncate_dim
model.truncate_dim = new_truncate_dim
test(model, expected_dim=new_truncate_dim)
# Test context manager
final_truncate_dim = int(original_output_dim / 8)
with model.truncate_sentence_embeddings(final_truncate_dim):
test(model, expected_dim=final_truncate_dim)
test(model, expected_dim=new_truncate_dim) # b/c we've exited the context
# Test w/ an output_dim that's larger than the original_output_dim. No truncation ends up happening
model.truncate_dim = 2 * original_output_dim
test(model, expected_dim=original_output_dim)
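# A minimal usage sketch of the truncation behaviour tested above: truncate_dim can be set at
# load time, reassigned later, or applied temporarily via the context manager. The 64/32
# dimensions are illustrative assumptions (the tiny test model natively outputs 128 dimensions).
def example_truncate_dim_usage() -> None:
    model = SentenceTransformer(
        "sentence-transformers-testing/stsb-bert-tiny-safetensors", truncate_dim=64
    )
    print(model.encode("Hello there!").shape)  # (64,)
    with model.truncate_sentence_embeddings(32):
        print(model.encode("Hello there!").shape)  # (32,)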
"""
Tests that the pretrained models produce the correct scores on the STSbenchmark dataset
"""
import csv
import gzip
import os
from typing import Generator, List, Tuple
import pytest
from torch.utils.data import DataLoader
from sentence_transformers import (
SentencesDataset,
SentenceTransformer,
losses,
util,
)
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
@pytest.fixture()
def sts_resource() -> Generator[Tuple[List[InputExample], List[InputExample]], None, None]:
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
stsb_train_samples = []
stsb_test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as f:
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "test":
stsb_test_samples.append(inp_example)
elif row["split"] == "train":
stsb_train_samples.append(inp_example)
yield stsb_train_samples, stsb_test_samples
@pytest.fixture()
def nli_resource() -> Generator[List[InputExample], None, None]:
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
nli_train_samples = []
max_train_samples = 10000
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as f:
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "train":
label_id = label2int[row["label"]]
nli_train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
if len(nli_train_samples) >= max_train_samples:
break
yield nli_train_samples
def evaluate_stsb_test(model, expected_score, test_samples) -> None:
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
score = model.evaluate(evaluator) * 100
print("STS-Test Performance: {:.2f} vs. exp: {:.2f}".format(score, expected_score))
assert score > expected_score or abs(score - expected_score) < 0.1
@pytest.mark.slow
def test_train_stsb_slow(
distilbert_base_uncased_model: SentenceTransformer, sts_resource: Tuple[List[InputExample], List[InputExample]]
) -> None:
model = distilbert_base_uncased_model
sts_train_samples, sts_test_samples = sts_resource
train_dataset = SentencesDataset(sts_train_samples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=16)
train_loss = losses.CosineSimilarityLoss(model=model)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=None,
epochs=1,
evaluation_steps=1000,
warmup_steps=int(len(train_dataloader) * 0.1),
use_amp=True,
)
evaluate_stsb_test(model, 80.0, sts_test_samples)
@pytest.mark.skipif("CI" in os.environ, reason="This test is too slow for the CI (~8 minutes)")
def test_train_stsb(
distilbert_base_uncased_model: SentenceTransformer, sts_resource: Tuple[List[InputExample], List[InputExample]]
) -> None:
model = distilbert_base_uncased_model
sts_train_samples, sts_test_samples = sts_resource
train_dataset = SentencesDataset(sts_train_samples[:100], model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=16)
train_loss = losses.CosineSimilarityLoss(model=model)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=None,
epochs=1,
evaluation_steps=1000,
warmup_steps=int(len(train_dataloader) * 0.1),
use_amp=True,
)
evaluate_stsb_test(model, 60.0, sts_test_samples)
@pytest.mark.slow
def test_train_nli_slow(
distilbert_base_uncased_model: SentenceTransformer,
nli_resource: List[InputExample],
sts_resource: Tuple[List[InputExample], List[InputExample]],
):
model = distilbert_base_uncased_model
_, sts_test_samples = sts_resource
train_dataset = SentencesDataset(nli_resource, model=model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=16)
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=3,
)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=None,
epochs=1,
warmup_steps=int(len(train_dataloader) * 0.1),
use_amp=True,
)
evaluate_stsb_test(model, 50.0, sts_test_samples)
@pytest.mark.skipif("CI" in os.environ, reason="This test is too slow for the CI (~25 minutes)")
def test_train_nli(
distilbert_base_uncased_model: SentenceTransformer,
nli_resource: List[InputExample],
sts_resource: Tuple[List[InputExample], List[InputExample]],
):
model = distilbert_base_uncased_model
_, sts_test_samples = sts_resource
train_dataset = SentencesDataset(nli_resource[:100], model=model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=16)
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=3,
)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=None,
epochs=1,
warmup_steps=int(len(train_dataloader) * 0.1),
use_amp=True,
)
evaluate_stsb_test(model, 50.0, sts_test_samples)
import numpy as np
import sklearn.metrics.pairwise
import torch
from sentence_transformers import SentenceTransformer, util
def test_normalize_embeddings() -> None:
"""Tests the correct computation of util.normalize_embeddings"""
embedding_size = 100
a = torch.tensor(np.random.randn(50, embedding_size))
a_norm = util.normalize_embeddings(a)
for embedding in a_norm:
assert len(embedding) == embedding_size
emb_norm = torch.norm(embedding)
assert abs(emb_norm.item() - 1) < 0.0001
def test_pytorch_cos_sim() -> None:
"""Tests the correct computation of util.pytorch_cos_scores"""
a = np.random.randn(50, 100)
b = np.random.randn(50, 100)
sklearn_pairwise = sklearn.metrics.pairwise.cosine_similarity(a, b)
pytorch_cos_scores = util.pytorch_cos_sim(a, b).numpy()
for i in range(len(sklearn_pairwise)):
for j in range(len(sklearn_pairwise[i])):
assert abs(sklearn_pairwise[i][j] - pytorch_cos_scores[i][j]) < 0.001
def test_semantic_search() -> None:
"""Tests util.semantic_search function"""
num_queries = 20
num_k = 10
doc_emb = torch.tensor(np.random.randn(1000, 100))
q_emb = torch.tensor(np.random.randn(num_queries, 100))
hits = util.semantic_search(q_emb, doc_emb, top_k=num_k, query_chunk_size=5, corpus_chunk_size=17)
assert len(hits) == num_queries
assert len(hits[0]) == num_k
# Sanity Check of the results
cos_scores = util.pytorch_cos_sim(q_emb, doc_emb)
cos_scores_values, cos_scores_idx = cos_scores.topk(num_k)
cos_scores_values = cos_scores_values.cpu().tolist()
cos_scores_idx = cos_scores_idx.cpu().tolist()
for qid in range(num_queries):
for hit_num in range(num_k):
assert hits[qid][hit_num]["corpus_id"] == cos_scores_idx[qid][hit_num]
assert np.abs(hits[qid][hit_num]["score"] - cos_scores_values[qid][hit_num]) < 0.001
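# A minimal end-to-end sketch pairing encode() with util.semantic_search on real text,
# complementing the random-embedding check above (the model name matches the one used in
# test_paraphrase_mining below).
def example_semantic_search_with_model() -> None:
    model = SentenceTransformer("all-MiniLM-L6-v2")
    corpus = ["A man is eating food.", "A monkey is playing drums.", "The girl carries a baby."]
    corpus_emb = model.encode(corpus, convert_to_tensor=True)
    query_emb = model.encode(["Someone is having a meal"], convert_to_tensor=True)
    hits = util.semantic_search(query_emb, corpus_emb, top_k=2)[0]
    for hit in hits:
        print("{:.4f}\t{}".format(hit["score"], corpus[hit["corpus_id"]]))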
def test_paraphrase_mining() -> None:
model = SentenceTransformer("all-MiniLM-L6-v2")
sentences = [
"This is a test",
"This is a test!",
"The cat sits on mat",
"The cat sits on the mat",
"On the mat a cat sits",
"A man eats pasta",
"A woman eats pasta",
"A man eats spaghetti",
]
duplicates = util.paraphrase_mining(model, sentences)
for score, a, b in duplicates:
if score > 0.5:
assert (a, b) in [(0, 1), (2, 3), (2, 4), (3, 4), (5, 6), (5, 7), (6, 7)]
def test_pairwise_scores() -> None:
a = np.random.randn(50, 100)
b = np.random.randn(50, 100)
# Pairwise cos
sklearn_pairwise = 1 - sklearn.metrics.pairwise.paired_cosine_distances(a, b)
pytorch_cos_scores = util.pairwise_cos_sim(a, b).numpy()
assert np.allclose(sklearn_pairwise, pytorch_cos_scores)