Commit 24db6dab authored by Rayyyyy

first add
"""
Computes embeddings
"""
import numpy as np
import pytest
from typing import Optional
from sentence_transformers import SentenceTransformer
@pytest.mark.parametrize("normalize_embeddings", (False, True))
@pytest.mark.parametrize("prompt_name", (None, "retrieval"))
def test_encode_multi_process(
stsb_bert_tiny_model: SentenceTransformer, normalize_embeddings: bool, prompt_name: Optional[str]
) -> None:
model = stsb_bert_tiny_model
model.prompts = {"retrieval": "Represent this sentence for searching relevant passages: "}
sentences = ["This is sentence {}".format(i) for i in range(40)]
# Start the multi-process pool on e.g. two CPU devices & compute the embeddings using the pool
pool = model.start_multi_process_pool(["cpu", "cpu"])
emb = model.encode_multi_process(
sentences, pool, chunk_size=10, normalize_embeddings=normalize_embeddings, prompt_name=prompt_name
)
model.stop_multi_process_pool(pool)
assert emb.shape == (len(sentences), 128)
# Make sure the embeddings aren't just all 0
assert emb.sum() != 0.0
# Compare against normal embeddings
emb_normal = model.encode(sentences, normalize_embeddings=normalize_embeddings, prompt_name=prompt_name)
diff = np.max(np.abs(emb - emb_normal))
assert diff < 0.001
# Ensure that after normalizing, the means are all almost 0, and otherwise not
assert np.all(np.abs(emb.mean(1)) < 0.01) == normalize_embeddings
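# Illustrative sketch (not part of the original tests): the same multi-process API used
# standalone. The model name and sentences here are assumptions for demonstration only.
def _example_encode_multi_process() -> None:
    model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
    sentences = ["First example sentence", "Second example sentence"]
    # One worker process is started per listed device; "cpu" can be repeated.
    pool = model.start_multi_process_pool(["cpu", "cpu"])
    try:
        embeddings = model.encode_multi_process(sentences, pool, chunk_size=1)
    finally:
        # Always stop the pool so the worker processes are cleaned up.
        model.stop_multi_process_pool(pool)
    assert embeddings.shape[0] == len(sentences)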
"""
Tests that the pretrained models produce the correct scores on the STSbenchmark dataset
"""
import csv
import gzip
import os
from functools import partial
from typing import Optional
import pytest
from sentence_transformers import InputExample, SentenceTransformer, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
def pretrained_model_score(
model_name, expected_score: float, max_test_samples: int = 100, cache_dir: Optional[str] = None
) -> None:
model = SentenceTransformer(model_name, cache_folder=cache_dir)
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "test":
test_samples.append(inp_example)
if max_test_samples != -1 and len(test_samples) >= max_test_samples:
break
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
score = model.evaluate(evaluator) * 100
print(model_name, "{:.2f} vs. exp: {:.2f}".format(score, expected_score))
assert score > expected_score or abs(score - expected_score) < 0.1
pretrained_model_score = partial(pretrained_model_score, max_test_samples=100)
pretrained_model_score_slow = partial(pretrained_model_score, max_test_samples=-1)
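# Note (added for clarity): the two partials above only pre-fill `max_test_samples`. Keyword
# arguments passed to a functools.partial at call time override the stored ones, so wrapping the
# already-partial'ed `pretrained_model_score` with `max_test_samples=-1` correctly yields a slow
# variant that evaluates on the full STS test split instead of the first 100 samples.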
@pytest.mark.slow
def test_bert_base_slow() -> None:
pretrained_model_score_slow("bert-base-nli-mean-tokens", 77.12)
pretrained_model_score_slow("bert-base-nli-max-tokens", 77.21)
pretrained_model_score_slow("bert-base-nli-cls-token", 76.30)
pretrained_model_score_slow("bert-base-nli-stsb-mean-tokens", 85.14)
@pytest.mark.slow
def test_bert_large_slow() -> None:
pretrained_model_score_slow("bert-large-nli-mean-tokens", 79.19)
pretrained_model_score_slow("bert-large-nli-max-tokens", 78.41)
pretrained_model_score_slow("bert-large-nli-cls-token", 78.29)
pretrained_model_score_slow("bert-large-nli-stsb-mean-tokens", 85.29)
@pytest.mark.slow
def test_roberta_slow() -> None:
pretrained_model_score_slow("roberta-base-nli-mean-tokens", 77.49)
pretrained_model_score_slow("roberta-large-nli-mean-tokens", 78.69)
pretrained_model_score_slow("roberta-base-nli-stsb-mean-tokens", 85.30)
pretrained_model_score_slow("roberta-large-nli-stsb-mean-tokens", 86.39)
@pytest.mark.slow
def test_distilbert_slow() -> None:
pretrained_model_score_slow("distilbert-base-nli-mean-tokens", 78.69)
pretrained_model_score_slow("distilbert-base-nli-stsb-mean-tokens", 85.16)
pretrained_model_score_slow("paraphrase-distilroberta-base-v1", 81.81)
@pytest.mark.slow
def test_multiling_slow() -> None:
pretrained_model_score_slow("distiluse-base-multilingual-cased", 80.75)
pretrained_model_score_slow("paraphrase-xlm-r-multilingual-v1", 83.50)
pretrained_model_score_slow("paraphrase-multilingual-MiniLM-L12-v2", 84.42)
@pytest.mark.slow
def test_mpnet_slow() -> None:
pretrained_model_score_slow("paraphrase-mpnet-base-v2", 86.99)
@pytest.mark.slow
def test_other_models_slow() -> None:
pretrained_model_score_slow("average_word_embeddings_komninos", 61.56)
@pytest.mark.slow
def test_msmarco_slow() -> None:
pretrained_model_score_slow("msmarco-roberta-base-ance-firstp", 77.0)
pretrained_model_score_slow("msmarco-distilbert-base-v3", 78.85)
@pytest.mark.slow
def test_sentence_t5_slow() -> None:
pretrained_model_score_slow("sentence-t5-base", 85.52)
def test_bert_base(cache_dir) -> None:
pretrained_model_score("bert-base-nli-mean-tokens", 86.53, cache_dir=cache_dir)
pretrained_model_score("bert-base-nli-max-tokens", 87.00, cache_dir=cache_dir)
pretrained_model_score("bert-base-nli-cls-token", 85.93, cache_dir=cache_dir)
pretrained_model_score("bert-base-nli-stsb-mean-tokens", 89.26, cache_dir=cache_dir)
def test_bert_large(cache_dir) -> None:
pretrained_model_score("bert-large-nli-mean-tokens", 90.06, cache_dir=cache_dir)
pretrained_model_score("bert-large-nli-max-tokens", 90.15, cache_dir=cache_dir)
pretrained_model_score("bert-large-nli-cls-token", 89.51, cache_dir=cache_dir)
pretrained_model_score("bert-large-nli-stsb-mean-tokens", 92.27, cache_dir=cache_dir)
def test_roberta(cache_dir) -> None:
pretrained_model_score("roberta-base-nli-mean-tokens", 87.91, cache_dir=cache_dir)
pretrained_model_score("roberta-large-nli-mean-tokens", 89.41, cache_dir=cache_dir)
pretrained_model_score("roberta-base-nli-stsb-mean-tokens", 93.39, cache_dir=cache_dir)
pretrained_model_score("roberta-large-nli-stsb-mean-tokens", 91.26, cache_dir=cache_dir)
def test_distilbert(cache_dir) -> None:
pretrained_model_score("distilbert-base-nli-mean-tokens", 88.83, cache_dir=cache_dir)
pretrained_model_score("distilbert-base-nli-stsb-mean-tokens", 91.01, cache_dir=cache_dir)
pretrained_model_score("paraphrase-distilroberta-base-v1", 90.89, cache_dir=cache_dir)
def test_multiling(cache_dir) -> None:
pretrained_model_score("distiluse-base-multilingual-cased", 88.79, cache_dir=cache_dir)
pretrained_model_score("paraphrase-xlm-r-multilingual-v1", 92.76, cache_dir=cache_dir)
pretrained_model_score("paraphrase-multilingual-MiniLM-L12-v2", 92.64, cache_dir=cache_dir)
def test_mpnet(cache_dir) -> None:
pretrained_model_score("paraphrase-mpnet-base-v2", 92.83, cache_dir=cache_dir)
def test_other_models(cache_dir) -> None:
pretrained_model_score("average_word_embeddings_komninos", 68.97, cache_dir=cache_dir)
def test_msmarco(cache_dir) -> None:
pretrained_model_score("msmarco-roberta-base-ance-firstp", 83.61, cache_dir=cache_dir)
pretrained_model_score("msmarco-distilbert-base-v3", 87.96, cache_dir=cache_dir)
def test_sentence_t5(cache_dir) -> None:
pretrained_model_score("sentence-t5-base", 92.75, cache_dir=cache_dir)
"""
Tests general behaviour of the SentenceTransformer class
"""
from functools import partial
import json
import logging
import os
from pathlib import Path
import re
import tempfile
from typing import Dict, List, Literal, Optional, Union, cast
import numpy as np
import pytest
from huggingface_hub import HfApi, RepoUrl, GitRefs, GitRefInfo
import torch
from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Normalize, Transformer, Pooling
from sentence_transformers import util
def test_load_with_safetensors() -> None:
with tempfile.TemporaryDirectory() as cache_folder:
safetensors_model = SentenceTransformer(
"sentence-transformers-testing/stsb-bert-tiny-safetensors",
cache_folder=cache_folder,
)
# Only the safetensors file must be loaded
pytorch_files = list(Path(cache_folder).glob("**/pytorch_model.bin"))
assert 0 == len(pytorch_files), "PyTorch model file must not be downloaded."
safetensors_files = list(Path(cache_folder).glob("**/model.safetensors"))
assert 1 == len(safetensors_files), "Safetensors model file must be downloaded."
with tempfile.TemporaryDirectory() as cache_folder:
transformer = Transformer(
"sentence-transformers-testing/stsb-bert-tiny-safetensors",
cache_dir=cache_folder,
model_args={"use_safetensors": False},
)
pooling = Pooling(transformer.get_word_embedding_dimension())
pytorch_model = SentenceTransformer(modules=[transformer, pooling])
# Only the pytorch file must be loaded
pytorch_files = list(Path(cache_folder).glob("**/pytorch_model.bin"))
assert 1 == len(pytorch_files), "PyTorch model file must be downloaded."
safetensors_files = list(Path(cache_folder).glob("**/model.safetensors"))
assert 0 == len(safetensors_files), "Safetensors model file must not be downloaded."
sentences = ["This is a test sentence", "This is another test sentence"]
assert torch.equal(
safetensors_model.encode(sentences, convert_to_tensor=True),
pytorch_model.encode(sentences, convert_to_tensor=True),
), "Ensure that Safetensors and PyTorch loaded models result in identical embeddings"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA must be available to test moving devices effectively.")
def test_to() -> None:
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors", device="cpu")
test_device = torch.device("cuda")
assert model.device.type == "cpu"
assert test_device.type == "cuda"
model.to(test_device)
assert model.device.type == "cuda", "The model device should have updated"
model.encode("Test sentence")
assert model.device.type == "cuda", "Encoding shouldn't change the device"
assert model._target_device == model.device, "Prevent backwards compatibility failure for _target_device"
model._target_device = "cpu"
assert model.device.type == "cpu", "Ensure that setting `_target_device` doesn't crash."
def test_push_to_hub(monkeypatch: pytest.MonkeyPatch, caplog: pytest.LogCaptureFixture) -> None:
def mock_create_repo(self, repo_id, **kwargs):
return RepoUrl(f"https://huggingface.co/{repo_id}")
mock_upload_folder_kwargs = {}
def mock_upload_folder(self, **kwargs):
nonlocal mock_upload_folder_kwargs
mock_upload_folder_kwargs = kwargs
def mock_list_repo_refs(self, repo_id=None, **kwargs):
try:
git_ref_info = GitRefInfo(name="main", ref="refs/heads/main", target_commit="123456")
except TypeError:
git_ref_info = GitRefInfo(dict(name="main", ref="refs/heads/main", targetCommit="123456"))
# workaround for https://github.com/huggingface/huggingface_hub/issues/1956
git_ref_kwargs = {"branches": [git_ref_info], "converts": [], "tags": [], "pull_requests": None}
try:
return GitRefs(**git_ref_kwargs)
except TypeError:
git_ref_kwargs.pop("pull_requests")
return GitRefs(**git_ref_kwargs)
monkeypatch.setattr(HfApi, "create_repo", mock_create_repo)
monkeypatch.setattr(HfApi, "upload_folder", mock_upload_folder)
monkeypatch.setattr(HfApi, "list_repo_refs", mock_list_repo_refs)
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
url = model.push_to_hub("sentence-transformers-testing/stsb-bert-tiny-safetensors")
assert mock_upload_folder_kwargs["repo_id"] == "sentence-transformers-testing/stsb-bert-tiny-safetensors"
assert url == "https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors/commit/123456"
mock_upload_folder_kwargs.clear()
caplog.clear()
with caplog.at_level(logging.WARNING):
url = model.save_to_hub("sentence-transformers-testing/stsb-bert-tiny-safetensors")
assert mock_upload_folder_kwargs["repo_id"] == "sentence-transformers-testing/stsb-bert-tiny-safetensors"
assert url == "https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors/commit/123456"
mock_upload_folder_kwargs.clear()
assert len(caplog.record_tuples) == 1
assert (
caplog.record_tuples[0][2]
== "The `save_to_hub` method is deprecated and will be removed in a future version of SentenceTransformers. Please use `push_to_hub` instead for future model uploads."
)
with pytest.raises(
ValueError, match="Providing an `organization` to `save_to_hub` is deprecated, please only use `repo_id`."
):
model.save_to_hub("sentence-transformers-testing/stsb-bert-tiny-safetensors", organization="unrelated")
caplog.clear()
with caplog.at_level(logging.WARNING):
url = model.save_to_hub(
"sentence-transformers-testing/stsb-bert-tiny-safetensors", organization="sentence-transformers-testing"
)
assert mock_upload_folder_kwargs["repo_id"] == "sentence-transformers-testing/stsb-bert-tiny-safetensors"
assert url == "https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors/commit/123456"
assert len(caplog.record_tuples) == 2
assert (
caplog.record_tuples[0][2]
== "The `save_to_hub` method is deprecated and will be removed in a future version of SentenceTransformers. Please use `push_to_hub` instead for future model uploads."
)
assert (
caplog.record_tuples[1][2]
== 'Providing an `organization` to `save_to_hub` is deprecated, please only use `repo_id="sentence-transformers-testing/stsb-bert-tiny-safetensors"` instead.'
)
mock_upload_folder_kwargs.clear()
caplog.clear()
with caplog.at_level(logging.WARNING):
url = model.save_to_hub("stsb-bert-tiny-safetensors", organization="sentence-transformers-testing")
assert mock_upload_folder_kwargs["repo_id"] == "sentence-transformers-testing/stsb-bert-tiny-safetensors"
assert url == "https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors/commit/123456"
assert len(caplog.record_tuples) == 2
assert (
caplog.record_tuples[0][2]
== "The `save_to_hub` method is deprecated and will be removed in a future version of SentenceTransformers. Please use `push_to_hub` instead for future model uploads."
)
assert (
caplog.record_tuples[1][2]
== 'Providing an `organization` to `save_to_hub` is deprecated, please use `repo_id="sentence-transformers-testing/stsb-bert-tiny-safetensors"` instead.'
)
mock_upload_folder_kwargs.clear()
caplog.clear()
with caplog.at_level(logging.WARNING):
url = model.save_to_hub(
"sentence-transformers-testing/stsb-bert-tiny-safetensors", local_model_path="my_fake_local_model_path"
)
assert mock_upload_folder_kwargs["repo_id"] == "sentence-transformers-testing/stsb-bert-tiny-safetensors"
assert mock_upload_folder_kwargs["folder_path"] == "my_fake_local_model_path"
assert url == "https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors/commit/123456"
assert len(caplog.record_tuples) == 1
assert (
caplog.record_tuples[0][2]
== "The `save_to_hub` method is deprecated and will be removed in a future version of SentenceTransformers. Please use `push_to_hub` instead for future model uploads."
)
mock_upload_folder_kwargs.clear()
# Incorrect usage: Using the deprecated "repo_name" keyword argument
caplog.clear()
with caplog.at_level(logging.WARNING):
url = model.save_to_hub(repo_name="sentence-transformers-testing/stsb-bert-tiny-safetensors")
assert mock_upload_folder_kwargs["repo_id"] == "sentence-transformers-testing/stsb-bert-tiny-safetensors"
assert url == "https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors/commit/123456"
assert len(caplog.record_tuples) == 2
assert (
caplog.record_tuples[0][2]
== "Providing a `repo_name` keyword argument to `save_to_hub` is deprecated, please use `repo_id` instead."
)
assert (
caplog.record_tuples[1][2]
== "The `save_to_hub` method is deprecated and will be removed in a future version of SentenceTransformers. Please use `push_to_hub` instead for future model uploads."
)
mock_upload_folder_kwargs.clear()
# Incorrect usage: Use positional arguments from before "token" was introduced
caplog.clear()
with caplog.at_level(logging.WARNING):
url = model.save_to_hub(
"stsb-bert-tiny-safetensors", # repo_name
"sentence-transformers-testing", # organization
True, # private
commit_message="Adding new awesome Model!",
exist_ok=True,
)
assert mock_upload_folder_kwargs["repo_id"] == "sentence-transformers-testing/stsb-bert-tiny-safetensors"
assert mock_upload_folder_kwargs["commit_message"] == "Adding new awesome Model!"
assert url == "https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors/commit/123456"
assert len(caplog.record_tuples) == 2
assert (
caplog.record_tuples[0][2]
== "The `save_to_hub` method is deprecated and will be removed in a future version of SentenceTransformers. Please use `push_to_hub` instead for future model uploads."
)
assert (
caplog.record_tuples[1][2]
== 'Providing an `organization` to `save_to_hub` is deprecated, please use `repo_id="sentence-transformers-testing/stsb-bert-tiny-safetensors"` instead.'
)
@pytest.mark.parametrize("safe_serialization", [True, False, None])
def test_safe_serialization(safe_serialization: bool) -> None:
with tempfile.TemporaryDirectory() as cache_folder:
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
if safe_serialization:
model.save(cache_folder, safe_serialization=safe_serialization)
model_files = list(Path(cache_folder).glob("**/model.safetensors"))
assert 1 == len(model_files)
elif safe_serialization is None:
model.save(cache_folder)
model_files = list(Path(cache_folder).glob("**/model.safetensors"))
assert 1 == len(model_files)
else:
model.save(cache_folder, safe_serialization=safe_serialization)
model_files = list(Path(cache_folder).glob("**/pytorch_model.bin"))
assert 1 == len(model_files)
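# Note (added for clarity): as the `safe_serialization is None` branch above shows, `model.save`
# writes model.safetensors by default; `safe_serialization=False` must be passed explicitly to
# obtain the legacy pytorch_model.bin file.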
def test_load_with_revision() -> None:
main_model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors", revision="main")
latest_model = SentenceTransformer(
"sentence-transformers-testing/stsb-bert-tiny-safetensors", revision="f3cb857cba53019a20df283396bcca179cf051a4"
)
older_model = SentenceTransformer(
"sentence-transformers-testing/stsb-bert-tiny-safetensors", revision="ba33022fdf0b0fc2643263f0726f44d0a07d0e24"
)
test_sentence = ["Hello there!"]
main_embeddings = main_model.encode(test_sentence, convert_to_tensor=True)
assert torch.equal(main_embeddings, latest_model.encode(test_sentence, convert_to_tensor=True))
assert not torch.equal(main_embeddings, older_model.encode(test_sentence, convert_to_tensor=True))
def test_load_local_without_normalize_directory() -> None:
tiny_model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
tiny_model.add_module("Normalize", Normalize())
with tempfile.TemporaryDirectory() as tmp_folder:
model_path = Path(tmp_folder) / "tiny_model_local"
tiny_model.save(str(model_path))
assert (model_path / "2_Normalize").exists()
os.rmdir(model_path / "2_Normalize")
assert not (model_path / "2_Normalize").exists()
# This fails in v2.3.0
fresh_tiny_model = SentenceTransformer(str(model_path))
assert isinstance(fresh_tiny_model, SentenceTransformer)
def test_prompts(caplog: pytest.LogCaptureFixture) -> None:
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
assert model.prompts == {}
assert model.default_prompt_name is None
texts = ["How to bake a chocolate cake", "Symptoms of the flu"]
no_prompt_embedding = model.encode(texts)
prompt_embedding = model.encode([f"query: {text}" for text in texts])
assert not np.array_equal(no_prompt_embedding, prompt_embedding)
for query in ["query: ", "query:", "query: "]:
# Test prompt="... {}"
model.prompts = {}
assert np.array_equal(model.encode(texts, prompt=query), prompt_embedding)
# Test prompt_name="..."
model.prompts = {"query": query}
assert np.array_equal(model.encode(texts, prompt_name="query"), prompt_embedding)
caplog.clear()
# Test prompt_name="..." & prompt="..."
with caplog.at_level(logging.WARNING):
assert np.array_equal(model.encode(texts, prompt=query, prompt_name="query"), prompt_embedding)
assert len(caplog.record_tuples) == 1
assert (
caplog.record_tuples[0][2]
== "Encode with either a `prompt`, a `prompt_name`, or neither, but not both. "
"Ignoring the `prompt_name` in favor of `prompt`."
)
with pytest.raises(
ValueError,
match=re.escape(
"Prompt name 'invalid_prompt_name' not found in the configured prompts dictionary with keys ['query']."
),
):
model.encode(texts, prompt_name="invalid_prompt_name")
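# Illustrative sketch (not part of the original tests): the two equivalent ways of applying a
# prompt that the test above exercises. Model name and query text are assumptions.
def _example_prompt_usage() -> None:
    model = SentenceTransformer(
        "sentence-transformers-testing/stsb-bert-tiny-safetensors",
        prompts={"query": "query: "},
    )
    # Reference the configured prompt by name ...
    by_name = model.encode(["How to bake a chocolate cake"], prompt_name="query")
    # ... or pass the prompt string directly; both prepend "query: " before encoding.
    by_string = model.encode(["How to bake a chocolate cake"], prompt="query: ")
    assert np.array_equal(by_name, by_string)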
def test_save_load_prompts() -> None:
with pytest.raises(
ValueError,
match=re.escape(
"Default prompt name 'invalid_prompt_name' not found in the configured prompts dictionary with keys ['query']."
),
):
model = SentenceTransformer(
"sentence-transformers-testing/stsb-bert-tiny-safetensors",
prompts={"query": "query: "},
default_prompt_name="invalid_prompt_name",
)
model = SentenceTransformer(
"sentence-transformers-testing/stsb-bert-tiny-safetensors",
prompts={"query": "query: "},
default_prompt_name="query",
)
assert model.prompts == {"query": "query: "}
assert model.default_prompt_name == "query"
with tempfile.TemporaryDirectory() as tmp_folder:
model_path = Path(tmp_folder) / "tiny_model_local"
model.save(str(model_path))
config_path = model_path / "config_sentence_transformers.json"
assert config_path.exists()
with open(config_path, "r", encoding="utf8") as f:
saved_config = json.load(f)
assert saved_config["prompts"] == {"query": "query: "}
assert saved_config["default_prompt_name"] == "query"
fresh_model = SentenceTransformer(str(model_path))
assert fresh_model.prompts == {"query": "query: "}
assert fresh_model.default_prompt_name == "query"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA must be available to test float16 support.")
def test_encode_fp16() -> None:
tiny_model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
tiny_model.half()
embeddings = tiny_model.encode(["Hello there!"], convert_to_tensor=True)
assert embeddings.dtype == torch.float16
@pytest.mark.parametrize("convert_to_tensor", [True, False])
@pytest.mark.parametrize("convert_to_numpy", [True, False])
@pytest.mark.parametrize(
("precision", "expected_torch_dtype", "expected_numpy_dtype"),
[
(None, torch.float32, np.float32),
("float32", torch.float32, np.float32),
("int8", torch.int8, np.int8),
("uint8", torch.uint8, np.uint8),
("binary", torch.int8, np.int8),
("ubinary", torch.uint8, np.uint8),
],
)
def test_encode_quantization(
stsb_bert_tiny_model_reused: SentenceTransformer,
convert_to_tensor: bool,
convert_to_numpy: bool,
precision: str,
expected_torch_dtype,
expected_numpy_dtype,
) -> None:
tiny_model = stsb_bert_tiny_model_reused
embeddings = tiny_model.encode(
["One sentence", "Another sentence"],
convert_to_tensor=convert_to_tensor,
convert_to_numpy=convert_to_numpy,
precision=precision,
)
if convert_to_tensor:
assert embeddings[0].dtype == expected_torch_dtype
assert isinstance(embeddings, torch.Tensor)
elif convert_to_numpy:
assert embeddings[0].dtype == expected_numpy_dtype
assert isinstance(embeddings, np.ndarray)
else:
assert embeddings[0].dtype == expected_torch_dtype
assert isinstance(embeddings, list)
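# Illustrative sketch (not part of the original tests): quantization can also be applied to
# already-computed float embeddings via the `quantize_embeddings` helper that accompanies the
# `precision` argument (assumed import path); model name and sentences are assumptions.
def _example_quantize_existing_embeddings() -> None:
    from sentence_transformers.quantization import quantize_embeddings

    model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
    float_embeddings = model.encode(["One sentence", "Another sentence"], convert_to_numpy=True)
    # "binary" packs the sign bits of each dimension, so the returned array is much smaller.
    binary_embeddings = quantize_embeddings(float_embeddings, precision="binary")
    assert binary_embeddings.dtype == np.int8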
@pytest.mark.parametrize("sentences", ("Single sentence", ["One sentence", "Another sentence"]))
@pytest.mark.parametrize("convert_to_tensor", [True, False])
@pytest.mark.parametrize("convert_to_numpy", [True, False])
@pytest.mark.parametrize("normalize_embeddings", [True, False])
@pytest.mark.parametrize("output_value", ["sentence_embedding", None])
def test_encode_truncate(
sentences: Union[str, List[str]],
convert_to_tensor: bool,
convert_to_numpy: bool,
normalize_embeddings: bool,
output_value: Optional[Literal["sentence_embedding"]],
) -> None:
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
embeddings_full_unnormalized: torch.Tensor = model.encode(
sentences, convert_to_numpy=False, convert_to_tensor=True
) # These are raw embeddings which serve as the reference to test against
def test(model: SentenceTransformer, expected_dim: int):
outputs = model.encode(
sentences,
output_value=output_value,
convert_to_tensor=convert_to_tensor,
convert_to_numpy=convert_to_numpy,
normalize_embeddings=normalize_embeddings,
)
# Extract the sentence embeddings out of outputs
if output_value is None:
# We get the full output features, not just the sentence embeddings
if not isinstance(outputs, List):
embeddings = outputs["sentence_embedding"]
else:
outputs = cast(List[Dict[str, torch.Tensor]], outputs)
# TODO: can overload model.encode if people want type checker compatibility
embeddings = [out_features["sentence_embedding"] for out_features in outputs]
else:
embeddings = outputs
# Test shape
if isinstance(embeddings, list): # list of tensors
embeddings_shape = (len(embeddings), embeddings[0].shape[-1])
else:
embeddings_shape = embeddings.shape
expected_shape = (expected_dim,) if isinstance(sentences, str) else (len(sentences), expected_dim)
assert embeddings_shape == expected_shape
assert model.get_sentence_embedding_dimension() == expected_dim
# Convert embeddings to a torch Tensor for ease of testing
if isinstance(embeddings, list):
embeddings = torch.stack(embeddings)
elif isinstance(embeddings, np.ndarray):
embeddings = torch.from_numpy(embeddings).to(embeddings_full_unnormalized.device)
# On a non-cpu device, the device of torch.from_numpy(embeddings) is always CPU
# Test content
if normalize_embeddings:
if output_value is None:
# Currently, normalization is not performed; it's the raw output of the forward pass
pass
else:
normalize = partial(torch.nn.functional.normalize, p=2, dim=-1)
assert torch.allclose(
embeddings,
normalize(util.truncate_embeddings(embeddings_full_unnormalized, expected_dim)),
)
else:
assert torch.allclose(embeddings, util.truncate_embeddings(embeddings_full_unnormalized, expected_dim))
# Test init w/o setting truncate_dim (it's None)
original_output_dim: int = model.get_sentence_embedding_dimension()
test(model, expected_dim=original_output_dim)
# Test init w/ a set truncate_dim
truncate_dim = int(original_output_dim / 4)
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors", truncate_dim=truncate_dim)
test(model, expected_dim=truncate_dim)
# Test setting the attribute after init to a greater dimension
new_truncate_dim = 2 * truncate_dim
model.truncate_dim = new_truncate_dim
test(model, expected_dim=new_truncate_dim)
# Test context manager
final_truncate_dim = int(original_output_dim / 8)
with model.truncate_sentence_embeddings(final_truncate_dim):
test(model, expected_dim=final_truncate_dim)
test(model, expected_dim=new_truncate_dim) # b/c we've exited the context
# Test w/ an output_dim that's larger than the original_output_dim: no truncation ends up happening
model.truncate_dim = 2 * original_output_dim
test(model, expected_dim=original_output_dim)
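# Illustrative sketch (not part of the original tests): truncating embeddings at encode time,
# mirroring the behaviours tested above. Model name and dimensions are assumptions.
def _example_truncate_dim_usage() -> None:
    model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors", truncate_dim=32)
    assert model.get_sentence_embedding_dimension() == 32
    # Temporarily truncate even further via the context manager, then fall back to 32.
    with model.truncate_sentence_embeddings(16):
        assert model.encode("A sentence").shape == (16,)
    assert model.encode("A sentence").shape == (32,)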
"""
Tests that SentenceTransformer models can be fine-tuned (on STSbenchmark and NLI data) and reach the expected scores on the STSbenchmark test split
"""
import csv
import gzip
import os
from typing import Generator, List, Tuple
import pytest
from torch.utils.data import DataLoader
from sentence_transformers import (
SentencesDataset,
SentenceTransformer,
losses,
util,
)
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
@pytest.fixture()
def sts_resource() -> Generator[Tuple[List[InputExample], List[InputExample]], None, None]:
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
stsb_train_samples = []
stsb_test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as f:
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "test":
stsb_test_samples.append(inp_example)
elif row["split"] == "train":
stsb_train_samples.append(inp_example)
yield stsb_train_samples, stsb_test_samples
@pytest.fixture()
def nli_resource() -> Generator[List[InputExample], None, None]:
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
nli_train_samples = []
max_train_samples = 10000
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as f:
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "train":
label_id = label2int[row["label"]]
nli_train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
if len(nli_train_samples) >= max_train_samples:
break
yield nli_train_samples
def evaluate_stsb_test(model, expected_score, test_samples) -> None:
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
score = model.evaluate(evaluator) * 100
print("STS-Test Performance: {:.2f} vs. exp: {:.2f}".format(score, expected_score))
assert score > expected_score or abs(score - expected_score) < 0.1
@pytest.mark.slow
def test_train_stsb_slow(
distilbert_base_uncased_model: SentenceTransformer, sts_resource: Tuple[List[InputExample], List[InputExample]]
) -> None:
model = distilbert_base_uncased_model
sts_train_samples, sts_test_samples = sts_resource
train_dataset = SentencesDataset(sts_train_samples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=16)
train_loss = losses.CosineSimilarityLoss(model=model)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=None,
epochs=1,
evaluation_steps=1000,
warmup_steps=int(len(train_dataloader) * 0.1),
use_amp=True,
)
evaluate_stsb_test(model, 80.0, sts_test_samples)
@pytest.mark.skipif("CI" in os.environ, reason="This test is too slow for the CI (~8 minutes)")
def test_train_stsb(
distilbert_base_uncased_model: SentenceTransformer, sts_resource: Tuple[List[InputExample], List[InputExample]]
) -> None:
model = distilbert_base_uncased_model
sts_train_samples, sts_test_samples = sts_resource
train_dataset = SentencesDataset(sts_train_samples[:100], model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=16)
train_loss = losses.CosineSimilarityLoss(model=model)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=None,
epochs=1,
evaluation_steps=1000,
warmup_steps=int(len(train_dataloader) * 0.1),
use_amp=True,
)
evaluate_stsb_test(model, 60.0, sts_test_samples)
@pytest.mark.slow
def test_train_nli_slow(
distilbert_base_uncased_model: SentenceTransformer,
nli_resource: List[InputExample],
sts_resource: Tuple[List[InputExample], List[InputExample]],
):
model = distilbert_base_uncased_model
_, sts_test_samples = sts_resource
train_dataset = SentencesDataset(nli_resource, model=model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=16)
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=3,
)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=None,
epochs=1,
warmup_steps=int(len(train_dataloader) * 0.1),
use_amp=True,
)
evaluate_stsb_test(model, 50.0, sts_test_samples)
@pytest.mark.skipif("CI" in os.environ, reason="This test is too slow for the CI (~25 minutes)")
def test_train_nli(
distilbert_base_uncased_model: SentenceTransformer,
nli_resource: List[InputExample],
sts_resource: Tuple[List[InputExample], List[InputExample]],
):
model = distilbert_base_uncased_model
_, sts_test_samples = sts_resource
train_dataset = SentencesDataset(nli_resource[:100], model=model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=16)
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=3,
)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=None,
epochs=1,
warmup_steps=int(len(train_dataloader) * 0.1),
use_amp=True,
)
evaluate_stsb_test(model, 50.0, sts_test_samples)
import numpy as np
import sklearn.metrics.pairwise
import torch
from sentence_transformers import SentenceTransformer, util
def test_normalize_embeddings() -> None:
"""Tests the correct computation of util.normalize_embeddings"""
embedding_size = 100
a = torch.tensor(np.random.randn(50, embedding_size))
a_norm = util.normalize_embeddings(a)
for embedding in a_norm:
assert len(embedding) == embedding_size
emb_norm = torch.norm(embedding)
assert abs(emb_norm.item() - 1) < 0.0001
def test_pytorch_cos_sim() -> None:
"""Tests the correct computation of util.pytorch_cos_scores"""
a = np.random.randn(50, 100)
b = np.random.randn(50, 100)
sklearn_pairwise = sklearn.metrics.pairwise.cosine_similarity(a, b)
pytorch_cos_scores = util.pytorch_cos_sim(a, b).numpy()
for i in range(len(sklearn_pairwise)):
for j in range(len(sklearn_pairwise[i])):
assert abs(sklearn_pairwise[i][j] - pytorch_cos_scores[i][j]) < 0.001
def test_semantic_search() -> None:
"""Tests util.semantic_search function"""
num_queries = 20
num_k = 10
doc_emb = torch.tensor(np.random.randn(1000, 100))
q_emb = torch.tensor(np.random.randn(num_queries, 100))
hits = util.semantic_search(q_emb, doc_emb, top_k=num_k, query_chunk_size=5, corpus_chunk_size=17)
assert len(hits) == num_queries
assert len(hits[0]) == num_k
# Sanity Check of the results
cos_scores = util.pytorch_cos_sim(q_emb, doc_emb)
cos_scores_values, cos_scores_idx = cos_scores.topk(num_k)
cos_scores_values = cos_scores_values.cpu().tolist()
cos_scores_idx = cos_scores_idx.cpu().tolist()
for qid in range(num_queries):
for hit_num in range(num_k):
assert hits[qid][hit_num]["corpus_id"] == cos_scores_idx[qid][hit_num]
assert np.abs(hits[qid][hit_num]["score"] - cos_scores_values[qid][hit_num]) < 0.001
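# Note (added for clarity): as asserted above, util.semantic_search returns one list per query,
# each containing `top_k` dicts of the form {"corpus_id": int, "score": float}, sorted by
# descending cosine similarity.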
def test_paraphrase_mining() -> None:
model = SentenceTransformer("all-MiniLM-L6-v2")
sentences = [
"This is a test",
"This is a test!",
"The cat sits on mat",
"The cat sits on the mat",
"On the mat a cat sits",
"A man eats pasta",
"A woman eats pasta",
"A man eats spaghetti",
]
duplicates = util.paraphrase_mining(model, sentences)
for score, a, b in duplicates:
if score > 0.5:
assert (a, b) in [(0, 1), (2, 3), (2, 4), (3, 4), (5, 6), (5, 7), (6, 7)]
def test_pairwise_scores() -> None:
a = np.random.randn(50, 100)
b = np.random.randn(50, 100)
# Pairwise cos
sklearn_pairwise = 1 - sklearn.metrics.pairwise.paired_cosine_distances(a, b)
pytorch_cos_scores = util.pairwise_cos_sim(a, b).numpy()
assert np.allclose(sklearn_pairwise, pytorch_cos_scores)
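# Note (added for clarity): util.pairwise_cos_sim compares row i of `a` with row i of `b`,
# returning one score per pair, whereas util.pytorch_cos_sim above returns the full
# similarity matrix between every row of `a` and every row of `b`.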
#!/bin/bash
echo "Export params ..."
export HIP_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 # adjust to match the GPU IDs and count used for training
export HSA_FORCE_FINE_GRAIN_PCIE=1
export USE_MIOPEN_BATCHNORM=1
echo "Training start ..."
python finetune.py \
--data_path ./datasets/xxx.txt \
--train_batch_size 32 \
--num_epochs 10