# Copyright 2023-2024 SGLang Team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import multiprocessing as mp
import unittest

import torch
from transformers import AutoConfig, AutoTokenizer

from sglang.test.runners import DEFAULT_PROMPTS, HFRunner, SRTRunner
from sglang.test.test_utils import get_similarities

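# Each entry is (model_path, tp_size, prefill_tolerance).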
MODELS = [
    ("Alibaba-NLP/gte-Qwen2-1.5B-instruct", 1, 1e-5),
    ("intfloat/e5-mistral-7b-instruct", 1, 1e-5),
    ("marco/mcdse-2b-v1", 1, 1e-5),
]

TORCH_DTYPES = [torch.float16]


class TestEmbeddingModels(unittest.TestCase):
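    """Check that the HF and SRT runners produce near-identical embeddings."""
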
    @classmethod
    def setUpClass(cls):
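        # The runners launch model subprocesses; "spawn" is forced because
        # CUDA contexts cannot be re-initialized in a forked child process.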
        mp.set_start_method("spawn", force=True)

    def _truncate_prompts(self, prompts, model_path):
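        """Truncate prompts that exceed the model's max sequence length.

        Over-long prompts are re-decoded from their first (max_length - 1)
        tokens so that both runners receive identical, in-range inputs.
        """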
        config = AutoConfig.from_pretrained(model_path)
        max_length = getattr(config, "max_position_embeddings", 2048)

        tokenizer = AutoTokenizer.from_pretrained(model_path)

        truncated_prompts = []
        for prompt in prompts:
            tokens = tokenizer(prompt, return_tensors="pt", truncation=False)
            if len(tokens.input_ids[0]) > max_length:
                truncated_text = tokenizer.decode(
                    tokens.input_ids[0][: max_length - 1], skip_special_tokens=True
                )
                truncated_prompts.append(truncated_text)
            else:
                truncated_prompts.append(prompt)
        return truncated_prompts

    def assert_close_prefill_logits(
        self,
        prompts,
        model_path,
        tp_size,
        torch_dtype,
        prefill_tolerance,
    ) -> None:
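        """Embed the prompts with both runners and assert that the similarity
        of each embedding pair stays within prefill_tolerance of 1."""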
        truncated_prompts = self._truncate_prompts(prompts, model_path)

        with HFRunner(
            model_path,
            torch_dtype=torch_dtype,
            model_type="embedding",
        ) as hf_runner:
            hf_outputs = hf_runner.forward(truncated_prompts)

        with SRTRunner(
            model_path,
            tp_size=tp_size,
            torch_dtype=torch_dtype,
            model_type="embedding",
        ) as srt_runner:
            srt_outputs = srt_runner.forward(truncated_prompts)

        for i in range(len(prompts)):
            hf_logits = torch.Tensor(hf_outputs.embed_logits[i])
            srt_logits = torch.Tensor(srt_outputs.embed_logits[i])

            similarity = torch.tensor(get_similarities(hf_logits, srt_logits))
            print("similarity diff", abs(similarity - 1))

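            # Similarity is printed for every prompt, but the tolerance is
            # asserted only for short prompts; longer ones may have been
            # truncated above, making a strict comparison less meaningful.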
            if len(prompts[i]) <= 1000:
                assert torch.all(
                    abs(similarity - 1) < prefill_tolerance
                ), "embeddings are not all close"

    def test_prefill_logits(self):
        for model, tp_size, prefill_tolerance in MODELS:
            for torch_dtype in TORCH_DTYPES:
                self.assert_close_prefill_logits(
                    DEFAULT_PROMPTS, model, tp_size, torch_dtype, prefill_tolerance
                )


if __name__ == "__main__":
    unittest.main()