# Copyright 2023-2024 SGLang Team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
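"""Compare embedding outputs between the HuggingFace and SGLang (SRT) runners."""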

import multiprocessing as mp
import random
import unittest

import torch
from transformers import AutoConfig, AutoTokenizer

from sglang.test.runners import DEFAULT_PROMPTS, HFRunner, SRTRunner
from sglang.test.test_utils import CustomTestCase, get_similarities, is_in_ci

# Each entry is (model_path, tp_size, prefill_tolerance).
MODELS = [
    ("Alibaba-NLP/gte-Qwen2-1.5B-instruct", 1, 1e-5),
    ("intfloat/e5-mistral-7b-instruct", 1, 1e-5),
    ("marco/mcdse-2b-v1", 1, 1e-5),
    # Temporarily disabled until this model is fixed
    # ("jason9693/Qwen2.5-1.5B-apeach", 1, 1e-5),
]
TORCH_DTYPES = [torch.float16]


class TestEmbeddingModels(CustomTestCase):
    @classmethod
    def setUpClass(cls):
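        # Use the "spawn" start method; CUDA cannot be re-initialized
        # in a forked subprocess.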
        mp.set_start_method("spawn", force=True)

    def _truncate_prompts(self, prompts, model_path):
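        """Clip prompts that exceed the model's maximum context length.

        Prompts longer than ``max_position_embeddings`` tokens are decoded
        back to text after truncation; shorter prompts pass through unchanged.
        """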
        config = AutoConfig.from_pretrained(model_path)
        max_length = getattr(config, "max_position_embeddings", 2048)

        tokenizer = AutoTokenizer.from_pretrained(model_path)

        truncated_prompts = []
        for prompt in prompts:
            tokens = tokenizer(prompt, return_tensors="pt", truncation=False)
            if len(tokens.input_ids[0]) > max_length:
                truncated_text = tokenizer.decode(
                    tokens.input_ids[0][: max_length - 1], skip_special_tokens=True
                )
                truncated_prompts.append(truncated_text)
            else:
                truncated_prompts.append(prompt)
        return truncated_prompts

    def assert_close_prefill_logits(
        self,
        prompts,
        model_path,
        tp_size,
        torch_dtype,
        prefill_tolerance,
    ) -> None:
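        """Check that HF and SRT embeddings match within prefill_tolerance."""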
        truncated_prompts = self._truncate_prompts(prompts, model_path)

        with HFRunner(
            model_path,
            torch_dtype=torch_dtype,
            model_type="embedding",
        ) as hf_runner:
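            # Reference embeddings from the HuggingFace implementation.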
            hf_outputs = hf_runner.forward(truncated_prompts)

        with SRTRunner(
            model_path,
            tp_size=tp_size,
            torch_dtype=torch_dtype,
            model_type="embedding",
        ) as srt_runner:
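            # Embeddings from the SGLang runtime under test.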
            srt_outputs = srt_runner.forward(truncated_prompts)

        for i in range(len(prompts)):
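            # A similarity of 1 means the two embeddings are effectively identical.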
            hf_logits = torch.Tensor(hf_outputs.embed_logits[i])
            srt_logits = torch.Tensor(srt_outputs.embed_logits[i])

            similarity = torch.tensor(get_similarities(hf_logits, srt_logits))
            print("similarity diff", abs(similarity - 1))

            # Very long prompts go through the truncation path, so the strict
            # similarity check is only applied to short prompts.
            if len(prompts[i]) <= 1000:
                assert torch.all(
                    abs(similarity - 1) < prefill_tolerance
                ), "embeddings are not all close"

    def test_prefill_logits(self):
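        """Run the HF/SRT comparison over every configured model and dtype."""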
        models_to_test = MODELS

        if is_in_ci():
            # Sample a single random model in CI to keep the job fast.
            models_to_test = [random.choice(MODELS)]

        for model, tp_size, prefill_tolerance in models_to_test:
            for torch_dtype in TORCH_DTYPES:
                self.assert_close_prefill_logits(
                    DEFAULT_PROMPTS, model, tp_size, torch_dtype, prefill_tolerance
                )


if __name__ == "__main__":
    unittest.main()