"official/nlp/bert/run_squad.py" did not exist on "5175b7e69c74b13405afa26761d79a21ff1621ef"
test_torch_native_attention_backend.py 1.55 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
"""
Usage:
python3 -m unittest test_torch_native_attention_backend.TestTorchNativeAttnBackend.test_mmlu
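python3 -m unittest test_torch_native_attention_backend.TestTorchNativeAttnBackend.test_latency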
"""

import unittest
from types import SimpleNamespace

from sglang.srt.utils import kill_process_tree
from sglang.test.run_eval import run_eval
from sglang.test.test_utils import (
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
    CustomTestCase,
    is_in_ci,
    popen_launch_server,
    run_bench_one_batch,
)


class TestTorchNativeAttnBackend(CustomTestCase):
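    # Benchmark a single batch with the torch-native attention backend;
    # run_bench_one_batch returns the measured decode output throughput.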
    def test_latency(self):
        output_throughput = run_bench_one_batch(
            DEFAULT_MODEL_NAME_FOR_TEST,
            ["--attention-backend", "torch_native"],
        )

        if is_in_ci():
            # Torch native backend is expected to be slower
            self.assertGreater(output_throughput, 40)

    def test_mmlu(self):
        model = DEFAULT_MODEL_NAME_FOR_TEST
        base_url = DEFAULT_URL_FOR_TEST
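        # Launch a local server process with the torch-native attention backend.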
        process = popen_launch_server(
            model,
            base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=["--attention-backend", "torch_native"],
        )

        try:
            args = SimpleNamespace(
                base_url=base_url,
                model=model,
                eval_name="mmlu",
                num_examples=64,
                num_threads=32,
            )

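            # Run the MMLU eval (64 examples) against the launched server.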
            metrics = run_eval(args)
            self.assertGreaterEqual(metrics["score"], 0.65)
        finally:
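            # Always clean up the server and its child processes, even if the eval fails.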
            kill_process_tree(process.pid)


if __name__ == "__main__":
    unittest.main()