import unittest

from sglang.test.test_utils import is_in_ci, run_bench_latency


class TestTorchTP(unittest.TestCase):
    """Smoke test for tensor-parallel execution of the torch-native Llama model."""

    def test_torch_native_llama(self):
        # Swap in the torch-native Llama implementation via a model-config
        # override and run the latency benchmark with tensor parallelism of 2.
        # NOTE(review): cuda graph is disabled here — presumably the
        # torch-native path does not support graph capture; confirm.
        bench_args = [
            "--tp",
            "2",
            "--json-model-override-args",
            '{"architectures": ["TorchNativeLlamaForCausalLM"]}',
            "--disable-cuda-graph",
        ]
        output_throughput = run_bench_latency(
            "meta-llama/Meta-Llama-3-8B", bench_args
        )

        # Only enforce the throughput check when running in CI.
        if is_in_ci():
            assert output_throughput > 0, f"{output_throughput=}"


# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()