hidden_states_server.py
"""
Usage:

python hidden_states_server.py

Note that each time you change the `return_hidden_states` parameter,
the CUDA graph will be recaptured, which might lead to a performance hit.
So avoid alternating between requests that return hidden states and plain completions.
"""

import requests
import torch

from sglang.test.test_utils import is_in_ci
from sglang.utils import terminate_process, wait_for_server

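# CI runs use a patched launcher from the docs; local runs use the stock helper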
if is_in_ci():
    from docs.backend.patch import launch_server_cmd
else:
    from sglang.utils import launch_server_cmd


def main():
    # Launch the server with hidden-state returns enabled (--enable-return-hidden-states)
    server_process, port = launch_server_cmd(
        "python -m sglang.launch_server --model-path Alibaba-NLP/gte-Qwen2-1.5B-instruct --enable-return-hidden-states --host 0.0.0.0"
    )
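    # Block until the server is ready to accept requests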
    wait_for_server(f"http://localhost:{port}")

    prompts = [
        "Hello, my name is",
        "The president of the United States is",
        "The capital of France is",
        "The future of AI is",
    ]

    sampling_params = {
        "temperature": 0.8,
        "top_p": 0.95,
        "max_new_tokens": 10,
    }

    json_data = {
        "text": prompts,
        "sampling_params": sampling_params,
        "return_hidden_states": True,
    }

    response = requests.post(
        f"http://localhost:{port}/generate",
        json=json_data,
    )

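    # The response is already in hand, so the server can be shut down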
    terminate_process(server_process)

    outputs = response.json()
    for prompt, output in zip(prompts, outputs):
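        # Hidden states arrive as nested Python lists; convert each entry
        # back into a bfloat16 tensor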
        for i in range(len(output["meta_info"]["hidden_states"])):
            output["meta_info"]["hidden_states"][i] = torch.tensor(
                output["meta_info"]["hidden_states"][i], dtype=torch.bfloat16
            )
        print("===============================")
        print(
            f"Prompt: {prompt}\n"
            f"Generated text: {output['text']}\n"
            f"Prompt_Tokens: {output['meta_info']['prompt_tokens']}\t"
67
68
69
70
71
72
73
74
            f"Completion_tokens: {output['meta_info']['completion_tokens']}"
        )
        print("Hidden states:")
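        # Entries may be 1-D (a single decode step) or 2-D (a prefill chunk);
        # promote the 1-D entries so everything concatenates into one
        # [num_tokens, hidden_size] matrix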
        hidden_states = torch.cat(
            [
                i.unsqueeze(0) if len(i.shape) == 1 else i
                for i in output["meta_info"]["hidden_states"]
            ]
        )
        print(hidden_states)
        print()


if __name__ == "__main__":
    main()