"""
Usage:
export OPENAI_API_KEY=sk-******
python3 openai_example_chat.py
"""
import sglang as sgl
@sgl.function
def multi_turn_question(s, question_1, question_2):
    """Run a two-turn chat: ask question_1, then question_2 as a follow-up.

    Each assistant reply is generated with up to 256 tokens and captured in
    the state under the keys "answer_1" and "answer_2".
    """
    s += sgl.system("You are a helpful assistant.")
    for question, slot in ((question_1, "answer_1"), (question_2, "answer_2")):
        s += sgl.user(question)
        s += sgl.assistant(sgl.gen(slot, max_tokens=256))
def single():
    """Run the two-turn question once and print the full transcript.

    Prints every chat message (role and content), then the generated
    answers that ``multi_turn_question`` stored in the state.
    """
    state = multi_turn_question.run(
        question_1="What is the capital of the United States?",
        question_2="List two local attractions.",
    )

    for m in state.messages():
        print(m["role"], ":", m["content"])

    print("\n-- answer_1 --\n", state["answer_1"])
    # Fix: answer_2 is generated by multi_turn_question but was never shown.
    print("\n-- answer_2 --\n", state["answer_2"])
def stream():
    """Run the two-turn question with streaming enabled.

    Tokens are echoed to stdout incrementally as the backend produces them.
    """
    state = multi_turn_question.run(
        question_1="What is the capital of the United States?",
        question_2="List two local attractions.",
        stream=True,
    )

    for chunk in state.text_iter():
        print(chunk, end="", flush=True)
    print()


def batch():
    """Run the two-turn question for several inputs in a single batched call."""
    requests = [
        {
            "question_1": "What is the capital of the United States?",
            "question_2": "List two local attractions.",
        },
        {
            "question_1": "What is the capital of France?",
            "question_2": "What is the population of this city?",
        },
    ]
    states = multi_turn_question.run_batch(requests)

    for state in states:
        print(state.messages())


if __name__ == "__main__":
    sgl.set_default_backend(sgl.OpenAI("gpt-3.5-turbo"))

    # Demonstrate each invocation style in turn: one request, a streamed
    # request, and a batch of requests.
    for label, demo in (("single", single), ("stream", stream), ("batch", batch)):
        print(f"\n========== {label} ==========\n")
        demo()