openai-client.py 2.2 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
import openai
import time
import threading
import queue
from concurrent.futures import ThreadPoolExecutor, as_completed

def jls_extract_def(model, messages, temperature, max_length, stream, index):
    """Send one streaming chat-completion request and collect the full reply.

    Args:
        model: model name forwarded to the server.
        messages: chat history, a list of {"role": ..., "content": ...} dicts.
        temperature: sampling temperature.
        max_length: generation length limit (fastllm-specific parameter,
            passed through by the OpenAI client as an extra kwarg).
        stream: whether to request a streamed response.
        index: worker index; only worker 0 echoes tokens to stdout.

    Returns:
        Tuple of (output_tokens, full_text, tokens_per_second).
    """
    # NOTE(review): these assign module-level globals; every worker writes
    # the same values so this is benign, but per-request config would be
    # cleaner if endpoints ever differ per thread.
    openai.api_base = "http://127.0.0.1:8100/v1"
    openai.api_key = "none"

    output_tokens = 0
    ret = ""

    t0 = time.time()
    result = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=temperature,
        max_length=max_length,
        stream=stream,
    )

    for chunk in result:
        output_tokens += 1
        # The role-only first delta and the final delta may carry
        # content=None; the original `hasattr` check let None through and
        # `ret += None` raised TypeError. Skip empty deltas instead.
        content = getattr(chunk.choices[0].delta, "content", None)
        if content is not None:
            if index == 0:
                print(content, end="", flush=True)
            ret += content
    t1 = time.time()

    # Guard against a zero-duration response to avoid ZeroDivisionError.
    elapsed = max(t1 - t0, 1e-9)
    return output_tokens, ret, output_tokens / elapsed

if __name__ == "__main__":
    # Benchmark driver: fan out N identical prompts per concurrency level
    # and report aggregate token throughput.
    prompt = "满江红全文"
    concurrencys = [1]

    temperature = 0.1
    max_length = 4096
    stream = True

    prompts = [prompt]
    model = "chatglm3-6b-fastllm"

    # Context manager guarantees worker threads are joined on exit
    # (the original executor was never shut down).
    with ThreadPoolExecutor(max_workers=32) as pool:
        for concurrency in concurrencys:
            cur_prompts = prompts * concurrency
            token_count = 0
            futures = []
            t0 = time.time()
            for index, prompt in enumerate(cur_prompts):
                # BUG FIX: build a fresh messages list per task. The original
                # mutated one shared dict before each submit, so with
                # concurrency > 1 the worker threads raced on the same object
                # and could all observe the last prompt written.
                messages = [{"role": "user", "content": prompt}]
                future = pool.submit(
                    jls_extract_def, model, messages, temperature, max_length, stream, index
                )
                # Tag the future so results can be attributed after as_completed
                # reorders them.
                future.index = index
                futures.append(future)

            for future in as_completed(futures):
                result = future.result()
                print(future.index)
                print(result)
                print("\n")
                token_count += result[0]

            t1 = time.time()

            print("\n---------------------------------------------\n")
            print("\nconcurrency: {}".format(concurrency))
            print("\ntotal use: {:.2f}".format(t1 - t0))
            print("\ntoken/s: {:.2f}, token_count: {}".format(token_count / (t1 - t0), token_count))
            print("\n---------------------------------------------\n")