Unverified Commit bb0501c0 authored by yichuan~'s avatar yichuan~ Committed by GitHub
Browse files

Fix List input bug (#838)

parent 6b0f2e90
"""
Usage:
python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port 30000
python openai_batch_chat.py
Note: Before running this script,
you should create the input.jsonl file with the following content:
{"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-3.5-turbo-0125", "messages": [{"role": "system", "content": "You are a helpful assistant."},{"role": "user", "content": "Hello world! List 3 NBA players and tell a story"}],"max_tokens": 300}}
{"custom_id": "request-2", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-3.5-turbo-0125", "messages": [{"role": "system", "content": "You are an assistant. "},{"role": "user", "content": "Hello world! List three capital and tell a story"}],"max_tokens": 500}}
"""
import json
import os
import time
......
"""
Usage:
python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port 30000
python openai_batch_complete.py
Note: Before running this script,
you should create the input.jsonl file with the following content:
{"custom_id": "request-1", "method": "POST", "url": "/v1/completions", "body": {"model": "gpt-3.5-turbo-instruct", "prompt": "List 3 names of famous soccer player: ", "max_tokens": 200}}
{"custom_id": "request-2", "method": "POST", "url": "/v1/completions", "body": {"model": "gpt-3.5-turbo-instruct", "prompt": "List 6 names of famous basketball player: ", "max_tokens": 400}}
{"custom_id": "request-3", "method": "POST", "url": "/v1/completions", "body": {"model": "gpt-3.5-turbo-instruct", "prompt": "List 6 names of famous basketball player: ", "max_tokens": 400}}
"""
import json
import os
import time
......
...@@ -17,9 +17,9 @@ print(response)
response = client.completions.create(
    model="default",
    prompt="I am a robot and I want to study like humans. Now let's tell a story. Once upon a time, there was a little",
    n=5,
    temperature=0.8,
    max_tokens=320,
)
print(response)
...@@ -68,6 +68,22 @@ response = client.completions.create(
print(response)
# Send a single completion request that carries a batch of five prompts at
# once (the OpenAI /v1/completions API accepts either a string or a list of
# strings for `prompt`), then dump the whole batched response.
batch_prompts = [
    "prompt1: I am a robot and I want to learn like humans. Now let's begin a tale. Once upon a time, there was a small",
    "prompt2: As a robot, my goal is to understand human learning. Let's start a story. In a faraway land, there lived a tiny",
    "prompt3: Being a robot, I aspire to study like people. Let's share a story. Long ago, there was a little",
    "prompt4: I am a robot aiming to learn like humans. Let's narrate a story. Once, in a distant kingdom, there was a young",
    "prompt5: As a robot, I seek to learn in human ways. Let's tell a story. Once upon a time, in a small village, there was a young",
]
response = client.completions.create(
    model="default",
    prompt=batch_prompts,
    n=1,
    temperature=0.8,
    max_tokens=320,
)
print(response)
# Text completion
response = client.completions.create(
    model="default",
......
...@@ -361,7 +361,7 @@ def v1_generate_request(all_requests):
    if len(all_requests) == 1:
        prompt = prompts[0]
        sampling_params_list = sampling_params_list[0]
        if isinstance(prompt, str) or isinstance(prompt[0], str):
            prompt_kwargs = {"text": prompt}
        else:
            prompt_kwargs = {"input_ids": prompt}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment