Commit da2e5d65 authored by Lianmin Zheng's avatar Lianmin Zheng
Browse files

Fix the default argument of OpenAI Chat completion (#605)

parent ce62dc73
...@@ -42,7 +42,6 @@ pip install flashinfer -i https://flashinfer.ai/whl/cu121/torch2.3/ ...@@ -42,7 +42,6 @@ pip install flashinfer -i https://flashinfer.ai/whl/cu121/torch2.3/
git clone https://github.com/sgl-project/sglang.git git clone https://github.com/sgl-project/sglang.git
cd sglang cd sglang
pip install --upgrade pip
pip install -e "python[all]" pip install -e "python[all]"
# Install FlashInfer CUDA kernels # Install FlashInfer CUDA kernels
......
...@@ -134,7 +134,7 @@ class ChatCompletionRequest(BaseModel): ...@@ -134,7 +134,7 @@ class ChatCompletionRequest(BaseModel):
logit_bias: Optional[Dict[str, float]] = None logit_bias: Optional[Dict[str, float]] = None
logprobs: Optional[bool] = False logprobs: Optional[bool] = False
top_logprobs: Optional[int] = None top_logprobs: Optional[int] = None
max_tokens: Optional[int] = None max_tokens: Optional[int] = 16
n: Optional[int] = 1 n: Optional[int] = 1
presence_penalty: Optional[float] = 0.0 presence_penalty: Optional[float] = 0.0
response_format: Optional[ResponseFormat] = None response_format: Optional[ResponseFormat] = None
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment