Unverified commit 04eb6062, authored by Jon Durbin, committed by GitHub

Include context length in /v1/models response. (#4809)

parent e84f4ba0
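
With this change, any OpenAI-compatible client can discover a deployment's context window from the standard models endpoint. A minimal sketch, assuming a local SGLang server on the default port 30000 (the base URL and API key below are placeholders for your own deployment):

    import openai

    # Hypothetical local deployment; point base_url at your own server.
    client = openai.Client(api_key="EMPTY", base_url="http://localhost:30000/v1")
    for model in client.models.list():
        # max_model_len is the new field; getattr guards against older
        # servers that do not report it yet.
        print(model.id, getattr(model, "max_model_len", None))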
@@ -561,7 +561,13 @@ def available_models():
     served_model_names = [_global_state.tokenizer_manager.served_model_name]
     model_cards = []
     for served_model_name in served_model_names:
-        model_cards.append(ModelCard(id=served_model_name, root=served_model_name))
+        model_cards.append(
+            ModelCard(
+                id=served_model_name,
+                root=served_model_name,
+                max_model_len=_global_state.tokenizer_manager.model_config.context_len,
+            )
+        )
     return ModelList(data=model_cards)
@@ -28,6 +28,7 @@ class ModelCard(BaseModel):
     created: int = Field(default_factory=lambda: int(time.time()))
     owned_by: str = "sglang"
     root: Optional[str] = None
+    max_model_len: Optional[int] = None


 class ModelList(BaseModel):
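
Because the field is Optional with a None default, existing payloads stay valid and older clients are unaffected. As an illustrative sketch of how the field serializes (the model name and context length are made up, using the ModelCard definition above):

    # Hypothetical values for illustration only.
    card = ModelCard(id="my-model", root="my-model", max_model_len=32768)
    print(card.model_dump_json())
    # -> {..., "owned_by": "sglang", "root": "my-model", "max_model_len": 32768}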
@@ -542,6 +542,12 @@ The SmartHome Mini is a compact smart home assistant available in black or white
             .startswith('"name": "SmartHome Mini",')
         )

+    def test_model_list(self):
+        client = openai.Client(api_key=self.api_key, base_url=self.base_url)
+        models = list(client.models.list())
+        assert len(models) == 1
+        assert isinstance(getattr(models[0], "max_model_len", None), int)
+

 # -------------------------------------------------------------------------
 # EBNF Test Class: TestOpenAIServerEBNF