[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "sglang"
version = "0.3.4.post2"
description = "SGLang is yet another fast serving framework for large language models and vision language models."
readme = "README.md"
requires-python = ">=3.8"
license = { file = "LICENSE" }
classifiers = [
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: Apache Software License",
]
dependencies = ["requests", "tqdm", "numpy"]

[project.optional-dependencies]
runtime_common = [
    "aiohttp", "decord", "fastapi", "hf_transfer", "huggingface_hub", "interegular",
    "orjson", "packaging", "pillow", "psutil", "pydantic", "python-multipart",
    "torchao", "uvicorn", "uvloop", "zmq",
    "outlines>=0.0.44", "modelscope",
]
srt = ["sglang[runtime_common]", "torch", "vllm==0.6.3.post1"]

# xpu is not enabled in the public vllm and torch wheels;
# follow https://docs.vllm.ai/en/latest/getting_started/xpu-installation.html to install vllm
srt_xpu = ["sglang[runtime_common]"]

openai = ["openai>=1.0", "tiktoken"]
anthropic = ["anthropic>=0.20.0"]
litellm = ["litellm>=1.0.0"]
test = [
    "jsonlines",
    "matplotlib",
    "pandas",
    "sentence_transformers",
    "accelerate",
    "peft",
]
all = ["sglang[srt]", "sglang[openai]", "sglang[anthropic]", "sglang[litellm]"]
all_xpu = ["sglang[srt_xpu]", "sglang[openai]", "sglang[anthropic]", "sglang[litellm]"]
dev = ["sglang[all]", "sglang[test]"]
dev_xpu = ["sglang[all_xpu]", "sglang[test]"]

[project.urls]
"Homepage" = "https://github.com/sgl-project/sglang"
"Bug Tracker" = "https://github.com/sgl-project/sglang/issues"

[tool.setuptools.packages.find]
exclude = [
    "assets*",
    "benchmark*",
    "docs*",
    "dist*",
    "playground*",
    "scripts*",
    "tests*",
]

[tool.wheel]
exclude = [
    "assets*",
    "benchmark*",
    "docs*",
    "dist*",
    "playground*",
    "scripts*",
    "tests*",
]