Commit a130cf33 authored by zhuwenwen

Merge tag 'v0.3.3' into vllm-v0.3.2-dtk23.10 and add gfx

parents a2d181be 82091b86
@@ -72,7 +72,7 @@ html_theme_options = {
# Mock out external dependencies here.
autodoc_mock_imports = [
-"torch", "transformers", "psutil", "aioprometheus", "sentencepiece",
+"torch", "transformers", "psutil", "prometheus_client", "sentencepiece",
"vllm.cuda_utils", "vllm._C"
]
......
@@ -70,6 +70,7 @@ Documentation
serving/distributed_serving
serving/run_on_sky
serving/deploying_with_kserve
serving/deploying_with_triton
serving/deploying_with_docker
serving/serving_with_langchain
......
@@ -58,7 +58,7 @@ LoRA adapted models can also be served with the Open-AI compatible vLLM server.
.. code-block:: bash
-python -m vllm.entrypoints.api_server \
+python -m vllm.entrypoints.openai.api_server \
--model meta-llama/Llama-2-7b-hf \
--enable-lora \
--lora-modules sql-lora=~/.cache/huggingface/hub/models--yard1--llama-2-7b-sql-lora-test/
@@ -89,3 +89,15 @@ with its base model:
Requests can specify the LoRA adapter as if it were any other model via the ``model`` request parameter. The requests will be
processed according to the server-wide LoRA configuration (i.e. in parallel with base model requests, and potentially other
LoRA adapter requests if they were provided and ``max_loras`` is set high enough).
The following is an example request:

.. code-block:: bash
curl http://localhost:8000/v1/completions \
-H "Content-Type: application/json" \
-d '{
"model": "sql-lora",
"prompt": "San Francisco is a",
"max_tokens": 7,
"temperature": 0
}' | jq
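
As a quick client-side illustration (not part of the diff above), the same completion can also be requested with the official ``openai`` Python client pointed at the server started earlier. The adapter name ``sql-lora`` matches the ``--lora-modules`` flag, and the dummy API key is only a placeholder for servers started without key checking; treat this as a minimal sketch rather than a prescribed workflow.

.. code-block:: python

    # Minimal sketch using the official `openai` client (v1.x), assuming the
    # server above is running locally on port 8000 with --enable-lora.
    from openai import OpenAI

    client = OpenAI(
        base_url="http://localhost:8000/v1",
        api_key="EMPTY",  # placeholder; only relevant if the server enforces a key
    )

    completion = client.completions.create(
        model="sql-lora",  # route the request to the LoRA adapter by name
        prompt="San Francisco is a",
        max_tokens=7,
        temperature=0,
    )
    print(completion.choices[0].text)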
@@ -71,6 +71,9 @@ Alongside each architecture, we include some popular models that use it.
* - :code:`OPTForCausalLM`
- OPT, OPT-IML
- :code:`facebook/opt-66b`, :code:`facebook/opt-iml-max-30b`, etc.
* - :code:`OrionForCausalLM`
- Orion
- :code:`OrionStarAI/Orion-14B-Base`, :code:`OrionStarAI/Orion-14B-Chat`, etc.
* - :code:`PhiForCausalLM`
- Phi
- :code:`microsoft/phi-1_5`, :code:`microsoft/phi-2`, etc.
@@ -80,7 +83,7 @@ Alongside each architecture, we include some popular models that use it.
* - :code:`Qwen2ForCausalLM`
- Qwen2
- :code:`Qwen/Qwen2-beta-7B`, :code:`Qwen/Qwen2-beta-7B-Chat`, etc.
-* - :code:`StableLMEpochForCausalLM`
+* - :code:`StableLmForCausalLM`
- StableLM
- :code:`stabilityai/stablelm-3b-4e1t/` , :code:`stabilityai/stablelm-base-alpha-7b-v2`, etc.
......
.. _deploying_with_kserve:
Deploying with KServe
============================
vLLM can be deployed with `KServe <https://github.com/kserve/kserve>`_ on Kubernetes for highly scalable distributed model serving.
Please see `this guide <https://kserve.github.io/website/latest/modelserving/v1beta1/llm/vllm/>`_ for more details on using vLLM with KServe.
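
As an illustrative, hypothetical sketch (not part of the KServe guide itself): once an InferenceService is running and exposes vLLM's OpenAI-compatible HTTP API, it can be queried like any other vLLM endpoint. The host below is a placeholder; substitute the URL reported by ``kubectl get inferenceservice`` for your deployment, and adjust the route if your serving runtime prefixes it.

.. code-block:: python

    # Hypothetical client sketch: BASE_URL is a placeholder for the ingress
    # host of your InferenceService, and the model name is whatever the
    # deployed vLLM server was started with.
    import requests

    BASE_URL = "http://vllm-example.default.example.com"  # placeholder host

    resp = requests.post(
        f"{BASE_URL}/v1/completions",
        json={
            "model": "meta-llama/Llama-2-7b-hf",  # placeholder model name
            "prompt": "KServe makes model serving",
            "max_tokens": 16,
        },
        timeout=60,
    )
    print(resp.json()["choices"][0]["text"])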
from vllm import LLM, SamplingParams

# Sample prompts.
prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]
# Create a sampling params object.
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

# Create an LLM.
llm = LLM(
    model="openlm-research/open_llama_3b",
    max_num_seqs=8,
    # The max_model_len and block_size arguments are required to be same as max sequence length,
    # when targeting neuron device. Currently, this is a known limitation in continuous batching
    # support in transformers-neuronx.
    # TODO(liangfu): Support paged-attention in transformers-neuronx.
    max_model_len=128,
    block_size=128,
    # The device can be automatically detected when AWS Neuron SDK is installed.
    # The device argument can be either unspecified for automated detection, or explicitly assigned.
    device="neuron")

# Generate texts from the prompts. The output is a list of RequestOutput objects
# that contain the prompt, generated text, and other information.
outputs = llm.generate(prompts, sampling_params)
# Print the outputs.
for output in outputs:
    prompt = output.prompt
    generated_text = output.outputs[0].text
    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
@@ -24,6 +24,7 @@ builtin cd "$ROOT" || exit 1
YAPF_VERSION=$(yapf --version | awk '{print $2}')
RUFF_VERSION=$(ruff --version | awk '{print $2}')
MYPY_VERSION=$(mypy --version | awk '{print $2}')
CODESPELL_VERSION=$(codespell --version)
# # params: tool name, tool version, required version
tool_version_check() {
@@ -36,6 +37,7 @@ tool_version_check() {
tool_version_check "yapf" $YAPF_VERSION "$(grep yapf requirements-dev.txt | cut -d'=' -f3)"
tool_version_check "ruff" $RUFF_VERSION "$(grep "ruff==" requirements-dev.txt | cut -d'=' -f3)"
tool_version_check "mypy" "$MYPY_VERSION" "$(grep mypy requirements-dev.txt | cut -d'=' -f3)"
tool_version_check "codespell" "$CODESPELL_VERSION" "$(grep codespell requirements-dev.txt | cut -d'=' -f3)"
YAPF_FLAGS=(
'--recursive'
@@ -93,6 +95,47 @@ echo 'vLLM yapf: Done'
# echo 'vLLM mypy:'
# mypy
# check spelling of specified files
spell_check() {
codespell "$@"
}
spell_check_all(){
codespell --toml pyproject.toml
}
# Spelling check of files that differ from main branch.
spell_check_changed() {
# The `if` guard ensures that the list of filenames is not empty, which
# could cause ruff to receive 0 positional arguments, making it hang
# waiting for STDIN.
#
# `diff-filter=ACM` and $MERGEBASE is to ensure we only lint files that
# exist on both branches.
MERGEBASE="$(git merge-base origin/main HEAD)"
if ! git diff --diff-filter=ACM --quiet --exit-code "$MERGEBASE" -- '*.py' '*.pyi' &>/dev/null; then
git diff --name-only --diff-filter=ACM "$MERGEBASE" -- '*.py' '*.pyi' | xargs \
codespell
fi
}
# Run Codespell
## This flag runs spell check of individual files. --files *must* be the first command line
## arg to use this option.
if [[ "$1" == '--files' ]]; then
spell_check "${@:2}"
# If `--all` is passed, then any further arguments are ignored and the
# entire python directory is linted.
elif [[ "$1" == '--all' ]]; then
spell_check_all
else
# Check spelling only of the files that changed in last commit.
spell_check_changed
fi
echo 'vLLM codespell: Done'
# Lint specified files
lint() {
ruff "$@"
@@ -117,9 +160,9 @@ lint_changed() {
}
# Run Ruff
-echo 'vLLM Ruff:'
-## This flag lints individual files. --files *must* be the first command line
-## arg to use this option.
+echo 'vLLM ruff:'
+### This flag lints individual files. --files *must* be the first command line
+### arg to use this option.
if [[ "$1" == '--files' ]]; then
lint "${@:2}"
# If `--all` is passed, then any further arguments are ignored and the
@@ -139,3 +182,5 @@ if ! git diff --quiet &>/dev/null; then
exit 1
fi
[mypy]
python_version = 3.8
ignore_missing_imports = True
files = vllm
# TODO(woosuk): Include the code from Megatron and HuggingFace.
exclude = vllm/model_executor/parallel_utils/|vllm/model_executor/models/
@@ -31,4 +31,22 @@ ignore = [
"E731",
# line too long, handled by black formatting
"E501",
# .strip() with multi-character strings
"B005",
# Loop control variable not used within loop body
"B007",
]
[tool.mypy]
python_version = "3.8"
ignore_missing_imports = true
files = "vllm"
# TODO(woosuk): Include the code from Megatron and HuggingFace.
exclude = "vllm/model_executor/parallel_utils/|vllm/model_executor/models/"
[tool.codespell]
ignore-words-list = "dout, te, indicies"
skip = "./tests/prompts"
# formatting
yapf==0.32.0
toml==0.10.2
tomli==2.0.1
ruff==0.1.5
codespell==2.2.6
# type checking
mypy==0.991
@@ -13,9 +15,9 @@ types-setuptools
pytest
pytest-forked
pytest-asyncio
pytest-rerunfailures
httpx
einops # required for MPT
flash_attn # required for HuggingFace's llama implementation
openai
requests
ray
\ No newline at end of file
@@ -6,4 +6,4 @@ neuronx-cc
fastapi
uvicorn[standard]
pydantic >= 2.0 # Required for OpenAI server.
-aioprometheus[starlette]
+prometheus_client >= 0.18.0
@@ -10,4 +10,4 @@ transformers >= 4.38.0 # Required for Gemma.
fastapi
uvicorn[standard]
pydantic >= 2.0 # Required for OpenAI server.
-aioprometheus[starlette]
+prometheus_client >= 0.18.0
@@ -9,7 +9,8 @@ xformers == 0.0.23.post1 # Required for CUDA 12.1.
fastapi
uvicorn[standard]
pydantic >= 2.0 # Required for OpenAI server.
-aioprometheus[starlette]
+prometheus_client >= 0.18.0
pynvml == 11.5.0
triton >= 2.1.0
outlines >= 0.0.27
cupy-cuda12x == 12.1.0 # Required for CUDA graphs. CUDA 11.8 users should install cupy-cuda11x instead.
@@ -28,7 +28,7 @@ MAIN_CUDA_VERSION = "12.1"
# Supported NVIDIA GPU architectures.
NVIDIA_SUPPORTED_ARCHS = {"7.0", "7.5", "8.0", "8.6", "8.9", "9.0"}
-ROCM_SUPPORTED_ARCHS = {"gfx908", "gfx90a", "gfx906", "gfx926", "gfx942", "gfx1100"}
+ROCM_SUPPORTED_ARCHS = {"gfx908", "gfx90a", "gfx906", "gfx926", "gfx928", "gfx936", "gfx942", "gfx1100"}
# SUPPORTED_ARCHS = NVIDIA_SUPPORTED_ARCHS.union(ROCM_SUPPORTED_ARCHS)
@@ -40,7 +40,7 @@ def _is_neuron() -> bool:
torch_neuronx_installed = True
try:
subprocess.run(["neuron-ls"], capture_output=True, check=True)
-except FileNotFoundError:
+except (FileNotFoundError, PermissionError):
torch_neuronx_installed = False
return torch_neuronx_installed
@@ -346,6 +346,8 @@ vllm_extension_sources = [
if _is_cuda():
vllm_extension_sources.append("csrc/quantization/awq/gemm_kernels.cu")
vllm_extension_sources.append(
"csrc/quantization/marlin/marlin_cuda_kernel.cu")
vllm_extension_sources.append("csrc/custom_all_reduce.cu")
# Add MoE kernels.
@@ -491,7 +493,9 @@ def get_requirements() -> List[str]:
return requirements
-package_data = {"vllm": ["py.typed"]}
+package_data = {
+    "vllm": ["py.typed", "model_executor/layers/fused_moe/configs/*.json"]
+}
if os.environ.get("VLLM_USE_PRECOMPILED"):
ext_modules = []
package_data["vllm"].append("*.so")
......
@@ -165,6 +165,7 @@ class VllmRunner:
dtype: str = "half",
disable_log_stats: bool = True,
tensor_parallel_size: int = 1,
**kwargs,
) -> None:
self.model = LLM(
model=model_name,
@@ -174,6 +175,7 @@ class VllmRunner:
swap_space=0,
disable_log_stats=disable_log_stats,
tensor_parallel_size=tensor_parallel_size,
**kwargs,
)
def generate(
@@ -197,6 +199,24 @@ class VllmRunner:
outputs.append((req_sample_output_ids, req_sample_output_strs))
return outputs
def generate_w_logprobs(
self,
prompts: List[str],
sampling_params: SamplingParams,
) -> List[Tuple[List[int], str]]:
assert sampling_params.logprobs is not None
req_outputs = self.model.generate(prompts,
sampling_params=sampling_params)
outputs = []
for req_output in req_outputs:
for sample in req_output.outputs:
output_str = sample.text
output_ids = sample.token_ids
output_logprobs = sample.logprobs
outputs.append((output_ids, output_str, output_logprobs))
return outputs
def generate_greedy(
self,
prompts: List[str],
@@ -207,6 +227,20 @@ class VllmRunner:
return [(output_ids[0], output_str[0])
for output_ids, output_str in outputs]
def generate_greedy_logprobs(
self,
prompts: List[str],
max_tokens: int,
num_logprobs: int,
) -> List[Tuple[List[int], str]]:
greedy_logprobs_params = SamplingParams(temperature=0.0,
max_tokens=max_tokens,
logprobs=num_logprobs)
outputs = self.generate_w_logprobs(prompts, greedy_logprobs_params)
return [(output_ids, output_str, output_logprobs)
for output_ids, output_str, output_logprobs in outputs]
def generate_beam_search(
self,
prompts: List[str],
......
# This unit test should be moved to a new
# tests/test_guided_decoding directory.
from transformers import AutoTokenizer
import torch
from vllm.model_executor.guided_logits_processors import (RegexLogitsProcessor,
JSONLogitsProcessor)
TEST_SCHEMA = {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"age": {
"type": "integer"
},
"skills": {
"type": "array",
"items": {
"type": "string",
"maxLength": 10
},
"minItems": 3
},
"work history": {
"type": "array",
"items": {
"type": "object",
"properties": {
"company": {
"type": "string"
},
"duration": {
"type": "string"
},
"position": {
"type": "string"
}
},
"required": ["company", "position"]
}
}
},
"required": ["name", "age", "skills", "work history"]
}
TEST_REGEX = r"((25[0-5]|(2[0-4]|1\d|[1-9]|)\d)\.){3}" + \
r"(25[0-5]|(2[0-4]|1\d|[1-9]|)\d)"
def test_guided_logits_processors():
    """Basic unit test for RegexLogitsProcessor and JSONLogitsProcessor."""
    tokenizer = AutoTokenizer.from_pretrained('HuggingFaceH4/zephyr-7b-beta')
    regex_LP = RegexLogitsProcessor(TEST_REGEX, tokenizer)
    json_LP = JSONLogitsProcessor(TEST_SCHEMA, tokenizer)

    regex_LP.init_state()
    token_ids = tokenizer.encode(
        f"Give an example IPv4 address with this regex: {TEST_REGEX}")
    tensor = torch.rand(32000)
    original_tensor = torch.clone(tensor)
    regex_LP(token_ids, tensor)
    assert tensor.shape == original_tensor.shape
    assert not torch.allclose(tensor, original_tensor)

    json_LP.init_state()
    token_ids = tokenizer.encode(
        f"Give an employee profile that fits this schema: {TEST_SCHEMA}")
    tensor = torch.rand(32000)
    original_tensor = torch.clone(tensor)
    json_LP(token_ids, tensor)
    assert tensor.shape == original_tensor.shape
    assert not torch.allclose(tensor, original_tensor)
@@ -9,10 +9,64 @@ import ray # using Ray for overall ease of process management, parallel requests
import openai # use the official client for correctness check
from huggingface_hub import snapshot_download # downloading lora to test lora requests
# imports for guided decoding tests
import json
import jsonschema
import re
from vllm.transformers_utils.tokenizer import get_tokenizer
MAX_SERVER_START_WAIT_S = 600 # wait for server to start for 60 seconds
MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta" # any model with a chat template should work here
LORA_NAME = "typeof/zephyr-7b-beta-lora" # technically this needs Mistral-7B-v0.1 as base, but we're not testing generation quality here
TEST_SCHEMA = {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"age": {
"type": "integer"
},
"skills": {
"type": "array",
"items": {
"type": "string",
"maxLength": 10
},
"minItems": 3
},
"work history": {
"type": "array",
"items": {
"type": "object",
"properties": {
"company": {
"type": "string"
},
"duration": {
"type": "string"
},
"position": {
"type": "string"
}
},
"required": ["company", "position"]
}
}
},
"required": ["name", "age", "skills", "work history"]
}
TEST_REGEX = r"((25[0-5]|(2[0-4]|1\d|[1-9]|)\d)\.){3}" + \
r"(25[0-5]|(2[0-4]|1\d|[1-9]|)\d)"
TEST_CHOICE = [
"Python", "Java", "JavaScript", "C++", "C#", "PHP", "TypeScript", "Ruby",
"Swift", "Kotlin"
]
pytestmark = pytest.mark.asyncio
@@ -155,15 +209,18 @@ async def test_single_chat_session(server, client: openai.AsyncOpenAI,
}]
# test single completion
-chat_completion = await client.chat.completions.create(
-    model=model_name,
-    messages=messages,
-    max_tokens=10,
-)
+chat_completion = await client.chat.completions.create(model=model_name,
+    messages=messages,
+    max_tokens=10,
+    logprobs=True,
+    top_logprobs=10)
assert chat_completion.id is not None
assert chat_completion.choices is not None and len(
chat_completion.choices) == 1
assert chat_completion.choices[0].message is not None
assert chat_completion.choices[0].logprobs is not None
assert chat_completion.choices[0].logprobs.top_logprobs is not None
assert len(chat_completion.choices[0].logprobs.top_logprobs[0]) == 10
message = chat_completion.choices[0].message
assert message.content is not None and len(message.content) >= 10
assert message.role == "assistant"
@@ -198,13 +255,11 @@ async def test_completion_streaming(server, client: openai.AsyncOpenAI,
single_output = single_completion.choices[0].text
single_usage = single_completion.usage
-stream = await client.completions.create(
-    model=model_name,
-    prompt=prompt,
-    max_tokens=5,
-    temperature=0.0,
-    stream=True,
-)
+stream = await client.completions.create(model=model_name,
+    prompt=prompt,
+    max_tokens=5,
+    temperature=0.0,
+    stream=True)
chunks = []
async for chunk in stream:
chunks.append(chunk.choices[0].text)
@@ -309,5 +364,236 @@ async def test_batch_completions(server, client: openai.AsyncOpenAI,
assert texts[0] == texts[1]
async def test_logits_bias(server, client: openai.AsyncOpenAI):
prompt = "Hello, my name is"
max_tokens = 5
tokenizer = get_tokenizer(tokenizer_name=MODEL_NAME)
# Test exclusive selection
token_id = 1000
completion = await client.completions.create(
model=MODEL_NAME,
prompt=prompt,
max_tokens=max_tokens,
temperature=0.0,
logit_bias={str(token_id): 100},
seed=42,
)
assert completion.choices[0].text is not None and len(
completion.choices[0].text) >= 5
response_tokens = tokenizer(completion.choices[0].text,
add_special_tokens=False)["input_ids"]
expected_tokens = tokenizer(tokenizer.decode([token_id] * 5),
add_special_tokens=False)["input_ids"]
assert all([
response == expected
for response, expected in zip(response_tokens, expected_tokens)
])
# Test ban
completion = await client.completions.create(
model=MODEL_NAME,
prompt=prompt,
max_tokens=max_tokens,
temperature=0.0,
)
response_tokens = tokenizer(completion.choices[0].text,
add_special_tokens=False)["input_ids"]
first_response = completion.choices[0].text
completion = await client.completions.create(
model=MODEL_NAME,
prompt=prompt,
max_tokens=max_tokens,
temperature=0.0,
logit_bias={str(token): -100
for token in response_tokens},
)
assert first_response != completion.choices[0].text
async def test_guided_json_completion(server, client: openai.AsyncOpenAI):
completion = await client.completions.create(
model=MODEL_NAME,
prompt=
f"Give an example JSON for an employee profile that fits this schema: {TEST_SCHEMA}",
n=3,
temperature=1.0,
max_tokens=500,
extra_body=dict(guided_json=TEST_SCHEMA))
assert completion.id is not None
assert completion.choices is not None and len(completion.choices) == 3
for i in range(3):
assert completion.choices[i].text is not None
output_json = json.loads(completion.choices[i].text)
jsonschema.validate(instance=output_json, schema=TEST_SCHEMA)
async def test_guided_json_chat(server, client: openai.AsyncOpenAI):
messages = [{
"role": "system",
"content": "you are a helpful assistant"
}, {
"role": "user",
"content": "Give an example JSON for an employee profile that " + \
f"fits this schema: {TEST_SCHEMA}"
}]
chat_completion = await client.chat.completions.create(
model=MODEL_NAME,
messages=messages,
max_tokens=500,
extra_body=dict(guided_json=TEST_SCHEMA))
message = chat_completion.choices[0].message
assert message.content is not None
json1 = json.loads(message.content)
jsonschema.validate(instance=json1, schema=TEST_SCHEMA)
messages.append({"role": "assistant", "content": message.content})
messages.append({
"role":
"user",
"content":
"Give me another one with a different name and age"
})
chat_completion = await client.chat.completions.create(
model=MODEL_NAME,
messages=messages,
max_tokens=500,
extra_body=dict(guided_json=TEST_SCHEMA))
message = chat_completion.choices[0].message
assert message.content is not None
json2 = json.loads(message.content)
jsonschema.validate(instance=json2, schema=TEST_SCHEMA)
assert json1["name"] != json2["name"]
assert json1["age"] != json2["age"]
async def test_guided_regex_completion(server, client: openai.AsyncOpenAI):
completion = await client.completions.create(
model=MODEL_NAME,
prompt=f"Give an example IPv4 address with this regex: {TEST_REGEX}",
n=3,
temperature=1.0,
max_tokens=20,
extra_body=dict(guided_regex=TEST_REGEX))
assert completion.id is not None
assert completion.choices is not None and len(completion.choices) == 3
for i in range(3):
assert completion.choices[i].text is not None
assert re.fullmatch(TEST_REGEX, completion.choices[i].text) is not None
async def test_guided_regex_chat(server, client: openai.AsyncOpenAI):
messages = [{
"role": "system",
"content": "you are a helpful assistant"
}, {
"role":
"user",
"content":
f"Give an example IP address with this regex: {TEST_REGEX}"
}]
chat_completion = await client.chat.completions.create(
model=MODEL_NAME,
messages=messages,
max_tokens=20,
extra_body=dict(guided_regex=TEST_REGEX))
ip1 = chat_completion.choices[0].message.content
assert ip1 is not None
assert re.fullmatch(TEST_REGEX, ip1) is not None
messages.append({"role": "assistant", "content": ip1})
messages.append({"role": "user", "content": "Give me a different one"})
chat_completion = await client.chat.completions.create(
model=MODEL_NAME,
messages=messages,
max_tokens=20,
extra_body=dict(guided_regex=TEST_REGEX))
ip2 = chat_completion.choices[0].message.content
assert ip2 is not None
assert re.fullmatch(TEST_REGEX, ip2) is not None
assert ip1 != ip2
async def test_guided_choice_completion(server, client: openai.AsyncOpenAI):
completion = await client.completions.create(
model=MODEL_NAME,
prompt="The best language for type-safe systems programming is ",
n=2,
temperature=1.0,
max_tokens=10,
extra_body=dict(guided_choice=TEST_CHOICE))
assert completion.id is not None
assert completion.choices is not None and len(completion.choices) == 2
for i in range(2):
assert completion.choices[i].text in TEST_CHOICE
async def test_guided_choice_chat(server, client: openai.AsyncOpenAI):
messages = [{
"role": "system",
"content": "you are a helpful assistant"
}, {
"role":
"user",
"content":
"The best language for type-safe systems programming is "
}]
chat_completion = await client.chat.completions.create(
model=MODEL_NAME,
messages=messages,
max_tokens=10,
extra_body=dict(guided_choice=TEST_CHOICE))
choice1 = chat_completion.choices[0].message.content
assert choice1 in TEST_CHOICE
messages.append({"role": "assistant", "content": choice1})
messages.append({
"role": "user",
"content": "I disagree, pick another one"
})
chat_completion = await client.chat.completions.create(
model=MODEL_NAME,
messages=messages,
max_tokens=10,
extra_body=dict(guided_choice=TEST_CHOICE))
choice2 = chat_completion.choices[0].message.content
assert choice2 in TEST_CHOICE
assert choice1 != choice2
async def test_guided_decoding_type_error(server, client: openai.AsyncOpenAI):
with pytest.raises(openai.BadRequestError):
_ = await client.completions.create(
model=MODEL_NAME,
prompt="Give an example JSON that fits this schema: 42",
extra_body=dict(guided_json=42))
messages = [{
"role": "system",
"content": "you are a helpful assistant"
}, {
"role":
"user",
"content":
"The best language for type-safe systems programming is "
}]
with pytest.raises(openai.BadRequestError):
_ = await client.chat.completions.create(model=MODEL_NAME,
messages=messages,
extra_body=dict(guided_regex={
1: "Python",
2: "C++"
}))
with pytest.raises(openai.BadRequestError):
_ = await client.completions.create(
model=MODEL_NAME,
prompt="Give an example string that fits this regex",
extra_body=dict(guided_regex=TEST_REGEX, guided_json=TEST_SCHEMA))
if __name__ == "__main__":
pytest.main([__file__])
from typing import Type
import pytest
import torch
-from vllm.model_executor.layers.activation import FastGELU, NewGELU, SiluAndMul
+from vllm.model_executor.layers.activation import (FastGELU, GeluAndMul,
+    NewGELU, SiluAndMul)
from allclose_default import get_default_atol, get_default_rtol
DTYPES = [torch.half, torch.bfloat16, torch.float]
@@ -13,13 +16,15 @@ CUDA_DEVICES = [
]
@pytest.mark.parametrize("activation", [SiluAndMul, GeluAndMul])
@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
@pytest.mark.parametrize("d", D)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@torch.inference_mode()
-def test_silu_and_mul(
+def test_act_and_mul(
activation: Type[torch.nn.Module],
num_tokens: int,
d: int,
dtype: torch.dtype,
@@ -31,48 +36,23 @@ def test_silu_and_mul(
torch.cuda.manual_seed(seed)
torch.set_default_device(device)
x = torch.randn(num_tokens, 2 * d, dtype=dtype)
-layer = SiluAndMul()
+layer = activation()
out = layer(x)
ref_out = layer._forward(x)
-assert torch.allclose(out,
-    ref_out,
-    atol=get_default_atol(out),
-    rtol=get_default_rtol(out))
+# The SiLU and GELU implementations are equivalent to the native PyTorch
+# implementations, so we can do exact comparison.
+assert torch.allclose(out, ref_out, atol=0.0, rtol=0.0)
@pytest.mark.parametrize("activation", [FastGELU, NewGELU])
@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
@pytest.mark.parametrize("d", D)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@torch.inference_mode()
-def test_gelu_new(
-    num_tokens: int,
+def test_activation(
+    activation: Type[torch.nn.Module],
d: int,
dtype: torch.dtype,
seed: int,
device: str,
) -> None:
torch.random.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.set_default_device(device)
x = torch.randn(num_tokens, d, dtype=dtype)
layer = NewGELU()
out = layer(x)
ref_out = layer._forward(x)
assert torch.allclose(out,
ref_out,
atol=get_default_atol(out),
rtol=get_default_rtol(out))
@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
@pytest.mark.parametrize("d", D)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
def test_gelu_fast(
num_tokens: int,
d: int,
dtype: torch.dtype,
@@ -84,7 +64,7 @@ def test_gelu_fast(
torch.cuda.manual_seed(seed)
torch.set_default_device(device)
x = torch.randn(num_tokens, d, dtype=dtype)
-layer = FastGELU()
+layer = activation()
out = layer(x)
ref_out = layer._forward(x)
assert torch.allclose(out,
......
@@ -8,7 +8,8 @@ from vllm.model_executor.layers.triton_kernel.prefix_prefill import (
from xformers import ops as xops
from xformers.ops.fmha.attn_bias import BlockDiagonalCausalFromBottomRightMask
-NUM_HEADS = [12]
+NUM_HEADS = [64]
NUM_QUERIES_PER_KV = [1, 8, 64]
HEAD_SIZES = [128]
DTYPES = [torch.float16]
CUDA_DEVICES = [
@@ -17,12 +18,14 @@ CUDA_DEVICES = [
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("num_queries_per_kv", NUM_QUERIES_PER_KV)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@torch.inference_mode()
def test_contexted_kv_attention(
num_heads: int,
num_queries_per_kv: int,
head_size: int,
dtype: torch.dtype,
device: str,
@@ -41,28 +44,29 @@ def test_contexted_kv_attention(
subquery_lens = [random.randint(16, MAX_SEQ_LEN) for _ in range(BS)]
ctx_lens = [random.randint(16, MAX_CTX_LEN) for _ in range(BS)]
seq_lens = [a + b for a, b in zip(subquery_lens, ctx_lens)]
num_kv_heads = num_heads // num_queries_per_kv
num_tokens = sum(subquery_lens)
query = torch.empty(num_tokens, num_heads, head_size, dtype=dtype)
query.uniform_(-1e-3, 1e-3)
output = torch.empty(num_tokens, num_heads, head_size, dtype=dtype)
-kv = torch.empty(sum(seq_lens), 2, num_heads, head_size, dtype=dtype)
+kv = torch.empty(sum(seq_lens), 2, num_kv_heads, head_size, dtype=dtype)
kv.uniform_(-1e-3, 1e-3)
key, value = kv.unbind(dim=1)
k_cache = torch.zeros(cache_size,
block_size,
-num_heads,
+num_kv_heads,
head_size,
dtype=dtype)
v_cache = torch.zeros(cache_size,
block_size,
-num_heads,
+num_kv_heads,
head_size,
dtype=dtype)
-k = torch.zeros(sum(subquery_lens), num_heads, head_size, dtype=dtype)
-v = torch.zeros(sum(subquery_lens), num_heads, head_size, dtype=dtype)
+k = torch.zeros(sum(subquery_lens), num_kv_heads, head_size, dtype=dtype)
+v = torch.zeros(sum(subquery_lens), num_kv_heads, head_size, dtype=dtype)
values = torch.arange(0, cache_size, dtype=torch.long)
values = values[torch.randperm(cache_size)]
block_table = values[:BS * max_block_per_request].view(
@@ -93,19 +97,21 @@ def test_contexted_kv_attention(
end_loc = start_loc + block_size
start_slot = block_table[i, block_id] * block_size
end_slot = start_slot + end_loc - start_loc
-k_cache.view(-1, num_heads, head_size)[start_slot:end_slot].copy_(
-    key[start_loc:end_loc])
-v_cache.view(-1, num_heads, head_size)[start_slot:end_slot].copy_(
-    value[start_loc:end_loc])
+k_cache.view(-1, num_kv_heads,
+    head_size)[start_slot:end_slot].copy_(
+    key[start_loc:end_loc])
+v_cache.view(-1, num_kv_heads,
+    head_size)[start_slot:end_slot].copy_(
+    value[start_loc:end_loc])
cur_ctx += block_size
block_id += 1
# transpose K_cache[num_blocks, block_size, num_kv_heads, head_size]
# to K_cache[num_blocks, num_kv_heads, head_size/8, block_size, 8]
-k_cache = k_cache.view(-1, block_size, num_heads, head_size // 8,
+k_cache = k_cache.view(-1, block_size, num_kv_heads, head_size // 8,
8).permute(0, 2, 3, 1, 4).contiguous()
# transpose V_cache[num_blocks, block_size, num_kv_heads, head_size]
# to V_cache[num_blocks, num_kv_heads, head_size, block_size]
-v_cache = v_cache.view(-1, block_size, num_heads,
+v_cache = v_cache.view(-1, block_size, num_kv_heads,
head_size).permute(0, 2, 3, 1).contiguous()
# Warm up the Triton kernel by calling it once before actually measuring generation time
@@ -123,12 +129,29 @@ def test_contexted_kv_attention(
attn_op = xops.fmha.cutlass.FwOp()
if num_kv_heads != num_heads:
# As of Nov 2023, xformers only supports MHA. For MQA/GQA,
# project the key and value tensors to the desired number of
# heads.
#
# see also: vllm/model_executor/layers/attention.py
query = query.view(query.shape[0], num_kv_heads, num_queries_per_kv,
query.shape[-1])
key = key[:, :, None, :].expand(key.shape[0], num_kv_heads,
num_queries_per_kv, key.shape[-1])
value = value[:, :,
None, :].expand(value.shape[0], num_kv_heads,
num_queries_per_kv, value.shape[-1])
query = query.unsqueeze(0)
key = key.unsqueeze(0)
value = value.unsqueeze(0)
attn_bias = BlockDiagonalCausalFromBottomRightMask.from_seqlens(
subquery_lens, seq_lens)
output_ref = xops.memory_efficient_attention_forward(
-query.unsqueeze(0),
-key.unsqueeze(0),
-value.unsqueeze(0),
+query,
+key,
+value,
attn_bias=attn_bias,
p=0.0,
scale=scale,
@@ -137,9 +160,9 @@ def test_contexted_kv_attention(
torch.cuda.synchronize()
start_time = time.time()
output_ref = xops.memory_efficient_attention_forward(
-query.unsqueeze(0),
-key.unsqueeze(0),
-value.unsqueeze(0),
+query,
+key,
+value,
attn_bias=attn_bias,
p=0.0,
scale=scale,
@@ -148,5 +171,5 @@ def test_contexted_kv_attention(
torch.cuda.synchronize()
end_time = time.time()
print(f"xformers Time: {(end_time - start_time)*1000:.2f} ms")
-output_ref = output_ref.squeeze(0)
+output_ref = output_ref.squeeze(0, 2)
assert torch.allclose(output_ref, output, atol=1e-6, rtol=0)
@@ -126,14 +126,21 @@ def mixtral_lora_files():
return snapshot_download(repo_id="terrysun/mixtral-lora-adapter")
@pytest.fixture(scope="session")
def gemma_lora_files():
return snapshot_download(repo_id="wskwon/gemma-7b-test-lora")
@pytest.fixture
def llama_2_7b_engine_extra_embeddings() -> nn.Module:
cleanup()
get_model_old = get_model
-def get_model_patched(model_config, device_config, lora_config=None):
-    return get_model_old(model_config, device_config,
-        LoRAConfig(max_loras=4, max_lora_rank=8))
+def get_model_patched(model_config, device_config, **kwargs):
+    return get_model_old(model_config,
+        device_config,
+        lora_config=LoRAConfig(max_loras=4,
+            max_lora_rank=8))
with patch("vllm.worker.model_runner.get_model", get_model_patched):
engine = vllm.LLM("meta-llama/Llama-2-7b-hf", enable_lora=False)
......