Unverified commit 42873eac, authored by Baizhou Zhang, committed by GitHub

[Fix] Improve Lora tests and reduce CI runtime (#4925)

parent 4814ecaf
@@ -17,217 +17,38 @@ import os
 import unittest
 from typing import List

-import torch
-from utils import BACKENDS, TORCH_DTYPES, LoRAAdaptor, LoRAModelCase
+from utils import (
+    ALL_OTHER_LORA_MODELS,
+    BACKENDS,
+    CI_LORA_MODELS,
+    DEFAULT_PROMPTS,
+    TORCH_DTYPES,
+    LoRAModelCase,
+    run_batch_lora_test,
+)

-from sglang.test.runners import HFRunner, SRTRunner
-from sglang.test.test_utils import CustomTestCase, calculate_rouge_l, is_in_ci
+from sglang.test.test_utils import CustomTestCase, is_in_ci
CI_LORA_MODELS = [
LoRAModelCase(
base="meta-llama/Llama-3.1-8B-Instruct",
adaptors=[
LoRAAdaptor(
name="algoprog/fact-generation-llama-3.1-8b-instruct-lora",
),
],
max_loras_per_batch=1,
),
LoRAModelCase(
base="meta-llama/Llama-3.1-8B-Instruct",
adaptors=[
LoRAAdaptor(
name="Nutanix/Meta-Llama-3.1-8B-Instruct_lora_4_alpha_16",
prefill_tolerance=1e-1,
),
],
max_loras_per_batch=1,
),
]
ALL_OTHER_LORA_MODELS = [
LoRAModelCase(
base="meta-llama/Llama-2-7b-hf",
adaptors=[LoRAAdaptor(name="winddude/wizardLM-LlaMA-LoRA-7B")],
max_loras_per_batch=2,
),
]
PROMPTS = [
"AI is a field of computer science focused on",
"""
### Instruction:
Tell me about llamas and alpacas
### Response:
Llamas are large, long-necked animals with a woolly coat. They have two toes on each foot instead of three like other camelids (camels, dromedaries). Llamas live in the Andean mountains of South America where they graze on grasses and shrubs. Alpaca is another name for domesticated llama. The word "alpaca" comes from an Incan language meaning "golden fleece." Alpacas look very similar to llamas but are smaller than their wild relatives. Both species were used by ancient people as pack animals and for meat. Today both llamas and alpacas are raised primarily for their fiber which can be spun into yarn or knitted into clothing.
### Question 2:
What do you know about llamas?
### Answer:
""",
]
class TestLoRABackend(CustomTestCase):
def run_backend(
self,
prompt: str,
model_case: LoRAModelCase,
torch_dtype: torch.dtype,
max_new_tokens: int,
backend: str,
):
"""
Run backend tests for a single prompt and model case.
"""
base_path = model_case.base
adaptor = model_case.adaptors[0]
print(
f"\n========== Testing backend '{backend}' for base '{base_path}' --- "
f"Prompt '{prompt[:50]}...' using adaptor '{adaptor.name}' ---"
)
with SRTRunner(
base_path,
torch_dtype=torch_dtype,
model_type="generation",
tp_size=model_case.tp_size,
lora_paths=[adaptor.name for adaptor in model_case.adaptors],
max_loras_per_batch=model_case.max_loras_per_batch,
lora_backend=backend,
disable_cuda_graph=True,
disable_radix_cache=True,
mem_fraction_static=0.88,
disable_custom_all_reduce=False,
) as srt_runner:
srt_outputs = srt_runner.forward(
[prompt], max_new_tokens=max_new_tokens, lora_paths=[adaptor.name]
)
with HFRunner(
base_path, torch_dtype=torch_dtype, model_type="generation"
) as hf_runner:
hf_outputs = hf_runner.forward(
[prompt], max_new_tokens=max_new_tokens, lora_paths=[adaptor.name]
)
with SRTRunner(
base_path,
torch_dtype=torch_dtype,
model_type="generation",
tp_size=model_case.tp_size,
mem_fraction_static=0.88,
disable_custom_all_reduce=False,
) as srt_runner:
srt_no_lora_outputs = srt_runner.forward(
[prompt], max_new_tokens=max_new_tokens
)
with HFRunner(
base_path,
torch_dtype=torch_dtype,
model_type="generation",
) as hf_runner:
hf_no_lora_outputs = hf_runner.forward(
[prompt], max_new_tokens=max_new_tokens
)
# Use individual adapter tolerances if set, otherwise use model defaults
prefill_tol = (
adaptor.prefill_tolerance
if adaptor.prefill_tolerance is not None
else model_case.prefill_tolerance
)
decode_tol = (
adaptor.decode_tolerance
if adaptor.decode_tolerance is not None
else model_case.decode_tolerance
)
rouge_tol = (
adaptor.rouge_l_tolerance
if adaptor.rouge_l_tolerance is not None
else model_case.rouge_l_tolerance
)
# Compare prefill stage logprobs (HF vs SRTRunner with LoRA)
hf_prefill = torch.tensor(hf_outputs.top_input_logprobs[0])
srt_prefill = torch.tensor(srt_outputs.top_input_logprobs[0])
max_prefill_diff = torch.max(torch.abs(hf_prefill - srt_prefill))
print("Max prefill diff (HF vs SRT):", max_prefill_diff)
# Compare decode stage logprobs
hf_decode = torch.tensor(hf_outputs.top_output_logprobs[0])
srt_decode = torch.tensor(srt_outputs.top_output_logprobs[0])
max_decode_diff = torch.max(torch.abs(hf_decode - srt_decode))
print("Max decode diff (HF vs SRT):", max_decode_diff)
srt_output_str = srt_outputs.output_strs[0].strip()
hf_output_str = hf_outputs.output_strs[0].strip()
rouge_score = calculate_rouge_l([srt_output_str], [hf_output_str])[0]
print("ROUGE-L score:", rouge_score)
print("SRT output:", srt_output_str)
print("HF output:", hf_output_str)
# Additional: compare prefill outputs between base model (no LoRA) and LoRA model for reference
hf_no_lora_prefill = torch.tensor(hf_no_lora_outputs.top_input_logprobs[0])
srt_no_lora_prefill = torch.tensor(srt_no_lora_outputs.top_input_logprobs[0])
print(
"Max diff (SRT base vs SRT LoRA prefill):",
torch.max(torch.abs(srt_no_lora_prefill - srt_prefill)),
)
print(
"Max diff (HF base vs HF LoRA prefill):",
torch.max(torch.abs(hf_no_lora_prefill - hf_prefill)),
)
if hf_prefill.shape[0] <= 100:
assert torch.all(torch.abs(hf_prefill - srt_prefill) < prefill_tol), (
f"Prefill logprobs mismatch for base '{base_path}', adaptor '{adaptor.name}', "
f"backend '{backend}', prompt: '{prompt[:50]}...'"
)
if hf_decode.shape[0] <= 100:
assert torch.all(torch.abs(hf_decode - srt_decode) < decode_tol), (
f"Decode logprobs mismatch for base '{base_path}', adaptor '{adaptor.name}', "
f"backend '{backend}', prompt: '{prompt[:50]}...'"
)
if rouge_score < rouge_tol:
raise AssertionError(
f"ROUGE-L score {rouge_score} below tolerance {rouge_tol} "
f"for base '{base_path}', adaptor '{adaptor.name}', backend '{backend}', prompt: '{prompt[:50]}...'"
)
def run_backend_batch(
self,
prompts: List[str],
model_case: LoRAModelCase,
torch_dtype: torch.dtype,
max_new_tokens: int,
backend: str,
):
# TODO: Implement batch processing version of run_backend
raise NotImplementedError(
"Batch processing version of run_backend is not implemented yet."
)
     def _run_backend_on_model_cases(self, model_cases: List[LoRAModelCase]):
         for model_case in model_cases:
             # If skip_long_prompt is True, filter out prompts longer than 1000 characters
             prompts = (
-                PROMPTS
+                DEFAULT_PROMPTS
                 if not model_case.skip_long_prompt
-                else [p for p in PROMPTS if len(p) < 1000]
+                else [p for p in DEFAULT_PROMPTS if len(p) < 1000]
             )
             for torch_dtype in TORCH_DTYPES:
                 for backend in BACKENDS:
-                    for prompt in prompts:
-                        self.run_backend(
-                            prompt,
-                            model_case,
-                            torch_dtype,
-                            max_new_tokens=32,
-                            backend=backend,
-                        )
+                    run_batch_lora_test(
+                        prompts,
+                        model_case,
+                        torch_dtype,
+                        max_new_tokens=32,
+                        backend=backend,
+                    )

     def test_ci_lora_models(self):
         self._run_backend_on_model_cases(CI_LORA_MODELS)
...
@@ -17,219 +17,40 @@ import os
 import unittest
 from typing import List

-import torch
-from utils import TORCH_DTYPES, LoRAAdaptor, LoRAModelCase
+from utils import (
+    ALL_OTHER_LORA_MODELS,
+    CI_LORA_MODELS,
+    DEFAULT_PROMPTS,
+    TORCH_DTYPES,
+    LoRAModelCase,
+    run_batch_lora_test,
+)

-from sglang.test.runners import HFRunner, SRTRunner
-from sglang.test.test_utils import CustomTestCase, calculate_rouge_l, is_in_ci
+from sglang.test.test_utils import CustomTestCase, is_in_ci
CI_LORA_MODELS = [
LoRAModelCase(
base="meta-llama/Llama-3.1-8B-Instruct",
adaptors=[
LoRAAdaptor(
name="algoprog/fact-generation-llama-3.1-8b-instruct-lora",
),
],
max_loras_per_batch=1,
),
]
ALL_OTHER_LORA_MODELS = [
LoRAModelCase(
base="meta-llama/Llama-3.1-8B-Instruct",
adaptors=[
LoRAAdaptor(
name="Nutanix/Meta-Llama-3.1-8B-Instruct_lora_4_alpha_16",
prefill_tolerance=1e-1,
),
],
max_loras_per_batch=1,
),
LoRAModelCase(
base="meta-llama/Llama-2-7b-hf",
adaptors=[LoRAAdaptor(name="winddude/wizardLM-LlaMA-LoRA-7B")],
max_loras_per_batch=2,
),
]
PROMPTS = [
"AI is a field of computer science focused on",
"""
### Instruction:
Tell me about llamas and alpacas
### Response:
Llamas are large, long-necked animals with a woolly coat. They have two toes on each foot instead of three like other camelids (camels, dromedaries). Llamas live in the Andean mountains of South America where they graze on grasses and shrubs. Alpaca is another name for domesticated llama. The word "alpaca" comes from an Incan language meaning "golden fleece." Alpacas look very similar to llamas but are smaller than their wild relatives. Both species were used by ancient people as pack animals and for meat. Today both llamas and alpacas are raised primarily for their fiber which can be spun into yarn or knitted into clothing.
### Question 2:
What do you know about llamas?
### Answer:
""",
]
BACKEND = "triton"
class TestLoRATP(CustomTestCase):
def run_tp(
self,
prompt: str,
model_case: LoRAModelCase,
torch_dtype: torch.dtype,
max_new_tokens: int,
):
"""
Run triton backend tests with specified TP size for a single prompt and model case.
"""
base_path = model_case.base
adaptor = model_case.adaptors[0]
tp_size = model_case.tp_size
print(
f"\n========== Testing triton backend with TP size {tp_size} for base '{base_path}' --- "
f"Prompt '{prompt[:50]}...' using adaptor '{adaptor.name}' ---"
)
with SRTRunner(
base_path,
torch_dtype=torch_dtype,
model_type="generation",
tp_size=tp_size,
lora_paths=[adaptor.name for adaptor in model_case.adaptors],
max_loras_per_batch=model_case.max_loras_per_batch,
lora_backend=BACKEND,
disable_cuda_graph=True,
disable_radix_cache=True,
mem_fraction_static=0.88,
disable_custom_all_reduce=True,
) as srt_runner:
srt_outputs = srt_runner.forward(
[prompt], max_new_tokens=max_new_tokens, lora_paths=[adaptor.name]
)
with HFRunner(
base_path, torch_dtype=torch_dtype, model_type="generation"
) as hf_runner:
hf_outputs = hf_runner.forward(
[prompt], max_new_tokens=max_new_tokens, lora_paths=[adaptor.name]
)
with SRTRunner(
base_path,
torch_dtype=torch_dtype,
model_type="generation",
tp_size=tp_size,
mem_fraction_static=0.88,
disable_custom_all_reduce=True,
) as srt_runner:
srt_no_lora_outputs = srt_runner.forward(
[prompt], max_new_tokens=max_new_tokens
)
with HFRunner(
base_path,
torch_dtype=torch_dtype,
model_type="generation",
) as hf_runner:
hf_no_lora_outputs = hf_runner.forward(
[prompt], max_new_tokens=max_new_tokens
)
# Use individual adapter tolerances if set, otherwise use model defaults
prefill_tol = (
adaptor.prefill_tolerance
if adaptor.prefill_tolerance is not None
else model_case.prefill_tolerance
)
decode_tol = (
adaptor.decode_tolerance
if adaptor.decode_tolerance is not None
else model_case.decode_tolerance
)
rouge_tol = (
adaptor.rouge_l_tolerance
if adaptor.rouge_l_tolerance is not None
else model_case.rouge_l_tolerance
)
# Compare prefill stage logprobs (HF vs SRTRunner with LoRA)
hf_prefill = torch.tensor(hf_outputs.top_input_logprobs[0])
srt_prefill = torch.tensor(srt_outputs.top_input_logprobs[0])
max_prefill_diff = torch.max(torch.abs(hf_prefill - srt_prefill))
print("Max prefill diff (HF vs SRT):", max_prefill_diff)
# Compare decode stage logprobs
hf_decode = torch.tensor(hf_outputs.top_output_logprobs[0])
srt_decode = torch.tensor(srt_outputs.top_output_logprobs[0])
max_decode_diff = torch.max(torch.abs(hf_decode - srt_decode))
print("Max decode diff (HF vs SRT):", max_decode_diff)
srt_output_str = srt_outputs.output_strs[0].strip()
hf_output_str = hf_outputs.output_strs[0].strip()
rouge_score = calculate_rouge_l([srt_output_str], [hf_output_str])[0]
print("ROUGE-L score:", rouge_score)
print("SRT output:", srt_output_str)
print("HF output:", hf_output_str)
# Additional: compare prefill outputs between base model (no LoRA) and LoRA model for reference
hf_no_lora_prefill = torch.tensor(hf_no_lora_outputs.top_input_logprobs[0])
srt_no_lora_prefill = torch.tensor(srt_no_lora_outputs.top_input_logprobs[0])
print(
"Max diff (SRT base vs SRT LoRA prefill):",
torch.max(torch.abs(srt_no_lora_prefill - srt_prefill)),
)
print(
"Max diff (HF base vs HF LoRA prefill):",
torch.max(torch.abs(hf_no_lora_prefill - hf_prefill)),
)
if hf_prefill.shape[0] <= 100:
assert torch.all(torch.abs(hf_prefill - srt_prefill) < prefill_tol), (
f"Prefill logprobs mismatch for base '{base_path}', adaptor '{adaptor.name}', "
f"triton backend with TP {tp_size}, prompt: '{prompt[:50]}...'"
)
if hf_decode.shape[0] <= 100:
assert torch.all(torch.abs(hf_decode - srt_decode) < decode_tol), (
f"Decode logprobs mismatch for base '{base_path}', adaptor '{adaptor.name}', "
f"triton backend with TP {tp_size}, prompt: '{prompt[:50]}...'"
)
if rouge_score < rouge_tol:
raise AssertionError(
f"ROUGE-L score {rouge_score} below tolerance {rouge_tol} "
f"for base '{base_path}', adaptor '{adaptor.name}', triton backend with TP {tp_size}, prompt: '{prompt[:50]}...'"
)
def run_tp_batch(
self,
prompts: List[str],
model_case: LoRAModelCase,
torch_dtype: torch.dtype,
max_new_tokens: int,
tp_size: int,
):
# TODO: Implement batch processing version of run_tp
raise NotImplementedError(
"Batch processing version of run_tp is not implemented yet."
)
     def _run_tp_on_model_cases(self, model_cases: List[LoRAModelCase]):
         tp_list = [2]  # Define TP sizes to iterate over
         for model_case in model_cases:
             # If skip_long_prompt is True, filter out prompts longer than 1000 characters
             prompts = (
-                PROMPTS
+                DEFAULT_PROMPTS
                 if not model_case.skip_long_prompt
-                else [p for p in PROMPTS if len(p) < 1000]
+                else [p for p in DEFAULT_PROMPTS if len(p) < 1000]
             )
             for tp_size in tp_list:
                 model_case.tp_size = tp_size
                 for torch_dtype in TORCH_DTYPES:
-                    for prompt in prompts:
-                        self.run_tp(
-                            prompt,
-                            model_case,
-                            torch_dtype,
-                            max_new_tokens=32,
-                        )
+                    run_batch_lora_test(
+                        prompts,
+                        model_case,
+                        torch_dtype,
+                        max_new_tokens=32,
+                        backend="triton",
+                        test_tag=f"tp={tp_size}",
+                    )

     def test_ci_lora_models(self):
         self._run_tp_on_model_cases(CI_LORA_MODELS)
...
@@ -13,16 +13,21 @@
 # ==============================================================================
 import multiprocessing as mp
+import os
 import unittest
 from typing import List

-import torch
-from utils import BACKENDS, TORCH_DTYPES, LoRAAdaptor, LoRAModelCase
+from utils import (
+    BACKENDS,
+    TORCH_DTYPES,
+    LoRAAdaptor,
+    LoRAModelCase,
+    run_batch_lora_test,
+)

-from sglang.test.runners import HFRunner, SRTRunner
-from sglang.test.test_utils import CustomTestCase, calculate_rouge_l, is_in_ci
+from sglang.test.test_utils import CustomTestCase, is_in_ci

-MULTI_LORA_MODELS = [
+CI_MULTI_LORA_MODELS = [
     # multi-rank case
     LoRAModelCase(
         base="meta-llama/Llama-2-7b-hf",
@@ -38,6 +43,9 @@ MULTI_LORA_MODELS = [
         ],
         max_loras_per_batch=2,
     ),
+]
+
+ALL_OTHER_MULTI_LORA_MODELS = [
     LoRAModelCase(
         base="meta-llama/Llama-3.1-8B-Instruct",
         adaptors=[
@@ -70,141 +78,8 @@ PROMPTS = [

 class TestMultiLoRABackend(CustomTestCase):
def run_backend_batch(
self,
prompts: List[str],
model_case: LoRAModelCase,
torch_dtype: torch.dtype,
max_new_tokens: int,
backend: str,
):
"""
The multi-LoRA backend test functionality is not supported yet.
This function uses all prompts at once and prints a message indicating that support is pending.
"""
base_path = model_case.base
adaptor_names = [adaptor.name for adaptor in model_case.adaptors]
print(
f"\n========== Testing multi-LoRA backend '{backend}' for base '{model_case.base}' --- "
f"Using prompts {[p[:50] for p in prompts]} with adaptors: {adaptor_names} ---"
)
print(
"run_backend_batch: Multi-LoRA backend test functionality is pending support."
)
with SRTRunner(
base_path,
torch_dtype=torch_dtype,
model_type="generation",
tp_size=model_case.tp_size,
lora_paths=[adaptor.name for adaptor in model_case.adaptors],
max_loras_per_batch=model_case.max_loras_per_batch,
lora_backend=backend,
disable_cuda_graph=True,
disable_radix_cache=True,
mem_fraction_static=0.88,
) as srt_runner:
srt_outputs = srt_runner.forward(
prompts, max_new_tokens=max_new_tokens, lora_paths=adaptor_names
)
with HFRunner(
base_path, torch_dtype=torch_dtype, model_type="generation"
) as hf_runner:
hf_outputs = hf_runner.forward(
prompts, max_new_tokens=max_new_tokens, lora_paths=adaptor_names
)
with SRTRunner(
base_path,
torch_dtype=torch_dtype,
model_type="generation",
tp_size=model_case.tp_size,
mem_fraction_static=0.88,
) as srt_runner:
srt_no_lora_outputs = srt_runner.forward(
prompts, max_new_tokens=max_new_tokens
)
with HFRunner(
base_path,
torch_dtype=torch_dtype,
model_type="generation",
) as hf_runner:
hf_no_lora_outputs = hf_runner.forward(
prompts, max_new_tokens=max_new_tokens
)
# Compare prefill stage logprobs (HF vs SRTRunner with LoRA)
for i in range(len(prompts)):
adaptor = model_case.adaptors[i]
# Use individual adapter tolerances if set, otherwise use model defaults
prefill_tol = (
adaptor.prefill_tolerance
if adaptor.prefill_tolerance is not None
else model_case.prefill_tolerance
)
decode_tol = (
adaptor.decode_tolerance
if adaptor.decode_tolerance is not None
else model_case.decode_tolerance
)
rouge_tol = (
adaptor.rouge_l_tolerance
if adaptor.rouge_l_tolerance is not None
else model_case.rouge_l_tolerance
)
# Compare prefill stage logprobs (HF vs SRTRunner with LoRA)
hf_prefill = torch.tensor(hf_outputs.top_input_logprobs[i])
srt_prefill = torch.tensor(srt_outputs.top_input_logprobs[i])
max_prefill_diff = torch.max(torch.abs(hf_prefill - srt_prefill))
print("Max prefill diff (HF vs SRT):", max_prefill_diff)
# Compare decode stage logprobs
hf_decode = torch.tensor(hf_outputs.top_output_logprobs[i])
srt_decode = torch.tensor(srt_outputs.top_output_logprobs[i])
max_decode_diff = torch.max(torch.abs(hf_decode - srt_decode))
print("Max decode diff (HF vs SRT):", max_decode_diff)
srt_output_str = srt_outputs.output_strs[i].strip()
hf_output_str = hf_outputs.output_strs[i].strip()
rouge_score = calculate_rouge_l([srt_output_str], [hf_output_str])[0]
print("ROUGE-L score:", rouge_score)
print("SRT output:", srt_output_str)
print("HF output:", hf_output_str)
# Additional: compare prefill outputs between base model (no LoRA) and LoRA model for reference
hf_no_lora_prefill = torch.tensor(hf_no_lora_outputs.top_input_logprobs[i])
srt_no_lora_prefill = torch.tensor(
srt_no_lora_outputs.top_input_logprobs[i]
)
print(
"Max diff (SRT base vs SRT LoRA prefill):",
torch.max(torch.abs(srt_no_lora_prefill - srt_prefill)),
)
print(
"Max diff (HF base vs HF LoRA prefill):",
torch.max(torch.abs(hf_no_lora_prefill - hf_prefill)),
)
if hf_prefill.shape[0] <= 100:
assert torch.all(torch.abs(hf_prefill - srt_prefill) < prefill_tol), (
f"Prefill logprobs mismatch for base '{base_path}', adaptor '{adaptor_names}', "
f"backend '{backend}', prompt: '{prompts[0][:50]}...'"
)
if hf_decode.shape[0] <= 100:
assert torch.all(torch.abs(hf_decode - srt_decode) < decode_tol), (
f"Decode logprobs mismatch for base '{base_path}', adaptor '{adaptor_names}', "
f"backend '{backend}', prompt: '{prompts[0][:50]}...'"
)
if rouge_score < rouge_tol:
raise AssertionError(
f"ROUGE-L score {rouge_score} below tolerance {rouge_tol} "
f"for base '{base_path}', adaptor '{adaptor_names}', backend '{backend}', prompt: '{prompts[0][:50]}...'"
)
-    def _run_backend_on_model_cases(self, model_cases: List[LoRAModelCase]):
+    def _run_multi_lora_test_on_model_cases(self, model_cases: List[LoRAModelCase]):
         for model_case in model_cases:
             # If skip_long_prompt is True, filter out prompts longer than 1000 characters.
             batch_prompts = (
@@ -214,19 +89,30 @@ class TestMultiLoRABackend(CustomTestCase):
             )
             for torch_dtype in TORCH_DTYPES:
                 for backend in BACKENDS:
-                    self.run_backend_batch(
+                    run_batch_lora_test(
                         batch_prompts,
                         model_case,
                         torch_dtype,
                         max_new_tokens=32,
                         backend=backend,
+                        test_tag="multi-lora-backend",
                     )

-    def test_multi_lora_models(self):
-        # Optionally skip tests in CI environments.
+    def test_ci_lora_models(self):
+        self._run_multi_lora_test_on_model_cases(CI_MULTI_LORA_MODELS)
+
+    def test_all_lora_models(self):
         if is_in_ci():
             return
-        self._run_backend_on_model_cases(MULTI_LORA_MODELS)
+        # Retain ONLY_RUN check here
+        filtered_models = []
+        for model_case in ALL_OTHER_MULTI_LORA_MODELS:
+            if "ONLY_RUN" in os.environ and os.environ["ONLY_RUN"] != model_case.base:
+                continue
+            filtered_models.append(model_case)
+        self._run_multi_lora_test_on_model_cases(filtered_models)

 if __name__ == "__main__":
...
@@ -17,6 +17,9 @@ from typing import List
 import torch

+from sglang.test.runners import HFRunner, SRTRunner
+from sglang.test.test_utils import calculate_rouge_l

 @dataclasses.dataclass
 class LoRAAdaptor:
@@ -47,3 +50,190 @@ class LoRAModelCase:
 TORCH_DTYPES = [torch.float16]
 BACKENDS = ["triton"]
DEFAULT_PROMPTS = [
"AI is a field of computer science focused on",
"""
### Instruction:
Tell me about llamas and alpacas
### Response:
Llamas are large, long-necked animals with a woolly coat. They have two toes on each foot instead of three like other camelids (camels, dromedaries). Llamas live in the Andean mountains of South America where they graze on grasses and shrubs. Alpaca is another name for domesticated llama. The word "alpaca" comes from an Incan language meaning "golden fleece." Alpacas look very similar to llamas but are smaller than their wild relatives. Both species were used by ancient people as pack animals and for meat. Today both llamas and alpacas are raised primarily for their fiber which can be spun into yarn or knitted into clothing.
### Question 2:
What do you know about llamas?
### Answer:
""",
]
CI_LORA_MODELS = [
LoRAModelCase(
base="meta-llama/Llama-3.1-8B-Instruct",
adaptors=[
LoRAAdaptor(
name="algoprog/fact-generation-llama-3.1-8b-instruct-lora",
),
],
max_loras_per_batch=1,
),
]
ALL_OTHER_LORA_MODELS = [
LoRAModelCase(
base="meta-llama/Llama-3.1-8B-Instruct",
adaptors=[
LoRAAdaptor(
name="Nutanix/Meta-Llama-3.1-8B-Instruct_lora_4_alpha_16",
prefill_tolerance=1e-1,
),
],
max_loras_per_batch=1,
),
LoRAModelCase(
base="meta-llama/Llama-2-7b-hf",
adaptors=[LoRAAdaptor(name="winddude/wizardLM-LlaMA-LoRA-7B")],
max_loras_per_batch=2,
),
]
def run_batch_lora_test(
prompts: List[str],
model_case: LoRAModelCase,
torch_dtype: torch.dtype,
max_new_tokens: int,
backend: str,
disable_cuda_graph: bool = True,
disable_radix_cache: bool = True,
mem_fraction_static: float = 0.88,
test_tag: str = "",
):
"""
Run a LoRA test for a forward batch.
Prompt 0, prompt 1, ..., prompt N are paired with adaptor 0, adaptor 1, ..., adaptor N
from the model case, and the HF and SRT outputs are compared with and without LoRA.
If the number of prompts is larger than the number of adaptors,
prompt i uses adaptor i % (number of adaptors);
for example, with four prompts and two adaptors, the prompts use adaptors 0, 1, 0, 1.
Args:
prompts (List[str]): The batch of prompts to test.
model_case (LoRAModelCase): The model case to test.
torch_dtype (torch.dtype): The torch dtype to use.
max_new_tokens (int): The maximum number of new tokens to generate.
backend (str): The lora backend to use.
disable_cuda_graph (bool, optional): Whether to disable CUDA graph. Defaults to True.
disable_radix_cache (bool, optional): Whether to disable radix cache. Defaults to True.
mem_fraction_static (float, optional): The fraction of memory to use. Defaults to 0.88.
test_tag (str, optional): The tag to use for the test. Defaults to "".
"""
base_path = model_case.base
# Assign an adaptor to each prompt in the batch, cycling through the model case's adaptors
i, adaptors = 0, []
for _ in range(len(prompts)):
adaptors.append(model_case.adaptors[i])
i = (i + 1) % len(model_case.adaptors)
adaptor_names = [adaptor.name for adaptor in adaptors]
print(
f"\n========== Testing {test_tag} on base '{model_case.base}' with backend={backend}, dtype={torch_dtype} --- "
f"Using prompts {[p[:50] for p in prompts]} with adaptors: {adaptor_names} ---"
)
with SRTRunner(
base_path,
torch_dtype=torch_dtype,
model_type="generation",
tp_size=model_case.tp_size,
lora_paths=[adaptor.name for adaptor in model_case.adaptors],
max_loras_per_batch=model_case.max_loras_per_batch,
lora_backend=backend,
disable_cuda_graph=disable_cuda_graph,
disable_radix_cache=disable_radix_cache,
mem_fraction_static=mem_fraction_static,
) as srt_runner:
srt_outputs = srt_runner.forward(
prompts, max_new_tokens=max_new_tokens, lora_paths=adaptor_names
)
with SRTRunner(
base_path,
torch_dtype=torch_dtype,
model_type="generation",
tp_size=model_case.tp_size,
mem_fraction_static=mem_fraction_static,
) as srt_runner:
srt_no_lora_outputs = srt_runner.forward(prompts, max_new_tokens=max_new_tokens)
with HFRunner(
base_path, torch_dtype=torch_dtype, model_type="generation"
) as hf_runner:
hf_outputs = hf_runner.forward(
prompts, max_new_tokens=max_new_tokens, lora_paths=adaptor_names
)
hf_no_lora_outputs = hf_runner.forward(prompts, max_new_tokens=max_new_tokens)
# Compare prefill stage logprobs (HF vs SRTRunner with LoRA)
for i in range(len(prompts)):
adaptor = adaptors[i]
# Use individual adaptor tolerances if set, otherwise use model defaults
prefill_tol = (
adaptor.prefill_tolerance
if adaptor.prefill_tolerance is not None
else model_case.prefill_tolerance
)
decode_tol = (
adaptor.decode_tolerance
if adaptor.decode_tolerance is not None
else model_case.decode_tolerance
)
rouge_tol = (
adaptor.rouge_l_tolerance
if adaptor.rouge_l_tolerance is not None
else model_case.rouge_l_tolerance
)
# Compare prefill stage logprobs (HF vs SRTRunner with LoRA)
hf_prefill = torch.tensor(hf_outputs.top_input_logprobs[i])
srt_prefill = torch.tensor(srt_outputs.top_input_logprobs[i])
max_prefill_diff = torch.max(torch.abs(hf_prefill - srt_prefill))
print("Max prefill diff (HF vs SRT):", max_prefill_diff)
# Compare decode stage logprobs
hf_decode = torch.tensor(hf_outputs.top_output_logprobs[i])
srt_decode = torch.tensor(srt_outputs.top_output_logprobs[i])
max_decode_diff = torch.max(torch.abs(hf_decode - srt_decode))
print("Max decode diff (HF vs SRT):", max_decode_diff)
srt_output_str = srt_outputs.output_strs[i].strip()
hf_output_str = hf_outputs.output_strs[i].strip()
rouge_score = calculate_rouge_l([srt_output_str], [hf_output_str])[0]
print("ROUGE-L score:", rouge_score)
print("SRT output:", srt_output_str)
print("HF output:", hf_output_str)
# Additional: compare prefill outputs between base model (no LoRA) and LoRA model for reference
hf_no_lora_prefill = torch.tensor(hf_no_lora_outputs.top_input_logprobs[i])
srt_no_lora_prefill = torch.tensor(srt_no_lora_outputs.top_input_logprobs[i])
print(
"Max diff (SRT base vs SRT LoRA prefill):",
torch.max(torch.abs(srt_no_lora_prefill - srt_prefill)),
)
print(
"Max diff (HF base vs HF LoRA prefill):",
torch.max(torch.abs(hf_no_lora_prefill - hf_prefill)),
)
if hf_prefill.shape[0] <= 100:
assert torch.all(torch.abs(hf_prefill - srt_prefill) < prefill_tol), (
f"Prefill logprobs mismatch for base '{base_path}', adaptor '{adaptor_names}', "
f"backend '{backend}', prompt: '{prompts[0][:50]}...'"
)
if hf_decode.shape[0] <= 100:
assert torch.all(torch.abs(hf_decode - srt_decode) < decode_tol), (
f"Decode logprobs mismatch for base '{base_path}', adaptor '{adaptor_names}', "
f"backend '{backend}', prompt: '{prompts[0][:50]}...'"
)
if rouge_score < rouge_tol:
raise AssertionError(
f"ROUGE-L score {rouge_score} below tolerance {rouge_tol} "
f"for base '{base_path}', adaptor '{adaptor_names}', backend '{backend}', prompt: '{prompts[0][:50]}...'"
)
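
For reference, the following is a minimal sketch of how a test file can drive the consolidated helper above, mirroring the pattern used in the updated test cases in this diff. It is illustrative only: the class name ExampleLoRATest and the test_tag value are placeholders, not part of the commit.

import unittest

from utils import (
    BACKENDS,
    CI_LORA_MODELS,
    DEFAULT_PROMPTS,
    TORCH_DTYPES,
    run_batch_lora_test,
)

from sglang.test.test_utils import CustomTestCase


class ExampleLoRATest(CustomTestCase):  # illustrative name
    def test_ci_lora_models(self):
        for model_case in CI_LORA_MODELS:
            # Filter out long prompts when the model case requests it,
            # matching the filtering done in the real test files.
            prompts = (
                DEFAULT_PROMPTS
                if not model_case.skip_long_prompt
                else [p for p in DEFAULT_PROMPTS if len(p) < 1000]
            )
            for torch_dtype in TORCH_DTYPES:
                for backend in BACKENDS:
                    # One call runs the whole prompt batch and performs the
                    # HF-vs-SRT logprob and ROUGE-L checks internally.
                    run_batch_lora_test(
                        prompts,
                        model_case,
                        torch_dtype,
                        max_new_tokens=32,
                        backend=backend,
                        test_tag="example",
                    )


if __name__ == "__main__":
    unittest.main()

Because the prompt-to-adaptor pairing and all tolerance checks live inside run_batch_lora_test, a test case only has to choose which model cases, dtypes, and backends to sweep.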