Unverified Commit bc92107b authored by tianlian yi, committed by GitHub

Support server-based rollout in VerlEngine (#4848)


Co-authored-by: Jin Pan <jpan236@wisc.edu>
Co-authored-by: Chayenne <zhaochen20@outlook.com>
Co-authored-by: Jinn <47354855+jhinpan@users.noreply.github.com>
parent 3e4794aa
from abc import ABC, abstractmethod
from typing import Dict, Iterator, List, Optional, Tuple, Union
import torch
class EngineBase(ABC):
"""
Abstract base class for engine interfaces that support generation, weight updating, and memory control.
This base class provides a unified API for both HTTP server based engines and in-process engines.
"""
@abstractmethod
def generate(
self,
prompt: Optional[Union[List[str], str]] = None,
sampling_params: Optional[Union[List[Dict], Dict]] = None,
input_ids: Optional[Union[List[List[int]], List[int]]] = None,
image_data: Optional[Union[List[str], str]] = None,
return_logprob: Optional[Union[List[bool], bool]] = False,
logprob_start_len: Optional[Union[List[int], int]] = None,
top_logprobs_num: Optional[Union[List[int], int]] = None,
token_ids_logprob: Optional[Union[List[List[int]], List[int]]] = None,
lora_path: Optional[Union[List[Optional[str]], Optional[str]]] = None,
custom_logit_processor: Optional[Union[List[str], str]] = None,
) -> Union[Dict, Iterator[Dict]]:
"""Generate outputs based on given inputs."""
pass
@abstractmethod
def update_weights_from_tensor(
self,
named_tensors: List[Tuple[str, torch.Tensor]],
load_format: Optional[str] = None,
flush_cache: bool = True,
):
"""Update model weights with in-memory tensor data."""
pass
@abstractmethod
def release_memory_occupation(self):
"""Release GPU memory occupation temporarily."""
pass
@abstractmethod
def resume_memory_occupation(self):
"""Resume GPU memory occupation which is previously released."""
pass
@abstractmethod
def shutdown(self):
"""Shutdown the engine and clean up resources."""
pass
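# --- Editor's illustrative sketch (not part of this diff) ---
# A concrete backend must implement every abstract method above before the class can
# be instantiated. `EchoEngine` is hypothetical and only echoes its input back.
class EchoEngine(EngineBase):
    def generate(self, prompt=None, sampling_params=None, **kwargs):
        # Echo the prompt in the same Dict shape real engines return.
        return {"text": prompt}

    def update_weights_from_tensor(self, named_tensors, load_format=None, flush_cache=True):
        return {"success": True, "message": "no-op"}

    def release_memory_occupation(self):
        pass

    def resume_memory_occupation(self):
        pass

    def shutdown(self):
        pass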
@@ -38,6 +38,7 @@ import torch
import uvloop
from sglang.srt.code_completion_parser import load_completion_template_for_openai_api
from sglang.srt.entrypoints.EngineBase import EngineBase
from sglang.srt.managers.data_parallel_controller import (
run_data_parallel_controller_process,
)
@@ -78,7 +79,7 @@ logger = logging.getLogger(__name__)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
class Engine:
class Engine(EngineBase):
"""
The entry point to the inference engine.
......
@@ -25,8 +25,11 @@ import multiprocessing as multiprocessing
import os
import threading
import time
from http import HTTPStatus
from typing import AsyncIterator, Callable, Dict, Optional
from typing import AsyncIterator, Callable, Dict, Optional, Union
from sglang.srt.model_executor.model_runner import LocalSerializedTensor
# Fix a bug of Python threading
setattr(threading, "_register_atexit", lambda *args, **kwargs: None)
@@ -60,6 +63,7 @@ from sglang.srt.managers.io_struct import (
SetInternalStateReq,
UpdateWeightFromDiskReqInput,
UpdateWeightsFromDistributedReqInput,
UpdateWeightsFromTensorReqInput,
VertexGenerateReqInput,
)
from sglang.srt.managers.tokenizer_manager import TokenizerManager
@@ -80,6 +84,7 @@ from sglang.srt.openai_api.protocol import ModelCard, ModelList
from sglang.srt.reasoning_parser import ReasoningParser
from sglang.srt.server_args import ServerArgs
from sglang.srt.utils import (
MultiprocessingSerializer,
add_api_key_middleware,
add_prometheus_middleware,
delete_directory,
@@ -411,6 +416,26 @@ async def init_weights_update_group(
return ORJSONResponse(content, status_code=HTTPStatus.BAD_REQUEST)
@app.post("/update_weights_from_tensor")
async def update_weights_from_tensor(
obj: UpdateWeightsFromTensorReqInput, request: Request
):
"""Update the weights from tensor inplace without re-launching the server.
Notes:
1. Ensure that the model is on the correct device (e.g., GPU) before calling this endpoint. If the model is moved to the CPU unexpectedly, it may cause performance issues or runtime errors.
2. HTTP will transmit only the metadata of the tensor, while the tensor itself will be directly copied to the model.
3. Any binary data in the named tensors should be base64 encoded.
"""
success, message = await _global_state.tokenizer_manager.update_weights_from_tensor(
obj, request
)
content = {"success": success, "message": message}
return ORJSONResponse(
content, status_code=200 if success else HTTPStatus.BAD_REQUEST
)
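# --- Editor's illustrative sketch (not part of this diff) ---
# Calling the new endpoint from a client. The payload mirrors
# UpdateWeightsFromTensorReqInput; note 3 above is satisfied by
# MultiprocessingSerializer.serialize(..., output_str=True), which base64-encodes the
# pickled tensors. The URL, tensor name, and helper name are assumptions.
def _example_client_update(base_url: str = "http://localhost:30000"):
    import requests
    import torch

    from sglang.srt.utils import MultiprocessingSerializer

    # Tensors should live on GPU so the server can copy them directly (see note 1).
    named_tensors = [("lm_head.weight", torch.zeros(16, 16, device="cuda"))]
    payload = {
        # One serialized copy per TP worker; a single-GPU server needs just one.
        "serialized_named_tensors": [
            MultiprocessingSerializer.serialize(named_tensors, output_str=True)
        ],
        "load_format": None,
        "flush_cache": True,
    }
    response = requests.post(f"{base_url}/update_weights_from_tensor", json=payload)
    return response.json()  # {"success": ..., "message": ...}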
@app.post("/update_weights_from_distributed")
async def update_weights_from_distributed(
obj: UpdateWeightsFromDistributedReqInput, request: Request
......
import base64
import copy
import dataclasses
import multiprocessing
import pickle
import threading
import time
from typing import Any, Dict, List, Optional, Tuple, Union
import requests
import torch
import torch.distributed as dist
from sglang.srt.entrypoints.EngineBase import EngineBase
from sglang.srt.entrypoints.http_server import launch_server
from sglang.srt.server_args import ServerArgs
from sglang.srt.utils import MultiprocessingSerializer, kill_process_tree
def launch_server_process(server_args: ServerArgs) -> multiprocessing.Process:
p = multiprocessing.Process(target=launch_server, args=(server_args,))
p.start()
base_url = server_args.url()
timeout = 300.0 # Increased timeout to 5 minutes for downloading large models
start_time = time.time()
with requests.Session() as session:
while time.time() - start_time < timeout:
try:
headers = {
"Content-Type": "application/json; charset=utf-8",
"Authorization": f"Bearer {server_args.api_key}",
}
response = session.get(f"{base_url}/health_generate", headers=headers)
if response.status_code == 200:
return p
except requests.RequestException:
pass
if not p.is_alive():
raise Exception("Server process terminated unexpectedly.")
time.sleep(2)
p.terminate()
raise TimeoutError("Server failed to start within the timeout period.")
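# --- Editor's illustrative sketch (not part of this diff) ---
# Typical standalone usage of launch_server_process. The model path and port are
# assumptions; the call blocks until /health_generate answers or the timeout hits.
def _example_launch():
    args = ServerArgs(model_path="meta-llama/Llama-3.2-1B-Instruct", port=30000)
    proc = launch_server_process(args)
    try:
        pass  # ... issue requests against args.url() here ...
    finally:
        kill_process_tree(proc.pid)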
class HttpServerEngineAdapter(EngineBase):
"""
You can use this class to launch a server from a VerlEngine instance.
We recommend using this class only when you need to use the HTTP server.
Otherwise, you can use Engine directly.
"""
def __init__(self, **kwargs):
self.server_args = ServerArgs(**kwargs)
print(
f"Launch HttpServerEngineAdapter at: {self.server_args.host}:{self.server_args.port}"
)
self.process = launch_server_process(self.server_args)
def _make_request(self, endpoint: str, payload: Optional[dict] = None):
"""Make a POST request to the specified endpoint with the given payload.
Args:
endpoint: The API endpoint to call
payload: The JSON payload to send (default: empty dict)
Returns:
The JSON response from the server
"""
url = f"http://{self.server_args.host}:{self.server_args.port}/{endpoint}"
response = requests.post(url, json=payload or {})
response.raise_for_status()
return response.json()
def update_weights_from_tensor(
self,
named_tensors: List[Tuple[str, torch.Tensor]],
load_format: Optional[str] = None,
flush_cache: bool = False,
):
"""
Update model weights from tensor data. The HTTP server will only post the tensor metadata, while the real weights will be copied directly between GPUs.
Note: The model should be on GPUs rather than CPU for this functionality to work properly.
If you encounter issues, ensure your model is loaded on GPU devices rather than CPU.
"""
return self._make_request(
"update_weights_from_tensor",
{
"serialized_named_tensors": [
MultiprocessingSerializer.serialize(named_tensors, output_str=True)
for _ in range(self.server_args.tp_size)
],
"load_format": load_format,
"flush_cache": flush_cache,
},
)
def shutdown(self):
kill_process_tree(self.process.pid)
def generate(
self,
prompt=None,
sampling_params=None,
input_ids=None,
image_data=None,
return_logprob=False,
logprob_start_len=None,
top_logprobs_num=None,
token_ids_logprob=None,
lora_path=None,
custom_logit_processor=None,
):
payload = {
"text": prompt,
"sampling_params": sampling_params,
"input_ids": input_ids,
"image_data": image_data,
"return_logprob": return_logprob,
"logprob_start_len": logprob_start_len,
"top_logprobs_num": top_logprobs_num,
"token_ids_logprob": token_ids_logprob,
"lora_path": lora_path,
"custom_logit_processor": custom_logit_processor,
}
# Filter out None values
payload = {k: v for k, v in payload.items() if v is not None}
return self._make_request("generate", payload)
def release_memory_occupation(self):
return self._make_request("release_memory_occupation")
def resume_memory_occupation(self):
return self._make_request("resume_memory_occupation")
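# --- Editor's illustrative sketch (not part of this diff) ---
# Driving the adapter end to end: every call below is a thin POST to the launched
# server. The model path, port, and helper name are assumptions.
def _example_adapter_roundtrip():
    adapter = HttpServerEngineAdapter(
        model_path="meta-llama/Llama-3.2-1B-Instruct", port=30000
    )
    out = adapter.generate(
        prompt="Hello, world!", sampling_params={"max_new_tokens": 8}
    )
    print(out)
    adapter.release_memory_occupation()  # POST /release_memory_occupation
    adapter.resume_memory_occupation()   # POST /resume_memory_occupation
    adapter.shutdown()                   # kills the server process tree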
@@ -12,16 +12,18 @@
# limitations under the License.
# ==============================================================================
import os
from typing import Dict, List, Optional, Tuple, Union
from typing import Dict, List, Literal, Optional, Tuple, Union
import torch
import torch.distributed as dist
from PIL.Image import Image
from torch.distributed.tensor import DeviceMesh, DTensor
from sglang.srt.entrypoints.http_server_engine import HttpServerEngineAdapter
from sglang.srt.model_executor.model_runner import LocalSerializedTensor
from sglang.srt.patch_torch import monkey_patch_torch_reductions
from sglang.srt.server import Engine
from sglang.srt.server_args import PortArgs, ServerArgs
from sglang.srt.utils import MultiprocessingSerializer, broadcast_pyobj
@@ -30,6 +32,7 @@ class VerlEngine:
self,
device_mesh_cpu: DeviceMesh,
nnodes: int = 1,
backend: Literal["engine", "server"] = "engine",
**kwargs,
):
monkey_patch_torch_reductions()
@@ -40,13 +43,25 @@
node_rank = self._tp_rank // tp_size_per_node
first_rank_in_node = self._tp_rank % tp_size_per_node == 0
if first_rank_in_node:
os.environ["SGLANG_BLOCK_NONZERO_RANK_CHILDREN"] = "0"
self._engine = Engine(
**kwargs, tp_size=self._tp_size, node_rank=node_rank, nnodes=nnodes
)
# Common engine keyword arguments
engine_kwargs = dict(
**kwargs, tp_size=self._tp_size, node_rank=node_rank, nnodes=nnodes
)
if backend == "engine":
if first_rank_in_node:
os.environ["SGLANG_BLOCK_NONZERO_RANK_CHILDREN"] = "0"
self._engine = Engine(**engine_kwargs)
else:
self._engine = None
elif backend == "server":
if self._tp_rank == 0:
self._engine = HttpServerEngineAdapter(**engine_kwargs)
else:
self._engine = None
else:
    raise ValueError(f"Unsupported backend: {backend}")
dist.barrier(group=self._device_mesh_cpu.get_group())
......
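# --- Editor's illustrative sketch (not part of this diff) ---
# Selecting the new server backend from training code. With backend="server", only
# tp rank 0 launches an HttpServerEngineAdapter; the other ranks keep
# self._engine = None and join through collectives. `_build_rollout_engine` and all
# values below are assumptions.
def _build_rollout_engine(device_mesh_cpu):
    return VerlEngine(
        model_path="Qwen/Qwen2-1.5B",
        mem_fraction_static=0.4,
        device_mesh_cpu=device_mesh_cpu["tp"],
        backend="server",  # "engine" (the default) keeps the in-process behavior
    )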
@@ -700,10 +700,17 @@ class UpdateWeightsFromDistributedReqOutput:
@dataclass
class UpdateWeightsFromTensorReqInput:
# List containing one serialized Dict[str, torch.Tensor] per TP worker
serialized_named_tensors: List[bytes]
load_format: Optional[str]
flush_cache: bool
"""Update model weights from tensor input.
- Tensors are serialized for transmission
- Data is structured in JSON for easy transmission over HTTP
"""
serialized_named_tensors: List[Union[str, bytes]]
# Optional format specification for loading
load_format: Optional[str] = None
# Whether to flush the cache after updating weights
flush_cache: bool = True
@dataclass
......
@@ -1480,14 +1480,43 @@ def permute_weight(x: torch.Tensor) -> torch.Tensor:
class MultiprocessingSerializer:
@staticmethod
def serialize(obj):
def serialize(obj, output_str: bool = False):
"""
Serialize a Python object using ForkingPickler.
Args:
obj: The object to serialize.
output_str (bool): If True, return a base64-encoded string instead of raw bytes.
Returns:
bytes or str: The serialized object.
"""
buf = io.BytesIO()
ForkingPickler(buf).dump(obj)
buf.seek(0)
return buf.read()
output = buf.read()
if output_str:
# Convert bytes to base64-encoded string
output = base64.b64encode(output).decode("utf-8")
return output
@staticmethod
def deserialize(data):
"""
Deserialize a previously serialized object.
Args:
data (bytes or str): The serialized data, optionally base64-encoded.
Returns:
The deserialized Python object.
"""
if isinstance(data, str):
# Decode base64 string to bytes
data = base64.b64decode(data)
return ForkingPickler.loads(data)
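# --- Editor's illustrative sketch (not part of this diff) ---
# Round-tripping through the serializer. output_str=True yields a base64 string that
# is safe to embed in a JSON HTTP payload; deserialize() accepts either form.
def _example_serializer_roundtrip():
    import torch

    tensor = torch.ones(2, 2)
    raw = MultiprocessingSerializer.serialize([("w", tensor)])  # bytes
    txt = MultiprocessingSerializer.serialize([("w", tensor)], output_str=True)  # str
    name, restored = MultiprocessingSerializer.deserialize(txt)[0]
    assert name == "w" and torch.equal(restored, tensor)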
......
@@ -25,7 +25,12 @@ from sglang.bench_serving import run_benchmark
from sglang.global_config import global_config
from sglang.lang.backend.openai import OpenAI
from sglang.lang.backend.runtime_endpoint import RuntimeEndpoint
from sglang.srt.utils import get_bool_env_var, kill_process_tree, retry
from sglang.srt.utils import (
get_bool_env_var,
is_port_available,
kill_process_tree,
retry,
)
from sglang.test.run_eval import run_eval
from sglang.utils import get_exception_traceback
@@ -98,6 +103,17 @@ def call_generate_lightllm(prompt, temperature, max_tokens, stop=None, url=None)
return pred
def find_available_port(base_port: int):
port = base_port + random.randint(100, 1000)
while True:
if is_port_available(port):
return port
if port < 60000:
port += 42
else:
port -= 43
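# --- Editor's illustrative sketch (not part of this diff) ---
# The helper starts from base_port plus a random offset (to reduce collisions between
# parallel test runs), then probes in a fixed pattern: +42 while below 60000, -43 above.
def _example_pick_port():
    port = find_available_port(23456)
    assert is_port_available(port)  # race-prone, but fine for a sketch
    return port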
def call_generate_vllm(prompt, temperature, max_tokens, stop=None, n=1, url=None):
assert url is not None
......
@@ -27,7 +27,7 @@ from sglang.test.runners import (
check_close_model_outputs,
get_dtype_str,
)
from sglang.test.test_utils import CustomTestCase, is_in_ci
from sglang.test.test_utils import CustomTestCase, find_available_port, is_in_ci
_MAX_NEW_TOKENS = 8
_PROMPTS = ["1+1=2, 1+2=3, 1+3=4, 1+4=5, 1+5=", "1*1=1, 1*2=2, 1*3=3, 1*4=4, 1*5="]
@@ -282,17 +282,5 @@ def _get_fsdp_state_dict(hf_model, tp_size: int):
return fsdp_model.state_dict()
# TODO Ask: this is extracted from PortArgs.init_new, is it allowed to extract it, i.e. touch that old code
def find_available_port(base_port: int):
port = base_port + random.randint(100, 1000)
while True:
if is_port_available(port):
return port
if port < 60000:
port += 42
else:
port -= 43
if __name__ == "__main__":
unittest.main()
import multiprocessing
import multiprocessing as mp
import os
import random
import time
import traceback
import unittest
from multiprocessing import Process
import requests
import torch
from openai import OpenAI
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.fsdp import CPUOffload
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import MixedPrecision
from torch.distributed.fsdp.api import (
ShardedStateDictConfig,
ShardingStrategy,
StateDictType,
)
from transformers import AutoModelForCausalLM
from sglang.srt.entrypoints.verl_engine import VerlEngine
from sglang.srt.hf_transformers_utils import get_tokenizer
from sglang.srt.server_args import ServerArgs
from sglang.srt.utils import is_port_available
from sglang.test.runners import (
HFRunner,
SRTRunner,
check_close_model_outputs,
get_dtype_str,
)
from sglang.test.test_utils import CustomTestCase, find_available_port, is_in_ci
_MAX_NEW_TOKENS = 8
_PROMPTS = ["1+1=2, 1+2=3, 1+3=4, 1+4=5, 1+5=", "1*1=1, 1*2=2, 1*3=3, 1*4=4, 1*5="]
_TORCH_DTYPE = torch.float16
# Set to false to temporarily debug issues unrelated to weight update
_ENABLE_UPDATE_WEIGHTS = True
CI_MODELS = [
dict(model_path="meta-llama/Llama-3.1-8B-Instruct"),
# Fail to run gemma-2-2b after transformers==4.48.3 -> 4.50.0
# dict(model_path="google/gemma-2-2b"),
]
ALL_OTHER_MODELS = [
dict(model_path="meta-llama/Llama-3.2-1B-Instruct", tp_size=1),
dict(model_path="Qwen/Qwen2-1.5B"),
# dict(
# model_path="Qwen/Qwen2.5-14B-Instruct",
# mem_fraction_static=0.4,
# tp_size=8,
# tight_memory=True,
# decode_tolerance=1.3,
# ), # test_generation_models.py same config (qwen + tp=8) gives 1.22 decode error
dict(model_path="HuggingFaceTB/SmolLM-135M-Instruct", tp_size=3),
# dict(model_path="allenai/OLMo-1B-0724-hf"),
# dict(
# model_path="THUDM/glm-4-9b-chat",
# mem_fraction_static=0.1,
# tp_size=8,
# tight_memory=True,
# ),
# dict(model_path="allenai/OLMo-2-1124-7B-Instruct"),
# dict(
# model_path="ibm-granite/granite-3.0-2b-instruct",
# prefill_tolerance=0.22,
# decode_tolerance=0.22,
# ),
]
# This port is used for HTTP API communication with the VerlEngine server
# It handles client requests for text generation, weight updates, and memory management
# This port must be available and not used by other processes
PORT = find_available_port(2345)
# Master port is used for PyTorch's distributed communication setup
# It enables tensor-parallel processes to communicate with each other
# Default is 23456, but we find an available port dynamically in assert_fragment_e2e_execution
# This port is critical for torch.distributed.init_process_group to function properly
# Each test needs a unique master_port to avoid conflicts between parallel test executions
# master_port = find_available_port(23456) # This is set in assert_fragment_e2e_execution method
class TestVerlEngine(CustomTestCase):
@classmethod
def setUpClass(cls):
multiprocessing.set_start_method("spawn")
def assert_fragment_e2e_execution(
self,
index: int,
model_path: str,
mem_fraction_static: float = 0.4,
tp_size: int = 2,
tight_memory: bool = False,
prefill_tolerance: float = 0.1,
decode_tolerance: float = 0.1,
):
"""
Tests VerlEngine with tensor parallelism across multiple processes.
Spawns tp_size processes to test distributed execution, including:
- Model inference via direct API and HTTP server
- Weight updating functionality
- Memory management (release/resume)
The test validates output correctness against a reference implementation
within specified tolerance bounds.
Parameters:
-----------
index: int - Test index for logging
model_path: str - HuggingFace model identifier
mem_fraction_static: float - Memory fraction for static tensors
tp_size: int - Number of tensor parallel processes
tight_memory: bool - Enable memory optimization
prefill_tolerance: float - Max error for prefill computation
decode_tolerance: float - Max error for decoding computation
"""
master_port = find_available_port(23456)
print(f"assert_fragment_e2e_execution START {index=} {model_path=}")
processes = []
output_reader, output_writer = mp.Pipe(duplex=False)
for tp_rank in range(tp_size):
p = Process(
target=_run_subprocess,
kwargs=dict(
tp_rank=tp_rank,
tp_size=tp_size,
master_port=master_port,
output_writer=output_writer,
model_path=model_path,
mem_fraction_static=mem_fraction_static,
tight_memory=tight_memory,
prefill_tolerance=prefill_tolerance,
decode_tolerance=decode_tolerance,
),
)
p.start()
processes.append(p)
for _ in range(tp_size):
self.assertTrue(
output_reader.recv(),
f"Subprocess has error, please see logs above. ({index=} {model_path=})",
)
for p in processes:
p.join()
def test_models(self):
"""
Orchestrates end-to-end testing across configured model sets.
In CI environments: Randomly selects one model for faster testing.
In development: Tests all configured models for comprehensive validation.
Each model configuration specifies model path, memory settings,
tensor-parallel size, and error tolerance bounds.
"""
test_models = ALL_OTHER_MODELS
if is_in_ci():
    # Randomly select one model in CI for faster testing
    test_models = [random.choice(ALL_OTHER_MODELS)]
else:
    # Test all models in the development environment
    print(f"Development environment: Testing all {len(ALL_OTHER_MODELS)} models")
for index, model_info in enumerate(test_models):
self.assert_fragment_e2e_execution(index=index, **model_info)
def _run_subprocess(
tp_rank: int,
tp_size: int,
master_port: int,
output_writer,
model_path: str,
mem_fraction_static: float,
tight_memory: bool,
prefill_tolerance: float,
decode_tolerance: float,
):
"""
Executes a single tensor-parallel process for testing VerlEngine.
Performs the core test operations:
1. Initializes distributed environment
2. Loads HuggingFace model for reference
3. Tests VerlEngine API (generation, memory management, weight updates)
4. Tests OpenAI-compatible endpoints on rank 0
Reports success/failure via output_writer pipe.
Parameters:
tp_rank: int - Process rank in tensor parallel group
tp_size: int - Total processes in tensor parallel group
master_port: int - Port for distributed communication
output_writer - Pipe for result communication
model_path: str - HuggingFace model identifier
mem_fraction_static: float - Static memory allocation fraction
tight_memory: bool - Memory optimization flag
prefill_tolerance: float - Acceptable prefill error
decode_tolerance: float - Acceptable decode error
"""
try:
print(f"subprocess[{tp_rank=}] Start {os.environ.get('CUDA_VISIBLE_DEVICES')=}")
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = str(master_port)
torch.distributed.init_process_group(rank=tp_rank, world_size=tp_size)
torch.cuda.set_device(tp_rank)
mesh_kwargs = dict(mesh_shape=(tp_size, 1), mesh_dim_names=["tp", "pp"])
inference_device_mesh_device = init_device_mesh("cuda", **mesh_kwargs)
inference_device_mesh_cpu = init_device_mesh("cpu", **mesh_kwargs)
# Print basic information about this subprocess including:
# - Current tensor-parallel rank
# - Device mesh configuration for both CUDA and CPU
# - This subprocess's role in testing tensor-parallel execution
# - How it contributes to the distributed model testing
print(
f"subprocess[{tp_rank=}] initialized for VerlEngine testing - "
f"Role: Shard {tp_rank+1}/{tp_size} of tensor-parallel model execution | "
f"Device meshes: CUDA={inference_device_mesh_device}, CPU={inference_device_mesh_cpu}"
)
# hf model is used for comparison
hf_model = AutoModelForCausalLM.from_pretrained(
model_path, torch_dtype=_TORCH_DTYPE, trust_remote_code=True
).cuda()
hf_tokenizer = get_tokenizer(model_path, trust_remote_code=True)
hf_outputs = HFRunner.forward_generation_raw(
base_model=hf_model,
prompts=_PROMPTS,
max_new_tokens=_MAX_NEW_TOKENS,
tokenizer=hf_tokenizer,
lora_paths=None,
torch_dtype=_TORCH_DTYPE,
output_str_only=False,
)
if _ENABLE_UPDATE_WEIGHTS:
if tight_memory:
# If tight_memory is True, we need to move the model to CPU to save memory
hf_model.cpu()
torch.cuda.empty_cache()
# test update weights
print(f"subprocess[{tp_rank=}] get_fsdp_state_dict", flush=True)
fsdp_state_dict = _get_fsdp_state_dict(hf_model=hf_model, tp_size=tp_size)
engine = VerlEngine(
model_path=model_path,
load_format="dummy" if _ENABLE_UPDATE_WEIGHTS else "auto",
mem_fraction_static=mem_fraction_static,
random_seed=42,
trust_remote_code=True,
dtype=get_dtype_str(_TORCH_DTYPE),
device_mesh_cpu=inference_device_mesh_cpu["tp"],
backend="server",
enable_memory_saver=True,
port=PORT,
)
# test direct generate API with multiple different requests
print(
f"subprocess[{tp_rank=}] testing direct generate API with multiple requests"
)
# Request 1: Basic generation with temperature
print(f"subprocess[{tp_rank=}] test request 1: Basic generation")
direct_response = engine.generate(
prompt="Hello, world!",
sampling_params={"temperature": 0.7, "max_new_tokens": 20},
)
print(f"Response 1: {direct_response}")
# Request 2: Zero temperature (greedy) generation
print(f"subprocess[{tp_rank=}] test request 2: Greedy generation")
direct_response = engine.generate(
prompt="Complete this sequence: 1, 2, 3,",
sampling_params={"temperature": 0.0, "max_new_tokens": 10},
)
print(f"Response 2: {direct_response}")
# Request 3: Batch generation
print(f"subprocess[{tp_rank=}] test request 3: Batch generation")
batch_response = engine.generate(
prompt=["Translate 'hello' to French:", "Translate 'goodbye' to Spanish:"],
sampling_params={"temperature": 0.8, "max_new_tokens": 15},
)
print(f"Response 3: {batch_response}")
# test memory occupation APIs
print(f"subprocess[{tp_rank=}] testing memory occupation APIs")
engine.release_memory_occupation()
print("Memory released")
# time.sleep(1)
engine.resume_memory_occupation()
print("Memory resumed")
# openai API test for reference
torch.distributed.barrier()
if tp_rank == 0:
client = OpenAI(api_key="None", base_url=f"http://localhost:{PORT}/v1")
print(client.models.list().data[0].id)
# Multiple HTTP API requests
print("Testing HTTP API with multiple requests")
# Request 1
url = f"http://localhost:{PORT}/generate"
data = {"text": "1*1=1, 1*2=2, 1*3=3, 1*4=4, 1*5="}
response = requests.post(url, json=data)
print(f"HTTP Response 1: {response.json()}")
# Request 2
data = {
"text": "The capital of France is",
"sampling_params": {"temperature": 0.2},
}
response = requests.post(url, json=data)
print(f"HTTP Response 2: {response.json()}")
# Request 3
data = {
"text": "List three colors:",
"sampling_params": {"top_p": 0.95, "max_new_tokens": 25},
}
response = requests.post(url, json=data)
print(f"HTTP Response 3: {response.json()}")
if _ENABLE_UPDATE_WEIGHTS:
print(f"subprocess[{tp_rank=}] call update_weights_from_tensor", flush=True)
engine.update_weights_from_tensor(
[(k, v) for k, v in fsdp_state_dict.items()]
)
# Final generation test after weight update
print(f"subprocess[{tp_rank=}] testing generation after weight update")
direct_response = engine.generate(
prompt="After weight update: Hello, world!",
sampling_params={"temperature": 0.7, "max_new_tokens": 20},
)
print(f"Post-update response: {direct_response}")
execution_ok = True
except Exception as e:
print(f"subprocess[{tp_rank=}] has error: {e}", flush=True)
traceback.print_exc()
execution_ok = False
output_writer.send(execution_ok)
output_writer.close()
engine.shutdown()
print(f"subprocess[{tp_rank=}] end", flush=True)
# Adapted from https://github.com/volcengine/verl/blob/main/tests/rollout/run_fsdp_vllm.py
def _get_fsdp_state_dict(hf_model, tp_size: int):
"""
Creates a sharded state dictionary for weight update testing.
Wraps the HuggingFace model with FSDP (FullyShardedDataParallel),
configures precision settings, and returns a sharded state dict
for testing VerlEngine's weight update capabilities.
Parameters:
hf_model - HuggingFace model to wrap
tp_size: int - Number of tensor-parallel shards
Returns:
dict - Sharded state dict for update_weights_from_tensor
"""
device_mesh = init_device_mesh(
"cuda", mesh_shape=(tp_size,), mesh_dim_names=["fsdp"]
)
mixed_precision = MixedPrecision(
param_dtype=torch.bfloat16,
reduce_dtype=torch.float32,
buffer_dtype=torch.float32,
)
fsdp_model = FSDP(
hf_model,
use_orig_params=True,
auto_wrap_policy=None,
device_id=torch.cuda.current_device(),
sharding_strategy=ShardingStrategy.FULL_SHARD,
mixed_precision=mixed_precision,
cpu_offload=CPUOffload(offload_params=False),
sync_module_states=False,
device_mesh=device_mesh,
)
print(f"{fsdp_model=}")
FSDP.set_state_dict_type(
fsdp_model,
state_dict_type=StateDictType.SHARDED_STATE_DICT,
state_dict_config=ShardedStateDictConfig(),
)
return fsdp_model.state_dict()
if __name__ == "__main__":
unittest.main()