ray_utils.py
import socket
from typing import Optional, Tuple, TYPE_CHECKING

from vllm.config import ParallelConfig
from vllm.logger import init_logger

logger = init_logger(__name__)

try:
    import ray
    from ray.air.util.torch_dist import TorchDistributedWorker

    class RayWorker(TorchDistributedWorker):
        """Ray wrapper for vllm.worker.Worker, allowing Worker to be
        lazily initialized after Ray sets CUDA_VISIBLE_DEVICES."""

        def __init__(self, init_cached_hf_modules=False) -> None:
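            # Optionally prime the Hugging Face dynamic-module cache in this
            # process before the Worker exists; presumably so that models
            # loaded with trust_remote_code work inside the Ray actor (an
            # assumption; the reason is not stated in this file).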
            if init_cached_hf_modules:
                # pylint: disable=import-outside-toplevel
                from transformers.dynamic_module_utils import init_hf_modules
                init_hf_modules()
            self.worker = None

        def init_worker(self, worker_init_fn):
            self.worker = worker_init_fn()

        def __getattr__(self, name):
            return getattr(self.worker, name)

        def execute_method(self, method, *args, **kwargs):
            executor = getattr(self, method)
            return executor(*args, **kwargs)

except ImportError as e:
    logger.warning(f"Failed to import Ray with {e!r}. "
                   "For distributed inference, please install Ray with "
                   "`pip install ray pandas pyarrow`.")
    ray = None
    TorchDistributedWorker = None
    RayWorker = None  # pylint: disable=invalid-name
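
# A minimal usage sketch for RayWorker. The actor options, the Worker
# constructor arguments, and the "init_model" method name are assumptions for
# illustration; they live elsewhere in vllm, not in this file:
#
#     worker = ray.remote(num_cpus=0)(RayWorker).remote()
#     ray.get(worker.init_worker.remote(lambda: Worker(...)))
#     ray.get(worker.execute_method.remote("init_model"))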

if TYPE_CHECKING:
    from ray.util.placement_group import PlacementGroup


def get_open_port():
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(("", 0))
        return s.getsockname()[1]


def initialize_cluster(
    parallel_config: ParallelConfig,
    engine_use_ray: bool = False,
    ray_address: Optional[str] = None,
) -> Tuple[str, Optional["PlacementGroup"]]:
    """Initialize the distributed cluster probably with Ray.

    Args:
        parallel_config: The configurations for parallel execution.
        engine_use_ray: Whether to use Ray for async engine.
        ray_address: The address of the Ray cluster. If None, uses
            the default Ray cluster address.

    Returns:
        A tuple of (`distributed_init_method`, `placement_group`). The
        `distributed_init_method` is the address for initializing the
        distributed backend; it is None when Ray is used for the workers,
        since the address is determined later. `placement_group` is the Ray
        placement group for the workers, or None when Ray workers are not
        used.
    """
    if parallel_config.worker_use_ray or engine_use_ray:
        if ray is None:
            raise ImportError(
                "Ray is not installed. Please install Ray to use distributed "
                "serving.")
        # Connect to a ray cluster.
        ray.init(address=ray_address, ignore_reinit_error=True)

    if not parallel_config.worker_use_ray:
        # Initialize cluster locally.
        port = get_open_port()
        # We need to setup the distributed init method to make sure
        # the distributed megatron code (e.g., get world size) works correctly.
        distributed_init_method = f"tcp://localhost:{port}"
        return distributed_init_method, None

    current_placement_group = ray.util.get_current_placement_group()
    if current_placement_group:
        # We are in a placement group
        bundles = current_placement_group.bundle_specs
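        # `bundle_specs` is a list of per-bundle resource dicts, e.g.
        # [{"CPU": 1.0, "GPU": 1.0}, {"GPU": 1.0}] (values here are
        # illustrative, not taken from a real cluster).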
        # Verify that we can use the placement group.
        gpu_bundles = 0
        for bundle in bundles:
            bundle_gpus = bundle.get("GPU", 0)
            if bundle_gpus > 1:
                raise ValueError(
                    "Placement group bundle cannot have more than 1 GPU.")
            if bundle_gpus:
                gpu_bundles += 1
        if parallel_config.world_size > gpu_bundles:
            raise ValueError(
                "The number of required GPUs exceeds the total number of "
                "available GPUs in the placement group.")
    else:
        num_gpus_in_cluster = ray.cluster_resources().get("GPU", 0)
        if parallel_config.world_size > num_gpus_in_cluster:
            raise ValueError(
                "The number of required GPUs exceeds the total number of "
                "available GPUs in the cluster.")
        # Create a new placement group
        current_placement_group = ray.util.placement_group([{
            "GPU": 1
        }] * parallel_config.world_size)
        # Wait until PG is ready - this will block until all
        # requested resources are available, and will timeout
        # if they cannot be provisioned.
        ray.get(current_placement_group.ready(), timeout=1800)

    return None, current_placement_group
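

# Usage sketch; the ParallelConfig arguments below are assumptions for
# illustration only:
#
#     parallel_config = ParallelConfig(pipeline_parallel_size=1,
#                                      tensor_parallel_size=2,
#                                      worker_use_ray=True)
#     distributed_init_method, placement_group = initialize_cluster(
#         parallel_config)
#
# With worker_use_ray=False this returns a local tcp://localhost:<port>
# address and no placement group; with Ray workers it returns
# (None, placement_group) and the init method is determined later.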