# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

from enum import Enum, unique

import torch
from vllm.platforms import Platform


@unique
class OmniPlatformEnum(Enum):
    """Enum of device backends recognized by vllm-omni.

    Each member's value is the lowercase backend identifier string.
    ``@unique`` guards against two members silently aliasing the same
    value if a new backend is added later.
    """

    CUDA = "cuda"
    ROCM = "rocm"
    NPU = "npu"
    XPU = "xpu"
    # Sentinel used when no concrete platform has been detected/selected.
    UNSPECIFIED = "unspecified"


class OmniPlatform(Platform):
    """
    Abstract base class for vllm-omni Platform.

    Inherits from vLLM's Platform and adds Omni-specific interfaces.
    This gives OmniPlatform all vLLM Platform capabilities plus
    Omni-specific methods.
    """

    # Identity tag for this platform; each concrete subclass sets it to the
    # matching OmniPlatformEnum member. Drives the is_* predicates below.
    _omni_enum: OmniPlatformEnum

    def is_cuda(self) -> bool:
        """Return True if this platform is NVIDIA CUDA."""
        return self._omni_enum is OmniPlatformEnum.CUDA

    def is_rocm(self) -> bool:
        """Return True if this platform is AMD ROCm."""
        return self._omni_enum is OmniPlatformEnum.ROCM

    def is_npu(self) -> bool:
        """Return True if this platform is an NPU backend."""
        return self._omni_enum is OmniPlatformEnum.NPU

    def is_xpu(self) -> bool:
        """Return True if this platform is an Intel XPU backend."""
        return self._omni_enum is OmniPlatformEnum.XPU

    @classmethod
    def get_omni_ar_worker_cls(cls) -> str:
        """Return the fully qualified class path of the AR worker."""
        raise NotImplementedError

    @classmethod
    def get_omni_generation_worker_cls(cls) -> str:
        """Return the fully qualified class path of the generation worker."""
        raise NotImplementedError

    @classmethod
    def get_default_stage_config_path(cls) -> str:
        """Return the default stage configuration file path for this platform."""
        raise NotImplementedError

    @classmethod
    def get_diffusion_attn_backend_cls(
        cls,
        selected_backend: str | None,
        head_size: int,
    ) -> str:
        """Get the diffusion attention backend class path for this platform.

        This method selects the appropriate attention backend for diffusion
        models based on platform capabilities and user preferences.

        Args:
            selected_backend: User-selected backend name (e.g., "FLASH_ATTN",
                "TORCH_SDPA", "SAGE_ATTN"). If None, uses platform default.
            head_size: Attention head size.

        Returns:
            Fully qualified class path of the selected backend.
        """
        raise NotImplementedError

    @classmethod
    def supports_torch_inductor(cls) -> bool:
        """Check if the platform supports torch.compile with inductor backend."""
        raise NotImplementedError

    @classmethod
    def get_torch_device(cls, local_rank: int | None = None) -> torch.device:
        """Return the torch.device for this platform (optionally per rank)."""
        raise NotImplementedError

    @classmethod
    def get_device_count(cls) -> int:
        """Return the number of devices available on this platform."""
        raise NotImplementedError

    @classmethod
    def get_device_version(cls) -> str | None:
        """Return the device/driver version string, or None if unavailable."""
        raise NotImplementedError

    @classmethod
    def synchronize(cls) -> None:
        """Block until all outstanding device work has completed."""
        raise NotImplementedError

    @classmethod
    def get_free_memory(cls, device: torch.device | None = None) -> int:
        """Return the free device memory in bytes (current device if None)."""
        raise NotImplementedError


class UnspecifiedOmniPlatform(OmniPlatform):
    """Placeholder platform used before a concrete backend is resolved."""

    # is_cuda()/is_rocm()/is_npu()/is_xpu() all return False for this tag.
    _omni_enum = OmniPlatformEnum.UNSPECIFIED
    # Empty device type string: no physical device is associated.
    device_type = ""