# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional

import torch

from megatron.core.inference.sampling_params import SamplingParams


class Status(Enum):
    """Status of an inference request."""

    WAITING_IN_QUEUE = 1
    ACTIVE_AND_GENERATING_TOKENS = 2
    ACTIVE_BUT_NOT_GENERATING_TOKENS = 3
    COMPLETED = 4
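

# Rough lifecycle sketch (nothing in this module enforces these transitions; the
# inference engine drives them):
#
#     WAITING_IN_QUEUE -> ACTIVE_AND_GENERATING_TOKENS -> COMPLETED
#
# ACTIVE_BUT_NOT_GENERATING_TOKENS marks a request that the engine holds as active
# without currently producing tokens; the exact condition is engine-specific.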


@dataclass(kw_only=True)
class InferenceRequest:
    """Class for one inference request

    Containing relevant data for an inference request

    """

    request_id: str
    prompt: str
    inference_parameters: Optional[SamplingParams] = None  # sampling parameters for generation
    prompt_tokens: Optional[List[int]] = None  # tokenized prompt
    arrival_time: Optional[float] = None  # time at which the request arrived
    status: Optional[Status] = None  # current lifecycle status (see Status)
    encoder_prompt: Optional[str] = None  # encoder-side prompt for encoder-decoder models
    generated_text: Optional[str] = None  # detokenized generated text
    segments: Optional[List[str]] = None  # detokenized segments of the sequence
    generated_segments: Optional[List[str]] = None  # detokenized segments of the generated text
    generated_sequence_lengths: Optional[List[int]] = None  # lengths of the generated sequences
    generated_tokens: Optional[torch.Tensor] = None  # generated token ids
    generated_log_probs: Optional[torch.Tensor] = None  # log-probabilities of the generated tokens
    generated_length: Optional[int] = None  # number of tokens generated
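
# Example: constructing an InferenceRequest (a minimal sketch; the values below
# are illustrative, prompt_tokens would normally come from the engine's tokenizer,
# and SamplingParams() simply takes that class's defaults):
#
#     request = InferenceRequest(
#         request_id="request-0",
#         prompt="Hello, world",
#         prompt_tokens=[1, 2, 3],
#         inference_parameters=SamplingParams(),
#         status=Status.WAITING_IN_QUEUE,
#     )
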


@dataclass(kw_only=True)
class VLMInferenceRequest(InferenceRequest):
    """Class for a VLM inference request"""

    num_img_embeddings_per_tile: int
    imgs: torch.Tensor
    num_tiles: torch.Tensor
    decoder_seq_length: int
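

# Example: constructing a VLMInferenceRequest (a minimal sketch; the image tensor
# shape, tile count, and other values are illustrative assumptions, not a contract
# defined by this module):
#
#     vlm_request = VLMInferenceRequest(
#         request_id="request-1",
#         prompt="Describe the picture.",
#         num_img_embeddings_per_tile=576,
#         imgs=torch.zeros(1, 3, 336, 336),
#         num_tiles=torch.tensor([1]),
#         decoder_seq_length=1024,
#     )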