inference_request.py 739 Bytes
Newer Older
liangjing's avatar
liangjing committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
from dataclasses import dataclass
from enum import Enum
from typing import List

import torch

from megatron.core.inference.common_inference_params import CommonInferenceParams


# class syntax
class Status(Enum):
    WAITING_IN_QUEUE = 1
    ACTIVE_AND_GENERATING_TOKENS = 2
    ACTIVE_BUT_NOT_GENERATING_TOKENS = 3
    COMPLETED = 4


@dataclass
class InferenceRequest:
    request_id: str
    prompt: str
    inference_parameters: CommonInferenceParams
    prompt_tokens: List[int]
    arrival_time: float
    status: Status
    generated_text: str = None
    generated_tokens: torch.Tensor = None
    generated_log_probs: torch.Tensor = None
    generated_length: int = 0