delta.rs 8.07 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

16
use super::{NvCreateChatCompletionRequest, NvCreateChatCompletionStreamResponse};
17
18
use crate::protocols::common;

19
/// Provides a method for generating a [`DeltaGenerator`] from a chat completion request.
20
impl NvCreateChatCompletionRequest {
21
22
23
24
    /// Creates a [`DeltaGenerator`] instance based on the chat completion request.
    ///
    /// # Returns
    /// * [`DeltaGenerator`] configured with model name and response options.
25
26
27
    pub fn response_generator(&self) -> DeltaGenerator {
        let options = DeltaGeneratorOptions {
            enable_usage: true,
Paul Hendricks's avatar
Paul Hendricks committed
28
            enable_logprobs: self.inner.logprobs.unwrap_or(false),
29
30
        };

Paul Hendricks's avatar
Paul Hendricks committed
31
        DeltaGenerator::new(self.inner.model.clone(), options)
32
33
34
    }
}

35
/// Configuration options for the [`DeltaGenerator`], controlling response behavior.
#[derive(Debug, Clone, Default)]
pub struct DeltaGeneratorOptions {
    /// Determines whether token usage statistics should be included in each
    /// streamed response chunk.
    pub enable_usage: bool,
    /// Determines whether log probabilities should be included in the response.
    pub enable_logprobs: bool,
}

44
/// Generates incremental chat completion responses in a streaming fashion.
#[derive(Debug, Clone)]
pub struct DeltaGenerator {
    /// Unique identifier for the chat completion session (`chatcmpl-<uuid>`).
    id: String,
    /// Object type, representing a streamed chat completion response
    /// (`"chat.completion.chunk"`).
    object: String,
    /// Timestamp (Unix epoch, seconds) when the response was created.
    /// `u32` here matches the field type of the `async_openai` stream response
    /// this value is copied into.
    created: u32,
    /// Model name used for generating responses.
    model: String,
    /// Optional system fingerprint for version tracking.
    system_fingerprint: Option<String>,
    /// Optional service tier information for the response.
    service_tier: Option<async_openai::types::ServiceTierResponse>,
    /// Tracks token usage for the completion request.
    usage: async_openai::types::CompletionUsage,
    /// Counter tracking the number of messages issued.
    // NOTE(review): not incremented anywhere in this file — presumably advanced
    // by the streaming driver elsewhere; confirm, since it gates the assistant
    // role emission in `create_choice`.
    msg_counter: u64,
    /// Configuration options for response generation.
    options: DeltaGeneratorOptions,
}

impl DeltaGenerator {
68
69
70
71
72
73
74
75
    /// Creates a new [`DeltaGenerator`] instance with the specified model and options.
    ///
    /// # Arguments
    /// * `model` - The model name used for response generation.
    /// * `options` - Configuration options for enabling usage and log probabilities.
    ///
    /// # Returns
    /// * A new instance of [`DeltaGenerator`].
76
    pub fn new(model: String, options: DeltaGeneratorOptions) -> Self {
77
78
        // SAFETY: Casting from `u64` to `u32` could lead to precision loss after `u32::MAX`,
        // but this will not be an issue until 2106.
79
80
81
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
Paul Hendricks's avatar
Paul Hendricks committed
82
83
84
85
86
87
88
89
90
            .as_secs() as u32;

        let usage = async_openai::types::CompletionUsage {
            prompt_tokens: 0,
            completion_tokens: 0,
            total_tokens: 0,
            prompt_tokens_details: None,
            completion_tokens_details: None,
        };
91
92
93
94
95
96
97
98

        Self {
            id: format!("chatcmpl-{}", uuid::Uuid::new_v4()),
            object: "chat.completion.chunk".to_string(),
            created: now,
            model,
            system_fingerprint: None,
            service_tier: None,
Paul Hendricks's avatar
Paul Hendricks committed
99
            usage,
100
101
102
103
104
            msg_counter: 0,
            options,
        }
    }

105
106
107
108
    /// Updates the prompt token usage count.
    ///
    /// # Arguments
    /// * `isl` - The number of prompt tokens used.
Paul Hendricks's avatar
Paul Hendricks committed
109
    pub fn update_isl(&mut self, isl: u32) {
110
111
112
        self.usage.prompt_tokens = isl;
    }

113
114
115
116
117
118
119
120
121
122
    /// Creates a choice within a chat completion response.
    ///
    /// # Arguments
    /// * `index` - The index of the choice in the completion response.
    /// * `text` - The text content for the response.
    /// * `finish_reason` - The reason why the response finished (e.g., stop, length, etc.).
    /// * `logprobs` - Optional log probabilities of the generated tokens.
    ///
    /// # Returns
    /// * An [`async_openai::types::CreateChatCompletionStreamResponse`] instance representing the choice.
Paul Hendricks's avatar
Paul Hendricks committed
123
    #[allow(deprecated)]
124
125
    pub fn create_choice(
        &self,
Paul Hendricks's avatar
Paul Hendricks committed
126
        index: u32,
127
        text: Option<String>,
Paul Hendricks's avatar
Paul Hendricks committed
128
129
130
131
132
        finish_reason: Option<async_openai::types::FinishReason>,
        logprobs: Option<async_openai::types::ChatChoiceLogprobs>,
    ) -> async_openai::types::CreateChatCompletionStreamResponse {
        // TODO: Update for tool calling
        let delta = async_openai::types::ChatCompletionStreamResponseDelta {
133
            role: if self.msg_counter == 0 {
Paul Hendricks's avatar
Paul Hendricks committed
134
                Some(async_openai::types::Role::Assistant)
135
136
137
            } else {
                None
            },
Paul Hendricks's avatar
Paul Hendricks committed
138
            content: text,
139
            tool_calls: None,
Paul Hendricks's avatar
Paul Hendricks committed
140
141
            function_call: None,
            refusal: None,
142
143
        };

Paul Hendricks's avatar
Paul Hendricks committed
144
145
146
147
148
149
150
151
152
153
        let choice = async_openai::types::ChatChoiceStream {
            index,
            delta,
            finish_reason,
            logprobs,
        };

        let choices = vec![choice];

        async_openai::types::CreateChatCompletionStreamResponse {
154
155
156
157
158
            id: self.id.clone(),
            object: self.object.clone(),
            created: self.created,
            model: self.model.clone(),
            system_fingerprint: self.system_fingerprint.clone(),
Paul Hendricks's avatar
Paul Hendricks committed
159
            choices,
160
161
162
163
164
165
166
167
168
169
            usage: if self.options.enable_usage {
                Some(self.usage.clone())
            } else {
                None
            },
            service_tier: self.service_tier.clone(),
        }
    }
}

170
/// Implements the [`crate::protocols::openai::DeltaGeneratorExt`] trait for [`DeltaGenerator`], allowing
171
/// it to transform backend responses into OpenAI-style streaming responses.
172
173
174
impl crate::protocols::openai::DeltaGeneratorExt<NvCreateChatCompletionStreamResponse>
    for DeltaGenerator
{
175
176
177
178
179
180
181
182
    /// Converts a backend response into a structured OpenAI-style streaming response.
    ///
    /// # Arguments
    /// * `delta` - The backend response containing generated text and metadata.
    ///
    /// # Returns
    /// * `Ok(NvCreateChatCompletionStreamResponse)` if conversion succeeds.
    /// * `Err(anyhow::Error)` if an error occurs.
183
184
185
    fn choice_from_postprocessor(
        &mut self,
        delta: crate::protocols::common::llm_backend::BackendOutput,
186
    ) -> anyhow::Result<NvCreateChatCompletionStreamResponse> {
187
        // Aggregate token usage if enabled.
188
        if self.options.enable_usage {
Paul Hendricks's avatar
Paul Hendricks committed
189
            self.usage.completion_tokens += delta.token_ids.len() as u32;
190
191
        }

192
        // TODO: Implement log probabilities aggregation.
193
194
        let logprobs = None;

195
        // Map backend finish reasons to OpenAI's finish reasons.
196
        let finish_reason = match delta.finish_reason {
Paul Hendricks's avatar
Paul Hendricks committed
197
198
199
200
            Some(common::FinishReason::EoS) => Some(async_openai::types::FinishReason::Stop),
            Some(common::FinishReason::Stop) => Some(async_openai::types::FinishReason::Stop),
            Some(common::FinishReason::Length) => Some(async_openai::types::FinishReason::Length),
            Some(common::FinishReason::Cancelled) => Some(async_openai::types::FinishReason::Stop),
201
202
203
204
205
206
            Some(common::FinishReason::Error(err_msg)) => {
                return Err(anyhow::anyhow!(err_msg));
            }
            None => None,
        };

207
        // Create the streaming response.
208
        let index = 0;
Paul Hendricks's avatar
Paul Hendricks committed
209
210
        let stream_response = self.create_choice(index, delta.text, finish_reason, logprobs);

211
        Ok(NvCreateChatCompletionStreamResponse {
Paul Hendricks's avatar
Paul Hendricks committed
212
213
            inner: stream_response,
        })
214
215
    }
}