// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use dynamo_llm::protocols::openai::nvext::NvExt;
use dynamo_llm::types::openai::chat_completions::{
    NvCreateChatCompletionRequest, OpenAIChatCompletionsStreamingEngine,
};
use dynamo_runtime::{pipeline::Context, runtime::CancellationToken, Runtime};
use futures::StreamExt;
use std::io::{ErrorKind, Write};

use crate::input::common;
use crate::{EngineConfig, Flags, RequestTemplate};

/// Max response tokens for each single query. Must be less than model context size.
/// TODO: Cmd line flag to override this
const MAX_TOKENS: u32 = 8192;

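/// Run the text chat interface. `single_prompt` is the pipe case
/// (`echo "Hello" | dynamo-run ..`): run that one prompt and exit.
/// Otherwise start an interactive prompt loop.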
pub async fn run(
    runtime: Runtime,
    flags: Flags,
    single_prompt: Option<String>,
    engine_config: EngineConfig,
    template: Option<RequestTemplate>,
) -> anyhow::Result<()> {
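    // Cancellation token checked on every turn and while streaming tokens.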
    let cancel_token = runtime.primary_token();
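    // Resolve the engine from flags/config. `service_name` is also used as
    // the default model name in requests when no template is given.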
    let (service_name, engine, inspect_template): (
        String,
        OpenAIChatCompletionsStreamingEngine,
        bool,
    ) = common::prepare_engine(runtime, flags, engine_config).await?;
    main_loop(
        cancel_token,
        &service_name,
        engine,
        single_prompt,
        inspect_template,
        template,
    )
    .await
}

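/// Read a prompt, send the conversation to the engine, stream the reply to
/// stdout, and repeat until cancelled or the input is interrupted.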
async fn main_loop(
    cancel_token: CancellationToken,
    service_name: &str,
    engine: OpenAIChatCompletionsStreamingEngine,
    mut initial_prompt: Option<String>,
    _inspect_template: bool,
    template: Option<RequestTemplate>,
) -> anyhow::Result<()> {
    if initial_prompt.is_none() {
        tracing::info!("Ctrl-c to exit");
    }
    let theme = dialoguer::theme::ColorfulTheme::default();

    // Initial prompt is the pipe case: `echo "Hello" | dynamo-run ..`
    // We run that single prompt and exit
    let single = initial_prompt.is_some();
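
    // `messages` accumulates the whole conversation; the full history is sent
    // with every request, so the engine needs no per-session state.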
    let mut history = dialoguer::BasicHistory::default();
    let mut messages = vec![];
    while !cancel_token.is_cancelled() {
        // User input
        let prompt = match initial_prompt.take() {
            Some(p) => p,
            None => {
                let input_ui = dialoguer::Input::<String>::with_theme(&theme)
                    .history_with(&mut history)
                    .with_prompt("User");
                match input_ui.interact_text() {
                    Ok(prompt) => prompt,
                    Err(dialoguer::Error::IO(err)) => {
                        match err.kind() {
                            ErrorKind::Interrupted => {
                                // Ctrl-C
                                // Unfortunately I could not make dialoguer handle Ctrl-d
                            }
                            k => {
                                tracing::info!("IO error: {k}");
                            }
                        }
                        break;
                    }
                }
            }
        };

        // Construct messages
        let user_message = async_openai::types::ChatCompletionRequestMessage::User(
            async_openai::types::ChatCompletionRequestUserMessage {
                content: async_openai::types::ChatCompletionRequestUserMessageContent::Text(prompt),
                name: None,
            },
        );
        messages.push(user_message);
        // Build the OpenAI chat completion request. Template values (model,
        // max completion tokens, temperature) take precedence when provided.
        let inner = async_openai::types::CreateChatCompletionRequestArgs::default()
            .messages(messages.clone())
            .model(
                template
                    .as_ref()
                    .map_or_else(|| service_name.to_string(), |t| t.model.clone()),
            )
            .stream(true)
            .max_completion_tokens(
                template
                    .as_ref()
                    .map_or(MAX_TOKENS, |t| t.max_completion_tokens),
            )
            .temperature(template.as_ref().map_or(0.7, |t| t.temperature))
            .n(1) // only generate one response
            .build()?;
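
        // NVIDIA extension fields attached alongside the OpenAI request.
        // `ignore_eos: true` tells the engine to keep generating past the
        // end-of-sequence token, up to the max token limit.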
        let nvext = NvExt {
            ignore_eos: Some(true),
            ..Default::default()
        };

        // TODO We cannot set min_tokens with async-openai
        // if inspect_template {
        //     // This makes the pre-processor ignore stop tokens
        //     req_builder.min_tokens(8192);
        // }

        let req = NvCreateChatCompletionRequest {
            inner,
            nvext: Some(nvext),
        };

        // Call the model
        let mut stream = engine.generate(Context::new(req)).await?;

        // Stream the output to stdout
        let mut stdout = std::io::stdout();
        let mut assistant_message = String::new();
        while let Some(item) = stream.next().await {
            if cancel_token.is_cancelled() {
                break;
            }
            match (item.data.as_ref(), item.event.as_deref()) {
                (Some(data), _) => {
                    // Normal case: one delta per event, one choice since we set n(1)
                    let Some(chat_comp) = data.inner.choices.first() else {
                        continue;
                    };
                    if let Some(c) = &chat_comp.delta.content {
                        // write_all, not write: a short write would drop output
                        let _ = stdout.write_all(c.as_bytes());
                        let _ = stdout.flush();
                        assistant_message += c;
                    }
                    if let Some(finish_reason) = chat_comp.finish_reason {
                        tracing::trace!("finish reason: {finish_reason:?}");
                        break;
                    }
                }
                (None, Some("error")) => {
                    // There's only one error, but we loop in case that changes
                    for err in item.comment.unwrap_or_default() {
                        tracing::error!("Engine error: {err}");
                    }
                }
                (None, Some(annotation)) => {
                    tracing::debug!("Annotation. {annotation}: {:?}", item.comment);
                }
                _ => {
                    unreachable!("Event from engine with no data, no error, no annotation.");
                }
            }
        }
        println!();

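        // Record the streamed reply as an assistant message so the next turn
        // includes the full conversation.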
        let assistant_content =
            async_openai::types::ChatCompletionRequestAssistantMessageContent::Text(
                assistant_message,
            );

        let assistant_message = async_openai::types::ChatCompletionRequestMessage::Assistant(
            async_openai::types::ChatCompletionRequestAssistantMessage {
                content: Some(assistant_content),
                ..Default::default()
            },
        );
        messages.push(assistant_message);

        if single {
            break;
        }
    }
    println!();
    Ok(())
}