// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#[cfg(any(feature = "vllm", feature = "sglang"))]
use std::{future::Future, pin::Pin};
use std::{io::Read, sync::Arc};

use dynamo_llm::{
    backend::ExecutionContext, engines::StreamingEngine, kv_router::publisher::KvMetricsPublisher,
    model_card::model::ModelDeploymentCard,
};
use dynamo_runtime::{protocols::Endpoint, DistributedRuntime};

mod flags;
pub use flags::Flags;
mod hub;
mod input;
#[cfg(any(feature = "vllm", feature = "sglang"))]
mod net;
mod opt;
pub use dynamo_llm::request_template::RequestTemplate;
pub use opt::{Input, Output};

/// How we identify a namespace/component/endpoint URL.
/// Technically the '://' is not part of the scheme but it eliminates several string
/// concatenations.
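/// A full path is assumed to look like `dyn://namespace.component.endpoint`; see the
/// [`Endpoint`] `FromStr` impl for the exact syntax.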
const ENDPOINT_SCHEME: &str = "dyn://";

/// When `in=text` the user doesn't need to know the model name, and doesn't need to provide it on
/// the command line. Hence it's optional, and defaults to this.
const INVISIBLE_MODEL_NAME: &str = "dynamo-run";

/// The component name for the KV publisher, if used
const KV_PUBLISHER_COMPONENT: &str = "kvpublisher";

/// How we identify a python string endpoint
#[cfg(feature = "python")]
const PYTHON_STR_SCHEME: &str = "pystr:";

/// How we identify a python token endpoint
#[cfg(feature = "python")]
const PYTHON_TOK_SCHEME: &str = "pytok:";

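/// The engine we built for the `out` option. Constructed in [`run`] below and handed to
/// whichever input frontend was selected.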
pub enum EngineConfig {
    /// A remote networked engine we don't know about yet
    Dynamic(Endpoint),

    /// A full-service engine does its own tokenization and prompt formatting.
    StaticFull {
        service_name: String,
        engine: Arc<dyn StreamingEngine>,
        card: Box<ModelDeploymentCard>,
    },

    /// A core engine expects to be wrapped with pre/post processors that handle tokenization.
    StaticCore {
        service_name: String,
        engine: ExecutionContext,
        card: Box<ModelDeploymentCard>,
    },

    /// vllm multi-node doesn't run an engine on nodes other than 0; 'ray' does all the work.
    None,
}

/// Distributed system values
struct DynInput {
    endpoint_id: Endpoint,
    distributed_runtime: DistributedRuntime,
}

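/// Build the engine selected by `out_opt`, wire it to the `in_opt` frontend, and run until the
/// work is done or the runtime's primary cancellation token fires.
///
/// A minimal sketch of a caller. This is hypothetical: the exact `Runtime` constructor and the
/// way `Flags` is obtained depend on the binary (`Runtime::from_settings` and a clap-style
/// `Flags::parse` are assumptions here):
/// ```ignore
/// let runtime = dynamo_runtime::Runtime::from_settings()?;
/// run(runtime, Input::Text, Output::EchoFull, Flags::parse(), None).await?;
/// ```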
#[allow(unused_mut)]
pub async fn run(
    runtime: dynamo_runtime::Runtime,
    mut in_opt: Input, // mut because vllm and sglang multi-node can change it
    out_opt: Output,
    flags: Flags,
    #[allow(unused_variables)] zmq_socket_prefix: Option<String>,
) -> anyhow::Result<()> {
    let cancel_token = runtime.primary_token();
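    // Cancelled when the runtime shuts down. Clones of this token are handed to every engine
    // started below, and `in=none` simply waits on it.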

    // Turn relative paths into absolute paths
    let mut model_path = flags
        .model_path_pos
        .clone()
        .or(flags.model_path_flag.clone())
        .and_then(|p| {
            if p.exists() {
                p.canonicalize().ok()
            } else {
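                // Not a local path; it may be an HF repo ID, which we download below.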
                Some(p)
            }
        });

    // Serve the model under the name provided, or the name of the GGUF file or HF repo.
    let mut model_name = flags
        .model_name
        .clone()
        .or_else(|| {
            model_path
                .as_ref()
                .and_then(|p| p.iter().next_back())
                .map(|n| n.to_string_lossy().into_owned())
        })
        .or_else(|| {
            if in_opt == Input::Text {
                Some(INVISIBLE_MODEL_NAME.to_string())
            } else {
                None
            }
        });

    // If it's an HF repo, download it
    if let Some(inner_model_path) = model_path.as_ref() {
        if !inner_model_path.exists() {
            model_name = Some(inner_model_path.display().to_string());
            model_path = Some(hub::from_hf(inner_model_path).await?);
        }
    }

    // Load the model deployment card, if any
    // Only used by some engines, so without those feature flags it's unused.
    #[allow(unused_variables)]
    let maybe_card = match (&model_path, &flags.model_config) {
        // --model-config takes precedence
        (_, Some(model_config)) => {
            match ModelDeploymentCard::from_local_path(model_config, model_name.as_deref()).await {
                Ok(card) => Some(card),
                Err(e) => {
                    tracing::error!(
                        "Failed to load model card from --model-config path {}: {e}",
                        model_config.display(),
                    );
                    None
                }
            }
        }
        // If --model-path is an HF repo use that
        (Some(model_path), _) if model_path.is_dir() => {
            match ModelDeploymentCard::from_local_path(model_path, model_name.as_deref()).await {
                Ok(card) => Some(card),
                Err(e) => {
                    tracing::error!(
                        "Failed to load model card from --model-path {}: {e}",
                        model_path.display(),
                    );
                    None
                }
            }
        }
        (Some(model_path), _) if model_path.is_file() => {
            match ModelDeploymentCard::from_gguf(model_path, model_name.as_deref()).await {
                Ok(card) => Some(card),
                Err(e) => {
                    tracing::error!(
                        "Failed to load model card from GGUF {}: {e}",
                        model_path.display(),
                    );
                    None
                }
            }
        }
        // Otherwise we don't have one, but we only need it if we're tokenizing
        _ => {
            tracing::debug!(
                "No model card path provided (neither --model-config nor --model-path)"
            );
            None
        }
    };

    let dyn_input = match &in_opt {
        Input::Endpoint(endpoint_path) => {
            if model_path.as_ref().map(|mp| mp.is_file()).unwrap_or(false)
                && flags.model_config.is_none()
            {
                // TODO We need to convert the tokenizer extracted from the GGUF file into
                // something we can publish to NATS. Ideally `tokenizer.json` directly, but
                // otherwise an intermediate format.
                tracing::error!("Serving GGUF files in a distributed system requires `--model-config <hf-repo-dir>` so that we can find the tokenizer config");
                return Ok(());
            }

            // If we are in a distributed system, we need to know our component upfront
            let distributed_runtime = DistributedRuntime::from_settings(runtime.clone()).await?;
            let endpoint_id: Endpoint = endpoint_path.parse()?;
            Some(DynInput {
                endpoint_id,
                distributed_runtime,
            })
        }
        _ => None,
    };

    #[cfg(any(feature = "vllm", feature = "sglang"))]
    let mut extra: Option<Pin<Box<dyn Future<Output = ()> + Send>>> = None; // vllm and sglang sub-process

    let template = if let Some(path) = flags.request_template.as_ref() {
        let template = RequestTemplate::load(path)?;
        tracing::debug!("Using request template: {template:?}");
        Some(template)
    } else {
        None
    };

    // Create the engine matching `out`
    let engine_config = match out_opt {
        Output::EchoFull => {
            let Some(model_name) = model_name else {
                anyhow::bail!(
                    "Pass --model-name or --model-path so we know which model to imitate"
                );
            };
            EngineConfig::StaticFull {
                card: Box::new(ModelDeploymentCard::with_name_only(&model_name)),
                service_name: model_name,
                engine: dynamo_llm::engines::make_engine_full(),
            }
        }
        Output::EchoCore => {
            let Some(mut card) = maybe_card.clone() else {
                anyhow::bail!(
                    "out=echo_core needs to find the tokenizer. Pass flag --model-path <path>"
                );
            };

            // TODO: Switch to `true` once pre-processing moves ingress side
            card.requires_preprocessing = false;

            EngineConfig::StaticCore {
                service_name: card.service_name.clone(),
                engine: dynamo_llm::engines::make_engine_core(),
                card: Box::new(card),
            }
        }
        Output::Endpoint(path) => {
            let endpoint: Endpoint = path.parse()?;
            EngineConfig::Dynamic(endpoint)
        }
        #[cfg(feature = "mistralrs")]
        Output::MistralRs => {
            let Some(model_path) = model_path else {
                anyhow::bail!("out=mistralrs requires flag --model-path=<full-path-to-model-gguf>");
            };
            let Some(model_name) = model_name else {
                unreachable!("We checked model_path earlier, and set model_name from model_path");
            };
            EngineConfig::StaticFull {
                card: Box::new(ModelDeploymentCard::with_name_only(&model_name)),
                service_name: model_name,
                engine: dynamo_engine_mistralrs::make_engine(&model_path).await?,
            }
        }
        #[cfg(feature = "sglang")]
        Output::SgLang => {
            let Some(model_path) = model_path else {
                anyhow::bail!("out=sglang requires flag --model-path=<full-path-to-model-dir>");
            };
            if !model_path.is_dir() {
                anyhow::bail!("--model-path should point at a HuggingFace repo checkout");
            }
            // Safety: Earlier we built maybe_card from model_path, which we checked right above
            let card = maybe_card.clone().unwrap();
            let Some(sock_prefix) = zmq_socket_prefix else {
                anyhow::bail!("sglang requires zmq_socket_prefix");
            };
            let node_conf = dynamo_llm::engines::MultiNodeConfig {
                num_nodes: flags.num_nodes,
                node_rank: flags.node_rank,
                leader_addr: flags.leader_addr.clone().unwrap_or_default(),
            };
            if node_conf.num_nodes > 1 {
                if let Ok(Some(if_name)) = net::get_primary_interface().await {
                    tracing::info!("If you see 'gloo' errors from sglang try setting these environment variables:");
                    tracing::info!("export GLOO_SOCKET_IFNAME={if_name}");
                    tracing::info!("export NCCL_SOCKET_IFNAME={if_name}");
                }
                if node_conf.node_rank != 0 {
                    // Follower nodes take input from the leader node over PyTorch distributed,
                    // not from the user.
                    in_opt = Input::None;
                }
            }

            let (engine, sglang_process) = dynamo_engine_sglang::make_engine(
                cancel_token.clone(),
                &model_path,
                &sock_prefix,
                node_conf,
                flags.tensor_parallel_size,
                flags.base_gpu_id,
                flags.extra_engine_args.clone(),
            )
            .await?;
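            // Hold the sglang sub-process's exit future; `run` awaits it after the input
            // loop finishes (see `extra.await` at the bottom).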
            extra = Some(Box::pin(async move {
                let _ = sglang_process.await;
            }));
            EngineConfig::StaticCore {
                service_name: card.service_name.clone(),
                engine,
                card: Box::new(card),
            }
        }
        #[cfg(feature = "vllm")]
        Output::Vllm0_7 => {
            if flags.base_gpu_id != 0 {
                anyhow::bail!("vllm does not support base_gpu_id. Set environment variable CUDA_VISIBLE_DEVICES instead.");
            }
            let Some(model_path) = model_path else {
                anyhow::bail!(
                    "out=vllm requires flag --model-path=<full-path-to-hf-repo-or-model-gguf>"
                );
            };
            let Some(card) = maybe_card.clone() else {
                anyhow::bail!(
                    "Unable to build tokenizer. out=vllm requires --model-path to be an HF repo with fast tokenizer (tokenizer.json) or a GGUF file"
                );
            };
            let Some(sock_prefix) = zmq_socket_prefix else {
                anyhow::bail!("vllm requires zmq_socket_prefix");
            };
            let node_conf = dynamo_llm::engines::MultiNodeConfig {
                num_nodes: flags.num_nodes,
                node_rank: flags.node_rank,
                leader_addr: flags.leader_addr.clone().unwrap_or_default(),
            };
            if node_conf.num_nodes > 1 {
                if let Ok(Some(if_name)) = net::get_primary_interface().await {
                    tracing::info!("If you see network errors from vllm try setting this environment variable:");
                    tracing::info!("export NCCL_SOCKET_IFNAME={if_name}");
                }
                if node_conf.node_rank != 0 {
                    // Only node 0 runs vllm, the others communicate over ray
                    in_opt = Input::None;
                }
            }
            if node_conf.node_rank == 0 {
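                // Leader only: publish KV cache metrics on the "kvpublisher" component,
                // presumably for a KV-aware router to read (see `kv_router::publisher`).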
                let kv_metrics_publisher = if let Some(dyn_input) = &dyn_input {
                    let kvp_component = dyn_input
                        .distributed_runtime
                        .namespace(dyn_input.endpoint_id.namespace.clone())?
                        .component(KV_PUBLISHER_COMPONENT)?;
                    let kvp = Arc::new(KvMetricsPublisher::new()?);
                    let kvp_inner = kvp.clone();
                    tokio::spawn(
                        async move { kvp_inner.create_endpoint(kvp_component, None).await },
                    );
                    Some(kvp)
                } else {
                    None
                };

                // In vllm multi-node, only the leader runs vllm
                let (engine, vllm_future) = dynamo_engine_vllm0_7::make_leader_engine(
                    cancel_token.clone(),
                    &model_path,
                    &sock_prefix,
                    node_conf,
                    flags.tensor_parallel_size,
                    flags.extra_engine_args.clone(),
                    kv_metrics_publisher,
                )
                .await?;
                extra = Some(Box::pin(async move {
                    let _ = vllm_future.await;
                }));
                EngineConfig::StaticCore {
                    service_name: card.service_name.clone(),
                    engine,
                    card: Box::new(card),
                }
            } else {
                // Nodes with rank > 0 only run 'ray'
                let stop_future =
                    dynamo_engine_vllm0_7::start_follower(cancel_token.clone(), node_conf).await?;
                extra = Some(Box::pin(stop_future));
                EngineConfig::None
            }
        }

        #[cfg(feature = "vllm")]
        Output::Vllm | Output::Vllm0_8 => {
            if flags.base_gpu_id != 0 {
                anyhow::bail!("vllm does not support base_gpu_id. Set environment variable CUDA_VISIBLE_DEVICES instead.");
            }
            let Some(model_path) = model_path else {
                anyhow::bail!(
                    "out=vllm requires flag --model-path=<full-path-to-hf-repo-or-model-gguf>"
                );
            };
            let Some(card) = maybe_card.clone() else {
                anyhow::bail!(
                    "Unable to build tokenizer. out=vllm requires --model-path to be an HF repo with fast tokenizer (tokenizer.json) or a GGUF file"
                );
            };
            let node_conf = dynamo_llm::engines::MultiNodeConfig {
                num_nodes: flags.num_nodes,
                node_rank: flags.node_rank,
                leader_addr: flags.leader_addr.clone().unwrap_or_default(),
            };
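            // Unlike the vllm 0.7 path above, this builds a single engine directly; there is
            // no ZMQ socket prefix and no separate leader/follower handling here.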
            let engine = dynamo_engine_vllm0_8::make_engine(
                cancel_token.clone(),
                &model_path,
                node_conf,
                flags.tensor_parallel_size,
                flags.extra_engine_args.clone(),
            )
            .await?;
            EngineConfig::StaticCore {
                service_name: card.service_name.clone(),
                engine,
                card: Box::new(card),
            }
        }

        #[cfg(feature = "llamacpp")]
        Output::LlamaCpp => {
            let Some(model_path) = model_path else {
                anyhow::bail!("out=llamacpp requires flag --model-path=<full-path-to-model-gguf>");
            };
            if !model_path.is_file() {
                anyhow::bail!("--model-path should refer to a GGUF file. llama_cpp does not support safetensors.");
            }
            let Some(card) = maybe_card.clone() else {
                anyhow::bail!(
                    "Pass --model-config so we can find the tokenizer, should be an HF checkout."
                );
            };
            let engine =
                dynamo_engine_llamacpp::make_engine(cancel_token.clone(), &model_path).await?;
            EngineConfig::StaticCore {
                service_name: card.service_name.clone(),
                engine,
                card: Box::new(card),
            }
        }
        #[cfg(feature = "python")]
        Output::PythonStr(path_str) => {
            let Some(model_name) = &model_name else {
                anyhow::bail!("Provide model service name as `--model-name <this>`");
            };
            let py_args = flags.as_vec(&path_str, model_name);
            let p = std::path::PathBuf::from(path_str);
            let engine =
                dynamo_engine_python::make_string_engine(cancel_token.clone(), &p, py_args).await?;
            EngineConfig::StaticFull {
                service_name: model_name.to_string(),
                engine,
                card: Box::new(ModelDeploymentCard::with_name_only(model_name)),
            }
        }
        #[cfg(feature = "python")]
        Output::PythonTok(path_str) => {
            let Some(card) = maybe_card.clone() else {
                anyhow::bail!("Could not find tokenizer. Pass flag --model-path <path>");
            };
            let Some(model_name) = model_name else {
                unreachable!("If we have a card we must have a model name");
            };
            let py_args = flags.as_vec(&path_str, &model_name);
            let p = std::path::PathBuf::from(path_str);
            let engine =
                dynamo_engine_python::make_token_engine(cancel_token.clone(), &p, py_args).await?;
            EngineConfig::StaticCore {
                service_name: model_name.clone(),
                engine,
                card: Box::new(card),
            }
        }
    };

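    // Hand the engine to the selected input frontend; each branch runs until its input is
    // exhausted or the runtime is cancelled.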
    match in_opt {
        Input::Http => {
            crate::input::http::run(runtime.clone(), flags, engine_config, template).await?;
        }
        Input::Text => {
            crate::input::text::run(runtime.clone(), flags, None, engine_config, template).await?;
        }
        Input::Stdin => {
            let mut prompt = String::new();
            std::io::stdin().read_to_string(&mut prompt)?;
            crate::input::text::run(
                runtime.clone(),
                flags,
                Some(prompt),
                engine_config,
                template,
            )
            .await?;
        }
        Input::Batch(path) => {
            crate::input::batch::run(
                runtime.clone(),
                flags,
                maybe_card,
                path,
                engine_config,
                template,
            )
            .await?;
        }
        Input::Endpoint(path) => {
            let Some(dyn_input) = dyn_input else {
                unreachable!("We set dyn_input earlier");
            };
            crate::input::endpoint::run(dyn_input.distributed_runtime, path, engine_config).await?;
        }
        Input::None => {
            // Multi-node setup. The engine sub-process has been started and is talking
            // to its node_rank 0 controller. We do nothing.
            // TODO: Acquire an etcd lease, we are running
            cancel_token.cancelled().await;
        }
    }

    #[cfg(any(feature = "vllm", feature = "sglang"))]
    // Allow engines to ask the main thread to wait on an extra future.
    if let Some(extra) = extra {
        extra.await;
    }

    Ok(())
}