// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use pyo3::{types::IntoPyDict, Python};
use std::env;
use std::path::Path;

use crate::engines::MultiNodeConfig;

const PY_START_ENGINE: &std::ffi::CStr = cr#"
import multiprocessing
import signal

from vllm.engine.multiprocessing.engine import run_mp_engine
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.usage.usage_lib import UsageContext

engine_args = AsyncEngineArgs(
    model=f"{model_path}",
    served_model_name=None,
    task='generate',
    skip_tokenizer_init=True,
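    # skip_tokenizer_init: vllm loads no tokenizer and expects the caller to
    # submit pre-tokenized prompt token ids.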
    seed=0,
    max_model_len=8192,
    max_seq_len_to_capture=8192,
    tensor_parallel_size=int(tp_size_str),
    pipeline_parallel_size=int(nnodes_str),
)

ipc_path = f"ipc:///tmp/{socket_id}"
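# The Rust side is expected to connect to this same zmq IPC endpoint to talk
# to the engine.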

engine_alive = multiprocessing.Value('b', True, lock=False)
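# (Shared health flag: run_mp_engine is expected to clear it if the engine dies.)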

# 0.7.3
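# Blocks serving requests over the zmq socket; only returns when vllm exits.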
run_mp_engine(engine_args, UsageContext.OPENAI_API_SERVER, ipc_path, engine_alive)

# 0.8.1
# TODO: In 0.8+ first argument is VllmConfig, not AsyncEngineArgs
# disable_log_stats = False
# disable_log_requests = True
# run_mp_engine(engine_args, UsageContext.OPENAI_API_SERVER, ipc_path, disable_log_stats, disable_log_requests, engine_alive)
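# A possible 0.8.x adaptation (unverified sketch): build the config first, e.g.
#   vllm_config = engine_args.create_engine_config(UsageContext.OPENAI_API_SERVER)
# and pass vllm_config as the first argument in place of engine_args.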

"#;

/// Start the Python vllm engine that listens on a zmq socket.
/// This is called by running `<bin> --internal-vllm-process`.
/// This does not return until vllm exits.
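///
/// A minimal caller-side sketch (illustrative; the actual CLI wiring lives
/// elsewhere in this crate):
///
/// ```ignore
/// // Hypothetical parent process: re-exec the current binary with the flag
/// // so run_subprocess drives vllm in its own process.
/// let status = std::process::Command::new(std::env::current_exe()?)
///     .arg("--internal-vllm-process")
///     .status()?;
/// ```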
pub fn run_subprocess(
    socket_id: &str,
    model_path: &Path,
    node_config: MultiNodeConfig,
    tp_size: u32,
) -> anyhow::Result<()> {
    pyo3::prepare_freethreaded_python(); // or enable feature "auto-initialize"
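    // Best-effort venv fix-up via the crate's fix_venv helper; any error is
    // deliberately ignored.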
    if let Ok(venv) = env::var("VIRTUAL_ENV") {
        let _ = Python::with_gil(|py| crate::engines::fix_venv(venv, py));
    }
    let model_path_str = model_path.display().to_string();
    Python::with_gil(|py| {
        let locals = [
            ("socket_id", socket_id),
            ("model_path", model_path_str.as_str()),
            ("tp_size_str", &tp_size.to_string()),
            ("nnodes_str", &node_config.num_nodes.to_string()),
        ]
        .into_py_dict(py)
        .unwrap();
        if let Err(err) = py.run(PY_START_ENGINE, None, Some(&locals)) {
            anyhow::bail!("vllm engine run error: {err}");
        }
        tracing::info!("vllm subprocess exit");
        Ok(())
    })
}