// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use crate::input::common;
use crate::{EngineConfig, Flags};
use dynamo_llm::http::service::ModelManager;
use dynamo_llm::{
    engines::StreamingEngineAdapter,
    http::service::{discovery, service_v2},
    request_template::RequestTemplate,
    types::{
        openai::chat_completions::{
            NvCreateChatCompletionRequest, NvCreateChatCompletionStreamResponse,
        },
        openai::completions::{CompletionRequest, CompletionResponse},
    },
};
use dynamo_runtime::transports::etcd;
use dynamo_runtime::{DistributedRuntime, Runtime};

/// Build and run an HTTP service
pub async fn run(
37
    runtime: Runtime,
38
    flags: Flags,
39
    engine_config: EngineConfig,
40
    template: Option<RequestTemplate>,
41
) -> anyhow::Result<()> {
42
    let http_service = service_v2::HttpService::builder()
43
        .port(flags.http_port)
44
45
        .enable_chat_endpoints(true)
        .enable_cmpl_endpoints(true)
46
        .with_request_template(template)
47
        .build()?;
48
    match engine_config {
49
        EngineConfig::Dynamic(endpoint) => {
50
            let distributed_runtime = DistributedRuntime::from_settings(runtime.clone()).await?;
51
52
53
            match distributed_runtime.etcd_client() {
                Some(etcd_client) => {
                    // This will attempt to connect to NATS and etcd
54

55
56
57
58
                    let component = distributed_runtime
                        .namespace(endpoint.namespace)?
                        .component(endpoint.component)?;
                    let network_prefix = component.service_name();
59

60
                    // Listen for models registering themselves in etcd, add them to HTTP service
61
62
63
64
65
66
67
                    run_watcher(
                        distributed_runtime.clone(),
                        http_service.model_manager().clone(),
                        etcd_client.clone(),
                        &network_prefix,
                    )
                    .await?;
68
69
70
71
72
                }
                None => {
                    // Static endpoints don't need discovery
                }
            }
73
        }
74
        EngineConfig::StaticFull { engine, model } => {
75
76
            let engine = Arc::new(StreamingEngineAdapter::new(engine));
            let manager = http_service.model_manager();
77
78
            manager.add_completions_model(model.service_name(), engine.clone())?;
            manager.add_chat_completions_model(model.service_name(), engine)?;
79
        }
80
81
        EngineConfig::StaticCore {
            engine: inner_engine,
82
            model,
83
        } => {
84
85
86
87
88
            let manager = http_service.model_manager();

            let chat_pipeline = common::build_pipeline::<
                NvCreateChatCompletionRequest,
                NvCreateChatCompletionStreamResponse,
89
            >(model.card(), inner_engine.clone())
90
            .await?;
91
            manager.add_chat_completions_model(model.service_name(), chat_pipeline)?;
92

93
            let cmpl_pipeline = common::build_pipeline::<CompletionRequest, CompletionResponse>(
94
                model.card(),
95
96
97
                inner_engine,
            )
            .await?;
98
            manager.add_completions_model(model.service_name(), cmpl_pipeline)?;
99
        }
100
        EngineConfig::None => unreachable!(),
101
    }
102
    http_service.run(runtime.primary_token()).await
103
}

/// Spawns a task that watches for new models in etcd at network_prefix,
/// and registers them with the ModelManager so that the HTTP service can use them.
///
/// The spawned task is intentionally detached; it runs for as long as the
/// etcd watch stream stays open.
async fn run_watcher(
    distributed_runtime: DistributedRuntime,
    model_manager: ModelManager,
    etcd_client: etcd::Client,
    network_prefix: &str,
) -> anyhow::Result<()> {
    let watch_state = Arc::new(discovery::ModelWatchState {
        prefix: network_prefix.to_string(),
        manager: model_manager,
        drt: distributed_runtime.clone(),
    });

    tracing::info!("Watching for remote model at {network_prefix}");

    // Fetch the current keys under the prefix and subscribe to future changes,
    // then hand the event stream to the model watcher task.
    let (_prefix, _watcher, kv_events) = etcd_client
        .kv_get_and_watch_prefix(network_prefix)
        .await?
        .dissolve();
    tokio::spawn(discovery::model_watcher(watch_state, kv_events));

    Ok(())
}