// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use dynamo_llm::{
    backend::Backend,
    http::service::{discovery, service_v2},
    model_type::ModelType,
    preprocessor::OpenAIPreprocessor,
    types::{
        openai::chat_completions::{
            NvCreateChatCompletionRequest, NvCreateChatCompletionStreamResponse,
        },
        Annotated,
    },
};
use dynamo_runtime::{
    pipeline::{ManyOut, Operator, ServiceBackend, ServiceFrontend, SingleIn, Source},
    DistributedRuntime, Runtime,
};

use crate::EngineConfig;

/// Build and run an HTTP service
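///
/// A minimal calling sketch (hypothetical: `runtime` and `endpoint` are
/// assumed to be constructed elsewhere in the crate):
///
/// ```ignore
/// run(runtime, 8080, EngineConfig::Dynamic(endpoint)).await?;
/// ```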
pub async fn run(
    runtime: Runtime,
    http_port: u16,
    engine_config: EngineConfig,
) -> anyhow::Result<()> {
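    // Build the HTTP frontend first; models are attached to it below.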
    let http_service = service_v2::HttpService::builder()
        .port(http_port)
        .enable_chat_endpoints(true)
        .enable_cmpl_endpoints(true)
        .build()?;
    match engine_config {
        EngineConfig::Dynamic(endpoint) => {
            // This will attempt to connect to NATS and etcd
            let distributed_runtime = DistributedRuntime::from_settings(runtime.clone()).await?;

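            // Resolve the namespace/component pair to the service name that
            // remote workers register under in etcd.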
            let component = distributed_runtime
                .namespace(endpoint.namespace)?
                .component(endpoint.component)?;
            let network_prefix = component.service_name();

            // Listen for models registering themselves in etcd, add them to HTTP service
            let state = Arc::new(discovery::ModelWatchState {
                prefix: network_prefix.clone(),
                model_type: ModelType::Chat,
                manager: http_service.model_manager().clone(),
                drt: distributed_runtime.clone(),
            });
            tracing::info!("Waiting for remote model at {network_prefix}");
            let etcd_client = distributed_runtime.etcd_client();
            let models_watcher = etcd_client.kv_get_and_watch_prefix(network_prefix).await?;
            let (_prefix, _watcher, receiver) = models_watcher.dissolve();
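            // Run the watcher in the background; it adds models to the HTTP
            // service as they appear in etcd.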
            let _watcher_task = tokio::spawn(discovery::model_watcher(state, receiver));
        }
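        // A fully-wired engine: register it with the HTTP service as-is.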
        EngineConfig::StaticFull {
            service_name,
            engine,
            ..
        } => {
            http_service
                .model_manager()
                .add_chat_completions_model(&service_name, engine)?;
        }
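        // A core engine needs the OpenAI preprocessing and backend stages
        // wired around it before it can serve chat completions.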
        EngineConfig::StaticCore {
            service_name,
            engine: inner_engine,
            card,
        } => {
            let frontend = ServiceFrontend::<
                SingleIn<NvCreateChatCompletionRequest>,
                ManyOut<Annotated<NvCreateChatCompletionStreamResponse>>,
            >::new();
            let preprocessor = OpenAIPreprocessor::new(*card.clone())
                .await?
                .into_operator();
            let backend = Backend::from_mdc(*card.clone()).await?.into_operator();
            let engine = ServiceBackend::from_engine(inner_engine);

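            // Requests flow left-to-right through the forward edges and the
            // engine; responses stream back through the backward edges:
            //   frontend → preprocessor → backend → engine → backend → preprocessor → frontend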
            let pipeline = frontend
                .link(preprocessor.forward_edge())?
                .link(backend.forward_edge())?
                .link(engine)?
                .link(backend.backward_edge())?
                .link(preprocessor.backward_edge())?
                .link(frontend)?;
            http_service
                .model_manager()
                .add_chat_completions_model(&service_name, pipeline)?;
        }
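        // `EngineConfig::None` is expected to be handled by the caller.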
        EngineConfig::None => unreachable!(),
    }
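    // Serve until the runtime's primary token signals shutdown.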
    http_service.run(runtime.primary_token()).await
}