endpoint.rs 6.23 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

16
use std::{pin::Pin, sync::Arc};
17

Neelay Shah's avatar
Neelay Shah committed
18
use dynamo_llm::{
19
    backend::Backend,
20
    engines::StreamingEngineAdapter,
21
22
    http::service::discovery::{ModelEntry, ModelNetworkName},
    key_value_store::{EtcdStorage, KeyValueStore, KeyValueStoreManager},
23
    model_card::{self, ModelDeploymentCard},
24
    model_type::ModelType,
25
    preprocessor::{BackendInput, BackendOutput},
26
    types::{
27
28
29
        openai::chat_completions::{
            NvCreateChatCompletionRequest, NvCreateChatCompletionStreamResponse,
        },
30
31
        Annotated,
    },
32
};
Neelay Shah's avatar
Neelay Shah committed
33
use dynamo_runtime::pipeline::{
34
    network::Ingress, Context, ManyOut, Operator, SegmentSource, ServiceBackend, SingleIn, Source,
35
};
36
37
use dynamo_runtime::{component::Endpoint, engine::AsyncEngineStream};
use dynamo_runtime::{protocols::Endpoint as EndpointId, DistributedRuntime};
38

39
use crate::EngineConfig;
40
41

/// Serve a local engine over the network endpoint identified by `path`.
///
/// `path` is parsed into an `EndpointId` (namespace/component/name). Depending on
/// `engine_config`, either a full OpenAI-chat engine (`StaticFull`) or a tokens-in/
/// tokens-out core engine wrapped in a Backend pre/post pipeline (`StaticCore`) is
/// registered and started. Runs until the ingress future completes or the runtime's
/// primary cancellation token fires, then removes the model card's files from NATS.
///
/// # Errors
/// Returns an error if `path` does not parse as an endpoint id, if registration or
/// pipeline construction fails, or if `engine_config` is `Dynamic` (not supported here).
///
/// # Panics
/// Panics via `unreachable!()` on `EngineConfig::None` — callers are expected to have
/// filtered that variant out before calling.
pub async fn run(
    distributed_runtime: DistributedRuntime,
    path: String,
    engine_config: EngineConfig,
) -> anyhow::Result<()> {
    // Cancellation is driven by the runtime's primary token (process shutdown).
    let cancel_token = distributed_runtime.primary_token().clone();
    let endpoint_id: EndpointId = path.parse()?;

    // Each arm yields (future-running-the-endpoint, model card) so the card is
    // available for NATS cleanup after shutdown.
    let (rt_fut, mut card) = match engine_config {
        EngineConfig::StaticFull {
            service_name,
            engine,
            mut card,
        } => {
            let engine = Arc::new(StreamingEngineAdapter::new(engine));
            // Full engine consumes chat requests directly; no ingress-side preprocessing.
            card.requires_preprocessing = false;

            // Ingress maps chat-completion requests to an annotated streaming response.
            let ingress_chat = Ingress::<
                Context<NvCreateChatCompletionRequest>,
                Pin<Box<dyn AsyncEngineStream<Annotated<NvCreateChatCompletionStreamResponse>>>>,
            >::for_engine(engine)?;
            // Register (etcd + NATS) before starting so discovery sees us when we serve.
            let endpoint_chat = register(
                distributed_runtime.clone(),
                &service_name,
                endpoint_id,
                *card.clone(),
                ModelType::Chat,
            )
            .await?;
            let fut_chat = endpoint_chat
                .endpoint_builder()
                .handler(ingress_chat)
                .start();

            (fut_chat, card)
        }
        EngineConfig::StaticCore {
            service_name,
            engine: inner_engine,
            mut card,
        } => {
            // Pre-processing is done ingress-side, so it should be already done.
            let frontend =
                SegmentSource::<SingleIn<BackendInput>, ManyOut<Annotated<BackendOutput>>>::new();
            let backend = Backend::from_mdc(*card.clone()).await?.into_operator();
            let engine = ServiceBackend::from_engine(inner_engine);

            // Round-trip pipeline: frontend -> backend (forward) -> engine ->
            // backend (backward) -> frontend.
            let pipeline = frontend
                .link(backend.forward_edge())?
                .link(engine)?
                .link(backend.backward_edge())?
                .link(frontend)?;

            let ingress = Ingress::for_pipeline(pipeline)?;
            // Core engine expects pre-tokenized input, so the HTTP ingress must preprocess.
            card.requires_preprocessing = true;
            let endpoint = register(
                distributed_runtime.clone(),
                &service_name,
                endpoint_id,
                *card.clone(),
                ModelType::Backend,
            )
            .await?;
            (endpoint.endpoint_builder().handler(ingress).start(), card)
        }
        EngineConfig::Dynamic(_) => {
            // A dynamic engine is itself backed by remote endpoints; exposing it over an
            // endpoint again would loop traffic through this process.
            anyhow::bail!("Cannot use endpoint for both in and out");
        }
        EngineConfig::None => unreachable!(),
    };

    // Serve until either the ingress future finishes on its own or shutdown is requested.
    tokio::select! {
        _ = rt_fut => {
            tracing::debug!("Endpoint ingress ended");
        }
        _ = cancel_token.cancelled() => {
        }
    }

    // Cleanup on shutdown
    // Best-effort: log, don't fail shutdown, if the NATS delete errors.
    if let Err(err) = card
        .delete_from_nats(distributed_runtime.nats_client())
        .await
    {
        tracing::error!(%err, "delete_from_nats error on shutdown");
    }

    Ok(())
}
130

131
132
133
134
135
136
137
/// Create the runtime `Endpoint` for `endpoint_id` and advertise the model for discovery.
///
/// Builds namespace → component → service → endpoint, then — only when an etcd client
/// is available — uploads the card's config files to the NATS object store, publishes
/// the `ModelDeploymentCard` to the etcd-backed key-value store, and publishes a
/// `ModelEntry` under a network name derived from this endpoint + lease so that the
/// HTTP ingress can locate the card.
///
/// Note: `card` is consumed and mutated (`move_to_nats` rewrites its file references;
/// `publish` may update it in place) but is not returned; callers keep their own copy.
///
/// # Errors
/// Propagates failures from component/service construction, the NATS upload, and either
/// etcd publication step.
async fn register(
    distributed_runtime: DistributedRuntime,
    service_name: &str,
    endpoint_id: EndpointId,
    mut card: ModelDeploymentCard,
    model_type: ModelType,
) -> anyhow::Result<Endpoint> {
    let component = distributed_runtime
        .namespace(&endpoint_id.namespace)?
        .component(&endpoint_id.component)?;
    let endpoint = component
        .service_builder()
        .create()
        .await?
        .endpoint(&endpoint_id.name);

    // A static component doesn't have an etcd_client because it doesn't need to register
    if let Some(etcd_client) = distributed_runtime.etcd_client() {
        // Store model config files in NATS object store
        let nats_client = distributed_runtime.nats_client();
        card.move_to_nats(nats_client.clone()).await?;

        // Publish the Model Deployment Card to etcd
        let kvstore: Box<dyn KeyValueStore> =
            Box::new(EtcdStorage::new(etcd_client.clone(), endpoint_id.clone()));
        let card_store = Arc::new(KeyValueStoreManager::new(kvstore));
        // Keyed by the card's slug within the model-card bucket.
        let key = card.slug().to_string();
        card_store
            .publish(model_card::BUCKET_NAME, None, &key, &mut card)
            .await?;

        // Publish our ModelEntry to etcd. This allows ingress to find the model card.
        // (Why don't we put the model card directly under this key?)
        // Name is tied to this instance's etcd lease, so the entry disappears with us.
        let network_name = ModelNetworkName::from_local(&endpoint, etcd_client.lease_id());
        tracing::debug!("Registering with etcd as {network_name}");
        let model_registration = ModelEntry {
            name: service_name.to_string(),
            endpoint: endpoint_id.clone(),
            model_type,
        };
        // kv_create fails if the key already exists — presumably intentional, so two
        // workers can't claim the same network name. TODO confirm.
        etcd_client
            .kv_create(
                network_name.to_string(),
                serde_json::to_vec_pretty(&model_registration)?,
                None, // use primary lease
            )
            .await?;
    }
    Ok(endpoint)
}