discovery.rs 12.7 KB
Newer Older
Ryan Olson's avatar
Ryan Olson committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

Ryan Olson's avatar
Ryan Olson committed
16
use std::sync::Arc;
17

18
use anyhow::Context as _;
Ryan Olson's avatar
Ryan Olson committed
19
use serde::{Deserialize, Serialize};
20
use tokio::sync::mpsc::Receiver;
Ryan Olson's avatar
Ryan Olson committed
21

Neelay Shah's avatar
Neelay Shah committed
22
use dynamo_runtime::{
23
    component::{self, ComponentEndpointInfo},
24
25
26
27
    pipeline::{
        network::egress::push_router::PushRouter, ManyOut, Operator, RouterMode, SegmentSource,
        ServiceBackend, SingleIn, Source,
    },
Ryan Olson's avatar
Ryan Olson committed
28
    protocols::{self, annotated::Annotated},
29
30
    slug::Slug,
    transports::etcd::{self, KeyValue, WatchEvent},
31
    DistributedRuntime,
32
};
Ryan Olson's avatar
Ryan Olson committed
33
34
35

use super::ModelManager;
use crate::protocols::openai::chat_completions::{
36
    NvCreateChatCompletionRequest, NvCreateChatCompletionStreamResponse,
37
};
38
use crate::protocols::openai::completions::{CompletionRequest, CompletionResponse};
39
40
41
42
43
44
use crate::{
    backend::Backend,
    model_type::ModelType,
    preprocessor::{BackendInput, OpenAIPreprocessor},
    protocols::common::llm_backend::LLMEngineOutput,
};
45
46
47
48
use crate::{
    key_value_store::{EtcdStorage, KeyValueStore, KeyValueStoreManager},
    model_card::{self, ModelDeploymentCard},
};
49
use tracing;
50

Ryan Olson's avatar
Ryan Olson committed
51
52
53
54
55
56
/// [ModelEntry] is a struct that contains the information for the HTTP service to discover models
/// from the etcd cluster.
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub struct ModelEntry {
    /// Public name of the model.
    /// This will be used to identify the model in the HTTP service and is the value used in
    /// an [OAI ChatRequest][crate::protocols::openai::chat_completions::NvCreateChatCompletionRequest].
    pub name: String,

    /// Component of the endpoint.
    pub endpoint: protocols::Endpoint,

    /// Specifies whether the model is a chat or completion model.
    pub model_type: ModelType,
}

67
impl ModelEntry {
68
69
70
71
72
73
    pub fn requires_preprocessing(&self) -> bool {
        matches!(self.model_type, ModelType::Backend)
    }

    /// Fetch the ModelDeploymentCard from NATS.
    /// This does not touch it's fields so you may need to call move_from_nats on it.
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
    pub async fn load_mdc(
        &self,
        endpoint_id: protocols::Endpoint,
        etcd_client: etcd::Client,
    ) -> anyhow::Result<ModelDeploymentCard> {
        let kvstore: Box<dyn KeyValueStore> =
            Box::new(EtcdStorage::new(etcd_client.clone(), endpoint_id));
        let card_store = Arc::new(KeyValueStoreManager::new(kvstore));
        let card_key = ModelDeploymentCard::service_name_slug(&self.name);
        match card_store
            .load::<ModelDeploymentCard>(model_card::BUCKET_NAME, &card_key)
            .await
        {
            Ok(Some(mdc)) => Ok(mdc),
            Ok(None) => {
                anyhow::bail!("Missing ModelDeploymentCard in etcd under key {card_key}");
            }
            Err(err) => {
                anyhow::bail!(
                    "Error fetching ModelDeploymentCard from etcd under key {card_key}. {err}"
                );
            }
        }
    }
}

/// Newtype over the slugified etcd key that identifies one model endpoint instance
/// on the network, e.g. `ns.cp.ep-694d967ca5efd804`.
#[derive(Debug, Clone)]
pub struct ModelNetworkName(String);

impl ModelNetworkName {
    /// Key to store this model entry in networked key-value store (etcd).
    ///
    /// It looks like this:
    /// ns.cp.ep-694d967ca5efd804
    fn from_parts(namespace: &str, component: &str, endpoint: &str, lease_id: i64) -> Self {
        ModelNetworkName(
            Slug::slugify(&format!("{namespace}.{component}.{endpoint}-{lease_id:x}")).to_string(),
        )
    }

    // We can't do From<&component::Endpoint> here because we also need the lease_id
    pub fn from_local(endpoint: &component::Endpoint, lease_id: i64) -> Self {
        Self::from_parts(
            &endpoint.component().namespace().to_string(),
            &endpoint.component().name(),
            endpoint.name(),
            lease_id,
        )
    }

124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
    /// Fetch the ModelEntry from etcd.
    pub async fn load_entry(&self, etcd_client: etcd::Client) -> anyhow::Result<ModelEntry> {
        let mut model_entries = etcd_client.kv_get(self.to_string(), None).await?;
        if model_entries.is_empty() {
            anyhow::bail!("No ModelEntry in etcd for key {self}");
        }
        let model_entry = model_entries.remove(0);
        serde_json::from_slice(model_entry.value()).with_context(|| {
            format!(
                "Error deserializing JSON. Key={self}. JSON={}",
                model_entry.value_str().unwrap_or("INVALID UTF-8")
            )
        })
    }

    /// Fetch the ModelDeploymentCard from NATS.
    /// This does not touch it's fields so you may need to call move_from_nats on it.
    /// TODO We have potentially two for each endpoint, one Chat and one Completion.
142
143
144
145
146
    pub async fn load_mdc(
        &self,
        endpoint_id: protocols::Endpoint,
        etcd_client: etcd::Client,
    ) -> anyhow::Result<ModelDeploymentCard> {
147
        let entry = self.load_entry(etcd_client.clone()).await?;
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
        entry.load_mdc(endpoint_id, etcd_client).await
    }
}

impl From<&ComponentEndpointInfo> for ModelNetworkName {
    // Build the network key from a discovered endpoint's namespace/component/endpoint and lease id.
    fn from(cei: &ComponentEndpointInfo) -> Self {
        Self::from_parts(&cei.namespace, &cei.component, &cei.endpoint, cei.lease_id)
    }
}

impl std::fmt::Display for ModelNetworkName {
    // Display is the raw slug string; this is what gets used as the etcd key.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(&self.0)
    }
}

Ryan Olson's avatar
Ryan Olson committed
164
165
166
167
168
169
/// Shared state for the model watcher task.
pub struct ModelWatchState {
    /// etcd key prefix the watched model entries live under; stripped from a
    /// deleted key to recover the model name.
    pub prefix: String,
    /// Registry of served models; engines are added on PUT and removed on DELETE.
    pub manager: ModelManager,
    /// Runtime handle used to build endpoint clients and reach etcd/NATS.
    pub drt: DistributedRuntime,
}

170
pub async fn model_watcher(state: Arc<ModelWatchState>, mut events_rx: Receiver<WatchEvent>) {
171
    tracing::debug!("model watcher started");
Ryan Olson's avatar
Ryan Olson committed
172
173
174

    while let Some(event) = events_rx.recv().await {
        match event {
175
176
177
178
179
180
181
182
183
184
185
186
187
188
            WatchEvent::Put(kv) => {
                let model_entry = match serde_json::from_slice::<ModelEntry>(kv.value()) {
                    Ok(model_entry) => model_entry,
                    Err(err) => {
                        tracing::error!(%err, ?kv, "Invalid JSON in model entry");
                        continue;
                    }
                };
                if state.manager.has_model_any(&model_entry.name) {
                    tracing::trace!(
                        service_name = model_entry.name,
                        "New endpoint for existing model"
                    );
                    continue;
Ryan Olson's avatar
Ryan Olson committed
189
                }
190

191
192
193
                match handle_put(&model_entry, state.clone()).await {
                    Ok(()) => {
                        tracing::info!(model_name = model_entry.name, "added model");
194
195
                    }
                    Err(e) => {
196
                        tracing::error!(%e, "error adding model {}", model_entry.name);
197
                    }
Ryan Olson's avatar
Ryan Olson committed
198
                }
199
            }
Ryan Olson's avatar
Ryan Olson committed
200
            WatchEvent::Delete(kv) => match handle_delete(&kv, state.clone()).await {
201
202
                Ok(model_name) => {
                    tracing::info!("removed model {}", model_name);
Ryan Olson's avatar
Ryan Olson committed
203
204
                }
                Err(e) => {
205
                    tracing::error!("error removing model: {}", e);
Ryan Olson's avatar
Ryan Olson committed
206
207
208
209
210
211
                }
            },
        }
    }
}

212
async fn handle_delete(kv: &KeyValue, state: Arc<ModelWatchState>) -> anyhow::Result<&str> {
Ryan Olson's avatar
Ryan Olson committed
213
    let key = kv.key_str()?;
214
    tracing::debug!(key, "removing model");
Ryan Olson's avatar
Ryan Olson committed
215
216

    let model_name = key.trim_start_matches(&state.prefix);
217

218
219
220
    // Ignore the errors because model could be either type
    let _ = state.manager.remove_chat_completions_model(model_name);
    let _ = state.manager.remove_completions_model(model_name);
221

222
    Ok(model_name)
Ryan Olson's avatar
Ryan Olson committed
223
224
225
226
227
228
}

// Handles a PUT event from etcd, this usually means adding a new model to the list of served
// models.
//
// If this method errors, for the near term, we will delete the offending key.
// NOTE(review): the deletion on error is not visible in this file — confirm the caller does it.
async fn handle_put(model_entry: &ModelEntry, state: Arc<ModelWatchState>) -> anyhow::Result<()> {
    let endpoint_id = model_entry.endpoint.clone();
    // Build a network client for the worker endpoint this entry points at.
    let client = state
        .drt
        .namespace(&endpoint_id.namespace)?
        .component(&endpoint_id.component)?
        .endpoint(&endpoint_id.name)
        .client()
        .await?;

    let Some(etcd_client) = state.drt.etcd_client() else {
        // Should be impossible because we only get here on an etcd event
        anyhow::bail!("Missing etcd_client");
    };
    // The deployment card is optional for Chat/Completion models but mandatory for Backend.
    let card = match model_entry.load_mdc(endpoint_id, etcd_client).await {
        Ok(card) => {
            tracing::debug!(card.display_name, "adding model");
            Some(card)
        }
        Err(err) => {
            // `dynamo serve` isn't using MDC yet so can't be an error
            tracing::info!(%err, "load_mdc did not complete");
            None
        }
    };
    match model_entry.model_type {
        ModelType::Backend => {
            // A Backend model expects pre-processed requests meaning it's up to us whether we
            // handle Chat or Completions requests, so handle both.

            let Some(mut card) = card else {
                anyhow::bail!("Missing model deployment card");
            };
            // Download tokenizer.json etc to local disk
            // This cache_dir is a tempfile::TempDir will be deleted on drop. I _think_
            // OpenAIPreprocessor::new loads the files, so we can delete them after this
            // function. Needs checking carefully, possibly we need to store it in state.
            let _cache_dir = Some(card.move_from_nats(state.drt.nats_client()).await?);

            // Chat pipeline: frontend -> preprocessor -> backend -> network router, and back
            // out through the same operators' backward edges.
            let frontend = SegmentSource::<
                SingleIn<NvCreateChatCompletionRequest>,
                ManyOut<Annotated<NvCreateChatCompletionStreamResponse>>,
            >::new();
            let preprocessor = OpenAIPreprocessor::new(card.clone()).await?.into_operator();
            let backend = Backend::from_mdc(card.clone()).await?.into_operator();
            let router = PushRouter::<BackendInput, Annotated<LLMEngineOutput>>::from_client(
                client.clone(),
                RouterMode::Random, // TODO how do we configure this?
            )
            .await?;

            let chat_engine = frontend
                .link(preprocessor.forward_edge())?
                .link(backend.forward_edge())?
                .link(ServiceBackend::from_engine(Arc::new(router)))?
                .link(backend.backward_edge())?
                .link(preprocessor.backward_edge())?
                .link(frontend)?;
            state
                .manager
                .add_chat_completions_model(&model_entry.name, chat_engine)?;

            // Completions pipeline: same shape as the chat pipeline above, different
            // request/response types.
            let frontend = SegmentSource::<
                SingleIn<CompletionRequest>,
                ManyOut<Annotated<CompletionResponse>>,
            >::new();
            let preprocessor = OpenAIPreprocessor::new(card.clone()).await?.into_operator();
            let backend = Backend::from_mdc(card.clone()).await?.into_operator();
            let router = PushRouter::<BackendInput, Annotated<LLMEngineOutput>>::from_client(
                client,
                RouterMode::Random, // TODO how do we configure this?
            )
            .await?;

            let completions_engine = frontend
                .link(preprocessor.forward_edge())?
                .link(backend.forward_edge())?
                .link(ServiceBackend::from_engine(Arc::new(router)))?
                .link(backend.backward_edge())?
                .link(preprocessor.backward_edge())?
                .link(frontend)?;
            state
                .manager
                .add_completions_model(&model_entry.name, completions_engine)?;
        }
        ModelType::Chat => {
            // Chat model accepts OpenAI chat requests directly; just route them over the network.
            let push_router = PushRouter::<
                NvCreateChatCompletionRequest,
                Annotated<NvCreateChatCompletionStreamResponse>,
            >::from_client(client, Default::default())
            .await?;
            let engine = Arc::new(push_router);
            state
                .manager
                .add_chat_completions_model(&model_entry.name, engine)?;
        }
        ModelType::Completion => {
            // Completion model accepts OpenAI completion requests directly.
            let push_router =
                PushRouter::<CompletionRequest, Annotated<CompletionResponse>>::from_client(
                    client,
                    Default::default(),
                )
                .await?;
            let engine = Arc::new(push_router);
            state
                .manager
                .add_completions_model(&model_entry.name, engine)?;
        }
    }

    Ok(())
}