// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! # Model Deployment Card
//!
//! The ModelDeploymentCard (MDC) is the primary model configuration structure that will be available to any
//! component that needs to interact with the model or its dependent artifacts.
//!
//! The ModelDeploymentCard contains LLM model deployment configuration information:
//! - Display name and service name for the model
//! - Model information (ModelInfoType)
//! - Tokenizer configuration (TokenizerKind)
//! - Prompt formatter settings (PromptFormatterArtifact)
//! - Various metadata like revision, publish time, etc.
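//!
//! A minimal usage sketch follows; the `use` path is illustrative and depends on where this
//! module is mounted in the crate tree:
//!
//! ```ignore
//! use dynamo_llm::model_card::model::ModelDeploymentCard;
//!
//! // A quasi-null card that only carries the model name.
//! let card = ModelDeploymentCard::with_name_only("meta-llama/Meta-Llama-3.1-8B-Instruct");
//!
//! // NATS/URL-friendly identity and a checksum of the card's content.
//! let slug = card.slug();
//! let checksum = card.mdcsum();
//!
//! // Cards serialize to and from JSON for storage and transport.
//! let json = card.to_json().expect("serialize");
//! let restored = ModelDeploymentCard::load_from_json_str(&json).expect("deserialize");
//! ```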

use std::fmt;
use std::fs::File;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;

use anyhow::{Context, Result};
use derive_builder::Builder;
use dynamo_runtime::slug::Slug;
use dynamo_runtime::transports::nats;
use either::Either;
use serde::{Deserialize, Serialize};
use tokenizers::Tokenizer as HfTokenizer;
use url::Url;

use crate::gguf::{Content, ContentConfig};
use crate::key_value_store::Versioned;
use crate::protocols::TokenIdType;

pub const BUCKET_NAME: &str = "mdc";

/// Delete model deployment cards that haven't been re-published within this interval.
/// This cleans up cards left behind by workers that have stopped.
pub const BUCKET_TTL: Duration = Duration::from_secs(5 * 60);

/// If a model deployment card hasn't been refreshed in this much time, the worker is likely gone
const CARD_MAX_AGE: chrono::TimeDelta = chrono::TimeDelta::minutes(5);

#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "snake_case")]
pub enum ModelInfoType {
    HfConfigJson(String),
    GGUF(PathBuf),
}

#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "snake_case")]
pub enum TokenizerKind {
    HfTokenizerJson(String),
    GGUF(Box<HfTokenizer>),
}

/// Supported types of prompt formatters.
///
/// We need a way to associate the prompt formatter template definition with the
/// data model it expects for rendering.
///
/// All current prompt formatters are Jinja2 templates which use the OpenAI ChatCompletionRequest
/// format. However, we currently do not have a discovery path to know if the model supports tool use
/// unless we inspect the template.
///
/// TODO(): Add an enum for the PromptFormatDataModel with at minimum arms for:
/// - OaiChat
/// - OaiChatToolUse
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "snake_case")]
pub enum PromptFormatterArtifact {
    HfTokenizerConfigJson(String),
    GGUF(PathBuf),
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)]
#[serde(rename_all = "snake_case")]
pub enum PromptContextMixin {
    /// Support OAI Chat Messages and Tools
    OaiChat,

    /// Enables templates with `{{datetime}}` to be rendered with the current date and time.
    Llama3DateTime,
}

#[derive(Serialize, Deserialize, Clone, Debug, Builder, Default)]
pub struct ModelDeploymentCard {
    /// Human readable model name, e.g. "Meta Llama 3.1 8B Instruct"
    pub display_name: String,

    /// Identifier to expect in OpenAI compatible HTTP request, e.g. "meta-llama/Meta-Llama-3.1-8B-Instruct"
    /// This will get slugified for use in NATS.
    pub service_name: String,

    /// Model information
    pub model_info: Option<ModelInfoType>,

    /// Tokenizer configuration
    pub tokenizer: Option<TokenizerKind>,

    /// Prompt Formatter configuration
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub prompt_formatter: Option<PromptFormatterArtifact>,

    /// Additional context mixins available when rendering the prompt (e.g. date/time support)
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub prompt_context: Option<Vec<PromptContextMixin>>,

    /// When this card was last advertised by a worker. None if not yet published.
    pub last_published: Option<chrono::DateTime<chrono::Utc>>,

    /// Incrementing count of how many times we published this card
    #[serde(default, skip_serializing)]
    pub revision: u64,

    /// Does this model expect preprocessing (tokenization, etc.) to already be done?
    /// If true, the worker receives BackendInput JSON; if false, it receives
    /// ChatCompletionRequest JSON.
    #[serde(default)]
    pub requires_preprocessing: bool,
}

impl ModelDeploymentCard {
    pub fn builder() -> ModelDeploymentCardBuilder {
        ModelDeploymentCardBuilder::default()
    }

    /// Create a ModelDeploymentCard where only the name is filled in.
    ///
    /// Single-process setups don't need an MDC to communicate model details, but it
    /// simplifies the code to assume we always have one. This is how you get one in those
    /// cases. A quasi-null object: <https://en.wikipedia.org/wiki/Null_object_pattern>
    pub fn with_name_only(name: &str) -> ModelDeploymentCard {
        ModelDeploymentCard {
            display_name: name.to_string(),
            service_name: Slug::slugify(name).to_string(),
            ..Default::default()
        }
    }

    /// A URL- and NATS-friendly, very likely unique ID for this model.
    /// Mostly human readable; a-z, 0-9, _ and - only.
    /// Pass the service_name.
    pub fn service_name_slug(s: &str) -> Slug {
        Slug::from_string(s)
    }

    /// How often we should check whether a model deployment card has expired because its workers are gone
    pub fn expiry_check_period() -> Duration {
        match CARD_MAX_AGE.to_std() {
            Ok(duration) => duration / 3,
            Err(_) => {
                // Only happens if CARD_MAX_AGE is negative, which it isn't
                unreachable!("Cannot run card expiry watcher, invalid CARD_MAX_AGE");
            }
        }
    }

    /// Load a model deployment card from a JSON file
    pub fn load_from_json_file<P: AsRef<Path>>(file: P) -> std::io::Result<Self> {
        let mut card: ModelDeploymentCard = serde_json::from_str(&std::fs::read_to_string(file)?)?;
        card.requires_preprocessing = false;
        Ok(card)
    }

    /// Load a model deployment card from a JSON string
    pub fn load_from_json_str(json: &str) -> Result<Self, anyhow::Error> {
        Ok(serde_json::from_str(json)?)
    }

    //
    // Methods
    //

    /// Save the model deployment card to a JSON file
    pub fn save_to_json_file(&self, file: &str) -> Result<(), anyhow::Error> {
        std::fs::write(file, self.to_json()?)?;
        Ok(())
    }

    pub fn set_service_name(&mut self, service_name: &str) {
        self.service_name = service_name.to_string();
    }

    /// Slug form of the service name; used, for example, as the NATS bucket name for this card's files.
    pub fn slug(&self) -> Slug {
        ModelDeploymentCard::service_name_slug(&self.service_name)
    }

    /// Serialize the model deployment card to a JSON string
    pub fn to_json(&self) -> Result<String, anyhow::Error> {
        Ok(serde_json::to_string(self)?)
    }

    /// BLAKE3 checksum of the card's JSON serialization, identifying this exact card content.
    pub fn mdcsum(&self) -> String {
        let json = self.to_json().unwrap();
        format!("{}", blake3::hash(json.as_bytes()))
    }

    /// Was this card last published a long time ago, suggesting the worker is gone?
    pub fn is_expired(&self) -> bool {
        if let Some(last_published) = self.last_published.as_ref() {
            chrono::Utc::now() - last_published > CARD_MAX_AGE
        } else {
            false
        }
    }

    pub fn tokenizer_hf(&self) -> anyhow::Result<HfTokenizer> {
        match &self.tokenizer {
            Some(TokenizerKind::HfTokenizerJson(file)) => {
                HfTokenizer::from_file(file).map_err(anyhow::Error::msg)
            }
            Some(TokenizerKind::GGUF(t)) => Ok(*t.clone()),
            None => {
                anyhow::bail!("Blank ModelDeploymentCard does not have a tokenizer");
            }
        }
    }

    /// Move the files this MDC uses into the NATS object store.
    /// Updates the URIs to point to NATS.
    pub async fn move_to_nats(&mut self, nats_client: nats::Client) -> Result<()> {
        let nats_addr = nats_client.addr();
        let bucket_name = self.slug();
        tracing::debug!(
            nats_addr,
            %bucket_name,
            "Uploading model deployment card fields to NATS"
        );

        if let Some(ModelInfoType::HfConfigJson(ref src_file)) = self.model_info {
            if !nats::is_nats_url(src_file) {
                let target = format!("nats://{nats_addr}/{bucket_name}/config.json");
                nats_client
                    .object_store_upload(&PathBuf::from(src_file), Url::parse(&target)?)
                    .await?;
                self.model_info = Some(ModelInfoType::HfConfigJson(target));
            }
        }

        if let Some(PromptFormatterArtifact::HfTokenizerConfigJson(ref src_file)) =
            self.prompt_formatter
        {
            if !nats::is_nats_url(src_file) {
                let target = format!("nats://{nats_addr}/{bucket_name}/tokenizer_config.json");
                nats_client
                    .object_store_upload(&PathBuf::from(src_file), Url::parse(&target)?)
                    .await?;
                self.prompt_formatter =
                    Some(PromptFormatterArtifact::HfTokenizerConfigJson(target));
            }
        }

        if let Some(TokenizerKind::HfTokenizerJson(ref src_file)) = self.tokenizer {
            if !nats::is_nats_url(src_file) {
                let target = format!("nats://{nats_addr}/{bucket_name}/tokenizer.json");
                nats_client
                    .object_store_upload(&PathBuf::from(src_file), Url::parse(&target)?)
                    .await?;
                self.tokenizer = Some(TokenizerKind::HfTokenizerJson(target));
            }
        }

        Ok(())
    }

    /// Delete this card from the key-value store and its URLs from the object store
    pub async fn delete_from_nats(&mut self, nats_client: nats::Client) -> Result<()> {
        let nats_addr = nats_client.addr();
        let bucket_name = self.slug();
        tracing::trace!(
            nats_addr,
            %bucket_name,
            "Delete model deployment card from NATS"
        );
        nats_client
            .object_store_delete_bucket(bucket_name.as_ref())
            .await
    }
}

impl Versioned for ModelDeploymentCard {
    fn revision(&self) -> u64 {
        self.revision
    }

    fn set_revision(&mut self, revision: u64) {
        self.last_published = Some(chrono::Utc::now());
        self.revision = revision;
    }
}

impl fmt::Display for ModelDeploymentCard {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.slug())
    }
}

pub trait ModelInfo: Send + Sync {
    /// Model type
    fn model_type(&self) -> String;

    /// Token ID for the beginning of sequence
    fn bos_token_id(&self) -> TokenIdType;

    /// Token ID for the end of sequence
    fn eos_token_ids(&self) -> Vec<TokenIdType>;

    /// Maximum position embeddings / max sequence length
    fn max_position_embeddings(&self) -> usize;

    /// Vocabulary size
    fn vocab_size(&self) -> usize;
}

impl ModelInfoType {
    pub async fn get_model_info(&self) -> Result<Arc<dyn ModelInfo>> {
        match self {
            Self::HfConfigJson(info) => HFConfig::from_json_file(info).await,
            Self::GGUF(path) => HFConfig::from_gguf(path),
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
struct HFConfig {
    bos_token_id: TokenIdType,

    #[serde(with = "either::serde_untagged")]
    eos_token_id: Either<TokenIdType, Vec<TokenIdType>>,

    /// Architectures listed in config.json; denotes which mixins to the
    /// flattened data model may be present
    architectures: Vec<String>,

    /// general model type
    model_type: String,

    /// max sequence length
    max_position_embeddings: usize,

    /// number of layers in the model
    num_hidden_layers: usize,

    /// number of attention heads in the model
    num_attention_heads: usize,

    /// Vocabulary size
    vocab_size: usize,
}

impl HFConfig {
    async fn from_json_file(file: &str) -> Result<Arc<dyn ModelInfo>> {
        let contents = std::fs::read_to_string(file)?;
        let config: Self = serde_json::from_str(&contents)?;
        Ok(Arc::new(config))
    }
    fn from_gguf(gguf_file: &Path) -> Result<Arc<dyn ModelInfo>> {
        let content = load_gguf(gguf_file)?;
        let model_config_metadata: ContentConfig = (&content).into();
        let num_hidden_layers =
            content.get_metadata()[&format!("{}.block_count", content.arch())].to_u32()? as usize;

        let bos_token_id = content.get_metadata()["tokenizer.ggml.bos_token_id"].to_u32()?;
        let eos_token_id = content.get_metadata()["tokenizer.ggml.eos_token_id"].to_u32()?;

        // to_vec returns a Vec that's already there, so it's cheap
        let vocab_size = content.get_metadata()["tokenizer.ggml.tokens"]
            .to_vec()?
            .len();

        let arch = content.arch().to_string();
        Ok(Arc::new(HFConfig {
            bos_token_id,
            eos_token_id: Either::Left(eos_token_id),
            architectures: vec![format!("{}ForCausalLM", capitalize(&arch))],
            // "general.architecture"
            model_type: arch,
            // "llama.context_length"
            max_position_embeddings: model_config_metadata.max_seq_len(),
            // "llama.block_count"
            num_hidden_layers,
            // "llama.attention.head_count"
            num_attention_heads: model_config_metadata.num_attn_heads(),
            // "tokenizer.ggml.tokens".len()
            vocab_size,
        }))
    }
}

impl ModelInfo for HFConfig {
    fn model_type(&self) -> String {
        self.model_type.clone()
    }

    fn bos_token_id(&self) -> TokenIdType {
        self.bos_token_id
    }

    fn eos_token_ids(&self) -> Vec<TokenIdType> {
        match &self.eos_token_id {
            Either::Left(eos_token_id) => vec![*eos_token_id],
            Either::Right(eos_token_ids) => eos_token_ids.clone(),
        }
    }

    fn max_position_embeddings(&self) -> usize {
        self.max_position_embeddings
    }

    fn vocab_size(&self) -> usize {
        self.vocab_size
    }
}

impl TokenizerKind {
    pub fn from_gguf(gguf_file: &Path) -> anyhow::Result<Self> {
        let content = load_gguf(gguf_file)?;
        let out = crate::gguf::convert_gguf_to_hf_tokenizer(&content)
            .with_context(|| gguf_file.display().to_string())?;
        Ok(TokenizerKind::GGUF(Box::new(out.tokenizer)))
    }
}

fn load_gguf(gguf_file: &Path) -> anyhow::Result<Content> {
    let filename = gguf_file.display().to_string();
    let mut f = File::open(gguf_file).with_context(|| filename.clone())?;
    // vec because GGUF can be split into multiple files (shards)
    let mut readers = vec![&mut f];
    crate::gguf::Content::from_readers(&mut readers).with_context(|| filename.clone())
}

fn capitalize(s: &str) -> String {
    s.chars()
        .enumerate()
        .map(|(i, c)| {
            if i == 0 {
                c.to_uppercase().to_string()
            } else {
                c.to_lowercase().to_string()
            }
        })
        .collect()
}