use axum::http::HeaderValue;
use clap::Parser;
use hf_hub::api::tokio::{Api, ApiBuilder, ApiRepo};
use hf_hub::{Repo, RepoType};
use opentelemetry::sdk::propagation::TraceContextPropagator;
use opentelemetry::sdk::trace;
use opentelemetry::sdk::trace::Sampler;
use opentelemetry::sdk::Resource;
use opentelemetry::{global, KeyValue};
use opentelemetry_otlp::WithExportConfig;
use std::fs::File;
use std::io::BufReader;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::path::Path;
use text_generation_client::{ClientError, ShardedClient};
use text_generation_router::config::Config;
use text_generation_router::{server, HubModelInfo, HubTokenizerConfig};
use thiserror::Error;
use tokenizers::Tokenizer;
use tower_http::cors::AllowOrigin;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::{EnvFilter, Layer};

/// App Configuration
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
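    // Every option below can also be set through the matching environment
    // variable (see the `env` attribute on each flag).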
    #[clap(default_value = "128", long, env)]
    max_concurrent_requests: usize,
    #[clap(default_value = "2", long, env)]
    max_best_of: usize,
    #[clap(default_value = "4", long, env)]
    max_stop_sequences: usize,
    #[clap(default_value = "5", long, env)]
    max_top_n_tokens: u32,
    #[clap(default_value = "1024", long, env)]
    max_input_length: usize,
    #[clap(default_value = "2048", long, env)]
    max_total_tokens: usize,
    #[clap(default_value = "1.2", long, env)]
    waiting_served_ratio: f32,
    #[clap(default_value = "4096", long, env)]
    max_batch_prefill_tokens: u32,
    #[clap(long, env)]
    max_batch_total_tokens: Option<u32>,
    #[clap(default_value = "20", long, env)]
    max_waiting_tokens: usize,
    #[clap(long, env)]
    max_batch_size: Option<usize>,
    #[clap(default_value = "0.0.0.0", long, env)]
    hostname: String,
    #[clap(default_value = "3000", long, short, env)]
    port: u16,
    #[clap(default_value = "/tmp/text-generation-server-0", long, env)]
    master_shard_uds_path: String,
    #[clap(default_value = "bigscience/bloom", long, env)]
    tokenizer_name: String,
    #[clap(long, env)]
    tokenizer_config_path: Option<String>,
    #[clap(long, env)]
    revision: Option<String>,
    #[clap(default_value = "2", long, env)]
    validation_workers: usize,
    #[clap(long, env)]
    json_output: bool,
    #[clap(long, env)]
    otlp_endpoint: Option<String>,
    #[clap(long, env)]
    cors_allow_origin: Option<Vec<String>>,
    #[clap(long, env)]
    ngrok: bool,
    #[clap(long, env)]
    ngrok_authtoken: Option<String>,
    #[clap(long, env)]
    ngrok_edge: Option<String>,
    #[clap(long, env, default_value_t = false)]
    messages_api_enabled: bool,
    #[clap(long, env, default_value_t = false)]
    disable_grammar_support: bool,
}

#[tokio::main]
async fn main() -> Result<(), RouterError> {
    // Get args
    let args = Args::parse();
    // Pattern match configuration
    let Args {
        max_concurrent_requests,
        max_best_of,
        max_stop_sequences,
        max_top_n_tokens,
        max_input_length,
        max_total_tokens,
        waiting_served_ratio,
        max_batch_prefill_tokens,
        max_batch_total_tokens,
        max_waiting_tokens,
        max_batch_size,
        hostname,
        port,
        master_shard_uds_path,
        tokenizer_name,
        tokenizer_config_path,
        revision,
        validation_workers,
        json_output,
        otlp_endpoint,
        cors_allow_origin,
        ngrok,
        ngrok_authtoken,
        ngrok_edge,
        messages_api_enabled,
        disable_grammar_support,
    } = args;

    // Initialize logging and telemetry
    init_logging(otlp_endpoint, json_output);

    // Validate args
    if max_input_length >= max_total_tokens {
        return Err(RouterError::ArgumentValidation(
            "`max_input_length` must be < `max_total_tokens`".to_string(),
        ));
    }
    if max_input_length as u32 > max_batch_prefill_tokens {
        return Err(RouterError::ArgumentValidation(format!("`max_batch_prefill_tokens` must be >= `max_input_length`. Given: {max_batch_prefill_tokens} and {max_input_length}")));
    }

    if validation_workers == 0 {
        return Err(RouterError::ArgumentValidation(
            "`validation_workers` must be > 0".to_string(),
        ));
    }

    if let Some(ref max_batch_total_tokens) = max_batch_total_tokens {
        if max_batch_prefill_tokens > *max_batch_total_tokens {
            return Err(RouterError::ArgumentValidation(format!("`max_batch_prefill_tokens` must be <= `max_batch_total_tokens`. Given: {max_batch_prefill_tokens} and {max_batch_total_tokens}")));
        }
        if max_total_tokens as u32 > *max_batch_total_tokens {
            return Err(RouterError::ArgumentValidation(format!("`max_total_tokens` must be <= `max_batch_total_tokens`. Given: {max_total_tokens} and {max_batch_total_tokens}")));
        }
    }

    // CORS allowed origins
    // Map inside the Option, parse each String origin into a HeaderValue,
    // then collect them into an AllowOrigin list
    let cors_allow_origin: Option<AllowOrigin> = cors_allow_origin.map(|cors_allow_origin| {
        AllowOrigin::list(
            cors_allow_origin
                .iter()
                .map(|origin| origin.parse::<HeaderValue>().unwrap()),
        )
    });

    // Parse Huggingface hub token
    let authorization_token = std::env::var("HUGGING_FACE_HUB_TOKEN").ok();

    // Tokenizer instance
    // This will only be used to validate payloads
    let local_path = Path::new(&tokenizer_name);
    let local_model = local_path.exists() && local_path.is_dir();

    // Shared API builder initialization
    let api_builder = || {
        let mut builder = ApiBuilder::new()
            .with_progress(false)
            .with_token(authorization_token);

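        // Honor a custom cache directory when HUGGINGFACE_HUB_CACHE is set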
        if let Ok(cache_dir) = std::env::var("HUGGINGFACE_HUB_CACHE") {
            builder = builder.with_cache_dir(cache_dir.into());
        }

        builder
    };

    // Decide if we need to use the API based on the revision and local path
    let use_api = revision.is_some() || !local_path.exists() || !local_path.is_dir();

    // Initialize API if needed
    let api = if use_api {
        tracing::info!("Using the Hugging Face API");
        match api_builder().build() {
            Ok(api) => Some(api),
            Err(_) => {
                tracing::warn!("Unable to build the Hugging Face API");
                None
            }
        }
    } else {
        None
    };

    // Load tokenizer and model info
    let (tokenizer, model_info, config) = if local_model {
        let tokenizer = Tokenizer::from_file(local_path.join("tokenizer.json")).ok();
        let model_info = HubModelInfo {
            model_id: tokenizer_name.to_string(),
            sha: None,
            pipeline_tag: None,
        };
        let config: Option<Config> = std::fs::read_to_string(local_path.join("config.json"))
            .ok()
            .as_ref()
            .and_then(|c| serde_json::from_str(c).ok());

        (tokenizer, model_info, config)
    } else if let Some(api) = api.clone() {
        let api_repo = api.repo(Repo::with_revision(
            tokenizer_name.to_string(),
            RepoType::Model,
            revision.clone().unwrap_or_else(|| "main".to_string()),
        ));

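        // Try the repo's own tokenizer.json first; fall back to the base model's tokenizer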
        let tokenizer = match api_repo.get("tokenizer.json").await {
            Ok(tokenizer_filename) => Tokenizer::from_file(tokenizer_filename).ok(),
            Err(_) => get_base_tokenizer(&api, &api_repo).await,
        };

        let config: Option<Config> = api_repo.get("config.json").await.ok().and_then(|filename| {
            std::fs::read_to_string(filename)
                .ok()
                .as_ref()
                .and_then(|c| {
                    let config: Result<Config, _> = serde_json::from_str(c);
                    if let Err(err) = &config {
                        tracing::warn!("Could not parse config {err:?}");
                    }
                    config.ok()
                })
        });

        let model_info = get_model_info(&api_repo).await.unwrap_or_else(|| {
            tracing::warn!("Could not retrieve model info from the Hugging Face hub.");
            HubModelInfo {
                model_id: tokenizer_name.to_string(),
                sha: None,
                pipeline_tag: None,
            }
        });

        (tokenizer, model_info, config)
    } else {
        // No API and no local model
        return Err(RouterError::ArgumentValidation(
            "No local model found and no revision specified".to_string(),
        ));
    };

    tracing::info!("Using config {config:?}");

    // Load tokenizer config if found locally, or check if we can get it from the API if needed
    let tokenizer_config = if let Some(path) = tokenizer_config_path {
        tracing::info!("Using local tokenizer config from user specified path");
        HubTokenizerConfig::from_file(&std::path::PathBuf::from(path))
    } else if local_model {
        tracing::info!("Using local tokenizer config");
        HubTokenizerConfig::from_file(&local_path.join("tokenizer_config.json"))
    } else {
        match api {
            Some(api) => {
                tracing::info!("Using the Hugging Face API to retrieve tokenizer config");
                let repo = Repo::with_revision(
                    tokenizer_name.to_string(),
                    RepoType::Model,
                    revision.unwrap_or("main".to_string()),
                );
                get_tokenizer_config(&api.repo(repo))
                    .await
                    .unwrap_or_else(|| {
                        tracing::warn!(
                            "Could not retrieve tokenizer config from the Hugging Face hub."
                        );
                        HubTokenizerConfig::default()
                    })
            }
            None => {
                tracing::warn!("Could not find tokenizer config locally and no API specified");
                HubTokenizerConfig::default()
            }
        }
    };

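    // Without a fast tokenizer the router cannot count or truncate input tokens itself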
    if tokenizer.is_none() {
        tracing::warn!("Could not find a fast tokenizer implementation for {tokenizer_name}");
        tracing::warn!("Rust input length validation and truncation is disabled");
    }

    // if pipeline-tag == text-generation we default to return_full_text = true
    let compat_return_full_text = match &model_info.pipeline_tag {
        None => {
            tracing::warn!("no pipeline tag found for model {tokenizer_name}");
            true
        }
        Some(pipeline_tag) => pipeline_tag.as_str() == "text-generation",
    };

    // Instantiate sharded client from the master unix socket
    let mut sharded_client = ShardedClient::connect_uds(master_shard_uds_path)
        .await
        .map_err(RouterError::Connection)?;
    // Clear the cache; useful if the webserver rebooted
    sharded_client
        .clear_cache(None)
        .await
        .map_err(RouterError::Cache)?;
    // Get info from the shard
    let shard_info = sharded_client.info().await.map_err(RouterError::Info)?;

    // Warmup model
    tracing::info!("Warming up model");
    let max_supported_batch_total_tokens = match sharded_client
        .warmup(
            max_input_length as u32,
            max_batch_prefill_tokens,
            max_total_tokens as u32,
            max_batch_size,
        )
        .await
        .map_err(RouterError::Warmup)?
    {
        // Older models do not support automatic max-batch-total-tokens
        None => {
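            // Use the CLI value if provided, otherwise default to at least 16k tokens,
            // never dropping below max_total_tokens or max_batch_prefill_tokens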
            let max_batch_total_tokens = max_batch_total_tokens
                .unwrap_or(16000.max((max_total_tokens as u32).max(max_batch_prefill_tokens)));
            tracing::warn!("Model does not support automatic max batch total tokens");
            max_batch_total_tokens
        }
        // Flash attention models return their max supported total tokens
        Some(max_supported_batch_total_tokens) => {
            // Warn if the user set their own max-batch-total-tokens, as it will be ignored
            if max_batch_total_tokens.is_some() {
                tracing::warn!(
                    "`--max-batch-total-tokens` is deprecated for Flash \
                        Attention models."
                );
                tracing::warn!(
                    "Inferred max batch total tokens: {max_supported_batch_total_tokens}"
                );
            }
            if max_total_tokens as u32 > max_supported_batch_total_tokens {
                return Err(RouterError::ArgumentValidation(format!("`max_total_tokens` must be <= `max_batch_total_tokens`. Given: {max_total_tokens} and {max_supported_batch_total_tokens}")));
            }

            max_supported_batch_total_tokens
        }
    };
    tracing::info!("Setting max batch total tokens to {max_supported_batch_total_tokens}");
    tracing::info!("Connected");

    // Determine the server port based on the feature and environment variable.
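    // (Vertex AI deployments expose the serving port through the AIP_HTTP_PORT variable.)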
    let port = if cfg!(feature = "google") {
        std::env::var("AIP_HTTP_PORT")
            .map(|aip_http_port| aip_http_port.parse::<u16>().unwrap_or(port))
            .unwrap_or(port)
    } else {
        port
    };

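    // Bind address: parse the hostname, falling back to 0.0.0.0 if it is not a valid IP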
    let addr = match hostname.parse() {
        Ok(ip) => SocketAddr::new(ip, port),
        Err(_) => {
            tracing::warn!("Invalid hostname, defaulting to 0.0.0.0");
            SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), port)
        }
    };

    // Run server
    server::run(
        model_info,
        shard_info,
        compat_return_full_text,
        max_concurrent_requests,
        max_best_of,
        max_stop_sequences,
        max_top_n_tokens,
        max_input_length,
        max_total_tokens,
        waiting_served_ratio,
        max_batch_prefill_tokens,
        max_supported_batch_total_tokens,
        max_waiting_tokens,
        max_batch_size,
        sharded_client,
        tokenizer,
        config,
        validation_workers,
        addr,
        cors_allow_origin,
        ngrok,
        ngrok_authtoken,
        ngrok_edge,
        tokenizer_config,
        messages_api_enabled,
        disable_grammar_support,
    )
    .await?;
    Ok(())
}

/// Init logging using the LOG_LEVEL env variable and CLI flags:
///     - otlp_endpoint is an optional URL to an OpenTelemetry collector
///     - LOG_LEVEL may be TRACE, DEBUG, INFO, WARN or ERROR (defaults to INFO)
///     - json_output switches the stdout layer from text to JSON formatting
fn init_logging(otlp_endpoint: Option<String>, json_output: bool) {
    let mut layers = Vec::new();

    // STDOUT/STDERR layer
    let fmt_layer = tracing_subscriber::fmt::layer()
        .with_file(true)
        .with_line_number(true);

    let fmt_layer = match json_output {
        true => fmt_layer.json().flatten_event(true).boxed(),
        false => fmt_layer.boxed(),
    };
    layers.push(fmt_layer);

    // OpenTelemetry tracing layer
    if let Some(otlp_endpoint) = otlp_endpoint {
        global::set_text_map_propagator(TraceContextPropagator::new());

        let tracer = opentelemetry_otlp::new_pipeline()
            .tracing()
            .with_exporter(
                opentelemetry_otlp::new_exporter()
                    .tonic()
                    .with_endpoint(otlp_endpoint),
            )
            .with_trace_config(
                trace::config()
                    .with_resource(Resource::new(vec![KeyValue::new(
                        "service.name",
                        "text-generation-inference.router",
                    )]))
                    .with_sampler(Sampler::AlwaysOn),
            )
            .install_batch(opentelemetry::runtime::Tokio);

        if let Ok(tracer) = tracer {
            layers.push(tracing_opentelemetry::layer().with_tracer(tracer).boxed());
            init_tracing_opentelemetry::init_propagator().unwrap();
        };
    }

    // Filter events with LOG_LEVEL
    let env_filter =
        EnvFilter::try_from_env("LOG_LEVEL").unwrap_or_else(|_| EnvFilter::new("info"));

    tracing_subscriber::registry()
        .with(env_filter)
        .with(layers)
        .init();
}

/// Get model info from the Hugging Face Hub
pub async fn get_model_info(api: &ApiRepo) -> Option<HubModelInfo> {
    let response = api.info_request().send().await.ok()?;

    if response.status().is_success() {
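        // Deserialize the Hub response body; any failure simply yields None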
        let hub_model_info: HubModelInfo =
            serde_json::from_str(&response.text().await.ok()?).ok()?;
        if let Some(sha) = &hub_model_info.sha {
            tracing::info!(
                "Serving revision {sha} of model {}",
                hub_model_info.model_id
            );
        }
        Some(hub_model_info)
    } else {
        None
    }
}

/// Get the base model's tokenizer from the Hugging Face Hub
pub async fn get_base_tokenizer(api: &Api, api_repo: &ApiRepo) -> Option<Tokenizer> {
    let config_filename = api_repo.get("config.json").await.ok()?;

    // Open the file in read-only mode with buffer.
    let file = File::open(config_filename).ok()?;
    let reader = BufReader::new(file);

    // Read the JSON contents of the file as a generic `serde_json::Value`.
    let config: serde_json::Value = serde_json::from_reader(reader).ok()?;

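    // If the config points at a base model (e.g. for adapters), fetch that
    // repository's tokenizer instead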
    if let Some(serde_json::Value::String(base_model_id)) = config.get("base_model_name_or_path") {
        let api_base_repo = api.repo(Repo::with_revision(
            base_model_id.to_string(),
            RepoType::Model,
            "main".to_string(),
        ));

        let tokenizer_filename = api_base_repo.get("tokenizer.json").await.ok()?;
        Tokenizer::from_file(tokenizer_filename).ok()
    } else {
        None
    }
}

/// Get tokenizer_config from the Hugging Face Hub
pub async fn get_tokenizer_config(api_repo: &ApiRepo) -> Option<HubTokenizerConfig> {
    let tokenizer_config_filename = api_repo.get("tokenizer_config.json").await.ok()?;

    // Open the file in read-only mode with buffer.
    let file = File::open(tokenizer_config_filename).ok()?;
    let reader = BufReader::new(file);

    // Read the JSON contents of the file as an instance of 'HubTokenizerConfig'.
    let tokenizer_config: HubTokenizerConfig = serde_json::from_reader(reader)
        .map_err(|e| {
            tracing::warn!("Unable to parse tokenizer config: {}", e);
            e
        })
        .ok()?;

    Some(tokenizer_config)
}

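/// Errors returned by the router binary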
#[derive(Debug, Error)]
enum RouterError {
    #[error("Argument validation error: {0}")]
    ArgumentValidation(String),
    #[error("Unable to connect to the Python model shards: {0}")]
    Connection(ClientError),
    #[error("Unable to clear the Python model shards cache: {0}")]
    Cache(ClientError),
    #[error("Unable to get the Python model shards info: {0}")]
    Info(ClientError),
    #[error("Unable to warmup the Python model shards: {0}")]
    Warmup(ClientError),
    #[error("Tokio runtime failed to start: {0}")]
    Tokio(#[from] std::io::Error),
    #[error("Axum webserver failed: {0}")]
    Axum(#[from] axum::BoxError),
}