use axum::http::HeaderValue;
use clap::Parser;
use hf_hub::api::tokio::{Api, ApiBuilder, ApiRepo};
use hf_hub::{Repo, RepoType};
use opentelemetry::sdk::propagation::TraceContextPropagator;
use opentelemetry::sdk::trace;
use opentelemetry::sdk::trace::Sampler;
use opentelemetry::sdk::Resource;
use opentelemetry::{global, KeyValue};
use opentelemetry_otlp::WithExportConfig;
use std::fs::File;
use std::io::BufReader;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::path::Path;
use text_generation_client::{ClientError, ShardedClient};
use text_generation_router::{server, HubModelInfo, HubTokenizerConfig};
use thiserror::Error;
use tokenizers::Tokenizer;
use tower_http::cors::AllowOrigin;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::{EnvFilter, Layer};

/// App Configuration
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
    #[clap(default_value = "128", long, env)]
    max_concurrent_requests: usize,
    #[clap(default_value = "2", long, env)]
    max_best_of: usize,
    #[clap(default_value = "4", long, env)]
    max_stop_sequences: usize,
    #[clap(default_value = "5", long, env)]
    max_top_n_tokens: u32,
    #[clap(default_value = "1024", long, env)]
    max_input_length: usize,
    #[clap(default_value = "2048", long, env)]
    max_total_tokens: usize,
    #[clap(default_value = "1.2", long, env)]
    waiting_served_ratio: f32,
    #[clap(default_value = "4096", long, env)]
    max_batch_prefill_tokens: u32,
    #[clap(long, env)]
    max_batch_total_tokens: Option<u32>,
    #[clap(default_value = "20", long, env)]
    max_waiting_tokens: usize,
    #[clap(long, env)]
    max_batch_size: Option<usize>,
    #[clap(default_value = "0.0.0.0", long, env)]
    hostname: String,
    #[clap(default_value = "3000", long, short, env)]
    port: u16,
    #[clap(default_value = "/tmp/text-generation-server-0", long, env)]
    master_shard_uds_path: String,
    #[clap(default_value = "bigscience/bloom", long, env)]
    tokenizer_name: String,
    #[clap(long, env)]
    tokenizer_config_path: Option<String>,
    #[clap(long, env)]
    revision: Option<String>,
    #[clap(default_value = "2", long, env)]
    validation_workers: usize,
    #[clap(long, env)]
    json_output: bool,
    #[clap(long, env)]
    otlp_endpoint: Option<String>,
    #[clap(long, env)]
    cors_allow_origin: Option<Vec<String>>,
    #[clap(long, env)]
    ngrok: bool,
    #[clap(long, env)]
    ngrok_authtoken: Option<String>,
    #[clap(long, env)]
    ngrok_edge: Option<String>,
    #[clap(long, env, default_value_t = false)]
    messages_api_enabled: bool,
}
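
// Example invocation (illustrative only; clap derives the kebab-case flags
// from the field names above, and the values shown are just the declared
// defaults, not recommendations):
//   text-generation-router --tokenizer-name bigscience/bloom --port 3000 \
//       --max-input-length 1024 --max-total-tokens 2048
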
#[tokio::main]
async fn main() -> Result<(), RouterError> {
    // Get args
    let args = Args::parse();
    // Pattern match configuration
    let Args {
        max_concurrent_requests,
        max_best_of,
        max_stop_sequences,
        max_top_n_tokens,
        max_input_length,
        max_total_tokens,
        waiting_served_ratio,
        max_batch_prefill_tokens,
        max_batch_total_tokens,
        max_waiting_tokens,
        max_batch_size,
        hostname,
        port,
        master_shard_uds_path,
        tokenizer_name,
        tokenizer_config_path,
        revision,
        validation_workers,
        json_output,
        otlp_endpoint,
        cors_allow_origin,
        ngrok,
        ngrok_authtoken,
        ngrok_edge,
        messages_api_enabled,
    } = args;

    // Init logging and telemetry
    init_logging(otlp_endpoint, json_output);

    // Validate args
    if max_input_length >= max_total_tokens {
        return Err(RouterError::ArgumentValidation(
            "`max_input_length` must be < `max_total_tokens`".to_string(),
        ));
    }
    if max_input_length as u32 > max_batch_prefill_tokens {
        return Err(RouterError::ArgumentValidation(format!("`max_batch_prefill_tokens` must be >= `max_input_length`. Given: {max_batch_prefill_tokens} and {max_input_length}")));
    }

    if validation_workers == 0 {
        return Err(RouterError::ArgumentValidation(
            "`validation_workers` must be > 0".to_string(),
        ));
    }

    if let Some(ref max_batch_total_tokens) = max_batch_total_tokens {
        if max_batch_prefill_tokens > *max_batch_total_tokens {
            return Err(RouterError::ArgumentValidation(format!("`max_batch_prefill_tokens` must be <= `max_batch_total_tokens`. Given: {max_batch_prefill_tokens} and {max_batch_total_tokens}")));
        }
        if max_total_tokens as u32 > *max_batch_total_tokens {
            return Err(RouterError::ArgumentValidation(format!("`max_total_tokens` must be <= `max_batch_total_tokens`. Given: {max_total_tokens} and {max_batch_total_tokens}")));
        }
    }
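    // Illustrative arithmetic (assumed values): with the defaults above,
    // max_input_length = 1024 <= max_batch_prefill_tokens = 4096, and a
    // hypothetical `--max-batch-total-tokens 16000` satisfies both
    // 4096 <= 16000 and max_total_tokens = 2048 <= 16000.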

    // CORS allowed origins
    // Map inside the Option to parse each String into a HeaderValue,
    // then convert the list into an AllowOrigin
    let cors_allow_origin: Option<AllowOrigin> = cors_allow_origin.map(|cors_allow_origin| {
        AllowOrigin::list(
            cors_allow_origin
                .iter()
                .map(|origin| origin.parse::<HeaderValue>().unwrap()),
        )
    });
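    // e.g. a hypothetical `--cors-allow-origin https://example.com` becomes an
    // AllowOrigin::list with one parsed HeaderValue; an unparsable origin
    // panics on the `unwrap` above.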

    // Parse Huggingface hub token
    let authorization_token = std::env::var("HUGGING_FACE_HUB_TOKEN").ok();

    // Tokenizer instance
    // This will only be used to validate payloads
    let local_path = Path::new(&tokenizer_name);
    let local_model = local_path.exists() && local_path.is_dir();

    // Shared API builder initialization
    let api_builder = || {
        let mut builder = ApiBuilder::new()
            .with_progress(false)
            .with_token(authorization_token);

        if let Ok(cache_dir) = std::env::var("HUGGINGFACE_HUB_CACHE") {
            builder = builder.with_cache_dir(cache_dir.into());
        }

        builder
    };

    // Decide if we need to use the API based on the revision and local path
    let use_api = revision.is_some() || !local_path.exists() || !local_path.is_dir();

    // Initialize API if needed
    let api = if use_api {
        tracing::info!("Using the Hugging Face API");
        match api_builder().build() {
            Ok(api) => Some(api),
            Err(_) => {
                tracing::warn!("Unable to build the Hugging Face API");
                None
            }
        }
    } else {
        None
    };

    // Load tokenizer and model info
    let (tokenizer, model_info) = if local_model {
        let tokenizer = Tokenizer::from_file(local_path.join("tokenizer.json")).ok();
        let model_info = HubModelInfo {
            model_id: tokenizer_name.to_string(),
            sha: None,
            pipeline_tag: None,
        };

        (tokenizer, model_info)
    } else if let Some(api) = api.clone() {
        let api_repo = api.repo(Repo::with_revision(
            tokenizer_name.to_string(),
            RepoType::Model,
            revision.clone().unwrap_or_else(|| "main".to_string()),
        ));

        let tokenizer = match api_repo.get("tokenizer.json").await {
            Ok(tokenizer_filename) => Tokenizer::from_file(tokenizer_filename).ok(),
            Err(_) => get_base_tokenizer(&api, &api_repo).await,
        };

        let model_info = get_model_info(&api_repo).await.unwrap_or_else(|| {
            tracing::warn!("Could not retrieve model info from the Hugging Face hub.");
            HubModelInfo {
                model_id: tokenizer_name.to_string(),
                sha: None,
                pipeline_tag: None,
            }
        });

        (tokenizer, model_info)
    } else {
        // No API and no local model
        return Err(RouterError::ArgumentValidation(
            "No local model found and no revision specified".to_string(),
        ));
    };

    // Load tokenizer config if found locally, or check if we can get it from the API if needed
    let tokenizer_config = if let Some(path) = tokenizer_config_path {
        tracing::info!("Using local tokenizer config from user specified path");
        HubTokenizerConfig::from_file(&std::path::PathBuf::from(path))
    } else if local_model {
        tracing::info!("Using local tokenizer config");
        HubTokenizerConfig::from_file(&local_path.join("tokenizer_config.json"))
    } else {
        match api {
            Some(api) => {
                tracing::info!("Using the Hugging Face API to retrieve tokenizer config");
                let repo = Repo::with_revision(
                    tokenizer_name.to_string(),
                    RepoType::Model,
                    revision.unwrap_or("main".to_string()),
                );
                get_tokenizer_config(&api.repo(repo))
                    .await
                    .unwrap_or_else(|| {
                        tracing::warn!(
                            "Could not retrieve tokenizer config from the Hugging Face hub."
                        );
                        HubTokenizerConfig::default()
                    })
            }
            None => {
                tracing::warn!("Could not find tokenizer config locally and no API specified");
                HubTokenizerConfig::default()
            }
        }
    };

    if tokenizer.is_none() {
        tracing::warn!("Could not find a fast tokenizer implementation for {tokenizer_name}");
        tracing::warn!("Rust input length validation and truncation is disabled");
    }

    // if pipeline-tag == text-generation we default to return_full_text = true
    let compat_return_full_text = match &model_info.pipeline_tag {
        None => {
            tracing::warn!("no pipeline tag found for model {tokenizer_name}");
            false
        }
        Some(pipeline_tag) => pipeline_tag.as_str() == "text-generation",
    };

    // Instantiate sharded client from the master unix socket
    let mut sharded_client = ShardedClient::connect_uds(master_shard_uds_path)
        .await
        .map_err(RouterError::Connection)?;
    // Clear the cache; useful if the webserver rebooted
    sharded_client
        .clear_cache(None)
        .await
        .map_err(RouterError::Cache)?;
    // Get info from the shard
    let shard_info = sharded_client.info().await.map_err(RouterError::Info)?;

    // Warmup model
    tracing::info!("Warming up model");
    let max_supported_batch_total_tokens = match sharded_client
        .warmup(
            max_input_length as u32,
            max_batch_prefill_tokens,
            max_total_tokens as u32,
            max_batch_size,
        )
        .await
        .map_err(RouterError::Warmup)?
    {
        // Older models do not support automatic max-batch-total-tokens
        None => {
            let max_batch_total_tokens = max_batch_total_tokens
                .unwrap_or(16000.max((max_total_tokens as u32).max(max_batch_prefill_tokens)));
            tracing::warn!("Model does not support automatic max batch total tokens");
            max_batch_total_tokens
        }
        // Flash attention models return their max supported total tokens
        Some(max_supported_batch_total_tokens) => {
            // Warn if the user set `--max-batch-total-tokens`, as we will ignore it
            if max_batch_total_tokens.is_some() {
                tracing::warn!(
                    "`--max-batch-total-tokens` is deprecated for Flash \
                        Attention models."
                );
                tracing::warn!(
                    "Inferred max batch total tokens: {max_supported_batch_total_tokens}"
                );
            }
            if max_total_tokens as u32 > max_supported_batch_total_tokens {
                return Err(RouterError::ArgumentValidation(format!("`max_total_tokens` must be <= `max_batch_total_tokens`. Given: {max_total_tokens} and {max_supported_batch_total_tokens}")));
            }

            max_supported_batch_total_tokens
        }
    };
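    // e.g. with the assumed defaults, an older model that reports no value and
    // no `--max-batch-total-tokens` flag falls back to
    // max(16000, max(2048, 4096)) = 16000 above.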
    tracing::info!("Setting max batch total tokens to {max_supported_batch_total_tokens}");
    tracing::info!("Connected");

    let addr = match hostname.parse() {
        Ok(ip) => SocketAddr::new(ip, port),
        Err(_) => {
            tracing::warn!("Invalid hostname, defaulting to 0.0.0.0");
            SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), port)
        }
    };

    // Run server
    server::run(
        model_info,
        shard_info,
        compat_return_full_text,
        max_concurrent_requests,
        max_best_of,
        max_stop_sequences,
        max_top_n_tokens,
        max_input_length,
        max_total_tokens,
        waiting_served_ratio,
        max_batch_prefill_tokens,
        max_supported_batch_total_tokens,
        max_waiting_tokens,
        max_batch_size,
        sharded_client,
        tokenizer,
        validation_workers,
        addr,
        cors_allow_origin,
        ngrok,
        ngrok_authtoken,
        ngrok_edge,
        tokenizer_config,
        messages_api_enabled,
    )
    .await?;
    Ok(())
}

/// Init logging using the LOG_LEVEL env variable and the json_output flag:
///     - otlp_endpoint is an optional URL to an OpenTelemetry collector
///     - LOG_LEVEL may be TRACE, DEBUG, INFO, WARN or ERROR (defaults to INFO)
///     - json_output switches the stdout layer from text to flattened JSON
fn init_logging(otlp_endpoint: Option<String>, json_output: bool) {
    let mut layers = Vec::new();

    // STDOUT/STDERR layer
    let fmt_layer = tracing_subscriber::fmt::layer()
        .with_file(true)
        .with_line_number(true);

    let fmt_layer = match json_output {
        true => fmt_layer.json().flatten_event(true).boxed(),
        false => fmt_layer.boxed(),
    };
    layers.push(fmt_layer);

    // OpenTelemetry tracing layer
    if let Some(otlp_endpoint) = otlp_endpoint {
        global::set_text_map_propagator(TraceContextPropagator::new());

        let tracer = opentelemetry_otlp::new_pipeline()
            .tracing()
            .with_exporter(
                opentelemetry_otlp::new_exporter()
                    .tonic()
                    .with_endpoint(otlp_endpoint),
            )
            .with_trace_config(
                trace::config()
                    .with_resource(Resource::new(vec![KeyValue::new(
                        "service.name",
                        "text-generation-inference.router",
                    )]))
                    .with_sampler(Sampler::AlwaysOn),
            )
            .install_batch(opentelemetry::runtime::Tokio);

        if let Ok(tracer) = tracer {
            layers.push(tracing_opentelemetry::layer().with_tracer(tracer).boxed());
            init_tracing_opentelemetry::init_propagator().unwrap();
        };
    }

    // Filter events with LOG_LEVEL
    let env_filter =
        EnvFilter::try_from_env("LOG_LEVEL").unwrap_or_else(|_| EnvFilter::new("info"));

    tracing_subscriber::registry()
        .with(env_filter)
        .with(layers)
        .init();
}
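
// Example (illustrative): running with `LOG_LEVEL=text_generation_router=debug`
// and `--json-output` emits flattened JSON events; with LOG_LEVEL unset, the
// filter defaults to `info` and the plain text layer is used.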

/// Get model info from the Hugging Face Hub
pub async fn get_model_info(api: &ApiRepo) -> Option<HubModelInfo> {
    let response = api.info_request().send().await.ok()?;

    if response.status().is_success() {
        let hub_model_info: HubModelInfo =
            serde_json::from_str(&response.text().await.ok()?).ok()?;
        if let Some(sha) = &hub_model_info.sha {
            tracing::info!(
                "Serving revision {sha} of model {}",
                hub_model_info.model_id
            );
        }
        Some(hub_model_info)
    } else {
        None
    }
}

/// Get the base model's tokenizer
pub async fn get_base_tokenizer(api: &Api, api_repo: &ApiRepo) -> Option<Tokenizer> {
    let config_filename = api_repo.get("config.json").await.ok()?;

    // Open the file in read-only mode with buffer.
    let file = File::open(config_filename).ok()?;
    let reader = BufReader::new(file);

    // Read the JSON contents of the file as a generic JSON value.
    let config: serde_json::Value = serde_json::from_reader(reader).ok()?;

    if let Some(serde_json::Value::String(base_model_id)) = config.get("base_model_name_or_path") {
        let api_base_repo = api.repo(Repo::with_revision(
            base_model_id.to_string(),
            RepoType::Model,
            "main".to_string(),
        ));

        let tokenizer_filename = api_base_repo.get("tokenizer.json").await.ok()?;
        Tokenizer::from_file(tokenizer_filename).ok()
    } else {
        None
    }
}
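
// Hypothetical example: for an adapter repo whose config.json contains
// `"base_model_name_or_path": "bigscience/bloom"`, the function above fetches
// bigscience/bloom's tokenizer.json at revision "main".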

/// Get tokenizer_config from the Hugging Face Hub
pub async fn get_tokenizer_config(api_repo: &ApiRepo) -> Option<HubTokenizerConfig> {
    let tokenizer_config_filename = api_repo.get("tokenizer_config.json").await.ok()?;

    // Open the file in read-only mode with buffer.
    let file = File::open(tokenizer_config_filename).ok()?;
    let reader = BufReader::new(file);

    // Read the JSON contents of the file as an instance of 'HubTokenizerConfig'.
    let tokenizer_config: HubTokenizerConfig = serde_json::from_reader(reader)
        .map_err(|e| {
            tracing::warn!("Unable to parse tokenizer config: {}", e);
            e
        })
        .ok()?;

    Some(tokenizer_config)
}

#[derive(Debug, Error)]
enum RouterError {
    #[error("Argument validation error: {0}")]
    ArgumentValidation(String),
    #[error("Unable to connect to the Python model shards: {0}")]
    Connection(ClientError),
    #[error("Unable to clear the Python model shards cache: {0}")]
    Cache(ClientError),
    #[error("Unable to get the Python model shards info: {0}")]
    Info(ClientError),
    #[error("Unable to warmup the Python model shards: {0}")]
    Warmup(ClientError),
    #[error("Tokio runtime failed to start: {0}")]
    Tokio(#[from] std::io::Error),
    #[error("Axum webserver failed: {0}")]
    Axum(#[from] axum::BoxError),
}