use clap::{Parser, ValueEnum};
use serde::Deserialize;
use std::env;
use std::ffi::OsString;
use std::io::{BufRead, BufReader, Read};
use std::path::Path;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::TryRecvError;
use std::sync::Arc;
use std::sync::{mpsc, Mutex};
use std::thread;
use std::thread::sleep;
use std::time::{Duration, Instant};
use std::{fs, io};
use subprocess::{ExitStatus, Popen, PopenConfig, PopenError, Redirection};

mod env_runtime;

#[derive(Clone, Copy, Debug, ValueEnum)]
enum Quantization {
    Bitsandbytes,
    Gptq,
}

impl std::fmt::Display for Quantization {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Keep in sync with the values expected by `server`.
        match self {
            Quantization::Bitsandbytes => {
                write!(f, "bitsandbytes")
            }
            Quantization::Gptq => {
                write!(f, "gptq")
            }
        }
    }
}
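// e.g. `Quantization::Bitsandbytes.to_string()` yields "bitsandbytes", which is the
// exact value the launcher later forwards to `text-generation-server` via `--quantize`.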

/// App Configuration
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
    /// The name of the model to load.
    /// Can be a MODEL_ID as listed on <https://hf.co/models> like
    /// `gpt2` or `OpenAssistant/oasst-sft-1-pythia-12b`.
    /// Or it can be a local directory containing the necessary files
    /// as saved by the `save_pretrained(...)` method of `transformers`.
    #[clap(default_value = "bigscience/bloom-560m", long, env)]
    model_id: String,

    /// The actual revision of the model if you're referring to a model
    /// on the hub. You can use a specific commit id or a branch like `refs/pr/2`.
    #[clap(long, env)]
    revision: Option<String>,

    /// Whether to shard the model across multiple GPUs
    /// By default text-generation-inference will use all available GPUs to run
    /// the model. Setting it to `false` deactivates `num_shard`.
    #[clap(long, env)]
    sharded: Option<bool>,

    /// The number of shards to use if you don't want to use all GPUs on a given machine.
    /// You can use `CUDA_VISIBLE_DEVICES=0,1 text-generation-launcher... --num_shard 2`
    /// and `CUDA_VISIBLE_DEVICES=2,3 text-generation-launcher... --num_shard 2` to
    /// launch 2 copies with 2 shards each on a given machine with 4 GPUs, for instance.
    #[clap(long, env)]
    num_shard: Option<usize>,

    /// Whether you want the model to be quantized. This will use `bitsandbytes` for
    /// quantization on the fly, or `gptq`.
    #[clap(long, env, value_enum)]
    quantize: Option<Quantization>,

    /// Whether you want to execute hub modelling code. Explicitly passing a `revision` is
    /// encouraged when loading a model with custom code to ensure no malicious code has been
    /// contributed in a newer revision.
    #[clap(long, env, value_enum)]
    trust_remote_code: bool,

    /// The maximum amount of concurrent requests for this particular deployment.
    /// Having a low limit will refuse client requests instead of having them
    /// wait for too long, and is usually good for handling backpressure correctly.
    #[clap(default_value = "128", long, env)]
    max_concurrent_requests: usize,

    /// This is the maximum allowed value for clients to set `best_of`.
    /// Best of makes `n` generations at the same time, and returns the best
    /// in terms of overall log probability over the entire generated sequence.
    #[clap(default_value = "2", long, env)]
    max_best_of: usize,

    /// This is the maximum allowed value for clients to set `stop_sequences`.
    /// Stop sequences are used to allow the model to stop on more than just
    /// the EOS token, and enable more complex "prompting" where users can preprompt
    /// the model in a specific way and define their "own" stop token aligned with
    /// their prompt.
    #[clap(default_value = "4", long, env)]
    max_stop_sequences: usize,

    /// This is the maximum allowed input length (expressed in number of tokens)
    /// for users. The larger this value, the longer prompt users can send which
    /// can impact the overall memory required to handle the load.
    /// Please note that some models have a finite sequence length they can handle.
    #[clap(default_value = "1024", long, env)]
    max_input_length: usize,

    /// This is the most important value to set as it defines the "memory budget"
    /// of running client requests.
    /// Clients will send input sequences and ask to generate `max_new_tokens`
    /// on top. With a value of `1512`, users can send either a prompt of
    /// `1000` and ask for `512` new tokens, or send a prompt of `1` and ask for
    /// `1511` max_new_tokens.
    /// The larger this value, the more room each request takes in your RAM
    /// and the less effective batching can be.
    #[clap(default_value = "2048", long, env)]
    max_total_tokens: usize,

    /// This represents the ratio of waiting queries vs running queries where
    /// you want to start considering pausing the running queries to include the waiting
    /// ones into the same batch.
    /// `waiting_served_ratio=1.2` means that when 12 queries are waiting and there are
    /// only 10 queries left in the current batch, we check whether we can fit those 12
    /// waiting queries into the batching strategy; if so, batching happens,
    /// delaying the 10 running queries by a `prefill` run.
    ///
    /// This setting is only applied if there is room in the batch
    /// as defined by `max_batch_total_tokens`.
    #[clap(default_value = "1.2", long, env)]
    waiting_served_ratio: f32,

    /// Limits the number of tokens for the prefill operation.
    /// Since this operation takes the most memory and is compute bound, it is useful
    /// to limit the number of requests that can be sent.
    #[clap(default_value = "4096", long, env)]
    max_batch_prefill_tokens: u32,

    /// **IMPORTANT** This is one critical control to allow maximum usage
    /// of the available hardware.
    ///
    /// This represents the total amount of potential tokens within a batch.
    /// When using padding (not recommended), this would be equivalent to
    /// `batch_size` * `max_total_tokens`.
    ///
    /// However, in the non-padded (flash attention) version, this can be set much more finely.
    ///
    /// For `max_batch_total_tokens=1000`, you could fit `10` queries of `total_tokens=100`
    /// or a single query of `1000` tokens.
    ///
    /// Overall this number should be the largest possible amount that fits the
    /// remaining memory (after the model is loaded). Since the actual memory overhead
    /// depends on other parameters like if you're using quantization, flash attention
    /// or the model implementation, text-generation-inference cannot infer this number
    /// automatically.
    #[clap(default_value = "16000", long, env)]
    max_batch_total_tokens: u32,

    /// This setting defines how many tokens can be passed before forcing the waiting
    /// queries to be put on the batch (if the size of the batch allows for it).
    /// New queries require one `prefill` forward pass, which is different from `decode`,
    /// so you need to pause the running batch in order to run `prefill`
    /// and create the correct values for the waiting queries to be able to join the batch.
    ///
    /// With a value too small, waiting queries will always "steal" the compute to run `prefill`,
    /// and running queries will be delayed significantly.
    ///
    /// With a value too big, waiting queries could wait for a very long time
    /// before being allowed a slot in the running batch. If your server is busy
    /// that means that requests that could run in ~2s on an empty server could
    /// end up running in ~20s because the query had to wait for 18s.
    ///
    /// This number is expressed in number of tokens to make it a bit more
    /// "model" agnostic, but what should really matter is the overall latency
    /// for end users.
    #[clap(default_value = "20", long, env)]
    max_waiting_tokens: usize,

    /// The port to listen on.
    #[clap(default_value = "3000", long, short, env)]
    port: u16,

    /// The name of the socket for gRPC communication between the webserver
    /// and the shards.
    #[clap(default_value = "/tmp/text-generation-server", long, env)]
    shard_uds_path: String,

    /// The address the master shard will listen on. (setting used by torch distributed)
    #[clap(default_value = "localhost", long, env)]
    master_addr: String,

    /// The port the master shard will listen on. (setting used by torch distributed)
    #[clap(default_value = "29500", long, env)]
    master_port: usize,

    /// The location of the huggingface hub cache.
    /// Used to override the location if you want to provide a mounted disk, for instance.
    #[clap(long, env)]
    huggingface_hub_cache: Option<String>,

    /// The location of the weights cache.
    /// Used to override where converted weights are stored, for instance when
    /// running inside a HuggingFace Inference Endpoint.
    #[clap(long, env)]
    weights_cache_override: Option<String>,

    /// For some models (like bloom), text-generation-inference implements custom
    /// CUDA kernels to speed up inference. Those kernels were only tested on A100.
    /// Use this flag to disable them if you're running on different hardware and
    /// encounter issues.
    #[clap(long, env)]
    disable_custom_kernels: bool,

    /// Outputs the logs in JSON format (useful for telemetry)
    #[clap(long, env)]
    json_output: bool,

    /// The OTLP endpoint to export OpenTelemetry traces to
    #[clap(long, env)]
    otlp_endpoint: Option<String>,

    /// Origins allowed for cross-origin requests to the webserver (CORS)
    #[clap(long, env)]
    cors_allow_origin: Vec<String>,
    /// Watermarking gamma, forwarded to the shards as `WATERMARK_GAMMA`
    #[clap(long, env)]
    watermark_gamma: Option<f32>,

    /// Watermarking delta, forwarded to the shards as `WATERMARK_DELTA`
    #[clap(long, env)]
    watermark_delta: Option<f32>,

    /// Enable ngrok tunneling
    #[clap(long, env)]
    ngrok: bool,

    /// ngrok authentication token
    #[clap(long, env)]
    ngrok_authtoken: Option<String>,

    /// ngrok domain name where the axum webserver will be available
    #[clap(long, env)]
    ngrok_domain: Option<String>,

    /// ngrok basic auth username
    #[clap(long, env)]
    ngrok_username: Option<String>,

    /// ngrok basic auth password
    #[clap(long, env)]
    ngrok_password: Option<String>,

    /// Display a lot of information about your runtime environment
    #[clap(long, short, action)]
    env: bool,
}
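// Illustrative invocations (flags map one-to-one onto the fields above; each flag
// can also be set through the matching environment variable thanks to `env` in the
// clap attributes, e.g. `MODEL_ID`, `NUM_SHARD`):
//
//   text-generation-launcher --model-id bigscience/bloom-560m --port 8080
//   CUDA_VISIBLE_DEVICES=0,1 text-generation-launcher --num-shard 2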

#[derive(Debug)]
enum ShardStatus {
    Ready,
    Failed((usize, String)),
}

#[allow(clippy::too_many_arguments)]
fn shard_manager(
    model_id: String,
    revision: Option<String>,
    quantize: Option<Quantization>,
    trust_remote_code: bool,
    uds_path: String,
    rank: usize,
    world_size: usize,
    master_addr: String,
    master_port: usize,
    huggingface_hub_cache: Option<String>,
    weights_cache_override: Option<String>,
    disable_custom_kernels: bool,
    watermark_gamma: Option<f32>,
    watermark_delta: Option<f32>,
    otlp_endpoint: Option<String>,
    status_sender: mpsc::Sender<ShardStatus>,
    shutdown: Arc<Mutex<bool>>,
    _shutdown_sender: mpsc::Sender<()>,
) {
    // Get UDS path
    let uds_string = format!("{uds_path}-{rank}");
    let uds = Path::new(&uds_string);
    // Clean previous runs
    fs::remove_file(uds).unwrap_or_default();

    // Process args
    let mut shard_argv = vec![
        "text-generation-server".to_string(),
        "serve".to_string(),
        model_id,
        "--uds-path".to_string(),
        uds_path,
        "--logger-level".to_string(),
        "INFO".to_string(),
        "--json-output".to_string(),
    ];

    // Activate trust remote code
    if trust_remote_code {
        shard_argv.push("--trust-remote-code".to_string());
    }

    // Activate tensor parallelism
    if world_size > 1 {
        shard_argv.push("--sharded".to_string());
    }

    if let Some(quantize) = quantize {
        shard_argv.push("--quantize".to_string());
        shard_argv.push(quantize.to_string())
    }

    // Model optional revision
    if let Some(revision) = revision {
        shard_argv.push("--revision".to_string());
        shard_argv.push(revision)
    }

    // OpenTelemetry
    if let Some(otlp_endpoint) = otlp_endpoint {
        shard_argv.push("--otlp-endpoint".to_string());
        shard_argv.push(otlp_endpoint);
    }

    // Copy current process env
    let mut env: Vec<(OsString, OsString)> = env::vars_os().collect();

    // Use the async CUDA allocator: it leads to less memory fragmentation
    env.push((
        "PYTORCH_CUDA_ALLOC_CONF".into(),
        "backend:cudaMallocAsync".into(),
    ));

    // Torch Distributed Env vars
    env.push(("RANK".into(), rank.to_string().into()));
    env.push(("WORLD_SIZE".into(), world_size.to_string().into()));
    env.push(("MASTER_ADDR".into(), master_addr.into()));
    env.push(("MASTER_PORT".into(), master_port.to_string().into()));
    env.push(("NCCL_ASYNC_ERROR_HANDLING".into(), "1".into()));

    // Safetensors load fast
    env.push(("SAFETENSORS_FAST_GPU".into(), "1".into()));

    // Enable hf transfer for insane download speeds
    let enable_hf_transfer = env::var("HF_HUB_ENABLE_HF_TRANSFER").unwrap_or("1".to_string());
    env.push((
        "HF_HUB_ENABLE_HF_TRANSFER".into(),
        enable_hf_transfer.into(),
    ));

    // Parse Inference API token
    if let Ok(api_token) = env::var("HF_API_TOKEN") {
        env.push(("HUGGING_FACE_HUB_TOKEN".into(), api_token.into()))
    };

    // If huggingface_hub_cache is some, pass it to the shard
    // Useful when running inside a docker container
    if let Some(huggingface_hub_cache) = huggingface_hub_cache {
        env.push(("HUGGINGFACE_HUB_CACHE".into(), huggingface_hub_cache.into()));
    };

    // If weights_cache_override is some, pass it to the shard
    // Useful when running inside a HuggingFace Inference Endpoint
    if let Some(weights_cache_override) = weights_cache_override {
        env.push((
            "WEIGHTS_CACHE_OVERRIDE".into(),
            weights_cache_override.into(),
        ));
    };

    // If disable_custom_kernels is true, pass it to the shard as an env var
    if disable_custom_kernels {
        env.push(("DISABLE_CUSTOM_KERNELS".into(), "True".into()))
    }

    // Watermark Gamma
    if let Some(watermark_gamma) = watermark_gamma {
        env.push(("WATERMARK_GAMMA".into(), watermark_gamma.to_string().into()))
    }

    // Watermark Delta
    if let Some(watermark_delta) = watermark_delta {
        env.push(("WATERMARK_DELTA".into(), watermark_delta.to_string().into()))
    }

    // Start process
    tracing::info!("Starting shard {rank}");
    let mut p = match Popen::create(
        &shard_argv,
        PopenConfig {
            stdout: Redirection::Pipe,
            stderr: Redirection::Pipe,
            // Needed for the shutdown procedure
            setpgid: true,
            // Pass the prepared environment (torch distributed, NCCL and HF settings)
            env: Some(env),
            ..Default::default()
        },
    ) {
        Ok(p) => p,
        Err(err) => {
            if let PopenError::IoError(ref err) = err {
                if err.kind() == io::ErrorKind::NotFound {
                    tracing::error!("text-generation-server not found in PATH");
                    tracing::error!("Please install it with `make install-server`")
                }
            }
            status_sender
                .send(ShardStatus::Failed((rank, err.to_string())))
                .unwrap();
            return;
        }
    };

    // Redirect STDOUT to the console
    let shard_stdout = p.stdout.take().unwrap();

    thread::spawn(move || {
        // Enter shard-manager tracing span
        let stdout = BufReader::new(shard_stdout);
        let _span = tracing::span!(tracing::Level::INFO, "shard-manager", rank = rank).entered();
        for line in stdout.lines() {
            // Parse loguru logs
            if let Ok(log) = serde_json::from_str::<PythonLogMessage>(&line.unwrap()) {
                log.trace();
            }
        }
    });

    let mut ready = false;
    let start_time = Instant::now();
    let mut wait_time = Instant::now();
    loop {
        // Process exited
        if let Some(exit_status) = p.poll() {
            let mut err = String::new();
            p.stderr.take().unwrap().read_to_string(&mut err).unwrap();

            if let ExitStatus::Signaled(signal) = exit_status {
                tracing::error!("Shard process was signaled to shutdown with signal {signal}");
            }

            status_sender
                .send(ShardStatus::Failed((rank, err)))
                .unwrap();
            return;
        }

        // We received a shutdown signal
        if *shutdown.lock().unwrap() {
            p.kill().unwrap();
            let _ = p.wait_timeout(Duration::from_secs(90));
            tracing::info!("Shard {rank} terminated");
            return;
        }

        // Shard is ready
        if uds.exists() && !ready {
            tracing::info!("Shard {rank} ready in {:?}", start_time.elapsed());
            status_sender.send(ShardStatus::Ready).unwrap();
            ready = true;
        } else if !ready && wait_time.elapsed() > Duration::from_secs(10) {
            tracing::info!("Waiting for shard {rank} to be ready...");
            wait_time = Instant::now();
        }
        sleep(Duration::from_millis(100));
    }
}

fn shutdown_shards(shutdown: Arc<Mutex<bool>>, shutdown_receiver: &mpsc::Receiver<()>) {
    tracing::info!("Shutting down shards");
    // Update shutdown value to true
    // This will be picked up by the shard manager
    {
        let mut shutdown = shutdown.lock().unwrap();
        *shutdown = true;
    }

    // Wait for shards to shutdown
    // This will block until all shutdown senders are dropped
    let _ = shutdown_receiver.recv();
}

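// Count the GPUs exposed through the environment. For example,
// `CUDA_VISIBLE_DEVICES="0,2"` yields `Some(2)`; if neither variable is set,
// `None` is returned.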
fn num_cuda_devices() -> Option<usize> {
    let devices = match env::var("CUDA_VISIBLE_DEVICES") {
        Ok(devices) => devices,
        Err(_) => env::var("NVIDIA_VISIBLE_DEVICES").ok()?,
    };
    let n_devices = devices.split(',').count();
    Some(n_devices)
}

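// The structs below mirror the subset of loguru's serialized record format that
// the Python server emits with `--json-output`. A shard log line looks roughly
// like this (illustrative, trimmed to the fields actually deserialized here):
//
//   {"text": "Server started", "record": {"level": {"name": "INFO"}}}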
#[derive(Deserialize)]
#[serde(rename_all = "UPPERCASE")]
enum PythonLogLevelEnum {
    Trace,
    Debug,
    Info,
    Success,
    Warning,
    Error,
    Critical,
}

#[derive(Deserialize)]
struct PythonLogLevel {
    name: PythonLogLevelEnum,
}

#[derive(Deserialize)]
struct PythonLogRecord {
    level: PythonLogLevel,
}

#[derive(Deserialize)]
struct PythonLogMessage {
    text: String,
    record: PythonLogRecord,
}

impl PythonLogMessage {
    fn trace(&self) {
        match self.record.level.name {
            PythonLogLevelEnum::Trace => tracing::trace!("{}", self.text),
            PythonLogLevelEnum::Debug => tracing::debug!("{}", self.text),
            PythonLogLevelEnum::Info => tracing::info!("{}", self.text),
            PythonLogLevelEnum::Success => tracing::info!("{}", self.text),
            PythonLogLevelEnum::Warning => tracing::warn!("{}", self.text),
            PythonLogLevelEnum::Error => tracing::error!("{}", self.text),
            PythonLogLevelEnum::Critical => tracing::error!("{}", self.text),
        }
    }
}

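// Illustrative resolutions, assuming the host exposes `CUDA_VISIBLE_DEVICES=0,1,2,3`:
// `--sharded true` alone resolves to 4 shards, `--num-shard 2` alone resolves to 2,
// and with neither flag set all 4 visible devices are used.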
fn find_num_shards(sharded: Option<bool>, num_shard: Option<usize>) -> usize {
    // get the number of shards given `sharded` and `num_shard`
    let num_shard = match (sharded, num_shard) {
        (Some(true), None) => {
            // try to default to the number of available GPUs
            tracing::info!("Parsing num_shard from CUDA_VISIBLE_DEVICES/NVIDIA_VISIBLE_DEVICES");
            let n_devices = num_cuda_devices()
                .expect("--num-shard and CUDA_VISIBLE_DEVICES/NVIDIA_VISIBLE_DEVICES are not set");
            if n_devices <= 1 {
                panic!("`sharded` is true but only found {n_devices} CUDA devices");
            }
            n_devices
        }
        (Some(true), Some(num_shard)) => {
            // we can't have only one shard while sharded
            if num_shard <= 1 {
                panic!("`sharded` is true but `num_shard` <= 1");
            }
            num_shard
        }
        (Some(false), Some(num_shard)) => num_shard,
        (Some(false), None) => 1,
        (None, None) => num_cuda_devices().unwrap_or(1),
        (None, Some(num_shard)) => num_shard,
    };
    if num_shard < 1 {
        panic!("`num_shard` cannot be < 1");
    }
    num_shard
}

#[derive(Debug)]
enum LauncherError {
    DownloadError,
    ShardCannotStart,
    ShardDisconnected,
    ShardFailed,
    WebserverFailed,
    WebserverCannotStart,
}

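// Runs `text-generation-server download-weights <model_id>` as a child process and
// streams its JSON logs until the weights are downloaded (as `.safetensors` files)
// or the process fails.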
fn download_convert_model(args: &Args, running: Arc<AtomicBool>) -> Result<(), LauncherError> {
    let mut download_argv = vec![
        "text-generation-server".to_string(),
        "download-weights".to_string(),
        args.model_id.to_string(),
        "--extension".to_string(),
        ".safetensors".to_string(),
        "--logger-level".to_string(),
        "INFO".to_string(),
        "--json-output".to_string(),
    ];

    // Model optional revision
    if let Some(revision) = &args.revision {
        download_argv.push("--revision".to_string());
        download_argv.push(revision.to_string())
    }

    // Copy current process env
    let mut env: Vec<(OsString, OsString)> = env::vars_os().collect();

    // If huggingface_hub_cache is set, pass it to the download process
    // Useful when running inside a docker container
    if let Some(ref huggingface_hub_cache) = args.huggingface_hub_cache {
        env.push(("HUGGINGFACE_HUB_CACHE".into(), huggingface_hub_cache.into()));
    };

    // Enable hf transfer for insane download speeds
    let enable_hf_transfer = env::var("HF_HUB_ENABLE_HF_TRANSFER").unwrap_or("1".to_string());
    env.push((
        "HF_HUB_ENABLE_HF_TRANSFER".into(),
        enable_hf_transfer.into(),
    ));

    // Parse Inference API token
    if let Ok(api_token) = env::var("HF_API_TOKEN") {
        env.push(("HUGGING_FACE_HUB_TOKEN".into(), api_token.into()))
    };

    // If args.weights_cache_override is some, pass it to the download process
    // Useful when running inside a HuggingFace Inference Endpoint
    if let Some(weights_cache_override) = &args.weights_cache_override {
        env.push((
            "WEIGHTS_CACHE_OVERRIDE".into(),
            weights_cache_override.into(),
        ));
    };

    // Start process
    tracing::info!("Starting download process.");
    let mut download_process = match Popen::create(
        &download_argv,
        PopenConfig {
            stdout: Redirection::Pipe,
            stderr: Redirection::Pipe,
            // Needed for the shutdown procedure
            setpgid: true,
            env: Some(env),
            ..Default::default()
        },
    ) {
        Ok(p) => p,
        Err(err) => {
            if let PopenError::IoError(ref err) = err {
                if err.kind() == io::ErrorKind::NotFound {
                    tracing::error!("text-generation-server not found in PATH");
                    tracing::error!("Please install it with `make install-server`")
                }
            }
            return Err(LauncherError::DownloadError);
        }
    };

    // Redirect STDOUT to the console
    let download_stdout = download_process.stdout.take().unwrap();
    thread::spawn(move || {
        // Enter download tracing span
        let stdout = BufReader::new(download_stdout);
        let _span = tracing::span!(tracing::Level::INFO, "download").entered();
        for line in stdout.lines() {
            // Parse loguru logs
            if let Ok(log) = serde_json::from_str::<PythonLogMessage>(&line.unwrap()) {
                log.trace();
            }
        }
    });

    loop {
        if let Some(status) = download_process.poll() {
            match status {
                ExitStatus::Exited(exit_code) => {
                    if exit_code == 0 {
                        tracing::info!("Successfully downloaded weights.");
                        break;
                    } else {
                        let mut err = String::new();
                        download_process
                            .stderr
                            .take()
                            .unwrap()
                            .read_to_string(&mut err)
                            .unwrap();
                        tracing::error!("Download encountered an error: {err}");
                        return Err(LauncherError::DownloadError);
                    }
                }
                ExitStatus::Signaled(signal) => {
                    let mut err = String::new();
                    download_process
                        .stderr
                        .take()
                        .unwrap()
                        .read_to_string(&mut err)
                        .unwrap();
                    tracing::error!(
                        "Download process was signaled to shutdown with signal {signal}: {err}"
                    );
                    return Err(LauncherError::DownloadError);
                }
                e => {
                    tracing::error!("Download process exited with an unknown status.: {e:?}");
                    return Err(LauncherError::DownloadError);
                }
            }
        }
        if !running.load(Ordering::SeqCst) {
            download_process.terminate().unwrap();
            tracing::info!("Waiting for download process to gracefully shutdown");
            download_process
                .wait_timeout(Duration::from_secs(90))
                .unwrap();
            tracing::info!("Download process terminated");
            return Ok(());
        }
        sleep(Duration::from_millis(100));
    }
    Ok(())
}

#[allow(clippy::too_many_arguments)]
fn spawn_shards(
    num_shard: usize,
    args: &Args,
    shutdown: Arc<Mutex<bool>>,
    shutdown_receiver: &mpsc::Receiver<()>,
    shutdown_sender: mpsc::Sender<()>,
    status_receiver: &mpsc::Receiver<ShardStatus>,
    status_sender: mpsc::Sender<ShardStatus>,
    running: Arc<AtomicBool>,
) -> Result<(), LauncherError> {
    if args.trust_remote_code {
        tracing::warn!(
            "`trust_remote_code` is set. Trusting that model `{}` does not contain malicious code.",
            args.model_id
        );
        if args.revision.is_none() {
            tracing::warn!("Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure no malicious code has been contributed in a newer revision.");
        }
    }

    // Start shard processes
    for rank in 0..num_shard {
        let model_id = args.model_id.clone();
        let revision = args.revision.clone();
        let uds_path = args.shard_uds_path.clone();
        let master_addr = args.master_addr.clone();
        let huggingface_hub_cache = args.huggingface_hub_cache.clone();
        let weights_cache_override = args.weights_cache_override.clone();
        let status_sender = status_sender.clone();
        let shutdown = shutdown.clone();
        let shutdown_sender = shutdown_sender.clone();
        let otlp_endpoint = args.otlp_endpoint.clone();
        let quantize = args.quantize;
        let trust_remote_code = args.trust_remote_code;
        let master_port = args.master_port;
        let disable_custom_kernels = args.disable_custom_kernels;
        let watermark_gamma = args.watermark_gamma;
        let watermark_delta = args.watermark_delta;
        thread::spawn(move || {
            shard_manager(
                model_id,
                revision,
                quantize,
                trust_remote_code,
                uds_path,
                rank,
                num_shard,
                master_addr,
                master_port,
                huggingface_hub_cache,
                weights_cache_override,
                disable_custom_kernels,
                watermark_gamma,
                watermark_delta,
                otlp_endpoint,
                status_sender,
                shutdown,
                shutdown_sender,
            )
        });
    }
    drop(shutdown_sender);

    // Wait for the shards to start
    let mut shard_ready = 0;
    while running.load(Ordering::SeqCst) {
        match status_receiver.try_recv() {
            Ok(ShardStatus::Ready) => {
                shard_ready += 1;
                if shard_ready == num_shard {
                    break;
                }
            }
            Err(TryRecvError::Empty) => {
                sleep(Duration::from_millis(100));
            }
            Ok(ShardStatus::Failed((rank, err))) => {
                tracing::error!("Shard {} failed to start:\n{}", rank, err);
                shutdown_shards(shutdown, shutdown_receiver);
                return Err(LauncherError::ShardCannotStart);
            }
            Err(TryRecvError::Disconnected) => {
                tracing::error!("Shard status channel disconnected");
                shutdown_shards(shutdown, shutdown_receiver);
                return Err(LauncherError::ShardDisconnected);
            }
        }
    }
    Ok(())
}

fn spawn_webserver(
    args: Args,
    shutdown: Arc<Mutex<bool>>,
    shutdown_receiver: &mpsc::Receiver<()>,
) -> Result<Popen, LauncherError> {
    // All shards have started
    // Start webserver
    tracing::info!("Starting Webserver");
    let mut argv = vec![
        "text-generation-router".to_string(),
        "--max-concurrent-requests".to_string(),
        args.max_concurrent_requests.to_string(),
        "--max-best-of".to_string(),
        args.max_best_of.to_string(),
        "--max-stop-sequences".to_string(),
        args.max_stop_sequences.to_string(),
        "--max-input-length".to_string(),
        args.max_input_length.to_string(),
        "--max-total-tokens".to_string(),
        args.max_total_tokens.to_string(),
        "--max-batch-prefill-tokens".to_string(),
        args.max_batch_prefill_tokens.to_string(),
        "--max-batch-total-tokens".to_string(),
        args.max_batch_total_tokens.to_string(),
        "--waiting-served-ratio".to_string(),
        args.waiting_served_ratio.to_string(),
        "--max-waiting-tokens".to_string(),
        args.max_waiting_tokens.to_string(),
        "--port".to_string(),
        args.port.to_string(),
        "--master-shard-uds-path".to_string(),
        format!("{}-0", args.shard_uds_path),
        "--tokenizer-name".to_string(),
        args.model_id,
    ];

    // Model optional revision
    if let Some(ref revision) = args.revision {
        argv.push("--revision".to_string());
        argv.push(revision.to_string())
    }

    if args.json_output {
        argv.push("--json-output".to_string());
    }

    // OpenTelemetry
    if let Some(otlp_endpoint) = args.otlp_endpoint {
        argv.push("--otlp-endpoint".to_string());
        argv.push(otlp_endpoint);
    }

    // CORS origins
    for origin in args.cors_allow_origin.into_iter() {
        argv.push("--cors-allow-origin".to_string());
        argv.push(origin);
    }

    // Ngrok
    if args.ngrok {
        let authtoken = args.ngrok_authtoken.ok_or_else(|| {
            tracing::error!("`ngrok-authtoken` must be set when using ngrok tunneling");
            LauncherError::WebserverCannotStart
        })?;

        argv.push("--ngrok".to_string());
        argv.push("--ngrok-authtoken".to_string());
        argv.push(authtoken);

        if let Some(domain) = args.ngrok_domain {
            argv.push("--ngrok-domain".to_string());
            argv.push(domain);
        }

        if let (Some(username), Some(password)) = (args.ngrok_username, args.ngrok_password) {
            argv.push("--ngrok-username".to_string());
            argv.push(username);
            argv.push("--ngrok-password".to_string());
            argv.push(password);
        }
    }

    // Copy current process env
    let mut env: Vec<(OsString, OsString)> = env::vars_os().collect();

    // Parse Inference API token
    if let Ok(api_token) = env::var("HF_API_TOKEN") {
        env.push(("HUGGING_FACE_HUB_TOKEN".into(), api_token.into()))
    };

    let mut webserver = match Popen::create(
        &argv,
        PopenConfig {
            stdout: Redirection::Pipe,
            stderr: Redirection::Pipe,
            // Needed for the shutdown procedure
            setpgid: true,
            env: Some(env),
            ..Default::default()
        },
    ) {
        Ok(p) => p,
        Err(err) => {
            tracing::error!("Failed to start webserver: {}", err);
            if let PopenError::IoError(err) = err {
                if err.kind() == io::ErrorKind::NotFound {
                    tracing::error!("text-generation-router not found in PATH");
                    tracing::error!("Please install it with `make install-router`")
                }
            } else {
                tracing::error!("{}", err);
            }

            shutdown_shards(shutdown, shutdown_receiver);
            return Err(LauncherError::WebserverCannotStart);
        }
    };

    // Redirect STDOUT and STDERR to the console
    let webserver_stdout = webserver.stdout.take().unwrap();
    let webserver_stderr = webserver.stderr.take().unwrap();

    thread::spawn(move || {
        let stdout = BufReader::new(webserver_stdout);
        let stderr = BufReader::new(webserver_stderr);
        for line in stdout.lines() {
            println!("{}", line.unwrap());
        }
        for line in stderr.lines() {
            println!("{}", line.unwrap());
        }
    });
    Ok(webserver)
}

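// Launcher lifecycle: parse the configuration, download/convert the model weights,
// spawn one `shard_manager` per shard, start the webserver, then supervise all
// child processes until one fails or a termination signal arrives.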
fn main() -> Result<(), LauncherError> {
    // Parse the CLI configuration
    let args = Args::parse();

    if args.json_output {
        tracing_subscriber::fmt().json().init();
    } else {
        tracing_subscriber::fmt().compact().init();
    }

    if args.env {
        let env_runtime = env_runtime::Env::new();
        tracing::info!("{}", env_runtime);
    }

    tracing::info!("{:?}", args);

    let num_shard = find_num_shards(args.sharded, args.num_shard);
    if num_shard > 1 {
        tracing::info!("Sharding model on {num_shard} processes");
    }

    // Signal handler
    let running = Arc::new(AtomicBool::new(true));
    let r = running.clone();
    ctrlc::set_handler(move || {
        r.store(false, Ordering::SeqCst);
    })
    .expect("Error setting Ctrl-C handler");

    // Download and convert model weights
    download_convert_model(&args, running.clone())?;

    // Shared shutdown bool
    let shutdown = Arc::new(Mutex::new(false));
    // Shared shutdown channel
    // When shutting down, the main thread will wait for all senders to be dropped
    let (shutdown_sender, shutdown_receiver) = mpsc::channel();

    // Shared channel to track shard status
    let (status_sender, status_receiver) = mpsc::channel();

    spawn_shards(
        num_shard,
        &args,
        shutdown.clone(),
        &shutdown_receiver,
        shutdown_sender,
        &status_receiver,
        status_sender,
        running.clone(),
    )?;

    // We might have received a termination signal
    if !running.load(Ordering::SeqCst) {
        shutdown_shards(shutdown, &shutdown_receiver);
        return Ok(());
    }

    let mut webserver = spawn_webserver(args, shutdown.clone(), &shutdown_receiver)?;

    // Default exit code
    let mut exit_code = Ok(());

    while running.load(Ordering::SeqCst) {
        if let Ok(ShardStatus::Failed((rank, err))) = status_receiver.try_recv() {
            tracing::error!("Shard {rank} failed:\n{err}");
            exit_code = Err(LauncherError::ShardFailed);
            break;
        };

        match webserver.poll() {
            Some(_) => {
                tracing::error!("Webserver Crashed");
                shutdown_shards(shutdown, &shutdown_receiver);
                return Err(LauncherError::WebserverFailed);
            }
            None => {
                sleep(Duration::from_millis(100));
            }
        };
    }

    // Graceful termination
    webserver.terminate().unwrap();
    tracing::info!("Waiting for webserver to gracefully shutdown");
    webserver.wait_timeout(Duration::from_secs(90)).unwrap();
    tracing::info!("Webserver terminated");
    shutdown_shards(shutdown, &shutdown_receiver);

    exit_code
}