//! Launcher for text-generation-inference: downloads the model weights, then
//! spawns and supervises the `text-generation-server` shards and the
//! `text-generation-router` webserver.

use clap::{Parser, ValueEnum};
use nix::sys::signal::{self, Signal};
use nix::unistd::Pid;
use serde::Deserialize;
use std::env;
use std::ffi::OsString;
use std::io::{BufRead, BufReader, Lines};
use std::os::unix::process::{CommandExt, ExitStatusExt};
use std::path::Path;
use std::process::{Child, Command, ExitStatus, Stdio};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::TryRecvError;
use std::sync::{mpsc, Arc};
use std::thread;
use std::thread::sleep;
use std::time::{Duration, Instant};
use std::{fs, io};
use tracing_subscriber::EnvFilter;

mod env_runtime;

#[derive(Clone, Copy, Debug, ValueEnum)]
enum Quantization {
    /// 4 bit quantization. Requires a specific AWQ quantized model:
    ///   https://hf.co/models?search=awq.
    /// Should replace GPTQ models wherever possible because of its better latency.
    Awq,
    /// 8 bit quantization, doesn't require a specific model.
    /// Should be a drop-in replacement for bitsandbytes with much better performance.
    /// Kernels are from https://github.com/NetEase-FuXi/EETQ.git
    Eetq,
    /// 4 bit quantization. Requires a specific GPTQ quantized model: https://hf.co/models?search=gptq.
    /// text-generation-inference will use exllama (faster) kernels wherever possible, and the
    /// triton kernel (wider support) when it is not.
    /// AWQ has faster kernels.
    Gptq,
    /// Bitsandbytes 8bit. Can be applied to any model, will cut the memory requirement in half,
    /// but it is known that the model will be much slower to run than the native f16.
    #[deprecated(
        since = "1.1.0",
        note = "Use `eetq` instead, which provides better latencies overall and is drop-in in most cases"
    )]
    Bitsandbytes,
    /// Bitsandbytes 4bit. Can be applied to any model, will cut the memory requirement by 4x,
    /// but it is known that the model will be much slower to run than the native f16.
    BitsandbytesNF4,
    /// Bitsandbytes 4bit. nf4 should be preferred in most cases but maybe this one has better
    /// perplexity performance for your model.
    BitsandbytesFP4,
}

impl std::fmt::Display for Quantization {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Keep in sync with `server`.
        match self {
            #[allow(deprecated)]
            // Use `eetq` instead, which provides better latencies overall and is drop-in in most cases
            Quantization::Bitsandbytes => {
                write!(f, "bitsandbytes")
            }
            Quantization::BitsandbytesNF4 => {
                write!(f, "bitsandbytes-nf4")
            }
            Quantization::BitsandbytesFP4 => {
                write!(f, "bitsandbytes-fp4")
            }
            Quantization::Gptq => {
                write!(f, "gptq")
            }
            Quantization::Awq => {
                write!(f, "awq")
            }
            Quantization::Eetq => {
                write!(f, "eetq")
            }
        }
    }
}
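
// Sanity-check sketch: the strings produced above are exactly what the launcher
// forwards to the Python server via `--quantize`, so they must stay in sync with
// the server-side values.
#[cfg(test)]
mod quantization_display_tests {
    use super::*;

    #[test]
    fn display_matches_cli_values() {
        assert_eq!(Quantization::Awq.to_string(), "awq");
        assert_eq!(Quantization::Eetq.to_string(), "eetq");
        assert_eq!(Quantization::Gptq.to_string(), "gptq");
        assert_eq!(Quantization::BitsandbytesNF4.to_string(), "bitsandbytes-nf4");
        assert_eq!(Quantization::BitsandbytesFP4.to_string(), "bitsandbytes-fp4");
    }
}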

#[derive(Clone, Copy, Debug, ValueEnum)]
enum Dtype {
    Float16,
    #[clap(name = "bfloat16")]
    BFloat16,
}

impl std::fmt::Display for Dtype {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Keep in sync with `server`.
        match self {
            Dtype::Float16 => {
                write!(f, "float16")
            }
            Dtype::BFloat16 => {
                write!(f, "bfloat16")
            }
        }
    }
}

#[derive(Clone, Copy, Debug, ValueEnum)]
enum RopeScaling {
    Linear,
    Dynamic,
}

impl std::fmt::Display for RopeScaling {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Keep in sync with `server`.
        match self {
            RopeScaling::Linear => {
                write!(f, "linear")
            }
            RopeScaling::Dynamic => {
                write!(f, "dynamic")
            }
        }
    }
}

/// App Configuration
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
    /// The name of the model to load.
    /// Can be a MODEL_ID as listed on <https://hf.co/models> like
    /// `gpt2` or `OpenAssistant/oasst-sft-1-pythia-12b`.
    /// Or it can be a local directory containing the necessary files
    /// as saved by the `save_pretrained(...)` method of transformers
    #[clap(default_value = "bigscience/bloom-560m", long, env)]
    model_id: String,

    /// The actual revision of the model if you're referring to a model
    /// on the hub. You can use a specific commit id or a branch like `refs/pr/2`.
    #[clap(long, env)]
    revision: Option<String>,

    /// The number of tokenizer workers used for payload validation and truncation inside the
    /// router.
    #[clap(default_value = "2", long, env)]
    validation_workers: usize,

    /// Whether to shard the model across multiple GPUs
    /// By default text-generation-inference will use all available GPUs to run
    /// the model. Setting it to `false` deactivates `num_shard`.
    #[clap(long, env)]
    sharded: Option<bool>,

    /// The number of shards to use if you don't want to use all GPUs on a given machine.
    /// You can use `CUDA_VISIBLE_DEVICES=0,1 text-generation-launcher... --num_shard 2`
    /// and `CUDA_VISIBLE_DEVICES=2,3 text-generation-launcher... --num_shard 2` to
    /// launch 2 copies with 2 shards each on a given machine with 4 GPUs for instance.
    #[clap(long, env)]
    num_shard: Option<usize>,

    /// Whether you want the model to be quantized.
    #[clap(long, env, value_enum)]
    quantize: Option<Quantization>,

    /// The number of input_ids to speculate on.
    /// If using a medusa model, the heads will be picked up automatically.
    /// Otherwise, it will use n-gram speculation which is relatively free
    /// in terms of compute, but the speedup heavily depends on the task.
    #[clap(long, env)]
    speculate: Option<usize>,

    /// The dtype to be forced upon the model. This option cannot be used with `--quantize`.
    #[clap(long, env, value_enum)]
    dtype: Option<Dtype>,

    /// Whether you want to execute hub modelling code. Explicitly passing a `revision` is
    /// encouraged when loading a model with custom code to ensure no malicious code has been
    /// contributed in a newer revision.
    #[clap(long, env, value_enum)]
    trust_remote_code: bool,

    /// The maximum amount of concurrent requests for this particular deployment.
    /// Having a low limit will refuse client requests instead of having them
    /// wait for too long and is usually good to handle backpressure correctly.
    #[clap(default_value = "128", long, env)]
    max_concurrent_requests: usize,

    /// This is the maximum allowed value for clients to set `best_of`.
    /// Best of makes `n` generations at the same time, and returns the best
    /// in terms of overall log probability over the entire generated sequence.
    #[clap(default_value = "2", long, env)]
    max_best_of: usize,

    /// This is the maximum allowed value for clients to set `stop_sequences`.
    /// Stop sequences are used to allow the model to stop on more than just
    /// the EOS token, and enable more complex "prompting" where users can preprompt
    /// the model in a specific way and define their "own" stop token aligned with
    /// their prompt.
    #[clap(default_value = "4", long, env)]
    max_stop_sequences: usize,

    /// This is the maximum allowed value for clients to set `top_n_tokens`.
    /// `top_n_tokens` is used to return information about the `n` most likely
    /// tokens at each generation step, instead of just the sampled token. This
    /// information can be used for downstream tasks like classification or
    /// ranking.
    #[clap(default_value = "5", long, env)]
    max_top_n_tokens: u32,

    /// This is the maximum allowed input length (expressed in number of tokens)
    /// for users. The larger this value, the longer prompt users can send which
    /// can impact the overall memory required to handle the load.
    /// Please note that some models have a finite range of sequences they can handle.
    #[clap(default_value = "1024", long, env)]
    max_input_length: usize,

    /// This is the most important value to set as it defines the "memory budget"
    /// of running client requests.
    /// Clients will send input sequences and ask to generate `max_new_tokens`
    /// on top. With a value of `1512` users can send either a prompt of
    /// `1000` and ask for `512` new tokens, or send a prompt of `1` and ask for
    /// `1511` max_new_tokens.
    /// The larger this value, the more memory each request will take in your RAM
    /// and the less effective batching can be.
    #[clap(default_value = "2048", long, env)]
    max_total_tokens: usize,

    /// This represents the ratio of waiting queries vs running queries where
    /// you want to start considering pausing the running queries to include the waiting
    /// ones into the same batch.
    /// `waiting_served_ratio=1.2` means when 12 queries are waiting and there's
    /// only 10 queries left in the current batch we check if we can fit those 12
    /// waiting queries into the batching strategy, and if yes, then batching happens,
    /// delaying the 10 running queries by a `prefill` run.
    ///
    /// This setting is only applied if there is room in the batch
    /// as defined by `max_batch_total_tokens`.
    #[clap(default_value = "1.2", long, env)]
    waiting_served_ratio: f32,

    /// Limits the number of tokens for the prefill operation.
    /// Since this operation takes the most memory and is compute bound, it is interesting
    /// to limit the number of requests that can be sent.
    #[clap(default_value = "4096", long, env)]
    max_batch_prefill_tokens: u32,

    /// **IMPORTANT** This is one critical control to allow maximum usage
    /// of the available hardware.
    ///
    /// This represents the total amount of potential tokens within a batch.
    /// When using padding (not recommended) this would be equivalent to
    /// `batch_size` * `max_total_tokens`.
    ///
    /// However in the non-padded (flash attention) version this can be much finer.
    ///
    /// For `max_batch_total_tokens=1000`, you could fit `10` queries of `total_tokens=100`
    /// or a single query of `1000` tokens.
    ///
    /// Overall this number should be the largest possible amount that fits the
    /// remaining memory (after the model is loaded). Since the actual memory overhead
    /// depends on other parameters like whether you're using quantization, flash attention
    /// or the model implementation, text-generation-inference cannot infer this number
    /// automatically.
    #[clap(long, env)]
    max_batch_total_tokens: Option<u32>,

    /// This setting defines how many tokens can be passed before forcing the waiting
    /// queries to be put on the batch (if the size of the batch allows for it).
    /// New queries require 1 `prefill` forward, which is different from `decode`
    /// and therefore you need to pause the running batch in order to run `prefill`
    /// to create the correct values for the waiting queries to be able to join the batch.
    ///
    /// With a value too small, queries will always "steal" the compute to run `prefill`
    /// and running queries will be delayed by a lot.
    ///
    /// With a value too big, waiting queries could wait for a very long time
    /// before being allowed a slot in the running batch. If your server is busy
    /// that means that requests that could run in ~2s on an empty server could
    /// end up running in ~20s because the query had to wait for 18s.
    ///
    /// This number is expressed in number of tokens to make it a bit more
    /// "model" agnostic, but what should really matter is the overall latency
    /// for end users.
    #[clap(default_value = "20", long, env)]
    max_waiting_tokens: usize,

    /// The IP address to listen on
    #[clap(default_value = "0.0.0.0", long, env)]
    hostname: String,

    /// The port to listen on.
    #[clap(default_value = "3000", long, short, env)]
    port: u16,

    /// The name of the socket for gRPC communication between the webserver
    /// and the shards.
    #[clap(default_value = "/tmp/text-generation-server", long, env)]
    shard_uds_path: String,

    /// The address the master shard will listen on. (setting used by torch distributed)
    #[clap(default_value = "localhost", long, env)]
    master_addr: String,

    /// The port the master shard will listen on. (setting used by torch distributed)
    #[clap(default_value = "29500", long, env)]
    master_port: usize,

    /// The location of the huggingface hub cache.
    /// Used to override the location if you want to provide a mounted disk for instance
    #[clap(long, env)]
    huggingface_hub_cache: Option<String>,

    /// The location of the model weights cache.
    /// Used to override the location if you want to provide a mounted disk for instance
    #[clap(long, env)]
    weights_cache_override: Option<String>,

    /// For some models (like bloom), text-generation-inference implemented custom
    /// cuda kernels to speed up inference. Those kernels were only tested on A100.
    /// Use this flag to disable them if you're running on different hardware and
    /// encounter issues.
    #[clap(long, env)]
    disable_custom_kernels: bool,

    /// Limit the CUDA available memory.
    /// The allowed value equals the total visible memory multiplied by cuda-memory-fraction.
    /// For example, `--cuda-memory-fraction 0.5` lets each shard use at most half
    /// of the visible GPU memory.
    #[clap(default_value = "1.0", long, env)]
    cuda_memory_fraction: f32,

    /// Rope scaling will only be used for RoPE models
    /// and allows rescaling the position rotary to accommodate
    /// larger prompts.
    ///
    /// Goes together with `rope_factor`.
    ///
    /// `--rope-factor 2.0` gives linear scaling with a factor of 2.0
    /// `--rope-scaling dynamic` gives dynamic scaling with a factor of 1.0
    /// `--rope-scaling linear` gives linear scaling with a factor of 1.0 (nothing will be changed
    /// basically)
    ///
    /// `--rope-scaling linear --rope-factor` fully describes the scaling you want
    #[clap(long, env)]
    rope_scaling: Option<RopeScaling>,

    /// Rope scaling will only be used for RoPE models
    /// See `rope_scaling`
    #[clap(long, env)]
    rope_factor: Option<f32>,

    /// Outputs the logs in JSON format (useful for telemetry)
    #[clap(long, env)]
    json_output: bool,

    /// The OpenTelemetry OTLP endpoint to export traces to
    #[clap(long, env)]
    otlp_endpoint: Option<String>,

    /// Allowed CORS origins for the webserver
    #[clap(long, env)]
    cors_allow_origin: Vec<String>,

    /// Watermarking gamma, forwarded to the shards as `WATERMARK_GAMMA`
    #[clap(long, env)]
    watermark_gamma: Option<f32>,

    /// Watermarking delta, forwarded to the shards as `WATERMARK_DELTA`
    #[clap(long, env)]
    watermark_delta: Option<f32>,

    /// Enable ngrok tunneling
    #[clap(long, env)]
    ngrok: bool,

    /// ngrok authentication token
    #[clap(long, env)]
    ngrok_authtoken: Option<String>,

    /// ngrok edge
    #[clap(long, env)]
    ngrok_edge: Option<String>,

    /// The path to the tokenizer config file. This path is used to load the tokenizer configuration which may
    /// include a `chat_template`. If not provided, the default config will be used from the model hub.
    #[clap(long, env)]
    tokenizer_config_path: Option<String>,

    /// Display a lot of information about your runtime environment
    #[clap(long, short, action)]
    env: bool,
}
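
// Example invocation (illustrative values):
//
//   text-generation-launcher \
//       --model-id bigscience/bloom-560m \
//       --num-shard 2 \
//       --quantize bitsandbytes-nf4 \
//       --max-input-length 1024 \
//       --max-total-tokens 2048
//
// Thanks to `#[clap(long, env)]`, every flag can also be provided through the
// matching environment variable (`MODEL_ID`, `NUM_SHARD`, `QUANTIZE`, ...).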

#[derive(Debug)]
enum ShardStatus {
    Ready,
    Failed(usize),
}

/// Start a single shard process (`text-generation-server serve`) and supervise
/// it: forward its logs, report readiness or failure on `status_sender`, and
/// kill it when `shutdown` is set.
#[allow(clippy::too_many_arguments)]
fn shard_manager(
    model_id: String,
    revision: Option<String>,
    quantize: Option<Quantization>,
    speculate: Option<usize>,
    dtype: Option<Dtype>,
    trust_remote_code: bool,
    uds_path: String,
    rank: usize,
    world_size: usize,
    master_addr: String,
    master_port: usize,
    huggingface_hub_cache: Option<String>,
    weights_cache_override: Option<String>,
    disable_custom_kernels: bool,
    watermark_gamma: Option<f32>,
    watermark_delta: Option<f32>,
    cuda_memory_fraction: f32,
    rope_scaling: Option<RopeScaling>,
    rope_factor: Option<f32>,
    otlp_endpoint: Option<String>,
    status_sender: mpsc::Sender<ShardStatus>,
    shutdown: Arc<AtomicBool>,
    _shutdown_sender: mpsc::Sender<()>,
) {
    // Enter shard-manager tracing span
    let _span = tracing::span!(tracing::Level::INFO, "shard-manager", rank = rank).entered();

    // Get UDS path
    let uds_string = format!("{uds_path}-{rank}");
    let uds = Path::new(&uds_string);
    // Clean previous runs
    if uds.exists() {
        fs::remove_file(uds).unwrap();
    }

    // Process args
    let mut shard_args = vec![
        "serve".to_string(),
        model_id,
        "--uds-path".to_string(),
        uds_path,
        "--logger-level".to_string(),
        "INFO".to_string(),
        "--json-output".to_string(),
    ];

    // Activate trust remote code
    if trust_remote_code {
        shard_args.push("--trust-remote-code".to_string());
    }

    // Activate tensor parallelism
    if world_size > 1 {
        shard_args.push("--sharded".to_string());
    }

    if let Some(quantize) = quantize {
        shard_args.push("--quantize".to_string());
        shard_args.push(quantize.to_string())
    }

    if let Some(speculate) = speculate {
        shard_args.push("--speculate".to_string());
        shard_args.push(speculate.to_string())
    }

    if let Some(dtype) = dtype {
        shard_args.push("--dtype".to_string());
        shard_args.push(dtype.to_string())
    }

    // Model optional revision
    if let Some(revision) = revision {
        shard_args.push("--revision".to_string());
        shard_args.push(revision)
    }

    // Resolve the rope scaling configuration: the factor defaults to 1.0 when
    // only a scaling type is given, and the scaling type defaults to linear
    // when only a factor is given.
    let rope = match (rope_scaling, rope_factor) {
        (None, None) => None,
        (Some(scaling), None) => Some((scaling, 1.0)),
        (Some(scaling), Some(factor)) => Some((scaling, factor)),
        (None, Some(factor)) => Some((RopeScaling::Linear, factor)),
    };
    // OpenTelemetry
    if let Some(otlp_endpoint) = otlp_endpoint {
        shard_args.push("--otlp-endpoint".to_string());
        shard_args.push(otlp_endpoint);
    }

    // Copy current process env
    let mut envs: Vec<(OsString, OsString)> = env::vars_os().collect();

    // Torch Distributed Env vars
    envs.push(("RANK".into(), rank.to_string().into()));
    envs.push(("WORLD_SIZE".into(), world_size.to_string().into()));
    envs.push(("MASTER_ADDR".into(), master_addr.into()));
    envs.push(("MASTER_PORT".into(), master_port.to_string().into()));
    envs.push(("NCCL_ASYNC_ERROR_HANDLING".into(), "1".into()));

    // CUDA memory fraction
    envs.push((
        "CUDA_MEMORY_FRACTION".into(),
        cuda_memory_fraction.to_string().into(),
    ));

    // Safetensors load fast
    envs.push(("SAFETENSORS_FAST_GPU".into(), "1".into()));

    // Disable progress bar
    envs.push(("HF_HUB_DISABLE_PROGRESS_BARS".into(), "1".into()));

    // Enable hf transfer for insane download speeds
    let enable_hf_transfer = env::var("HF_HUB_ENABLE_HF_TRANSFER").unwrap_or("1".to_string());
    envs.push((
        "HF_HUB_ENABLE_HF_TRANSFER".into(),
        enable_hf_transfer.into(),
    ));

    // Parse Inference API token
    if let Ok(api_token) = env::var("HF_API_TOKEN") {
        envs.push(("HUGGING_FACE_HUB_TOKEN".into(), api_token.into()))
    };

    // Detect rope scaling
    // Sent as env vars instead of CLI args to avoid bloating everything:
    // rope scaling can only be used by RoPE models, so passing the information
    // around for all models would complexify the code unnecessarily
    if let Some((scaling, factor)) = rope {
        envs.push(("ROPE_SCALING".into(), scaling.to_string().into()));
        envs.push(("ROPE_FACTOR".into(), factor.to_string().into()));
    }

    // If huggingface_hub_cache is some, pass it to the shard
    // Useful when running inside a docker container
    if let Some(huggingface_hub_cache) = huggingface_hub_cache {
        envs.push(("HUGGINGFACE_HUB_CACHE".into(), huggingface_hub_cache.into()));
    };

    // If weights_cache_override is some, pass it to the shard
    // Useful when running inside a HuggingFace Inference Endpoint
    if let Some(weights_cache_override) = weights_cache_override {
        envs.push((
            "WEIGHTS_CACHE_OVERRIDE".into(),
            weights_cache_override.into(),
        ));
    };

    // If disable_custom_kernels is true, pass it to the shard as an env var
    if disable_custom_kernels {
        envs.push(("DISABLE_CUSTOM_KERNELS".into(), "True".into()))
    }

    // Watermark Gamma
    if let Some(watermark_gamma) = watermark_gamma {
        envs.push(("WATERMARK_GAMMA".into(), watermark_gamma.to_string().into()))
    }

    // Watermark Delta
    if let Some(watermark_delta) = watermark_delta {
        envs.push(("WATERMARK_DELTA".into(), watermark_delta.to_string().into()))
    }

    // Start process
    tracing::info!("Starting shard");
    let mut p = match Command::new("text-generation-server")
        .args(shard_args)
        .envs(envs)
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .process_group(0)
        .spawn()
    {
        Ok(p) => p,
        Err(err) => {
            if err.kind() == io::ErrorKind::NotFound {
                tracing::error!("text-generation-server not found in PATH");
                tracing::error!("Please install it with `make install-server`")
            } else {
                tracing::error!("{}", err);
            }

            status_sender.send(ShardStatus::Failed(rank)).unwrap();
            return;
        }
    };

    // Redirect STDOUT to the console
    let shard_stdout_reader = BufReader::new(p.stdout.take().unwrap());
    let shard_stderr_reader = BufReader::new(p.stderr.take().unwrap());

    // stdout tracing thread
    thread::spawn(move || {
        log_lines(shard_stdout_reader.lines());
    });

    // We read stderr in another thread as it seems that lines() can block in some cases
    let (err_sender, err_receiver) = mpsc::channel();
    thread::spawn(move || {
        for line in shard_stderr_reader.lines().flatten() {
            err_sender.send(line).unwrap_or(());
        }
    });

    let mut ready = false;
    let start_time = Instant::now();
    let mut wait_time = Instant::now();
    loop {
        // Process exited
        if let Some(exit_status) = p.try_wait().unwrap() {
            // Drain the stderr channel to recover the error output
            let mut err = String::new();
            while let Ok(line) = err_receiver.recv_timeout(Duration::from_millis(10)) {
                err = err + "\n" + &line;
            }

            tracing::error!("Shard complete standard error output:\n{err}");

            if let Some(signal) = exit_status.signal() {
                tracing::error!("Shard process was signaled to shutdown with signal {signal}");
            }

            status_sender.send(ShardStatus::Failed(rank)).unwrap();
            return;
        }

        // We received a shutdown signal
        if shutdown.load(Ordering::SeqCst) {
            p.kill().unwrap();
            let _ = p.wait();
            tracing::info!("Shard terminated");
            return;
        }

        // Shard is ready
        if uds.exists() && !ready {
            tracing::info!("Shard ready in {:?}", start_time.elapsed());
            status_sender.send(ShardStatus::Ready).unwrap();
            ready = true;
        } else if !ready && wait_time.elapsed() > Duration::from_secs(10) {
            tracing::info!("Waiting for shard to be ready...");
            wait_time = Instant::now();
        }
        sleep(Duration::from_millis(100));
    }
}

fn shutdown_shards(shutdown: Arc<AtomicBool>, shutdown_receiver: &mpsc::Receiver<()>) {
    tracing::info!("Shutting down shards");
    // Update shutdown value to true
    // This will be picked up by the shard manager
    shutdown.store(true, Ordering::SeqCst);

    // Wait for shards to shutdown
    // This will block until all shutdown senders are dropped
    let _ = shutdown_receiver.recv();
}

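/// Count the visible CUDA devices by parsing `CUDA_VISIBLE_DEVICES` (falling
/// back to `NVIDIA_VISIBLE_DEVICES`), e.g. `CUDA_VISIBLE_DEVICES=0,1` yields
/// `Some(2)`. Returns `None` when neither variable is set.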
fn num_cuda_devices() -> Option<usize> {
    let devices = match env::var("CUDA_VISIBLE_DEVICES") {
        Ok(devices) => devices,
        Err(_) => env::var("NVIDIA_VISIBLE_DEVICES").ok()?,
    };
    let n_devices = devices.split(',').count();
    Some(n_devices)
}

#[derive(Deserialize)]
#[serde(rename_all = "UPPERCASE")]
enum PythonLogLevelEnum {
    Trace,
    Debug,
    Info,
    Success,
    Warning,
    Error,
    Critical,
}

#[derive(Deserialize)]
struct PythonLogLevel {
    name: PythonLogLevelEnum,
}

#[derive(Deserialize)]
struct PythonLogRecord {
    level: PythonLogLevel,
}

#[derive(Deserialize)]
struct PythonLogMessage {
    text: String,
    record: PythonLogRecord,
}

impl PythonLogMessage {
    fn trace(&self) {
        match self.record.level.name {
            PythonLogLevelEnum::Trace => tracing::trace!("{}", self.text),
            PythonLogLevelEnum::Debug => tracing::debug!("{}", self.text),
            PythonLogLevelEnum::Info => tracing::info!("{}", self.text),
            PythonLogLevelEnum::Success => tracing::info!("{}", self.text),
            PythonLogLevelEnum::Warning => tracing::warn!("{}", self.text),
            PythonLogLevelEnum::Error => tracing::error!("{}", self.text),
            PythonLogLevelEnum::Critical => tracing::error!("{}", self.text),
        }
    }
}

impl TryFrom<&String> for PythonLogMessage {
    type Error = serde_json::Error;

    fn try_from(value: &String) -> Result<Self, Self::Error> {
        serde_json::from_str::<Self>(value)
    }
}

fn log_lines<S: Sized + BufRead>(lines: Lines<S>) {
    for line in lines.flatten() {
        match PythonLogMessage::try_from(&line) {
            Ok(log) => log.trace(),
            Err(_) => tracing::debug!("{line}"),
        }
    }
}
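
// For reference, the shard and download processes are started with
// `--logger-level INFO --json-output`, so each stdout line is expected to be a
// loguru-style JSON record. A minimal line that the structs above accept:
//
//   {"text": "Server started", "record": {"level": {"name": "INFO"}}}
//
// Extra fields are ignored by serde, and lines that fail to parse are logged
// verbatim at DEBUG level.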

fn find_num_shards(
    sharded: Option<bool>,
    num_shard: Option<usize>,
) -> Result<usize, LauncherError> {
    // get the number of shards given `sharded` and `num_shard`
    let num_shard = match (sharded, num_shard) {
        (Some(true), None) => {
            // try to default to the number of available GPUs
            tracing::info!("Parsing num_shard from CUDA_VISIBLE_DEVICES/NVIDIA_VISIBLE_DEVICES");
            let n_devices = num_cuda_devices()
                .expect("--num-shard and CUDA_VISIBLE_DEVICES/NVIDIA_VISIBLE_DEVICES are not set");
            if n_devices <= 1 {
                return Err(LauncherError::NotEnoughCUDADevices(format!(
                    "`sharded` is true but only found {n_devices} CUDA devices"
                )));
            }
            n_devices
        }
        (Some(true), Some(num_shard)) => {
            // we can't have only one shard while sharded
            if num_shard <= 1 {
                return Err(LauncherError::ArgumentValidation(
                    "`sharded` is true but `num_shard` <= 1".to_string(),
                ));
            }
            num_shard
        }
        (Some(false), Some(num_shard)) => num_shard,
        (Some(false), None) => 1,
        (None, None) => num_cuda_devices().unwrap_or(1),
        (None, Some(num_shard)) => num_shard,
    };
    if num_shard < 1 {
        return Err(LauncherError::ArgumentValidation(
            "`num_shard` cannot be < 1".to_string(),
        ));
    }
    Ok(num_shard)
}
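
// Illustrative checks of the resolution rules above (a sketch; these cases never
// read the CUDA environment variables):
#[cfg(test)]
mod find_num_shards_tests {
    use super::*;

    #[test]
    fn explicit_values_win() {
        assert!(matches!(find_num_shards(None, Some(4)), Ok(4)));
        assert!(matches!(find_num_shards(Some(false), None), Ok(1)));
        // `sharded` requires more than one shard
        assert!(find_num_shards(Some(true), Some(1)).is_err());
    }
}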

#[derive(Debug)]
enum LauncherError {
    ArgumentValidation(String),
    NotEnoughCUDADevices(String),
    DownloadError,
    ShardCannotStart,
    ShardDisconnected,
    ShardFailed,
    WebserverFailed,
    WebserverCannotStart,
}

/// Run `text-generation-server download-weights` to fetch (and convert to
/// safetensors) the model weights before the shards start.
fn download_convert_model(args: &Args, running: Arc<AtomicBool>) -> Result<(), LauncherError> {
    // Enter download tracing span
    let _span = tracing::span!(tracing::Level::INFO, "download").entered();

    let mut download_args = vec![
        "download-weights".to_string(),
        args.model_id.to_string(),
        "--extension".to_string(),
        ".safetensors".to_string(),
        "--logger-level".to_string(),
        "INFO".to_string(),
        "--json-output".to_string(),
    ];

    // Model optional revision
    if let Some(revision) = &args.revision {
        download_args.push("--revision".to_string());
        download_args.push(revision.to_string())
    }

    // Trust remote code for automatic peft fusion
    if args.trust_remote_code {
        download_args.push("--trust-remote-code".to_string());
    }

    // Copy current process env
    let mut envs: Vec<(OsString, OsString)> = env::vars_os().collect();

    // Disable progress bar
    envs.push(("HF_HUB_DISABLE_PROGRESS_BARS".into(), "1".into()));

    // If huggingface_hub_cache is set, pass it to the download process
    // Useful when running inside a docker container
    if let Some(ref huggingface_hub_cache) = args.huggingface_hub_cache {
        envs.push(("HUGGINGFACE_HUB_CACHE".into(), huggingface_hub_cache.into()));
    };

    // Enable hf transfer for insane download speeds
    let enable_hf_transfer = env::var("HF_HUB_ENABLE_HF_TRANSFER").unwrap_or("1".to_string());
    envs.push((
        "HF_HUB_ENABLE_HF_TRANSFER".into(),
        enable_hf_transfer.into(),
    ));

    // Parse Inference API token
    if let Ok(api_token) = env::var("HF_API_TOKEN") {
        envs.push(("HUGGING_FACE_HUB_TOKEN".into(), api_token.into()))
    };

    // If args.weights_cache_override is some, pass it to the download process
    // Useful when running inside a HuggingFace Inference Endpoint
    if let Some(weights_cache_override) = &args.weights_cache_override {
        envs.push((
            "WEIGHTS_CACHE_OVERRIDE".into(),
            weights_cache_override.into(),
        ));
    };

    // Start process
    tracing::info!("Starting download process.");
    let mut download_process = match Command::new("text-generation-server")
        .args(download_args)
        .envs(envs)
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .process_group(0)
        .spawn()
    {
        Ok(p) => p,
        Err(err) => {
            if err.kind() == io::ErrorKind::NotFound {
                tracing::error!("text-generation-server not found in PATH");
                tracing::error!("Please install it with `make install-server`")
            } else {
                tracing::error!("{}", err);
            }

            return Err(LauncherError::DownloadError);
        }
    };

    let download_stdout = BufReader::new(download_process.stdout.take().unwrap());

    thread::spawn(move || {
        log_lines(download_stdout.lines());
    });

    let download_stderr = BufReader::new(download_process.stderr.take().unwrap());

    // We read stderr in another thread as it seems that lines() can block in some cases
    let (err_sender, err_receiver) = mpsc::channel();
    thread::spawn(move || {
        for line in download_stderr.lines().flatten() {
            err_sender.send(line).unwrap_or(());
        }
    });

    loop {
        if let Some(status) = download_process.try_wait().unwrap() {
            if status.success() {
                tracing::info!("Successfully downloaded weights.");
                break;
            }

            let mut err = String::new();
            while let Ok(line) = err_receiver.recv_timeout(Duration::from_millis(10)) {
                err = err + "\n" + &line;
            }

            if let Some(signal) = status.signal() {
                tracing::error!(
                    "Download process was signaled to shutdown with signal {signal}: {err}"
                );
            } else {
                tracing::error!("Download encountered an error: {err}");
            }

            return Err(LauncherError::DownloadError);
        }
        if !running.load(Ordering::SeqCst) {
            terminate("download", download_process, Duration::from_secs(10)).unwrap();
            return Ok(());
        }
        sleep(Duration::from_millis(100));
    }
    Ok(())
}

/// Spawn one `shard_manager` thread per rank and block until every shard
/// reports ready (or one of them fails).
#[allow(clippy::too_many_arguments)]
fn spawn_shards(
    num_shard: usize,
    args: &Args,
    shutdown: Arc<AtomicBool>,
    shutdown_receiver: &mpsc::Receiver<()>,
    shutdown_sender: mpsc::Sender<()>,
    status_receiver: &mpsc::Receiver<ShardStatus>,
    status_sender: mpsc::Sender<ShardStatus>,
    running: Arc<AtomicBool>,
) -> Result<(), LauncherError> {
    // Start shard processes
    for rank in 0..num_shard {
        let model_id = args.model_id.clone();
        let revision = args.revision.clone();
        let uds_path = args.shard_uds_path.clone();
        let master_addr = args.master_addr.clone();
        let huggingface_hub_cache = args.huggingface_hub_cache.clone();
        let weights_cache_override = args.weights_cache_override.clone();
        let status_sender = status_sender.clone();
        let shutdown = shutdown.clone();
        let shutdown_sender = shutdown_sender.clone();
        let otlp_endpoint = args.otlp_endpoint.clone();
        let quantize = args.quantize;
        let speculate = args.speculate;
        let dtype = args.dtype;
        let trust_remote_code = args.trust_remote_code;
        let master_port = args.master_port;
        let disable_custom_kernels = args.disable_custom_kernels;
        let watermark_gamma = args.watermark_gamma;
        let watermark_delta = args.watermark_delta;
        let cuda_memory_fraction = args.cuda_memory_fraction;
        let rope_scaling = args.rope_scaling;
        let rope_factor = args.rope_factor;
        thread::spawn(move || {
            shard_manager(
                model_id,
                revision,
                quantize,
                speculate,
                dtype,
                trust_remote_code,
                uds_path,
                rank,
                num_shard,
                master_addr,
                master_port,
                huggingface_hub_cache,
                weights_cache_override,
                disable_custom_kernels,
                watermark_gamma,
                watermark_delta,
                cuda_memory_fraction,
                rope_scaling,
                rope_factor,
                otlp_endpoint,
                status_sender,
                shutdown,
                shutdown_sender,
            )
        });
    }
    drop(shutdown_sender);

    // Wait for the shards to start
    let mut shard_ready = 0;
    while running.load(Ordering::SeqCst) {
        match status_receiver.try_recv() {
            Ok(ShardStatus::Ready) => {
                shard_ready += 1;
                if shard_ready == num_shard {
                    break;
                }
            }
            Err(TryRecvError::Empty) => {
                sleep(Duration::from_millis(100));
            }
            Ok(ShardStatus::Failed(rank)) => {
                tracing::error!("Shard {rank} failed to start");
                shutdown_shards(shutdown, shutdown_receiver);
                return Err(LauncherError::ShardCannotStart);
            }
            Err(TryRecvError::Disconnected) => {
                tracing::error!("Shard status channel disconnected");
                shutdown_shards(shutdown, shutdown_receiver);
                return Err(LauncherError::ShardDisconnected);
            }
        }
    }
    Ok(())
}

/// Query `nvidia-smi` for the first GPU's name and derive a compute type string
/// of the form `{num_shard}-{lowercased-gpu-name}`; returns `None` when
/// `nvidia-smi` is unavailable or its output cannot be parsed.
fn compute_type(num_shard: usize) -> Option<String> {
    let output = Command::new("nvidia-smi")
        .args(["--query-gpu=gpu_name", "--format=csv"])
        .output()
        .ok()?;
    let output = String::from_utf8(output.stdout).ok()?;
    let fullname = output.split('\n').nth(1)?;
    let cardname = fullname.replace(' ', "-").to_lowercase();
    let compute_type = format!("{num_shard}-{cardname}");
    Some(compute_type)
}
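
// For example, with `num_shard = 2` on a host whose `nvidia-smi` reports
// "NVIDIA A10G", this yields the compute type "2-nvidia-a10g".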

fn spawn_webserver(
    num_shard: usize,
    args: Args,
    shutdown: Arc<AtomicBool>,
    shutdown_receiver: &mpsc::Receiver<()>,
) -> Result<Child, LauncherError> {
    // All shards started
    // Start webserver
    tracing::info!("Starting Webserver");
    let mut router_args = vec![
        "--max-concurrent-requests".to_string(),
        args.max_concurrent_requests.to_string(),
        "--max-best-of".to_string(),
        args.max_best_of.to_string(),
        "--max-stop-sequences".to_string(),
        args.max_stop_sequences.to_string(),
        "--max-top-n-tokens".to_string(),
        args.max_top_n_tokens.to_string(),
        "--max-input-length".to_string(),
        args.max_input_length.to_string(),
        "--max-total-tokens".to_string(),
        args.max_total_tokens.to_string(),
        "--max-batch-prefill-tokens".to_string(),
        args.max_batch_prefill_tokens.to_string(),
        "--waiting-served-ratio".to_string(),
        args.waiting_served_ratio.to_string(),
        "--max-waiting-tokens".to_string(),
        args.max_waiting_tokens.to_string(),
        "--validation-workers".to_string(),
        args.validation_workers.to_string(),
        "--hostname".to_string(),
        args.hostname.to_string(),
        "--port".to_string(),
        args.port.to_string(),
        "--master-shard-uds-path".to_string(),
        format!("{}-0", args.shard_uds_path),
        "--tokenizer-name".to_string(),
        args.model_id,
    ];

    // Tokenizer config path
    if let Some(ref tokenizer_config_path) = args.tokenizer_config_path {
        router_args.push("--tokenizer-config-path".to_string());
        router_args.push(tokenizer_config_path.to_string());
    }

    // Model optional max batch total tokens
    if let Some(max_batch_total_tokens) = args.max_batch_total_tokens {
        router_args.push("--max-batch-total-tokens".to_string());
        router_args.push(max_batch_total_tokens.to_string());
    }

    // Model optional revision
    if let Some(ref revision) = args.revision {
        router_args.push("--revision".to_string());
        router_args.push(revision.to_string())
    }

    if args.json_output {
        router_args.push("--json-output".to_string());
    }

    // OpenTelemetry
    if let Some(otlp_endpoint) = args.otlp_endpoint {
        router_args.push("--otlp-endpoint".to_string());
        router_args.push(otlp_endpoint);
    }

    // CORS origins
    for origin in args.cors_allow_origin.into_iter() {
        router_args.push("--cors-allow-origin".to_string());
        router_args.push(origin);
    }

    // Ngrok
    if args.ngrok {
        router_args.push("--ngrok".to_string());
        router_args.push("--ngrok-authtoken".to_string());
        router_args.push(args.ngrok_authtoken.unwrap());
        router_args.push("--ngrok-edge".to_string());
        router_args.push(args.ngrok_edge.unwrap());
    }

    // Copy current process env
    let mut envs: Vec<(OsString, OsString)> = env::vars_os().collect();

    // Parse Inference API token
    if let Ok(api_token) = env::var("HF_API_TOKEN") {
        envs.push(("HUGGING_FACE_HUB_TOKEN".into(), api_token.into()))
    };

    // Parse Compute type
    if let Ok(compute_type) = env::var("COMPUTE_TYPE") {
        envs.push(("COMPUTE_TYPE".into(), compute_type.into()))
    } else if let Some(compute_type) = compute_type(num_shard) {
        envs.push(("COMPUTE_TYPE".into(), compute_type.into()))
    }

    let mut webserver = match Command::new("text-generation-router")
        .args(router_args)
        .envs(envs)
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .process_group(0)
        .spawn()
    {
        Ok(p) => p,
        Err(err) => {
            tracing::error!("Failed to start webserver: {}", err);
            if err.kind() == io::ErrorKind::NotFound {
                tracing::error!("text-generation-router not found in PATH");
                tracing::error!("Please install it with `make install-router`")
            } else {
                tracing::error!("{}", err);
            }

            shutdown_shards(shutdown, shutdown_receiver);
            return Err(LauncherError::WebserverCannotStart);
        }
    };

    // Redirect STDOUT and STDERR to the console
    let webserver_stdout = webserver.stdout.take().unwrap();
    let webserver_stderr = webserver.stderr.take().unwrap();

    thread::spawn(move || {
        let stdout = BufReader::new(webserver_stdout);
        let stderr = BufReader::new(webserver_stderr);
        for line in stdout.lines() {
            println!("{}", line.unwrap());
        }
        for line in stderr.lines() {
            println!("{}", line.unwrap());
        }
    });
    Ok(webserver)
}

/// Gracefully terminate a child process: send SIGTERM, wait up to `timeout` for
/// it to exit, then fall back to SIGKILL.
fn terminate(process_name: &str, mut process: Child, timeout: Duration) -> io::Result<ExitStatus> {
    tracing::info!("Terminating {process_name}");

    let terminate_time = Instant::now();
    signal::kill(Pid::from_raw(process.id() as i32), Signal::SIGTERM).unwrap();

    tracing::info!("Waiting for {process_name} to gracefully shutdown");

    while terminate_time.elapsed() < timeout {
        if let Some(status) = process.try_wait()? {
            tracing::info!("{process_name} terminated");
            return Ok(status);
        }
        sleep(Duration::from_millis(100));
    }

    tracing::info!("Killing {process_name}");

    process.kill()?;
    let exit_status = process.wait()?;

    tracing::info!("{process_name} killed");
    Ok(exit_status)
}

fn main() -> Result<(), LauncherError> {
    // Pattern match configuration
    let args: Args = Args::parse();

    // Filter events with LOG_LEVEL
    let env_filter =
        EnvFilter::try_from_env("LOG_LEVEL").unwrap_or_else(|_| EnvFilter::new("info"));

    if args.json_output {
        tracing_subscriber::fmt()
            .with_env_filter(env_filter)
            .json()
            .init();
    } else {
        tracing_subscriber::fmt()
            .with_env_filter(env_filter)
            .compact()
            .init();
    }

    if args.env {
        let env_runtime = env_runtime::Env::new();
        tracing::info!("{}", env_runtime);
    }

    tracing::info!("{:?}", args);

    // Validate args
    if args.max_input_length >= args.max_total_tokens {
        return Err(LauncherError::ArgumentValidation(
            "`max_input_length` must be < `max_total_tokens`".to_string(),
        ));
    }
    if args.max_input_length as u32 > args.max_batch_prefill_tokens {
        return Err(LauncherError::ArgumentValidation(format!(
            "`max_batch_prefill_tokens` must be >= `max_input_length`. Given: {} and {}",
            args.max_batch_prefill_tokens, args.max_input_length
        )));
    }

    if args.validation_workers == 0 {
        return Err(LauncherError::ArgumentValidation(
            "`validation_workers` must be > 0".to_string(),
        ));
    }

    if args.trust_remote_code {
        tracing::warn!(
            "`trust_remote_code` is set. Trusting that model `{}` does not contain malicious code.",
            args.model_id
        );
    }

    let num_shard = find_num_shards(args.sharded, args.num_shard)?;
    if num_shard > 1 {
        tracing::info!("Sharding model on {num_shard} processes");
    }

    if let Some(ref max_batch_total_tokens) = args.max_batch_total_tokens {
        if args.max_batch_prefill_tokens > *max_batch_total_tokens {
            return Err(LauncherError::ArgumentValidation(format!(
                "`max_batch_prefill_tokens` must be <= `max_batch_total_tokens`. Given: {} and {}",
                args.max_batch_prefill_tokens, max_batch_total_tokens
            )));
        }
        if args.max_total_tokens as u32 > *max_batch_total_tokens {
            return Err(LauncherError::ArgumentValidation(format!(
                "`max_total_tokens` must be <= `max_batch_total_tokens`. Given: {} and {}",
                args.max_total_tokens, max_batch_total_tokens
            )));
        }
    }

    if args.ngrok {
        if args.ngrok_authtoken.is_none() {
            return Err(LauncherError::ArgumentValidation(
                "`ngrok-authtoken` must be set when using ngrok tunneling".to_string(),
            ));
        }

        if args.ngrok_edge.is_none() {
            return Err(LauncherError::ArgumentValidation(
                "`ngrok-edge` must be set when using ngrok tunneling".to_string(),
            ));
        }
    }

    // Signal handler
    let running = Arc::new(AtomicBool::new(true));
    let r = running.clone();
    ctrlc::set_handler(move || {
        r.store(false, Ordering::SeqCst);
    })
    .expect("Error setting Ctrl-C handler");

    // Download and convert model weights
    download_convert_model(&args, running.clone())?;

    if !running.load(Ordering::SeqCst) {
        // Launcher was asked to stop
        return Ok(());
    }

    // Shared shutdown bool
    let shutdown = Arc::new(AtomicBool::new(false));
    // Shared shutdown channel
    // When shutting down, the main thread will wait for all senders to be dropped
    let (shutdown_sender, shutdown_receiver) = mpsc::channel();

    // Shared channel to track shard status
    let (status_sender, status_receiver) = mpsc::channel();

    spawn_shards(
        num_shard,
        &args,
        shutdown.clone(),
        &shutdown_receiver,
        shutdown_sender,
        &status_receiver,
        status_sender,
        running.clone(),
    )?;

    // We might have received a termination signal
    if !running.load(Ordering::SeqCst) {
        shutdown_shards(shutdown, &shutdown_receiver);
        return Ok(());
    }

    let mut webserver = spawn_webserver(num_shard, args, shutdown.clone(), &shutdown_receiver)
        .map_err(|err| {
            shutdown_shards(shutdown.clone(), &shutdown_receiver);
            err
        })?;

    // Default exit code
    let mut exit_code = Ok(());

    while running.load(Ordering::SeqCst) {
        if let Ok(ShardStatus::Failed(rank)) = status_receiver.try_recv() {
            tracing::error!("Shard {rank} crashed");
            exit_code = Err(LauncherError::ShardFailed);
            break;
        };

        match webserver.try_wait().unwrap() {
            Some(_) => {
                tracing::error!("Webserver Crashed");
                shutdown_shards(shutdown, &shutdown_receiver);
                return Err(LauncherError::WebserverFailed);
            }
            None => {
                sleep(Duration::from_millis(100));
            }
        };
    }

    // Graceful termination
    terminate("webserver", webserver, Duration::from_secs(90)).unwrap();
    shutdown_shards(shutdown, &shutdown_receiver);

    exit_code
}