use clap::{Parser, ValueEnum};
use nix::sys::signal::{self, Signal};
use nix::unistd::Pid;
use serde::Deserialize;
use std::env;
use std::ffi::OsString;
use std::io::{BufRead, BufReader, Lines};
use std::os::unix::process::{CommandExt, ExitStatusExt};
use std::path::Path;
use std::process::{Child, Command, ExitStatus, Stdio};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::TryRecvError;
use std::sync::{mpsc, Arc};
use std::thread;
use std::thread::sleep;
use std::time::{Duration, Instant};
use std::{fs, io};
use tracing_subscriber::EnvFilter;

mod env_runtime;

#[derive(Clone, Copy, Debug, ValueEnum)]
enum Quantization {
    /// 4 bit quantization. Requires a specific AWQ quantized model:
    ///   https://hf.co/models?search=awq.
    /// Should replace GPTQ models wherever possible because of the better latency
    Awq,
    /// 8 bit quantization, doesn't require a specific model.
    /// Should be a drop-in replacement for bitsandbytes with much better performance.
    /// Kernels are from https://github.com/NetEase-FuXi/EETQ.git
    Eetq,
    /// 4 bit quantization. Requires a specific GPTQ quantized model: https://hf.co/models?search=gptq.
    /// text-generation-inference will use exllama (faster) kernels wherever possible, and use
    /// the triton kernel (wider support) when it's not.
    /// AWQ has faster kernels.
    Gptq,
    /// Bitsandbytes 8bit. Can be applied on any model, will cut the memory requirement in half,
    /// but it is known that the model will be much slower to run than the native f16.
    #[deprecated(
        since = "1.1.0",
        note = "Use `eetq` instead, which provides better latencies overall and is drop-in in most cases"
    )]
    Bitsandbytes,
    /// Bitsandbytes 4bit. Can be applied on any model, will cut the memory requirement by 4x,
    /// but it is known that the model will be much slower to run than the native f16.
    BitsandbytesNF4,
    /// Bitsandbytes 4bit. nf4 should be preferred in most cases, but this one may have better
    /// perplexity performance for your model.
    BitsandbytesFP4,
}

impl std::fmt::Display for Quantization {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Kept in sync with `server`.
        match self {
            #[allow(deprecated)]
            // Use `eetq` instead, which provides better latencies overall and is drop-in in most cases
            Quantization::Bitsandbytes => {
                write!(f, "bitsandbytes")
            }
            Quantization::BitsandbytesNF4 => {
                write!(f, "bitsandbytes-nf4")
            }
            Quantization::BitsandbytesFP4 => {
                write!(f, "bitsandbytes-fp4")
            }
            Quantization::Gptq => {
                write!(f, "gptq")
            }
            Quantization::Awq => {
                write!(f, "awq")
            }
            Quantization::Eetq => {
                write!(f, "eetq")
            }
        }
    }
}
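
// For reference: the strings above are forwarded verbatim to the Python server,
// e.g. `--quantize awq` ends up as `text-generation-server serve ... --quantize awq`
// via the `shard_args` built in `shard_manager` below.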

#[derive(Clone, Copy, Debug, ValueEnum)]
enum Dtype {
    Float16,
    #[clap(name = "bfloat16")]
    BFloat16,
}

impl std::fmt::Display for Dtype {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Kept in sync with `server`.
        match self {
            Dtype::Float16 => {
                write!(f, "float16")
            }
            Dtype::BFloat16 => {
                write!(f, "bfloat16")
            }
        }
    }
}

#[derive(Clone, Copy, Debug, ValueEnum)]
enum RopeScaling {
    Linear,
    Dynamic,
}

impl std::fmt::Display for RopeScaling {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Kept in sync with `server`.
        match self {
            RopeScaling::Linear => {
                write!(f, "linear")
            }
            RopeScaling::Dynamic => {
                write!(f, "dynamic")
            }
        }
    }
}

/// App Configuration
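///
/// Example invocation (illustrative values):
/// `text-generation-launcher --model-id bigscience/bloom-560m --num-shard 2 --port 3000`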
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
    /// The name of the model to load.
    /// Can be a MODEL_ID as listed on <https://hf.co/models> like
    /// `gpt2` or `OpenAssistant/oasst-sft-1-pythia-12b`.
    /// Or it can be a local directory containing the necessary files
    /// as saved by the `save_pretrained(...)` method of transformers.
    #[clap(default_value = "bigscience/bloom-560m", long, env)]
    model_id: String,

    /// The actual revision of the model if you're referring to a model
    /// on the hub. You can use a specific commit id or a branch like `refs/pr/2`.
    #[clap(long, env)]
    revision: Option<String>,

    /// The number of tokenizer workers used for payload validation and truncation inside the
    /// router.
    #[clap(default_value = "2", long, env)]
    validation_workers: usize,

    /// Whether to shard the model across multiple GPUs.
    /// By default text-generation-inference will use all available GPUs to run
    /// the model. Setting it to `false` deactivates `num_shard`.
    #[clap(long, env)]
    sharded: Option<bool>,

    /// The number of shards to use if you don't want to use all GPUs on a given machine.
    /// You can use `CUDA_VISIBLE_DEVICES=0,1 text-generation-launcher... --num-shard 2`
    /// and `CUDA_VISIBLE_DEVICES=2,3 text-generation-launcher... --num-shard 2` to
    /// launch 2 copies with 2 shards each on a given machine with 4 GPUs for instance.
    #[clap(long, env)]
    num_shard: Option<usize>,

    /// Whether you want the model to be quantized.
    #[clap(long, env, value_enum)]
    quantize: Option<Quantization>,

    /// The number of input_ids to speculate on.
    /// If using a medusa model, the heads will be picked up automatically.
    /// Otherwise, it will use n-gram speculation, which is relatively free
    /// in terms of compute, but the speedup heavily depends on the task.
    #[clap(long, env)]
    speculate: Option<usize>,

    /// The dtype to be forced upon the model. This option cannot be used with `--quantize`.
    #[clap(long, env, value_enum)]
    dtype: Option<Dtype>,

    /// Whether you want to execute hub modelling code. Explicitly passing a `revision` is
    /// encouraged when loading a model with custom code to ensure no malicious code has been
    /// contributed in a newer revision.
    #[clap(long, env, value_enum)]
    trust_remote_code: bool,

    /// The maximum number of concurrent requests for this particular deployment.
    /// Having a low limit will refuse client requests instead of having them
    /// wait for too long, and is usually good for handling backpressure correctly.
    #[clap(default_value = "128", long, env)]
    max_concurrent_requests: usize,

    /// This is the maximum allowed value for clients to set `best_of`.
    /// Best of makes `n` generations at the same time, and returns the best
    /// in terms of overall log probability over the entire generated sequence.
    #[clap(default_value = "2", long, env)]
    max_best_of: usize,

    /// This is the maximum allowed value for clients to set `stop_sequences`.
    /// Stop sequences are used to allow the model to stop on more than just
    /// the EOS token, and enable more complex "prompting" where users can preprompt
    /// the model in a specific way and define their "own" stop token aligned with
    /// their prompt.
    #[clap(default_value = "4", long, env)]
    max_stop_sequences: usize,

    /// This is the maximum allowed value for clients to set `top_n_tokens`.
    /// `top_n_tokens` is used to return information about the `n` most likely
    /// tokens at each generation step, instead of just the sampled token. This
    /// information can be used for downstream tasks like classification or
    /// ranking.
    #[clap(default_value = "5", long, env)]
    max_top_n_tokens: u32,

    /// This is the maximum allowed input length (expressed in number of tokens)
    /// for users. The larger this value, the longer the prompts users can send, which
    /// can impact the overall memory required to handle the load.
    /// Please note that some models have a finite range of sequences they can handle.
    #[clap(default_value = "1024", long, env)]
    max_input_length: usize,

    /// This is the most important value to set as it defines the "memory budget"
    /// of running client requests.
    /// Clients will send input sequences and ask to generate `max_new_tokens`
    /// on top. With a value of `1512`, users can send either a prompt of
    /// `1000` and ask for `512` new tokens, or send a prompt of `1` and ask for
    /// `1511` max_new_tokens.
    /// The larger this value, the larger each request will be in your RAM
    /// and the less effective batching can be.
    #[clap(default_value = "2048", long, env)]
    max_total_tokens: usize,

    /// This represents the ratio of waiting queries vs running queries where
    /// you want to start considering pausing the running queries to include the waiting
    /// ones into the same batch.
    /// `waiting_served_ratio=1.2` means that when 12 queries are waiting and there are
    /// only 10 queries left in the current batch, we check if we can fit those 12
    /// waiting queries into the batching strategy, and if yes, batching happens,
    /// delaying the 10 running queries by a `prefill` run.
    ///
    /// This setting is only applied if there is room in the batch
    /// as defined by `max_batch_total_tokens`.
    #[clap(default_value = "1.2", long, env)]
    waiting_served_ratio: f32,

    /// Limits the number of tokens for the prefill operation.
    /// Since this operation takes the most memory and is compute bound, it is useful
    /// to limit the number of requests that can be sent.
    #[clap(default_value = "4096", long, env)]
    max_batch_prefill_tokens: u32,

    /// **IMPORTANT** This is one critical control to allow maximum usage
    /// of the available hardware.
    ///
    /// This represents the total amount of potential tokens within a batch.
    /// When using padding (not recommended), this would be equivalent to
    /// `batch_size` * `max_total_tokens`.
    ///
    /// However, in the non-padded (flash attention) version, this can be much finer.
    ///
    /// For `max_batch_total_tokens=1000`, you could fit `10` queries of `total_tokens=100`
    /// or a single query of `1000` tokens.
    ///
    /// Overall this number should be the largest possible amount that fits the
    /// remaining memory (after the model is loaded). Since the actual memory overhead
    /// depends on other parameters like whether you're using quantization, flash attention,
    /// or the model implementation, text-generation-inference cannot infer this number
    /// automatically.
    #[clap(long, env)]
    max_batch_total_tokens: Option<u32>,

    /// This setting defines how many tokens can be passed before forcing the waiting
    /// queries to be put on the batch (if the size of the batch allows for it).
    /// New queries require 1 `prefill` forward, which is different from `decode`,
    /// and therefore you need to pause the running batch in order to run `prefill`
    /// to create the correct values for the waiting queries to be able to join the batch.
    ///
    /// With a value too small, queries will always "steal" the compute to run `prefill`,
    /// and running queries will be delayed by a lot.
    ///
    /// With a value too big, waiting queries could wait for a very long time
    /// before being allowed a slot in the running batch. If your server is busy
    /// that means that requests that could run in ~2s on an empty server could
    /// end up running in ~20s because the query had to wait for 18s.
    ///
    /// This number is expressed in number of tokens to make it a bit more
    /// "model" agnostic, but what should really matter is the overall latency
    /// for end users.
    #[clap(default_value = "20", long, env)]
    max_waiting_tokens: usize,

    /// The IP address to listen on
    #[clap(default_value = "0.0.0.0", long, env)]
    hostname: String,

    /// The port to listen on.
    #[clap(default_value = "3000", long, short, env)]
    port: u16,

    /// The name of the socket for gRPC communication between the webserver
    /// and the shards.
    #[clap(default_value = "/tmp/text-generation-server", long, env)]
    shard_uds_path: String,

    /// The address the master shard will listen on. (setting used by torch distributed)
    #[clap(default_value = "localhost", long, env)]
    master_addr: String,

    /// The port the master shard will listen on. (setting used by torch distributed)
    #[clap(default_value = "29500", long, env)]
    master_port: usize,

    /// The location of the huggingface hub cache.
    /// Used to override the location if you want to provide a mounted disk for instance.
    #[clap(long, env)]
    huggingface_hub_cache: Option<String>,

    /// The location of the weights cache override.
    /// Used to override where the downloaded weights are stored,
    /// for instance when running inside a HuggingFace Inference Endpoint.
    #[clap(long, env)]
    weights_cache_override: Option<String>,

    /// For some models (like bloom), text-generation-inference implemented custom
    /// cuda kernels to speed up inference. Those kernels were only tested on A100.
    /// Use this flag to disable them if you're running on different hardware and
    /// encounter issues.
    #[clap(long, env)]
    disable_custom_kernels: bool,

    /// Limit the CUDA available memory.
    /// The allowed value equals the total visible memory multiplied by cuda-memory-fraction.
    #[clap(default_value = "1.0", long, env)]
    cuda_memory_fraction: f32,

    /// Rope scaling will only be used for RoPE models
    /// and allows rescaling the position rotary to accommodate
    /// larger prompts.
    ///
    /// Goes together with `rope_factor`.
    ///
    /// `--rope-factor 2.0` gives linear scaling with a factor of 2.0
    /// `--rope-scaling dynamic` gives dynamic scaling with a factor of 1.0
    /// `--rope-scaling linear` gives linear scaling with a factor of 1.0 (nothing will be changed,
    /// basically)
    ///
    /// `--rope-scaling linear --rope-factor` fully describes the scaling you want
    #[clap(long, env)]
    rope_scaling: Option<RopeScaling>,

    /// Rope scaling will only be used for RoPE models
    /// See `rope_scaling`
    #[clap(long, env)]
    rope_factor: Option<f32>,

    /// Outputs the logs in JSON format (useful for telemetry)
    #[clap(long, env)]
    json_output: bool,

    #[clap(long, env)]
    otlp_endpoint: Option<String>,

    #[clap(long, env)]
    cors_allow_origin: Vec<String>,

    #[clap(long, env)]
    watermark_gamma: Option<f32>,
    #[clap(long, env)]
    watermark_delta: Option<f32>,

    /// Enable ngrok tunneling
    #[clap(long, env)]
    ngrok: bool,

    /// ngrok authentication token
    #[clap(long, env)]
    ngrok_authtoken: Option<String>,

    /// ngrok edge
    #[clap(long, env)]
    ngrok_edge: Option<String>,

    /// Display a lot of information about your runtime environment
    #[clap(long, short, action)]
    env: bool,
}
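
// Every flag above can also be set through the environment (note the `env`
// attribute in `#[clap(...)]`), e.g. `MODEL_ID=gpt2 text-generation-launcher`.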

#[derive(Debug)]
enum ShardStatus {
    Ready,
    Failed(usize),
}

#[allow(clippy::too_many_arguments)]
fn shard_manager(
    model_id: String,
    revision: Option<String>,
    quantize: Option<Quantization>,
    speculate: Option<usize>,
    dtype: Option<Dtype>,
    trust_remote_code: bool,
    uds_path: String,
    rank: usize,
    world_size: usize,
    master_addr: String,
    master_port: usize,
    huggingface_hub_cache: Option<String>,
    weights_cache_override: Option<String>,
    disable_custom_kernels: bool,
    watermark_gamma: Option<f32>,
    watermark_delta: Option<f32>,
    cuda_memory_fraction: f32,
    rope_scaling: Option<RopeScaling>,
    rope_factor: Option<f32>,
    otlp_endpoint: Option<String>,
    status_sender: mpsc::Sender<ShardStatus>,
    shutdown: Arc<AtomicBool>,
    _shutdown_sender: mpsc::Sender<()>,
) {
    // Enter shard-manager tracing span
    let _span = tracing::span!(tracing::Level::INFO, "shard-manager", rank = rank).entered();

    // Get UDS path
    let uds_string = format!("{uds_path}-{rank}");
    let uds = Path::new(&uds_string);
    // Clean previous runs
    if uds.exists() {
        fs::remove_file(uds).unwrap();
    }

    // Process args
    let mut shard_args = vec![
        "serve".to_string(),
        model_id,
        "--uds-path".to_string(),
        uds_path,
        "--logger-level".to_string(),
        "INFO".to_string(),
        "--json-output".to_string(),
    ];

    // Activate trust remote code
    if trust_remote_code {
        shard_args.push("--trust-remote-code".to_string());
    }

    // Activate tensor parallelism
    if world_size > 1 {
        shard_args.push("--sharded".to_string());
    }

    if let Some(quantize) = quantize {
        shard_args.push("--quantize".to_string());
        shard_args.push(quantize.to_string())
    }

    if let Some(speculate) = speculate {
        shard_args.push("--speculate".to_string());
        shard_args.push(speculate.to_string())
    }

    if let Some(dtype) = dtype {
        shard_args.push("--dtype".to_string());
        shard_args.push(dtype.to_string())
    }

    // Model optional revision
    if let Some(revision) = revision {
        shard_args.push("--revision".to_string());
        shard_args.push(revision)
    }

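    // Resolve the (scaling kind, factor) pair: a factor alone implies linear
    // scaling, and a scaling kind alone defaults to a factor of 1.0.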
    let rope = match (rope_scaling, rope_factor) {
        (None, None) => None,
        (Some(scaling), None) => Some((scaling, 1.0)),
        (Some(scaling), Some(factor)) => Some((scaling, factor)),
        (None, Some(factor)) => Some((RopeScaling::Linear, factor)),
    };

    // OpenTelemetry
    if let Some(otlp_endpoint) = otlp_endpoint {
        shard_args.push("--otlp-endpoint".to_string());
        shard_args.push(otlp_endpoint);
    }

    // Copy current process env
    let mut envs: Vec<(OsString, OsString)> = env::vars_os().collect();

    // Torch Distributed Env vars
    envs.push(("RANK".into(), rank.to_string().into()));
    envs.push(("WORLD_SIZE".into(), world_size.to_string().into()));
    envs.push(("MASTER_ADDR".into(), master_addr.into()));
    envs.push(("MASTER_PORT".into(), master_port.to_string().into()));
    envs.push(("NCCL_ASYNC_ERROR_HANDLING".into(), "1".into()));

    // CUDA memory fraction
    envs.push((
        "CUDA_MEMORY_FRACTION".into(),
        cuda_memory_fraction.to_string().into(),
    ));

    // Safetensors load fast
    envs.push(("SAFETENSORS_FAST_GPU".into(), "1".into()));

    // Disable progress bar
    envs.push(("HF_HUB_DISABLE_PROGRESS_BARS".into(), "1".into()));

    // Enable hf transfer for insane download speeds
    let enable_hf_transfer = env::var("HF_HUB_ENABLE_HF_TRANSFER").unwrap_or("1".to_string());
    envs.push((
        "HF_HUB_ENABLE_HF_TRANSFER".into(),
        enable_hf_transfer.into(),
    ));

    // Parse Inference API token
    if let Ok(api_token) = env::var("HF_API_TOKEN") {
        envs.push(("HUGGING_FACE_HUB_TOKEN".into(), api_token.into()))
    };

    // Detect rope scaling
    // Sent as env vars instead of CLI args to avoid bloating everything;
    // these can only be used by RoPE models, so passing the information around
    // for all models would complicate the code unnecessarily
    if let Some((scaling, factor)) = rope {
        envs.push(("ROPE_SCALING".into(), scaling.to_string().into()));
        envs.push(("ROPE_FACTOR".into(), factor.to_string().into()));
    }

    // If huggingface_hub_cache is some, pass it to the shard
    // Useful when running inside a docker container
    if let Some(huggingface_hub_cache) = huggingface_hub_cache {
        envs.push(("HUGGINGFACE_HUB_CACHE".into(), huggingface_hub_cache.into()));
    };

    // If weights_cache_override is some, pass it to the shard
    // Useful when running inside a HuggingFace Inference Endpoint
    if let Some(weights_cache_override) = weights_cache_override {
        envs.push((
            "WEIGHTS_CACHE_OVERRIDE".into(),
            weights_cache_override.into(),
        ));
    };

    // If disable_custom_kernels is true, pass it to the shard as an env var
    if disable_custom_kernels {
        envs.push(("DISABLE_CUSTOM_KERNELS".into(), "True".into()))
    }

    // Watermark Gamma
    if let Some(watermark_gamma) = watermark_gamma {
        envs.push(("WATERMARK_GAMMA".into(), watermark_gamma.to_string().into()))
    }

    // Watermark Delta
    if let Some(watermark_delta) = watermark_delta {
        envs.push(("WATERMARK_DELTA".into(), watermark_delta.to_string().into()))
    }

    // Start process
    tracing::info!("Starting shard");
    let mut p = match Command::new("text-generation-server")
        .args(shard_args)
        .envs(envs)
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .process_group(0)
        .spawn()
    {
        Ok(p) => p,
        Err(err) => {
            if err.kind() == io::ErrorKind::NotFound {
                tracing::error!("text-generation-server not found in PATH");
                tracing::error!("Please install it with `make install-server`")
            } else {
                tracing::error!("{}", err);
            }

            status_sender.send(ShardStatus::Failed(rank)).unwrap();
            return;
        }
    };

    // Redirect STDOUT to the console
    let shard_stdout_reader = BufReader::new(p.stdout.take().unwrap());
    let shard_stderr_reader = BufReader::new(p.stderr.take().unwrap());

    // stdout tracing thread
    thread::spawn(move || {
        log_lines(shard_stdout_reader.lines());
    });

    // We read stderr in another thread as it seems that lines() can block in some cases
    let (err_sender, err_receiver) = mpsc::channel();
    thread::spawn(move || {
        for line in shard_stderr_reader.lines().flatten() {
            err_sender.send(line).unwrap_or(());
        }
    });

    let mut ready = false;
    let start_time = Instant::now();
    let mut wait_time = Instant::now();
    loop {
        // Process exited
        if let Some(exit_status) = p.try_wait().unwrap() {
            let mut err = String::new();
            while let Ok(line) = err_receiver.recv_timeout(Duration::from_millis(10)) {
                err = err + "\n" + &line;
            }

            tracing::error!("Shard complete standard error output:\n{err}");

            if let Some(signal) = exit_status.signal() {
                tracing::error!("Shard process was signaled to shutdown with signal {signal}");
            }

            status_sender.send(ShardStatus::Failed(rank)).unwrap();
            return;
        }

        // We received a shutdown signal
        if shutdown.load(Ordering::SeqCst) {
            p.kill().unwrap();
            let _ = p.wait();
            tracing::info!("Shard terminated");
            return;
        }

        // Shard is ready
        if uds.exists() && !ready {
            tracing::info!("Shard ready in {:?}", start_time.elapsed());
            status_sender.send(ShardStatus::Ready).unwrap();
            ready = true;
        } else if !ready && wait_time.elapsed() > Duration::from_secs(10) {
            tracing::info!("Waiting for shard to be ready...");
            wait_time = Instant::now();
        }
        sleep(Duration::from_millis(100));
    }
}

fn shutdown_shards(shutdown: Arc<AtomicBool>, shutdown_receiver: &mpsc::Receiver<()>) {
    tracing::info!("Shutting down shards");
    // Update shutdown value to true
    // This will be picked up by the shard manager
    shutdown.store(true, Ordering::SeqCst);

    // Wait for shards to shutdown
    // This will block until all shutdown senders are dropped
    let _ = shutdown_receiver.recv();
}

fn num_cuda_devices() -> Option<usize> {
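    // e.g. `CUDA_VISIBLE_DEVICES=0,1` -> Some(2); falls back to
    // NVIDIA_VISIBLE_DEVICES and returns None if neither variable is set.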
    let devices = match env::var("CUDA_VISIBLE_DEVICES") {
        Ok(devices) => devices,
        Err(_) => env::var("NVIDIA_VISIBLE_DEVICES").ok()?,
    };
    let n_devices = devices.split(',').count();
    Some(n_devices)
}

#[derive(Deserialize)]
#[serde(rename_all = "UPPERCASE")]
enum PythonLogLevelEnum {
    Trace,
    Debug,
    Info,
    Success,
    Warning,
    Error,
    Critical,
}

#[derive(Deserialize)]
struct PythonLogLevel {
    name: PythonLogLevelEnum,
}

#[derive(Deserialize)]
struct PythonLogRecord {
    level: PythonLogLevel,
}

#[derive(Deserialize)]
struct PythonLogMessage {
    text: String,
    record: PythonLogRecord,
}

impl PythonLogMessage {
    fn trace(&self) {
        match self.record.level.name {
            PythonLogLevelEnum::Trace => tracing::trace!("{}", self.text),
            PythonLogLevelEnum::Debug => tracing::debug!("{}", self.text),
            PythonLogLevelEnum::Info => tracing::info!("{}", self.text),
            PythonLogLevelEnum::Success => tracing::info!("{}", self.text),
            PythonLogLevelEnum::Warning => tracing::warn!("{}", self.text),
            PythonLogLevelEnum::Error => tracing::error!("{}", self.text),
            PythonLogLevelEnum::Critical => tracing::error!("{}", self.text),
        }
    }
}

impl TryFrom<&String> for PythonLogMessage {
    type Error = serde_json::Error;

    fn try_from(value: &String) -> Result<Self, Self::Error> {
        serde_json::from_str::<Self>(value)
    }
}

fn log_lines<S: Sized + BufRead>(lines: Lines<S>) {
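    // Each line is expected to be a JSON log record such as (illustrative):
    //   {"text":"Server started","record":{"level":{"name":"INFO"}}}
    // Non-JSON lines fall through to `tracing::debug!` below.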
    for line in lines.flatten() {
        match PythonLogMessage::try_from(&line) {
            Ok(log) => log.trace(),
            Err(_) => tracing::debug!("{line}"),
        }
    }
}

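// Resolution examples: (sharded=None, num_shard=None) falls back to the number
// of visible CUDA devices (defaulting to 1), while (sharded=Some(true),
// num_shard=None) requires at least 2 visible devices.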
fn find_num_shards(
    sharded: Option<bool>,
    num_shard: Option<usize>,
) -> Result<usize, LauncherError> {
    // get the number of shards given `sharded` and `num_shard`
    let num_shard = match (sharded, num_shard) {
        (Some(true), None) => {
            // try to default to the number of available GPUs
            tracing::info!("Parsing num_shard from CUDA_VISIBLE_DEVICES/NVIDIA_VISIBLE_DEVICES");
            let n_devices = num_cuda_devices()
                .expect("--num-shard and CUDA_VISIBLE_DEVICES/NVIDIA_VISIBLE_DEVICES are not set");
            if n_devices <= 1 {
                return Err(LauncherError::NotEnoughCUDADevices(format!(
                    "`sharded` is true but only found {n_devices} CUDA devices"
                )));
            }
            n_devices
        }
        (Some(true), Some(num_shard)) => {
            // we can't have only one shard while sharded
            if num_shard <= 1 {
                return Err(LauncherError::ArgumentValidation(
                    "`sharded` is true but `num_shard` <= 1".to_string(),
                ));
            }
            num_shard
        }
        (Some(false), Some(num_shard)) => num_shard,
        (Some(false), None) => 1,
        (None, None) => num_cuda_devices().unwrap_or(1),
        (None, Some(num_shard)) => num_shard,
    };
    if num_shard < 1 {
        return Err(LauncherError::ArgumentValidation(
            "`num_shard` cannot be < 1".to_string(),
        ));
    }
    Ok(num_shard)
}

#[derive(Debug)]
enum LauncherError {
    ArgumentValidation(String),
    NotEnoughCUDADevices(String),
    DownloadError,
    ShardCannotStart,
    ShardDisconnected,
    ShardFailed,
    WebserverFailed,
    WebserverCannotStart,
}

fn download_convert_model(args: &Args, running: Arc<AtomicBool>) -> Result<(), LauncherError> {
    // Enter download tracing span
    let _span = tracing::span!(tracing::Level::INFO, "download").entered();

    let mut download_args = vec![
        "download-weights".to_string(),
        args.model_id.to_string(),
        "--extension".to_string(),
        ".safetensors".to_string(),
        "--logger-level".to_string(),
        "INFO".to_string(),
        "--json-output".to_string(),
    ];

    // Model optional revision
    if let Some(revision) = &args.revision {
        download_args.push("--revision".to_string());
        download_args.push(revision.to_string())
    }

    // Trust remote code for automatic peft fusion
    if args.trust_remote_code {
        download_args.push("--trust-remote-code".to_string());
    }

    // Copy current process env
    let mut envs: Vec<(OsString, OsString)> = env::vars_os().collect();

    // Disable progress bar
    envs.push(("HF_HUB_DISABLE_PROGRESS_BARS".into(), "1".into()));

    // If huggingface_hub_cache is set, pass it to the download process
    // Useful when running inside a docker container
    if let Some(ref huggingface_hub_cache) = args.huggingface_hub_cache {
        envs.push(("HUGGINGFACE_HUB_CACHE".into(), huggingface_hub_cache.into()));
    };

    // Enable hf transfer for insane download speeds
    let enable_hf_transfer = env::var("HF_HUB_ENABLE_HF_TRANSFER").unwrap_or("1".to_string());
    envs.push((
        "HF_HUB_ENABLE_HF_TRANSFER".into(),
        enable_hf_transfer.into(),
    ));

    // Parse Inference API token
    if let Ok(api_token) = env::var("HF_API_TOKEN") {
        envs.push(("HUGGING_FACE_HUB_TOKEN".into(), api_token.into()))
    };

    // If args.weights_cache_override is some, pass it to the download process
    // Useful when running inside a HuggingFace Inference Endpoint
    if let Some(weights_cache_override) = &args.weights_cache_override {
        envs.push((
            "WEIGHTS_CACHE_OVERRIDE".into(),
            weights_cache_override.into(),
        ));
    };

    // Start process
    tracing::info!("Starting download process.");
    let mut download_process = match Command::new("text-generation-server")
        .args(download_args)
        .envs(envs)
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .process_group(0)
        .spawn()
    {
        Ok(p) => p,
        Err(err) => {
            if err.kind() == io::ErrorKind::NotFound {
                tracing::error!("text-generation-server not found in PATH");
                tracing::error!("Please install it with `make install-server`")
            } else {
                tracing::error!("{}", err);
            }

            return Err(LauncherError::DownloadError);
        }
    };

    let download_stdout = BufReader::new(download_process.stdout.take().unwrap());

    thread::spawn(move || {
        log_lines(download_stdout.lines());
    });

    let download_stderr = BufReader::new(download_process.stderr.take().unwrap());

    // We read stderr in another thread as it seems that lines() can block in some cases
    let (err_sender, err_receiver) = mpsc::channel();
    thread::spawn(move || {
        for line in download_stderr.lines().flatten() {
            err_sender.send(line).unwrap_or(());
        }
    });

    loop {
        if let Some(status) = download_process.try_wait().unwrap() {
            if status.success() {
                tracing::info!("Successfully downloaded weights.");
                break;
            }

            let mut err = String::new();
            while let Ok(line) = err_receiver.recv_timeout(Duration::from_millis(10)) {
                err = err + "\n" + &line;
            }

            if let Some(signal) = status.signal() {
                tracing::error!(
                    "Download process was signaled to shutdown with signal {signal}: {err}"
                );
            } else {
                tracing::error!("Download encountered an error: {err}");
            }

            return Err(LauncherError::DownloadError);
        }
        if !running.load(Ordering::SeqCst) {
            terminate("download", download_process, Duration::from_secs(10)).unwrap();
            return Ok(());
        }
        sleep(Duration::from_millis(100));
    }
    Ok(())
}

#[allow(clippy::too_many_arguments)]
fn spawn_shards(
    num_shard: usize,
    args: &Args,
    shutdown: Arc<AtomicBool>,
    shutdown_receiver: &mpsc::Receiver<()>,
    shutdown_sender: mpsc::Sender<()>,
    status_receiver: &mpsc::Receiver<ShardStatus>,
    status_sender: mpsc::Sender<ShardStatus>,
    running: Arc<AtomicBool>,
) -> Result<(), LauncherError> {
    // Start shard processes
    for rank in 0..num_shard {
        let model_id = args.model_id.clone();
        let revision = args.revision.clone();
        let uds_path = args.shard_uds_path.clone();
        let master_addr = args.master_addr.clone();
        let huggingface_hub_cache = args.huggingface_hub_cache.clone();
        let weights_cache_override = args.weights_cache_override.clone();
        let status_sender = status_sender.clone();
        let shutdown = shutdown.clone();
        let shutdown_sender = shutdown_sender.clone();
        let otlp_endpoint = args.otlp_endpoint.clone();
        let quantize = args.quantize;
        let speculate = args.speculate;
        let dtype = args.dtype;
        let trust_remote_code = args.trust_remote_code;
        let master_port = args.master_port;
        let disable_custom_kernels = args.disable_custom_kernels;
        let watermark_gamma = args.watermark_gamma;
        let watermark_delta = args.watermark_delta;
        let cuda_memory_fraction = args.cuda_memory_fraction;
        let rope_scaling = args.rope_scaling;
        let rope_factor = args.rope_factor;
        thread::spawn(move || {
            shard_manager(
                model_id,
                revision,
                quantize,
                speculate,
                dtype,
                trust_remote_code,
                uds_path,
                rank,
                num_shard,
                master_addr,
                master_port,
                huggingface_hub_cache,
                weights_cache_override,
                disable_custom_kernels,
                watermark_gamma,
                watermark_delta,
                cuda_memory_fraction,
                rope_scaling,
                rope_factor,
                otlp_endpoint,
                status_sender,
                shutdown,
                shutdown_sender,
            )
        });
    }
    drop(shutdown_sender);

    // Wait for shard to start
    let mut shard_ready = 0;
    while running.load(Ordering::SeqCst) {
        match status_receiver.try_recv() {
            Ok(ShardStatus::Ready) => {
                shard_ready += 1;
                if shard_ready == num_shard {
                    break;
                }
            }
            Err(TryRecvError::Empty) => {
                sleep(Duration::from_millis(100));
            }
            Ok(ShardStatus::Failed(rank)) => {
                tracing::error!("Shard {rank} failed to start");
                shutdown_shards(shutdown, shutdown_receiver);
                return Err(LauncherError::ShardCannotStart);
            }
            Err(TryRecvError::Disconnected) => {
                tracing::error!("Shard status channel disconnected");
                shutdown_shards(shutdown, shutdown_receiver);
                return Err(LauncherError::ShardDisconnected);
            }
        }
    }
    Ok(())
}

fn spawn_webserver(
    args: Args,
    shutdown: Arc<AtomicBool>,
    shutdown_receiver: &mpsc::Receiver<()>,
) -> Result<Child, LauncherError> {
    // All shards started
    // Start webserver
    tracing::info!("Starting Webserver");
    let mut router_args = vec![
        "--max-concurrent-requests".to_string(),
        args.max_concurrent_requests.to_string(),
        "--max-best-of".to_string(),
        args.max_best_of.to_string(),
        "--max-stop-sequences".to_string(),
        args.max_stop_sequences.to_string(),
        "--max-top-n-tokens".to_string(),
        args.max_top_n_tokens.to_string(),
        "--max-input-length".to_string(),
        args.max_input_length.to_string(),
        "--max-total-tokens".to_string(),
        args.max_total_tokens.to_string(),
        "--max-batch-prefill-tokens".to_string(),
        args.max_batch_prefill_tokens.to_string(),
        "--waiting-served-ratio".to_string(),
        args.waiting_served_ratio.to_string(),
        "--max-waiting-tokens".to_string(),
        args.max_waiting_tokens.to_string(),
        "--validation-workers".to_string(),
        args.validation_workers.to_string(),
        "--hostname".to_string(),
        args.hostname.to_string(),
        "--port".to_string(),
        args.port.to_string(),
        "--master-shard-uds-path".to_string(),
        format!("{}-0", args.shard_uds_path),
        "--tokenizer-name".to_string(),
        args.model_id,
    ];

    // Model optional max batch total tokens
    if let Some(max_batch_total_tokens) = args.max_batch_total_tokens {
        router_args.push("--max-batch-total-tokens".to_string());
        router_args.push(max_batch_total_tokens.to_string());
    }

    // Model optional revision
    if let Some(ref revision) = args.revision {
        router_args.push("--revision".to_string());
        router_args.push(revision.to_string())
    }

    if args.json_output {
        router_args.push("--json-output".to_string());
    }

    // OpenTelemetry
    if let Some(otlp_endpoint) = args.otlp_endpoint {
        router_args.push("--otlp-endpoint".to_string());
        router_args.push(otlp_endpoint);
    }

    // CORS origins
    for origin in args.cors_allow_origin.into_iter() {
        router_args.push("--cors-allow-origin".to_string());
        router_args.push(origin);
    }

    // Ngrok
    if args.ngrok {
        router_args.push("--ngrok".to_string());
        router_args.push("--ngrok-authtoken".to_string());
        router_args.push(args.ngrok_authtoken.unwrap());
        router_args.push("--ngrok-edge".to_string());
        router_args.push(args.ngrok_edge.unwrap());
    }

    // Copy current process env
    let mut envs: Vec<(OsString, OsString)> = env::vars_os().collect();

    // Parse Inference API token
    if let Ok(api_token) = env::var("HF_API_TOKEN") {
        envs.push(("HUGGING_FACE_HUB_TOKEN".into(), api_token.into()))
    };

    let mut webserver = match Command::new("text-generation-router")
        .args(router_args)
        .envs(envs)
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .process_group(0)
        .spawn()
    {
        Ok(p) => p,
        Err(err) => {
            tracing::error!("Failed to start webserver: {}", err);
            if err.kind() == io::ErrorKind::NotFound {
                tracing::error!("text-generation-router not found in PATH");
                tracing::error!("Please install it with `make install-router`")
            } else {
                tracing::error!("{}", err);
            }

            shutdown_shards(shutdown, shutdown_receiver);
            return Err(LauncherError::WebserverCannotStart);
        }
    };

    // Redirect STDOUT and STDERR to the console
    let webserver_stdout = webserver.stdout.take().unwrap();
    let webserver_stderr = webserver.stderr.take().unwrap();

    thread::spawn(move || {
        let stdout = BufReader::new(webserver_stdout);
        let stderr = BufReader::new(webserver_stderr);
        for line in stdout.lines() {
            println!("{}", line.unwrap());
        }
        for line in stderr.lines() {
            println!("{}", line.unwrap());
        }
    });
    Ok(webserver)
}

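// Gracefully stop a child process: send SIGTERM, poll until `timeout` elapses,
// then fall back to SIGKILL via `Child::kill`.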
fn terminate(process_name: &str, mut process: Child, timeout: Duration) -> io::Result<ExitStatus> {
    tracing::info!("Terminating {process_name}");

    let terminate_time = Instant::now();
    signal::kill(Pid::from_raw(process.id() as i32), Signal::SIGTERM).unwrap();

    tracing::info!("Waiting for {process_name} to gracefully shutdown");

    while terminate_time.elapsed() < timeout {
        if let Some(status) = process.try_wait()? {
            tracing::info!("{process_name} terminated");
            return Ok(status);
        }
        sleep(Duration::from_millis(100));
    }

    tracing::info!("Killing {process_name}");

    process.kill()?;
    let exit_status = process.wait()?;

    tracing::info!("{process_name} killed");
    Ok(exit_status)
}

fn main() -> Result<(), LauncherError> {
    // Pattern match configuration
    let args: Args = Args::parse();

    // Filter events with LOG_LEVEL
    let env_filter =
        EnvFilter::try_from_env("LOG_LEVEL").unwrap_or_else(|_| EnvFilter::new("info"));

    if args.json_output {
        tracing_subscriber::fmt()
            .with_env_filter(env_filter)
            .json()
            .init();
    } else {
        tracing_subscriber::fmt()
            .with_env_filter(env_filter)
            .compact()
            .init();
    }

    if args.env {
        let env_runtime = env_runtime::Env::new();
        tracing::info!("{}", env_runtime);
    }

    tracing::info!("{:?}", args);

    // Validate args
    if args.max_input_length >= args.max_total_tokens {
        return Err(LauncherError::ArgumentValidation(
            "`max_input_length` must be < `max_total_tokens`".to_string(),
        ));
    }
    if args.max_input_length as u32 > args.max_batch_prefill_tokens {
        return Err(LauncherError::ArgumentValidation(format!(
            "`max_batch_prefill_tokens` must be >= `max_input_length`. Given: {} and {}",
            args.max_batch_prefill_tokens, args.max_input_length
        )));
    }

    if args.validation_workers == 0 {
        return Err(LauncherError::ArgumentValidation(
            "`validation_workers` must be > 0".to_string(),
        ));
    }

    if args.trust_remote_code {
        tracing::warn!(
            "`trust_remote_code` is set. Trusting that model `{}` does not contain malicious code.",
            args.model_id
        );
    }

    let num_shard = find_num_shards(args.sharded, args.num_shard)?;
    if num_shard > 1 {
        tracing::info!("Sharding model on {num_shard} processes");
    }

    if let Some(ref max_batch_total_tokens) = args.max_batch_total_tokens {
        if args.max_batch_prefill_tokens > *max_batch_total_tokens {
            return Err(LauncherError::ArgumentValidation(format!(
                "`max_batch_prefill_tokens` must be <= `max_batch_total_tokens`. Given: {} and {}",
                args.max_batch_prefill_tokens, max_batch_total_tokens
            )));
        }
        if args.max_total_tokens as u32 > *max_batch_total_tokens {
            return Err(LauncherError::ArgumentValidation(format!(
                "`max_total_tokens` must be <= `max_batch_total_tokens`. Given: {} and {}",
                args.max_total_tokens, max_batch_total_tokens
            )));
        }
    }

    if args.ngrok {
        if args.ngrok_authtoken.is_none() {
            return Err(LauncherError::ArgumentValidation(
                "`ngrok-authtoken` must be set when using ngrok tunneling".to_string(),
            ));
        }

        if args.ngrok_edge.is_none() {
            return Err(LauncherError::ArgumentValidation(
                "`ngrok-edge` must be set when using ngrok tunneling".to_string(),
            ));
        }
    }

    // Signal handler
    let running = Arc::new(AtomicBool::new(true));
    let r = running.clone();
    ctrlc::set_handler(move || {
        r.store(false, Ordering::SeqCst);
    })
    .expect("Error setting Ctrl-C handler");

    // Download and convert model weights
    download_convert_model(&args, running.clone())?;

    if !running.load(Ordering::SeqCst) {
        // Launcher was asked to stop
        return Ok(());
    }

    // Shared shutdown bool
    let shutdown = Arc::new(AtomicBool::new(false));
    // Shared shutdown channel
    // When shutting down, the main thread will wait for all senders to be dropped
    let (shutdown_sender, shutdown_receiver) = mpsc::channel();

    // Shared channel to track shard status
    let (status_sender, status_receiver) = mpsc::channel();

    spawn_shards(
        num_shard,
        &args,
        shutdown.clone(),
        &shutdown_receiver,
        shutdown_sender,
        &status_receiver,
        status_sender,
        running.clone(),
    )?;

    // We might have received a termination signal
    if !running.load(Ordering::SeqCst) {
        shutdown_shards(shutdown, &shutdown_receiver);
        return Ok(());
    }

    let mut webserver =
        spawn_webserver(args, shutdown.clone(), &shutdown_receiver).map_err(|err| {
            shutdown_shards(shutdown.clone(), &shutdown_receiver);
            err
        })?;

    // Default exit code
    let mut exit_code = Ok(());

    while running.load(Ordering::SeqCst) {
        if let Ok(ShardStatus::Failed(rank)) = status_receiver.try_recv() {
            tracing::error!("Shard {rank} crashed");
            exit_code = Err(LauncherError::ShardFailed);
            break;
        };

        match webserver.try_wait().unwrap() {
            Some(_) => {
                tracing::error!("Webserver Crashed");
                shutdown_shards(shutdown, &shutdown_receiver);
                return Err(LauncherError::WebserverFailed);
            }
            None => {
                sleep(Duration::from_millis(100));
            }
        };
    }

    // Graceful termination
    terminate("webserver", webserver, Duration::from_secs(90)).unwrap();
    shutdown_shards(shutdown, &shutdown_receiver);

    exit_code
}