/// Text Generation Inference webserver entrypoint
use axum::http::HeaderValue;
use clap::Parser;
use opentelemetry::sdk::propagation::TraceContextPropagator;
use opentelemetry::sdk::trace;
use opentelemetry::sdk::trace::Sampler;
use opentelemetry::sdk::Resource;
use opentelemetry::{global, KeyValue};
use opentelemetry_otlp::WithExportConfig;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::path::Path;
use text_generation_client::ShardedClient;
use text_generation_router::{server, ModelInfo};
use tokenizers::{FromPretrainedParameters, Tokenizer};
use tower_http::cors::AllowOrigin;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::{EnvFilter, Layer};

/// App Configuration
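/// Each flag can also be set through the matching environment variable (the
/// field name upper-cased, e.g. MAX_CONCURRENT_REQUESTS), via the `env`
/// attribute on every `#[clap]` annotation below.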
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
    #[clap(default_value = "128", long, env)]
    max_concurrent_requests: usize,
    #[clap(default_value = "2", long, env)]
    max_best_of: usize,
    #[clap(default_value = "4", long, env)]
    max_stop_sequences: usize,
    #[clap(default_value = "1000", long, env)]
    max_input_length: usize,
    #[clap(default_value = "1512", long, env)]
    max_total_tokens: usize,
    #[clap(default_value = "32", long, env)]
    max_batch_size: usize,
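    // A note on semantics (editor's assumption, not taken from this file):
    // this bounds how many tokens the running batch may generate before the
    // router tries to fold waiting requests into it.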
    #[clap(default_value = "20", long, env)]
    max_waiting_tokens: usize,
    #[clap(default_value = "3000", long, short, env)]
    port: u16,
    #[clap(default_value = "/tmp/text-generation-server-0", long, env)]
    master_shard_uds_path: String,
    #[clap(default_value = "bigscience/bloom", long, env)]
    tokenizer_name: String,
    #[clap(default_value = "main", long, env)]
    revision: String,
    #[clap(default_value = "2", long, env)]
    validation_workers: usize,
    #[clap(long, env)]
    json_output: bool,
    #[clap(long, env)]
    otlp_endpoint: Option<String>,
    #[clap(long, env)]
    cors_allow_origin: Option<Vec<String>>,
}
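
// Example invocation (illustrative values; assumes the binary is built as
// `text-generation-router`; every flag falls back to the defaults above, or
// to the matching environment variable, when omitted):
//
//   text-generation-router \
//       --tokenizer-name bigscience/bloom \
//       --port 3000 \
//       --max-input-length 1000 \
//       --max-total-tokens 1512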

fn main() -> Result<(), std::io::Error> {
    // Get args
    let args = Args::parse();
    // Pattern match configuration
    let Args {
        max_concurrent_requests,
        max_best_of,
        max_stop_sequences,
        max_input_length,
        max_total_tokens,
        max_batch_size,
        max_waiting_tokens,
        port,
        master_shard_uds_path,
        tokenizer_name,
        revision,
        validation_workers,
        json_output,
        otlp_endpoint,
        cors_allow_origin,
    } = args;

    if validation_workers == 0 {
        panic!("validation_workers must be > 0");
    }

    // CORS allowed origins
    // Map inside the Option to parse each String into a HeaderValue,
    // then collect them into an AllowOrigin
    let cors_allow_origin: Option<AllowOrigin> = cors_allow_origin.map(|cors_allow_origin| {
        AllowOrigin::list(
            cors_allow_origin
                .iter()
                .map(|origin| origin.parse::<HeaderValue>().unwrap()),
        )
    });
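    // Note: the `unwrap` above aborts startup if any configured origin is not
    // a valid HTTP header value.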

    // Tokenizer instance
    // This will only be used to validate payloads
    let local_path = Path::new(&tokenizer_name);
    let local_model = local_path.exists() && local_path.is_dir();
    let tokenizer = if local_model {
        // Load local tokenizer
        Tokenizer::from_file(local_path.join("tokenizer.json")).ok()
    } else {
        // Download and instantiate tokenizer
        // We need to download it outside of the Tokio runtime
        let params = FromPretrainedParameters {
            revision: revision.clone(),
            ..Default::default()
        };
        Tokenizer::from_pretrained(tokenizer_name.clone(), Some(params)).ok()
    };
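    // Both branches use `.ok()`: a missing or unreadable tokenizer yields
    // `None` and the server keeps running without Rust-side validation (see
    // the warning emitted below).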

    // Launch Tokio runtime
    tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .unwrap()
        .block_on(async {
            init_logging(otlp_endpoint, json_output);

            if tokenizer.is_none() {
                tracing::warn!(
                    "Could not find a fast tokenizer implementation for {tokenizer_name}"
                );
                tracing::warn!("Rust input length validation and truncation is disabled");
            }

            // Get model info
            let model_info = match local_model {
                true => ModelInfo {
                    model_id: tokenizer_name.clone(),
                    sha: None,
                    pipeline_tag: None,
                },
                false => get_model_info(&tokenizer_name, &revision).await,
            };

            // if pipeline-tag == text-generation we default to return_full_text = true
            let compat_return_full_text = match &model_info.pipeline_tag {
                None => {
                    tracing::warn!("no pipeline tag found for model {tokenizer_name}");
                    false
                }
                Some(pipeline_tag) => pipeline_tag.as_str() == "text-generation",
            };

            // Instantiate sharded client from the master unix socket
            let mut sharded_client = ShardedClient::connect_uds(master_shard_uds_path)
                .await
                .expect("Could not connect to server");
            // Clear the cache; useful if the webserver rebooted
            sharded_client
                .clear_cache(None)
                .await
                .expect("Unable to clear cache");
            tracing::info!("Connected");

            // Bind on all network interfaces, not just localhost
            let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), port);

            // Run server
            server::run(
                model_info,
                compat_return_full_text,
                max_concurrent_requests,
                max_best_of,
                max_stop_sequences,
                max_input_length,
                max_total_tokens,
                max_batch_size,
                max_waiting_tokens,
                sharded_client,
                tokenizer,
                validation_workers,
                addr,
                cors_allow_origin,
            )
            .await;
            Ok(())
        })
}

/// Init logging:
///     - otlp_endpoint is an optional URL to an OpenTelemetry collector
///     - json_output switches the stdout layer from human-readable text to flattened JSON
///     - the LOG_LEVEL env variable sets the filter and may be TRACE, DEBUG, INFO, WARN or ERROR (defaults to INFO)
fn init_logging(otlp_endpoint: Option<String>, json_output: bool) {
    let mut layers = Vec::new();

    // STDOUT/STDERR layer
    let fmt_layer = tracing_subscriber::fmt::layer()
        .with_file(true)
        .with_line_number(true);

    let fmt_layer = match json_output {
        true => fmt_layer.json().flatten_event(true).boxed(),
        false => fmt_layer.boxed(),
    };
    layers.push(fmt_layer);

    // OpenTelemetry tracing layer
    if let Some(otlp_endpoint) = otlp_endpoint {
        global::set_text_map_propagator(TraceContextPropagator::new());

        let tracer = opentelemetry_otlp::new_pipeline()
            .tracing()
            .with_exporter(
                opentelemetry_otlp::new_exporter()
                    .tonic()
                    .with_endpoint(otlp_endpoint),
            )
            .with_trace_config(
                trace::config()
                    .with_resource(Resource::new(vec![KeyValue::new(
                        "service.name",
                        "text-generation-inference.router",
                    )]))
                    .with_sampler(Sampler::AlwaysOn),
            )
            .install_batch(opentelemetry::runtime::Tokio);

        if let Ok(tracer) = tracer {
            layers.push(tracing_opentelemetry::layer().with_tracer(tracer).boxed());
            axum_tracing_opentelemetry::init_propagator().unwrap();
        };
    }

    // Filter events with LOG_LEVEL
    let env_filter =
        EnvFilter::try_from_env("LOG_LEVEL").unwrap_or_else(|_| EnvFilter::new("info"));

    tracing_subscriber::registry()
        .with(env_filter)
        .with(layers)
        .init();
}
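
// Example `LOG_LEVEL` filter (illustrative): keep everything at info but show
// debug logs for the router itself; `EnvFilter` accepts standard
// `target=level` directives.
//
//   LOG_LEVEL=info,text_generation_router=debug text-generation-router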

/// Get model info from the Hugging Face Hub
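/// Any network error or unparsable response aborts startup, since each step
/// uses `expect`.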
pub async fn get_model_info(model_id: &str, revision: &str) -> ModelInfo {
    let model_info = reqwest::get(format!(
        "https://huggingface.co/api/models/{model_id}/revision/{revision}"
    ))
    .await
    .expect("Could not connect to hf.co")
    .text()
    .await
    .expect("error when retrieving model info from hf.co");
    serde_json::from_str(&model_info).expect("unable to parse model info")
}