use crate::v3::{pb, Chunk};
use crate::{ClientError, Result, WARMUP_IMAGE_BASE64};
/// Single shard Client
use base64::engine::general_purpose::STANDARD;
use base64::Engine;
use grpc_metadata::InjectTelemetryContext;
use pb::generate::v3::text_generation_service_client::TextGenerationServiceClient;
use pb::generate::v3::*;
use std::cmp::min;
use std::time::Duration;
use tonic::transport::{Channel, Uri};
use tracing::instrument;

/// Text Generation Inference gRPC client
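///
/// Typical usage (an illustrative sketch; the socket path is an assumption, not part of this module):
/// ```ignore
/// let mut client = Client::connect_uds("/tmp/text-generation-server-0".to_string()).await?;
/// let info = client.info().await?;
/// ```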
#[derive(Debug, Clone)]
pub struct Client {
    stub: TextGenerationServiceClient<Channel>,
}

impl Client {
    /// Returns a client connected to the given url
    pub async fn connect(uri: Uri) -> Result<Self> {
        let channel = Channel::builder(uri).connect().await?;

        Ok(Self {
            stub: TextGenerationServiceClient::new(channel),
        })
    }

    /// Returns a client connected to the given unix socket
    pub async fn connect_uds(path: String) -> Result<Self> {
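        // Note: the "http://[::]:50051" URI below is only a placeholder required by
        // `Channel::from_shared`; the custom connector ignores it and dials the unix socket instead.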
        let channel = Channel::from_shared("http://[::]:50051".to_string())
            .unwrap()
            .connect_with_connector(tower::service_fn(move |_: Uri| {
                tokio::net::UnixStream::connect(path.clone())
            }))
            .await?;

        Ok(Self {
            stub: TextGenerationServiceClient::new(channel),
        })
    }

    /// Returns a list of uris or unix sockets of all shards
    #[instrument(skip(self))]
    pub async fn service_discovery(&mut self) -> Result<Vec<String>> {
        let request = tonic::Request::new(ServiceDiscoveryRequest {}).inject_context();
        let response = self.stub.service_discovery(request).await.map_err(|_| {
            ClientError::Connection("Server does not support v3 interface".to_string())
        })?;
        let urls = response
            .into_inner()
            .urls
            .into_iter()
            // Remove unix socket prefix
            .map(|url| match url.strip_prefix("unix://") {
                None => url,
                Some(stripped_url) => stripped_url.to_string(),
            })
            .collect();
        Ok(urls)
    }

    /// Get model info
    #[instrument(skip(self))]
    pub async fn info(&mut self) -> Result<InfoResponse> {
        let request = tonic::Request::new(InfoRequest {}).inject_context();
        let response = self.stub.info(request).await?.into_inner();
        Ok(response)
    }

    /// Get model health
    #[instrument(skip(self))]
    pub async fn health(&mut self) -> Result<HealthResponse> {
        let request = tonic::Request::new(HealthRequest {}).inject_context();
        let response = self.stub.health(request).await?.into_inner();
        Ok(response)
    }

    /// Clear the past generations cache
    #[instrument(skip(self))]
    pub async fn clear_cache(&mut self, batch_id: Option<u64>) -> Result<()> {
        let request = tonic::Request::new(ClearCacheRequest { id: batch_id }).inject_context();
        self.stub.clear_cache(request).await?;
        Ok(())
    }

    /// Filter a cached batch
    #[instrument(skip(self))]
    pub async fn filter_batch(
        &mut self,
        batch_id: u64,
        request_ids: Vec<u64>,
    ) -> Result<Option<CachedBatch>> {
        let request = tonic::Request::new(FilterBatchRequest {
            batch_id,
            request_ids,
        })
        .inject_context();
        let filtered_batch = self.stub.filter_batch(request).await?.into_inner();
        Ok(filtered_batch.batch)
    }

    /// Warmup on a max size batch
    ///
    /// Returns the maximum number of tokens supported by the hardware, together with the
    /// resolved maximum input and maximum total token counts
    #[instrument(skip_all)]
    pub async fn warmup(
        &mut self,
        max_input_tokens: Option<u32>,
        max_prefill_tokens: u32,
        max_total_tokens: Option<u32>,
        max_batch_size: Option<usize>,
    ) -> Result<(Option<u32>, u32, u32)> {
        let mut n_tokens = 0;
        let mut requests = Vec::new();
        // Create requests
        while n_tokens < max_prefill_tokens {
            let mut truncate = max_prefill_tokens - n_tokens;
            if let Some(max_input_tokens) = max_input_tokens {
                truncate = min(max_input_tokens, truncate);
            }

            let mut input_chunks = Vec::new();
            input_chunks.push(Chunk::Text("_test ".to_string().repeat(truncate as usize)).into());
            if n_tokens == 0 {
                input_chunks.push(
                    Chunk::Image(Image {
                        // Safe unwrap, because we control the data.
                        data: STANDARD.decode(WARMUP_IMAGE_BASE64).unwrap(),
                        mimetype: "image/jpeg;base64".to_string(),
                    })
                    .into(),
                );
            }

            // Send stringly-typed inputs for compatibility with backends that haven't
            // been updated to support chunks.

            let mut inputs = String::new();
            inputs.push_str(&"_test ".to_string().repeat(truncate as usize));
            if n_tokens == 0 {
                // 1 request is enough to test vision heads.
                // Sending images on other requests easily interferes with truncation.
                inputs.push_str(&format!(
                    "![](data:image/jpeg;base64,{WARMUP_IMAGE_BASE64})",
                ));
            }

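            // If max_total_tokens is known, request the full remaining decode budget for this
            // prompt; otherwise a single new token is enough for the warmup request.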
            let max_new_tokens = if let Some(max_total_tokens) = max_total_tokens {
                max_total_tokens - truncate
            } else {
                1
            };

            requests.push(Request {
                id: 0,
                inputs,
                input_chunks: Some(Input {
                    chunks: input_chunks,
                }),
                // We truncate the input on the server side to be sure that it has the correct size
                truncate,
                // Most requests will have that
                add_special_tokens: true,
                // Blocks and slots will be set on the server side if we use paged attention
                blocks: vec![],
                slots: vec![],
                cache_len: 0,
                chunk_len: None,
                // Set sampling parameters so these ops are also taken into account in the max memory
                parameters: Some(NextTokenChooserParameters {
                    temperature: 0.9,
                    top_k: 10,
                    top_p: 0.9,
                    typical_p: 0.9,
                    do_sample: false,
                    seed: 0,
                    repetition_penalty: 1.2,
                    frequency_penalty: 0.1,
                    watermark: true,
                    grammar: String::new(),
                    grammar_type: GrammarType::None as i32,
                }),
                stopping_parameters: Some(StoppingCriteriaParameters {
                    max_new_tokens,
                    stop_sequences: vec![],
                    ignore_eos_token: true,
                }),
                prefill_logprobs: true,
                top_n_tokens: 20,
                adapter_id: None,
            });
            n_tokens += truncate;

            // Check max_batch_size
            if Some(requests.len()) == max_batch_size {
                break;
            }
        }

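        // Assemble all synthetic requests into a single warmup batch sent to the shard.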
        let batch = Batch {
            id: 0,
            size: requests.len() as u32,
            requests,
            max_tokens: max_input_tokens.unwrap_or(0),
            max_blocks: 0,
        };

        let request = tonic::Request::new(WarmupRequest {
            batch: Some(batch),
            max_input_tokens,
            max_prefill_tokens,
            max_total_tokens,
        })
        .inject_context();
        let response = self.stub.warmup(request).await?.into_inner();
        Ok((
            response.max_supported_total_tokens,
            response.max_input_tokens,
            response.max_total_tokens,
        ))
    }

    /// Generate one token for each request in the given batch
    ///
    /// Returns a Generation for each request in the batch
    /// and the next cached batch
    #[instrument(skip_all, fields(id = &batch.id, size = &batch.size))]
    pub async fn prefill(
        &mut self,
        batch: Batch,
        cached_batch: Option<CachedBatch>,
    ) -> Result<(Vec<Generation>, Option<CachedBatch>, PrefillTimings)> {
        let request = tonic::Request::new(PrefillRequest {
            batch: Some(batch),
            cached_batch,
        })
        .inject_context();
        let response = self.stub.prefill(request).await?.into_inner();
        Ok((
            response.generations,
            response.batch,
            PrefillTimings::new(response.forward_ns, response.decode_ns, response.total_ns),
        ))
    }

    /// Generate one token for each request in the given cached batches
    ///
    /// Returns a Generation for each request in the batches
    /// and the next cached batch
    #[instrument(skip_all, fields(size = batches.iter().map(|batch|{batch.size}).sum::<u32>()))]
    pub async fn decode(
        &mut self,
        batches: Vec<CachedBatch>,
    ) -> Result<(Vec<Generation>, Option<CachedBatch>, DecodeTimings)> {
        let request = tonic::Request::new(DecodeRequest { batches }).inject_context();
        let response = self.stub.decode(request).await?.into_inner();
        Ok((
            response.generations,
            response.batch,
            DecodeTimings::new(
                response.concat_ns,
                response.forward_ns,
                response.decode_ns,
                response.total_ns,
            ),
        ))
    }
}
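
// Illustrative call sequence (a sketch, not part of this module): a scheduler would
// typically `prefill` a fresh `Batch` once, then `decode` the returned `CachedBatch`
// repeatedly until every request in it has finished, e.g.:
//   let (generations, cached, _timings) = client.prefill(batch, None).await?;
//   let (generations, cached, _timings) = client.decode(vec![cached.unwrap()]).await?;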

pub struct PrefillTimings {
    pub forward: Duration,
    pub decode: Duration,
    pub total: Duration,
}

impl PrefillTimings {
    fn new(forward_ns: u64, decode_ns: u64, total_ns: u64) -> Self {
        Self {
            forward: Duration::from_nanos(forward_ns),
            decode: Duration::from_nanos(decode_ns),
            total: Duration::from_nanos(total_ns),
        }
    }
}

pub struct DecodeTimings {
    pub concat: Option<Duration>,
    pub forward: Duration,
    pub decode: Duration,
    pub total: Duration,
}

impl DecodeTimings {
    fn new(concat_ns: Option<u64>, forward_ns: u64, decode_ns: u64, total_ns: u64) -> Self {
        Self {
            concat: concat_ns.map(Duration::from_nanos),
            forward: Duration::from_nanos(forward_ns),
            decode: Duration::from_nanos(decode_ns),
            total: Duration::from_nanos(total_ns),
        }
    }
}