client.rs 8.01 KB
Newer Older
Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
1
/// Single shard Client
Nicolas Patry's avatar
Nicolas Patry committed
2
3
use crate::pb::generate::v2::text_generation_service_client::TextGenerationServiceClient;
use crate::pb::generate::v2::*;
Olivier Dehaene's avatar
Init  
Olivier Dehaene committed
4
use crate::Result;
5
use grpc_metadata::InjectTelemetryContext;
6
use std::cmp::min;
7
use std::time::Duration;
Olivier Dehaene's avatar
Init  
Olivier Dehaene committed
8
use tonic::transport::{Channel, Uri};
9
use tracing::instrument;
Olivier Dehaene's avatar
Init  
Olivier Dehaene committed
10

Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
11
/// Text Generation Inference gRPC client
///
/// Cheap to clone: cloning duplicates the tonic stub, which shares the
/// underlying `Channel` connection.
#[derive(Debug, Clone)]
pub struct Client {
    // Generated tonic stub over the shard's gRPC channel
    stub: TextGenerationServiceClient<Channel>,
}

impl Client {
Olivier Dehaene's avatar
Olivier Dehaene committed
18
19
20
    /// Returns a client connected to the given url
    pub async fn connect(uri: Uri) -> Result<Self> {
        let channel = Channel::builder(uri).connect().await?;
Olivier Dehaene's avatar
Init  
Olivier Dehaene committed
21

Olivier Dehaene's avatar
Olivier Dehaene committed
22
23
24
        Ok(Self {
            stub: TextGenerationServiceClient::new(channel),
        })
Olivier Dehaene's avatar
Init  
Olivier Dehaene committed
25
26
    }

Olivier Dehaene's avatar
Olivier Dehaene committed
27
28
    /// Returns a client connected to the given unix socket
    pub async fn connect_uds(path: String) -> Result<Self> {
Olivier Dehaene's avatar
Olivier Dehaene committed
29
        let channel = Channel::from_shared("http://[::]:50051".to_string())
Olivier Dehaene's avatar
Init  
Olivier Dehaene committed
30
31
32
33
            .unwrap()
            .connect_with_connector(tower::service_fn(move |_: Uri| {
                tokio::net::UnixStream::connect(path.clone())
            }))
Olivier Dehaene's avatar
Olivier Dehaene committed
34
            .await?;
Olivier Dehaene's avatar
Init  
Olivier Dehaene committed
35

Olivier Dehaene's avatar
Olivier Dehaene committed
36
37
38
        Ok(Self {
            stub: TextGenerationServiceClient::new(channel),
        })
Olivier Dehaene's avatar
Init  
Olivier Dehaene committed
39
40
    }

Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
41
    /// Returns a list of uris or unix sockets of all shards
Olivier Dehaene's avatar
Init  
Olivier Dehaene committed
42
43
    #[instrument(skip(self))]
    pub async fn service_discovery(&mut self) -> Result<Vec<String>> {
44
45
        let request = tonic::Request::new(ServiceDiscoveryRequest {}).inject_context();
        let response = self.stub.service_discovery(request).await?;
Olivier Dehaene's avatar
Init  
Olivier Dehaene committed
46
47
48
49
        let urls = response
            .into_inner()
            .urls
            .into_iter()
Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
50
            // Remove unix socket prefix
Olivier Dehaene's avatar
Init  
Olivier Dehaene committed
51
52
53
54
55
56
57
58
            .map(|url| match url.strip_prefix("unix://") {
                None => url,
                Some(stripped_url) => stripped_url.to_string(),
            })
            .collect();
        Ok(urls)
    }

59
60
61
62
63
64
65
66
    /// Get model info
    #[instrument(skip(self))]
    pub async fn info(&mut self) -> Result<InfoResponse> {
        let request = tonic::Request::new(InfoRequest {}).inject_context();
        let response = self.stub.info(request).await?.into_inner();
        Ok(response)
    }

67
68
69
70
71
72
73
74
    /// Get model health
    #[instrument(skip(self))]
    pub async fn health(&mut self) -> Result<HealthResponse> {
        let request = tonic::Request::new(HealthRequest {}).inject_context();
        let response = self.stub.health(request).await?.into_inner();
        Ok(response)
    }

Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
75
    /// Clear the past generations cache
Olivier Dehaene's avatar
Init  
Olivier Dehaene committed
76
    #[instrument(skip(self))]
77
78
    pub async fn clear_cache(&mut self, batch_id: Option<u64>) -> Result<()> {
        let request = tonic::Request::new(ClearCacheRequest { id: batch_id }).inject_context();
79
        self.stub.clear_cache(request).await?;
Olivier Dehaene's avatar
Init  
Olivier Dehaene committed
80
81
82
        Ok(())
    }

83
84
85
86
87
    /// Filter a cached batch
    #[instrument(skip(self))]
    pub async fn filter_batch(
        &mut self,
        batch_id: u64,
88
89
        request_ids: Vec<u64>,
    ) -> Result<Option<CachedBatch>> {
90
91
        let request = tonic::Request::new(FilterBatchRequest {
            batch_id,
92
            request_ids,
93
94
95
96
97
98
        })
        .inject_context();
        let filtered_batch = self.stub.filter_batch(request).await?.into_inner();
        Ok(filtered_batch.batch)
    }

99
100
101
    /// Warmup on a max size batch
    ///
    /// Returns the maximum amount of tokens supported by the hardware
102
    #[instrument(skip_all)]
103
104
105
106
    pub async fn warmup(
        &mut self,
        max_input_length: u32,
        max_prefill_tokens: u32,
OlivierDehaene's avatar
OlivierDehaene committed
107
        max_total_tokens: u32,
108
        max_batch_size: Option<usize>,
109
    ) -> Result<Option<u32>> {
110
111
112
113
        let mut n_tokens = 0;
        let mut requests = Vec::new();
        // Create requests
        while n_tokens < max_prefill_tokens {
OlivierDehaene's avatar
OlivierDehaene committed
114
            let truncate = min(max_input_length, max_prefill_tokens - n_tokens);
115
116
117
118
            requests.push(Request {
                id: 0,
                // We truncate the input on the server side to be sure that it has the correct size
                inputs: "_test ".to_string().repeat(max_input_length as usize),
OlivierDehaene's avatar
OlivierDehaene committed
119
                truncate,
120
121
122
123
124
125
126
127
128
                // Set sampling parameters to also take these ops into account in the max memory
                parameters: Some(NextTokenChooserParameters {
                    temperature: 0.9,
                    top_k: 10,
                    top_p: 0.9,
                    typical_p: 0.9,
                    do_sample: false,
                    seed: 0,
                    repetition_penalty: 1.2,
129
                    frequency_penalty: 0.1,
130
                    watermark: true,
drbh's avatar
drbh committed
131
132
                    grammar: String::new(),
                    grammar_type: GrammarType::None as i32,
133
134
                }),
                stopping_parameters: Some(StoppingCriteriaParameters {
OlivierDehaene's avatar
OlivierDehaene committed
135
                    max_new_tokens: max_total_tokens - truncate,
136
                    stop_sequences: vec![],
OlivierDehaene's avatar
OlivierDehaene committed
137
                    ignore_eos_token: true,
138
139
                }),
                prefill_logprobs: true,
Nicolas Patry's avatar
Nicolas Patry committed
140
                top_n_tokens: 20,
141
142
            });
            n_tokens += max_input_length;
143
144
145
146
147

            // Check max_batch_size
            if Some(requests.len()) == max_batch_size {
                break;
            }
148
149
150
151
152
153
154
155
156
        }

        let batch = Batch {
            id: 0,
            size: requests.len() as u32,
            requests,
            max_tokens: 0,
        };

157
158
159
160
161
162
163
        let request = tonic::Request::new(WarmupRequest {
            batch: Some(batch),
            max_input_length,
            max_prefill_tokens,
            max_total_tokens,
        })
        .inject_context();
164
165
        let response = self.stub.warmup(request).await?.into_inner();
        Ok(response.max_supported_total_tokens)
166
167
    }

Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
168
169
    /// Generate one token for each request in the given batch
    ///
170
    /// Returns Generation for each request in batch
Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
171
    /// and the next cached batch
172
    #[instrument(skip_all, fields(id = &batch.id, size = &batch.size))]
173
174
175
    pub async fn prefill(
        &mut self,
        batch: Batch,
176
    ) -> Result<(Vec<Generation>, Option<CachedBatch>, PrefillTimings)> {
177
178
        let request = tonic::Request::new(PrefillRequest { batch: Some(batch) }).inject_context();
        let response = self.stub.prefill(request).await?.into_inner();
179
180
181
182
183
        Ok((
            response.generations,
            response.batch,
            PrefillTimings::new(response.forward_ns, response.decode_ns, response.total_ns),
        ))
Olivier Dehaene's avatar
Init  
Olivier Dehaene committed
184
185
    }

186
    /// Generate one token for each request in the given cached batches
Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
187
    ///
188
    /// Returns Generation for each request in batches
Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
189
    /// and the next cached batch
190
    #[instrument(skip_all, fields(size = batches.iter().map(|batch|{batch.size}).sum::<u32>()))]
191
    pub async fn decode(
Olivier Dehaene's avatar
Init  
Olivier Dehaene committed
192
        &mut self,
193
        batches: Vec<CachedBatch>,
194
    ) -> Result<(Vec<Generation>, Option<CachedBatch>, DecodeTimings)> {
195
196
        let request = tonic::Request::new(DecodeRequest { batches }).inject_context();
        let response = self.stub.decode(request).await?.into_inner();
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
        Ok((
            response.generations,
            response.batch,
            DecodeTimings::new(
                response.concat_ns,
                response.forward_ns,
                response.decode_ns,
                response.total_ns,
            ),
        ))
    }
}

/// Server-side wall-clock timings for one prefill call.
pub struct PrefillTimings {
    pub forward: Duration,
    pub decode: Duration,
    pub total: Duration,
}

impl PrefillTimings {
    /// Converts the raw nanosecond counters reported over gRPC into `Duration`s.
    fn new(forward_ns: u64, decode_ns: u64, total_ns: u64) -> Self {
        let forward = Duration::from_nanos(forward_ns);
        let decode = Duration::from_nanos(decode_ns);
        let total = Duration::from_nanos(total_ns);
        Self {
            forward,
            decode,
            total,
        }
    }
}

/// Server-side wall-clock timings for one decode call.
pub struct DecodeTimings {
    /// Only present when the server concatenated several batches first.
    pub concat: Option<Duration>,
    pub forward: Duration,
    pub decode: Duration,
    pub total: Duration,
}

impl DecodeTimings {
    /// Converts the raw nanosecond counters reported over gRPC into `Duration`s.
    fn new(concat_ns: Option<u64>, forward_ns: u64, decode_ns: u64, total_ns: u64) -> Self {
        let to_duration = Duration::from_nanos;
        DecodeTimings {
            concat: concat_ns.map(to_duration),
            forward: to_duration(forward_ns),
            decode: to_duration(decode_ns),
            total: to_duration(total_ns),
        }
    }
}