/// Single shard Client
use crate::pb::generate::v2::text_generation_service_client::TextGenerationServiceClient;
use crate::pb::generate::v2::*;
use crate::Result;
use grpc_metadata::InjectTelemetryContext;
use std::cmp::min;
use std::time::Duration;
use tonic::transport::{Channel, Uri};
use tracing::instrument;

/// Text Generation Inference gRPC client
#[derive(Debug, Clone)]
pub struct Client {
    stub: TextGenerationServiceClient<Channel>,
}

impl Client {
    /// Returns a client connected to the given URI
    pub async fn connect(uri: Uri) -> Result<Self> {
        let channel = Channel::builder(uri).connect().await?;

        Ok(Self {
            stub: TextGenerationServiceClient::new(channel),
        })
    }

    /// Returns a client connected to the given unix socket
    pub async fn connect_uds(path: String) -> Result<Self> {
        let channel = Channel::from_shared("http://[::]:50051".to_string())
            .unwrap()
            .connect_with_connector(tower::service_fn(move |_: Uri| {
                tokio::net::UnixStream::connect(path.clone())
            }))
            .await?;

        Ok(Self {
            stub: TextGenerationServiceClient::new(channel),
        })
    }
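
    // Minimal usage sketch for the constructors above (not part of the original
    // file; assumes a tokio runtime and a shard serving on the hypothetical socket
    // path below, with errors propagated via `?`):
    //
    //     let mut client =
    //         Client::connect_uds("/tmp/text-generation-server-0".to_string()).await?;
    //     let shard_urls = client.service_discovery().await?;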

    /// Returns a list of uris or unix sockets of all shards
    #[instrument(skip(self))]
    pub async fn service_discovery(&mut self) -> Result<Vec<String>> {
        let request = tonic::Request::new(ServiceDiscoveryRequest {}).inject_context();
        let response = self.stub.service_discovery(request).await?;
        let urls = response
            .into_inner()
            .urls
            .into_iter()
            // Remove unix socket prefix
            .map(|url| match url.strip_prefix("unix://") {
                None => url,
                Some(stripped_url) => stripped_url.to_string(),
            })
            .collect();
        Ok(urls)
    }
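
    // For example (illustrative value, not from the original file): a shard
    // advertised as "unix:///tmp/text-generation-server-1" is returned here as
    // "/tmp/text-generation-server-1", while plain http URIs pass through unchanged.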

    /// Get model info
    #[instrument(skip(self))]
    pub async fn info(&mut self) -> Result<InfoResponse> {
        let request = tonic::Request::new(InfoRequest {}).inject_context();
        let response = self.stub.info(request).await?.into_inner();
        Ok(response)
    }

    /// Get model health
    #[instrument(skip(self))]
    pub async fn health(&mut self) -> Result<HealthResponse> {
        let request = tonic::Request::new(HealthRequest {}).inject_context();
        let response = self.stub.health(request).await?.into_inner();
        Ok(response)
    }

    /// Clear the past generations cache
    #[instrument(skip(self))]
    pub async fn clear_cache(&mut self, batch_id: Option<u64>) -> Result<()> {
        let request = tonic::Request::new(ClearCacheRequest { id: batch_id }).inject_context();
        self.stub.clear_cache(request).await?;
        Ok(())
    }

    /// Filter a cached batch
    #[instrument(skip(self))]
    pub async fn filter_batch(
        &mut self,
        batch_id: u64,
        request_ids: Vec<u64>,
    ) -> Result<Option<CachedBatch>> {
        let request = tonic::Request::new(FilterBatchRequest {
            batch_id,
            request_ids,
        })
        .inject_context();
        let filtered_batch = self.stub.filter_batch(request).await?.into_inner();
        Ok(filtered_batch.batch)
    }
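
    // Usage sketch (hypothetical ids, not from the original file): if only requests
    // 2 and 3 of cached batch 0 are still running, the finished ones can be dropped
    // from the shard's cache with:
    //
    //     let remaining = client.filter_batch(0, vec![2, 3]).await?;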

    /// Warmup on a max size batch
    ///
    /// Returns the maximum number of tokens supported by the hardware
    #[instrument(skip_all)]
    pub async fn warmup(
        &mut self,
        max_input_length: u32,
        max_prefill_tokens: u32,
        max_total_tokens: u32,
    ) -> Result<Option<u32>> {
        let mut n_tokens = 0;
        let mut requests = Vec::new();
        // Create requests
        while n_tokens < max_prefill_tokens {
            let truncate = min(max_input_length, max_prefill_tokens - n_tokens);
            requests.push(Request {
                id: 0,
                // We truncate the input on the server side to be sure that it has the correct size
                inputs: "_test ".to_string().repeat(max_input_length as usize),
                truncate,
                // Set sampling parameters so that these operations are also accounted for in the max memory usage
                parameters: Some(NextTokenChooserParameters {
                    temperature: 0.9,
                    top_k: 10,
                    top_p: 0.9,
                    typical_p: 0.9,
                    do_sample: false,
                    seed: 0,
                    repetition_penalty: 1.2,
                    frequency_penalty: 0.1,
                    watermark: true,
                }),
                stopping_parameters: Some(StoppingCriteriaParameters {
                    max_new_tokens: max_total_tokens - truncate,
                    stop_sequences: vec![],
                    ignore_eos_token: true,
                }),
                prefill_logprobs: true,
                top_n_tokens: 20,
            });
            n_tokens += max_input_length;
        }

        let batch = Batch {
            id: 0,
            size: requests.len() as u32,
            requests,
            max_tokens: 0,
        };

        let request = tonic::Request::new(WarmupRequest {
            batch: Some(batch),
            max_input_length,
            max_prefill_tokens,
            max_total_tokens,
        })
        .inject_context();
        let response = self.stub.warmup(request).await?.into_inner();
        Ok(response.max_supported_total_tokens)
    }
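
    // Worked example for the warmup batch above (illustrative numbers, not from the
    // original file): with max_input_length = 1024, max_prefill_tokens = 4096 and
    // max_total_tokens = 2048, the loop builds 4 requests, each truncated to 1024
    // input tokens and allowed 2048 - 1024 = 1024 new tokens, so the warmup batch
    // saturates the prefill token budget.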

    /// Generate one token for each request in the given batch
    ///
    /// Returns Generation for each request in batch
    /// and the next cached batch
    #[instrument(skip_all, fields(id = &batch.id, size = &batch.size))]
    pub async fn prefill(
        &mut self,
        batch: Batch,
    ) -> Result<(Vec<Generation>, Option<CachedBatch>, PrefillTimings)> {
        let request = tonic::Request::new(PrefillRequest { batch: Some(batch) }).inject_context();
        let response = self.stub.prefill(request).await?.into_inner();
        Ok((
            response.generations,
            response.batch,
            PrefillTimings::new(response.forward_ns, response.decode_ns, response.total_ns),
        ))
    }

    /// Generate one token for each request in the given cached batches
    ///
    /// Returns Generation for each request in batches
    /// and the next cached batch
    #[instrument(skip_all, fields(size = batches.iter().map(|batch|{batch.size}).sum::<u32>()))]
    pub async fn decode(
        &mut self,
        batches: Vec<CachedBatch>,
    ) -> Result<(Vec<Generation>, Option<CachedBatch>, DecodeTimings)> {
        let request = tonic::Request::new(DecodeRequest { batches }).inject_context();
        let response = self.stub.decode(request).await?.into_inner();
        Ok((
            response.generations,
            response.batch,
            DecodeTimings::new(
                response.concat_ns,
                response.forward_ns,
                response.decode_ns,
                response.total_ns,
            ),
        ))
    }
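
    // Generation loop sketch (not part of the original file; `batch` is a
    // hypothetical Batch built by the caller): `prefill` runs the first forward pass
    // and returns the next cached batch, which is then fed back into `decode` until
    // the shard reports that no batch is left.
    //
    //     let (_generations, mut cached, _timings) = client.prefill(batch).await?;
    //     while let Some(batch) = cached {
    //         let (_generations, next, _timings) = client.decode(vec![batch]).await?;
    //         cached = next;
    //     }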
}

pub struct PrefillTimings {
    pub forward: Duration,
    pub decode: Duration,
    pub total: Duration,
}

impl PrefillTimings {
    fn new(forward_ns: u64, decode_ns: u64, total_ns: u64) -> Self {
        Self {
            forward: Duration::from_nanos(forward_ns),
            decode: Duration::from_nanos(decode_ns),
            total: Duration::from_nanos(total_ns),
        }
    }
}

pub struct DecodeTimings {
    pub concat: Option<Duration>,
    pub forward: Duration,
    pub decode: Duration,
    pub total: Duration,
}

impl DecodeTimings {
    fn new(concat_ns: Option<u64>, forward_ns: u64, decode_ns: u64, total_ns: u64) -> Self {
        Self {
            concat: concat_ns.map(Duration::from_nanos),
            forward: Duration::from_nanos(forward_ns),
            decode: Duration::from_nanos(decode_ns),
            total: Duration::from_nanos(total_ns),
        }
    }
}