// generation.rs
use std::time::{Duration, Instant};
use text_generation_client::v3::{
    Batch, CachedBatch, NextTokenChooserParameters, Request, ShardedClient,
    StoppingCriteriaParameters,
};
use text_generation_client::{Chunk, ClientError, Input};
use tokenizers::{Tokenizer, TruncationDirection};
use tokio::sync::{broadcast, mpsc};

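/// Filler text repeated as needed to build dummy input sequences of the requested length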
const LOREM_IPSUM: &str = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.";

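/// Latency and throughput measured for a single prefill step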
#[derive(Debug, Clone)]
pub(crate) struct Prefill {
    pub(crate) latency: Duration,
    pub(crate) throughput: f64,
}

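/// Latency and throughput measured for a full decode pass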
#[derive(Debug, Clone)]
pub(crate) struct Decode {
    pub(crate) latency: Duration,
    pub(crate) token_latency: Duration,
    pub(crate) throughput: f64,
}

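/// Progress events sent back over `run_sender` while a benchmark is running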
#[derive(Debug)]
pub(crate) enum Message {
    Warmup,
    Prefill(Prefill),
    Decode(Decode),
    EndRun,
    EndBatch,
}

/// Benchmarking task
#[allow(clippy::too_many_arguments)]
pub(crate) async fn generation_task(
    tokenizer: Tokenizer,
    batch_size: Vec<u32>,
    sequence_length: u32,
    decode_length: u32,
    top_n_tokens: Option<u32>,
    n_runs: usize,
    warmups: usize,
    parameters: NextTokenChooserParameters,
    client: ShardedClient,
    run_sender: mpsc::Sender<Result<Message, ClientError>>,
    mut shutdown_receiver: broadcast::Receiver<()>,
    _shutdown_guard_sender: mpsc::Sender<()>,
) {
    // End task if a message is received on shutdown_receiver
    // _shutdown_guard_sender will be dropped once the task is finished
    tokio::select! {
        res = generate_runs(tokenizer, batch_size, sequence_length, decode_length, top_n_tokens, n_runs, warmups, parameters, client, run_sender.clone())  => {
            if let Err(err) = res {
                run_sender.send(Err(err)).await.unwrap_or(());
            }
        },
        _ = shutdown_receiver.recv() => {}
    }
}

/// Benchmark prefill/decode
#[allow(clippy::too_many_arguments)]
async fn generate_runs(
    tokenizer: Tokenizer,
    batch_size: Vec<u32>,
    sequence_length: u32,
    decode_length: u32,
    top_n_tokens: Option<u32>,
    n_runs: usize,
    warmups: usize,
    parameters: NextTokenChooserParameters,
    mut client: ShardedClient,
    run_sender: mpsc::Sender<Result<Message, ClientError>>,
) -> Result<(), ClientError> {
    // Create a dummy sequence
    let sequence = create_sequence(sequence_length, tokenizer);

    for b in batch_size {
        // Warmups on batch size
        for _ in 0..warmups {
            let (_, decode_batch) = prefill(
                sequence.clone(),
                sequence_length,
                b,
                decode_length,
                parameters.clone(),
                top_n_tokens,
                &mut client,
            )
            .await?;
            let _ = decode(decode_batch, &mut client).await?;
            // Send warmup message
            run_sender.send(Ok(Message::Warmup)).await.unwrap_or(());
        }

        for _ in 0..n_runs {
            let (prefill, decode_batch) = prefill(
                sequence.clone(),
                sequence_length,
                b,
                decode_length,
                parameters.clone(),
                top_n_tokens,
                &mut client,
            )
            .await?;
            // Send prefill message
            run_sender
                .send(Ok(Message::Prefill(prefill)))
                .await
                .unwrap_or(());

            let decode = decode(decode_batch, &mut client).await?;

            // Send decode message
            run_sender
                .send(Ok(Message::Decode(decode)))
                .await
                .unwrap_or(());

            // Send run ended message
            run_sender.send(Ok(Message::EndRun)).await.unwrap_or(());
        }
        // Batch ended
        run_sender.send(Ok(Message::EndBatch)).await.unwrap_or(());
    }
    Ok(())
}

/// Run a prefill step
async fn prefill(
    sequence: String,
    sequence_length: u32,
    batch_size: u32,
    decode_length: u32,
    parameters: NextTokenChooserParameters,
    top_n_tokens: Option<u32>,
    client: &mut ShardedClient,
) -> Result<(Prefill, CachedBatch), ClientError> {
    // Create requests
    let requests = (0..batch_size)
        .map(|id| Request {
            id: id.into(),
            prefill_logprobs: false,
            input_chunks: Some(Input {
                chunks: vec![Chunk::Text(sequence.clone()).into()],
            }),
            inputs: sequence.clone(),
            truncate: sequence_length,
            add_special_tokens: true,
            parameters: Some(parameters.clone()),
            stopping_parameters: Some(StoppingCriteriaParameters {
                max_new_tokens: decode_length,
                stop_sequences: vec![],
                ignore_eos_token: true, // Will not stop even if an eos token is generated
            }),
            top_n_tokens: top_n_tokens.unwrap_or(0),
            blocks: vec![],
            slots: vec![],
            prefix_len: 0,
            adapter_id: None,
        })
        .collect();

    let batch = Batch {
        id: 0,
        requests,
        size: batch_size,
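        // Upper bound on the number of tokens in the batch (prompt + generated tokens)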
        max_tokens: batch_size * (sequence_length + decode_length),
        max_blocks: 0,
    };

    // Run prefill
    let start_time = Instant::now();
    let (_, decode_batch, _) = client.prefill(batch.clone()).await?;

    // Get latency
    let latency = start_time.elapsed();

    // Compute throughput from latency and batch size
    let throughput = batch_size as f64 / latency.as_secs_f64();

    // Decode batch cannot be empty
    let decode_batch = decode_batch.expect("decode_batch is None. This is a bug.");

    let step = Prefill {
        latency,
        throughput,
    };

    Ok((step, decode_batch))
}

/// Run a full decode
async fn decode(batch: CachedBatch, client: &mut ShardedClient) -> Result<Decode, ClientError> {
    let mut decode_length = 0;
    let batch_size = batch.size;

    let start_time = Instant::now();

    // Full decode over decode length
    let mut next_batch = Some(batch);
    while let Some(batch) = next_batch {
        let result = client.decode(vec![batch]).await?;
        next_batch = result.1;
        decode_length += 1;
    }

    // Get latency
    let latency = start_time.elapsed();
    let token_latency = latency / decode_length;

    // Compute throughput from latency, batch size and decode length
    let throughput = (batch_size * decode_length) as f64 / latency.as_secs_f64();

    let step = Decode {
        latency,
        token_latency,
        throughput,
    };
    Ok(step)
}

/// Create a dummy sequence of the correct length
fn create_sequence(sequence_length: u32, tokenizer: Tokenizer) -> String {
    let lorem_ipsum_length = tokenizer.encode(LOREM_IPSUM, true).unwrap().len();
    // Repeat lorem ipsum to cover sequence length
    let string_sequence =
        LOREM_IPSUM.repeat((0..sequence_length).step_by(lorem_ipsum_length).len());
    // Encode sequence
    let mut encoding = tokenizer.encode(string_sequence, true).unwrap();
    // Truncate to sequence_length
    encoding.truncate(sequence_length as usize, 0, TruncationDirection::Left);
    // Decode
    tokenizer.decode(encoding.get_ids(), false).unwrap()
}