use std::time::{Duration, Instant};
use text_generation_client::{
    Batch, CachedBatch, Chunk, ClientError, Input, NextTokenChooserParameters, Request,
    ShardedClient, StoppingCriteriaParameters,
};
use tokenizers::{Tokenizer, TruncationDirection};
use tokio::sync::{broadcast, mpsc};

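// Filler text that is tokenized and repeated to build the dummy benchmark input sequence.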
const LOREM_IPSUM: &str = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.";

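/// Latency and throughput measured for a single prefill step.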
#[derive(Debug, Clone)]
pub(crate) struct Prefill {
    pub(crate) latency: Duration,
    pub(crate) throughput: f64,
}

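/// Latency and throughput measured for a full decode pass; `token_latency` is the
/// average latency per generated token.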
#[derive(Debug, Clone)]
pub(crate) struct Decode {
    pub(crate) latency: Duration,
    pub(crate) token_latency: Duration,
    pub(crate) throughput: f64,
}

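/// Progress messages sent from the generation task over `run_sender`:
/// one `Warmup` per warmup iteration, a `Prefill`/`Decode` pair per measured run,
/// `EndRun` after each run and `EndBatch` after each batch size.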
#[derive(Debug)]
pub(crate) enum Message {
    Warmup,
    Prefill(Prefill),
    Decode(Decode),
    EndRun,
    EndBatch,
}

/// Benchmarking task
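///
/// A minimal sketch of how this task might be spawned from the benchmark entrypoint
/// (channel names and capacities below are assumptions, not taken from this file):
///
/// ```ignore
/// let (run_sender, run_receiver) = tokio::sync::mpsc::channel(8);
/// let (shutdown_sender, shutdown_receiver) = tokio::sync::broadcast::channel(1);
/// let (shutdown_guard_sender, shutdown_guard_receiver) = tokio::sync::mpsc::channel(1);
/// tokio::spawn(generation_task(
///     tokenizer, batch_size, sequence_length, decode_length, top_n_tokens,
///     n_runs, warmups, parameters, sharded_client, run_sender,
///     shutdown_receiver, shutdown_guard_sender,
/// ));
/// ```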
#[allow(clippy::too_many_arguments)]
pub(crate) async fn generation_task(
    tokenizer: Tokenizer,
    batch_size: Vec<u32>,
    sequence_length: u32,
    decode_length: u32,
    top_n_tokens: Option<u32>,
    n_runs: usize,
    warmups: usize,
    parameters: NextTokenChooserParameters,
    client: ShardedClient,
    run_sender: mpsc::Sender<Result<Message, ClientError>>,
    mut shutdown_receiver: broadcast::Receiver<()>,
    _shutdown_guard_sender: mpsc::Sender<()>,
) {
    // End task if a message is received on shutdown_receiver
    // _shutdown_guard_sender will be dropped once the task is finished
    tokio::select! {
        res = generate_runs(tokenizer, batch_size, sequence_length, decode_length, top_n_tokens, n_runs, warmups, parameters, client, run_sender.clone())  => {
            if let Err(err) = res {
                run_sender.send(Err(err)).await.unwrap_or(());
            }
        },
        _ = shutdown_receiver.recv() => {}
    }
}

/// Benchmark prefill/decode
#[allow(clippy::too_many_arguments)]
async fn generate_runs(
    tokenizer: Tokenizer,
    batch_size: Vec<u32>,
    sequence_length: u32,
    decode_length: u32,
    top_n_tokens: Option<u32>,
    n_runs: usize,
    warmups: usize,
    parameters: NextTokenChooserParameters,
    mut client: ShardedClient,
    run_sender: mpsc::Sender<Result<Message, ClientError>>,
) -> Result<(), ClientError> {
    // Create a dummy sequence
    let sequence = create_sequence(sequence_length, tokenizer);

    for b in batch_size {
        // Warmups on batch size
        for _ in 0..warmups {
            let (_, decode_batch) = prefill(
                sequence.clone(),
                sequence_length,
                b,
                decode_length,
                parameters.clone(),
                top_n_tokens,
                &mut client,
            )
            .await?;
            let _ = decode(decode_batch, &mut client).await?;
            // Send warmup message
            run_sender.send(Ok(Message::Warmup)).await.unwrap_or(());
        }

        for _ in 0..n_runs {
            let (prefill, decode_batch) = prefill(
                sequence.clone(),
                sequence_length,
                b,
                decode_length,
                parameters.clone(),
                top_n_tokens,
                &mut client,
            )
            .await?;
            // Send prefill message
            run_sender
                .send(Ok(Message::Prefill(prefill)))
                .await
                .unwrap_or(());

            let decode = decode(decode_batch, &mut client).await?;

            // Send decode message
            run_sender
                .send(Ok(Message::Decode(decode)))
                .await
                .unwrap_or(());

            // Send run ended message
            run_sender.send(Ok(Message::EndRun)).await.unwrap_or(());
        }
        // Batch ended
        run_sender.send(Ok(Message::EndBatch)).await.unwrap_or(());
    }
    Ok(())
}

/// Run a prefill step
async fn prefill(
    sequence: String,
    sequence_length: u32,
    batch_size: u32,
    decode_length: u32,
    parameters: NextTokenChooserParameters,
    top_n_tokens: Option<u32>,
    client: &mut ShardedClient,
) -> Result<(Prefill, CachedBatch), ClientError> {
    // Build `batch_size` identical requests from the same dummy input sequence
    let requests = (0..batch_size)
        .map(|id| Request {
            id: id.into(),
            prefill_logprobs: false,
            input_chunks: Some(Input {
                chunks: vec![Chunk::Text(sequence.clone()).into()],
            }),
            inputs: sequence.clone(),
            truncate: sequence_length,
            parameters: Some(parameters.clone()),
            stopping_parameters: Some(StoppingCriteriaParameters {
                max_new_tokens: decode_length,
                stop_sequences: vec![],
                ignore_eos_token: true, // Will not stop even if an EOS token is generated
            }),
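            // Default to 0 (no extra top-n token information) when not specified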
            top_n_tokens: top_n_tokens.unwrap_or(0),
        })
        .collect();

    let batch = Batch {
        id: 0,
        requests,
        size: batch_size,
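        // Upper bound on the total number of tokens (prompt + generated) this batch can hold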
        max_tokens: batch_size * (sequence_length + decode_length),
    };

    // Run prefill
    let start_time = Instant::now();
    let (_, decode_batch, _) = client.prefill(batch.clone()).await?;

    // Get latency
    let latency = start_time.elapsed();

    // Compute throughput from latency and batch size
    let throughput = batch_size as f64 / latency.as_secs_f64();

    // Decode batch cannot be empty
    let decode_batch = decode_batch.expect("decode_batch is None. This is a bug.");

    let step = Prefill {
        latency,
        throughput,
    };

    Ok((step, decode_batch))
}

/// Run a full decode
async fn decode(batch: CachedBatch, client: &mut ShardedClient) -> Result<Decode, ClientError> {
    let mut decode_length = 0;
    let batch_size = batch.size;

    let start_time = Instant::now();

    // Full decode over decode length
    let mut next_batch = Some(batch);
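    // Each decode call generates one token per sequence; since EOS is ignored, the server
    // keeps returning a next batch until every sequence has produced `max_new_tokens` tokens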
    while let Some(batch) = next_batch {
        let result = client.decode(vec![batch]).await?;
        next_batch = result.1;
        decode_length += 1;
    }

    // Get latency
    let latency = start_time.elapsed();
    let token_latency = latency / decode_length;

    // Compute throughput from latency, batch size and decode length
    let throughput = (batch_size * decode_length) as f64 / latency.as_secs_f64();

    let step = Decode {
        latency,
        token_latency,
        throughput,
    };
    Ok(step)
}

/// Create a dummy sequence of the correct length
fn create_sequence(sequence_length: u32, tokenizer: Tokenizer) -> String {
    let lorem_ipsum_length = tokenizer.encode(LOREM_IPSUM, true).unwrap().len();
    // Repeat lorem ipsum to cover sequence length
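    // ((0..sequence_length).step_by(lorem_ipsum_length).len() is the number of repeats
    // needed, i.e. ceil(sequence_length / lorem_ipsum_length))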
    let string_sequence =
        LOREM_IPSUM.repeat((0..sequence_length).step_by(lorem_ipsum_length).len());
    // Encode sequence
    let mut encoding = tokenizer.encode(string_sequence, true).unwrap();
    // Truncate to sequence_length
    encoding.truncate(sequence_length as usize, 0, TruncationDirection::Left);
    // Decode
    tokenizer.decode(encoding.get_ids(), false).unwrap()
}