/// Batching and inference logic
use crate::infer::v2::queue::{Entry, Queue};
use crate::infer::{
    Backend, GenerateStreamResponse, GeneratedText, InferError, InferStreamResponse,
};
use crate::validation::ValidGenerateRequest;
use crate::{Attention, FinishReason, PrefillToken, Token};
use nohash_hasher::IntMap;
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};
use text_generation_client::v2::{Batch, CachedBatch, Generation, ShardedClient};
use text_generation_client::ClientError;
use tokio::sync::mpsc::error::SendError;
use tokio::sync::{mpsc, Notify, OwnedSemaphorePermit};
use tokio::time::Instant;
use tokio_stream::wrappers::UnboundedReceiverStream;
use tracing::{info_span, instrument, Instrument, Span};

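/// Scheduler front-end for the v2 sharded client: `schedule` appends validated
/// requests to an in-memory queue, and a background `batching_task` drains it.
///
/// A minimal construction sketch (illustrative values only; obtaining a
/// connected `ShardedClient` is elided):
///
/// ```ignore
/// let backend = BackendV2::new(
///     client,                           // ShardedClient
///     1.2,                              // waiting_served_ratio
///     4096,                             // max_batch_prefill_tokens
///     16000,                            // max_batch_total_tokens
///     20,                               // max_waiting_tokens
///     None,                             // max_batch_size
///     false,                            // requires_padding
///     None,                             // window_size
///     0,                                // speculate
///     Arc::new(AtomicBool::new(false)), // generation_health
/// );
/// ```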
pub(crate) struct BackendV2 {
    /// Request queue
    queue: Queue,
    /// Notify batcher on queue appends
    batching_task_notifier: Arc<Notify>,
}

impl BackendV2 {
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn new(
        client: ShardedClient,
        waiting_served_ratio: f32,
        max_batch_prefill_tokens: u32,
        max_batch_total_tokens: u32,
        max_waiting_tokens: usize,
        max_batch_size: Option<usize>,
        requires_padding: bool,
        window_size: Option<u32>,
        speculate: u32,
        generation_health: Arc<AtomicBool>,
    ) -> Self {
        // Infer shared state
        let attention = if let Ok(attention) = std::env::var("ATTENTION") {
            attention
                .parse()
                .unwrap_or_else(|_| panic!("Invalid attention was specified: `{attention}`"))
        } else {
            Attention::Paged
        };
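        // Flash-decoding kernels work on larger KV-cache blocks than paged
        // attention, so the queue allocates in correspondingly larger units.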
        let block_size = if attention == Attention::FlashDecoding {
            256
        } else {
            16
        };
        let queue = Queue::new(requires_padding, block_size, window_size, speculate);
        let batching_task_notifier = Arc::new(Notify::new());

        // Spawn batching background task that contains all the inference logic
        tokio::spawn(batching_task(
            client,
            waiting_served_ratio,
            max_batch_prefill_tokens,
            max_batch_total_tokens,
            max_waiting_tokens,
            max_batch_size,
            queue.clone(),
            batching_task_notifier.clone(),
            generation_health,
        ));

        Self {
            queue,
            batching_task_notifier,
        }
    }
}

impl Backend for BackendV2 {
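    /// Queue a validated request and hand back its response stream.
    ///
    /// Caller-side sketch (hypothetical setup: the permit comes from whatever
    /// concurrency limiter fronts this backend, and `next` is provided by
    /// `tokio_stream::StreamExt`):
    ///
    /// ```ignore
    /// let permit = semaphore.clone().acquire_owned().await?;
    /// let (_permit, _input_length, mut stream) = backend.schedule(request, permit)?;
    /// while let Some(response) = stream.next().await {
    ///     // handle InferStreamResponse::{Prefill, Intermediate, End}
    /// }
    /// ```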
    #[instrument(skip_all)]
    fn schedule(
        &self,
        request: ValidGenerateRequest,
        permit: OwnedSemaphorePermit,
    ) -> Result<GenerateStreamResponse, InferError> {
        // MPSC channel to communicate with the background batching task
        let (response_tx, response_rx) = mpsc::unbounded_channel();
        let input_length = request.input_length;

        // Append the request to the queue
        self.queue.append(Entry {
            request,
            response_tx,
            span: Span::current(),
            temp_span: None,
            queue_time: Instant::now(),
            batch_time: None,
        });

        // Notify the background task that we have a new entry in the queue that needs
        // to be batched
        self.batching_task_notifier.notify_one();

        // Return stream
        Ok((
            permit,
            input_length,
            UnboundedReceiverStream::new(response_rx),
        ))
    }
}

/// Batching logic
/// Will be launched in a background Tokio task
///
/// Batches requests and sends them to the inference server
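///
/// The loop implements continuous batching: an initial batch is prefilled, then
/// decoded repeatedly while newly queued requests are opportunistically folded
/// into the running batch.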
#[allow(clippy::too_many_arguments)]
pub(crate) async fn batching_task(
    mut client: ShardedClient,
    waiting_served_ratio: f32,
    max_batch_prefill_tokens: u32,
    max_batch_total_tokens: u32,
    max_waiting_tokens: usize,
    max_batch_size: Option<usize>,
    queue: Queue,
    notifier: Arc<Notify>,
    generation_health: Arc<AtomicBool>,
) {
    // Infinite loop
    loop {
        // Wait for a notification from the Infer struct
        notifier.notified().await;

        // Get the next batch from the queue
        // This batch might be smaller than the maximum batch size if there are not enough requests
        // waiting in the queue
        while let Some((mut entries, batch, span)) = queue
            .next_batch(
                None,
                max_batch_size,
                max_batch_prefill_tokens,
                max_batch_total_tokens,
            )
            .await
        {
            let mut cached_batch = prefill(&mut client, batch, &mut entries, &generation_health)
                .instrument(span)
                .await;
            let mut waiting_tokens = 1;

            // We loop until we do not receive any cached batch from the inference server (== until
            // all requests have met their stopping criteria)
            while let Some(batch) = cached_batch {
                // Get current batch info
                let batch_size = batch.size;
                let batch_max_tokens = batch.max_tokens;
                let mut batches = vec![batch];
                metrics::gauge!("tgi_batch_current_size").set(batch_size as f64);
                metrics::gauge!("tgi_batch_current_max_tokens").set(batch_max_tokens as f64);

                let min_size = if waiting_tokens >= max_waiting_tokens {
                    // If we didn't onboard any new requests since >= max_waiting_tokens, we try
                    // to add a new batch even though its size might be small
                    None
                } else {
                    // Minimum batch size
                    Some((batch_size as f32 * waiting_served_ratio).floor() as usize)
                };
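                // Illustrative numbers: with batch_size = 32 and
                // waiting_served_ratio = 1.2, at least floor(32 * 1.2) = 38
                // queued requests are needed before paying for an extra prefill.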

                let token_budget = max_batch_total_tokens.saturating_sub(batch_max_tokens);
                let max_size =
                    max_batch_size.map(|max_size| max_size.saturating_sub(batch_size as usize));
                // Try to get a new batch
                if let Some((mut new_entries, new_batch, span)) = queue
                    .next_batch(min_size, max_size, max_batch_prefill_tokens, token_budget)
                    .await
                {
                    // Tracking metrics
                    if min_size.is_some() {
                        metrics::counter!("tgi_batch_concat", "reason" => "backpressure")
                            .increment(1);
                    } else {
                        metrics::counter!("tgi_batch_concat", "reason" => "wait_exceeded")
                            .increment(1);
                    }

                    entries.iter_mut().for_each(|(_, entry)| {
                        // Create a new span to add the info that this entry is waiting
                        // because a new batch is being computed
                        let entry_waiting_span = info_span!(parent: &entry.span, "waiting");
                        // Add relationships
                        span.follows_from(&entry_waiting_span);
                        entry_waiting_span.follows_from(&span);
                        // Update entry
                        entry.temp_span = Some(entry_waiting_span);
                    });

                    // Generate one token for this new batch to have the attention past in cache
                    let new_cached_batch =
                        prefill(&mut client, new_batch, &mut new_entries, &generation_health)
                            .instrument(span)
                            .await;
                    // Reset waiting counter
                    waiting_tokens = 1;
                    // Extend current batch with the new batch
                    if let Some(new_cached_batch) = new_cached_batch {
                        entries.extend(new_entries);
                        batches.push(new_cached_batch);
                    }
                }

                // Create span for this batch to add context to inference calls
                let next_batch_size = entries.len();
                let next_batch_span =
                    info_span!(parent: None, "batch", batch_size = next_batch_size);
                entries.iter_mut().for_each(|(_, entry)| {
                    // Create a new span to link the batch back to this entry
                    let entry_batch_span = info_span!(parent: &entry.span, "infer");
                    // Add relationships
                    next_batch_span.follows_from(&entry_batch_span);
                    entry_batch_span.follows_from(&next_batch_span);
                    // Update entry
                    entry.temp_span = Some(entry_batch_span);
                });

                cached_batch = decode(&mut client, batches, &mut entries, &generation_health)
                    .instrument(next_batch_span)
                    .await;
                waiting_tokens += 1;
            }
            metrics::gauge!("tgi_batch_current_size").set(0.0);
            metrics::gauge!("tgi_batch_current_max_tokens").set(0.0);
        }
    }
}

#[instrument(skip_all)]
async fn prefill(
    client: &mut ShardedClient,
    batch: Batch,
    entries: &mut IntMap<u64, Entry>,
    generation_health: &Arc<AtomicBool>,
) -> Option<CachedBatch> {
    let start_time = Instant::now();
    let batch_id = batch.id;
    metrics::counter!("tgi_batch_inference_count", "method" => "prefill").increment(1);

    match client.prefill(batch).await {
        Ok((generations, next_batch, timings)) => {
            // Update health
            generation_health.store(true, Ordering::SeqCst);

            let start_filtering_time = Instant::now();
            // Send generated tokens and filter stopped entries
            filter_send_generations(generations, entries);

            // Filter next batch and remove requests that were stopped
            let next_batch = filter_batch(client, next_batch, entries).await;

            metrics::histogram!("tgi_batch_forward_duration","method" => "prefill")
                .record(timings.forward.as_secs_f64());
            metrics::histogram!("tgi_batch_decode_duration", "method" => "prefill")
                .record(timings.decode.as_secs_f64());
            metrics::histogram!("tgi_batch_filter_duration", "method" => "prefill")
                .record(start_filtering_time.elapsed().as_secs_f64());
            metrics::histogram!("tgi_batch_inference_duration","method" => "prefill")
                .record(start_time.elapsed().as_secs_f64());
            metrics::counter!("tgi_batch_inference_success", "method" => "prefill").increment(1);
            next_batch
        }
        // If we have an error, we discard the whole batch
        Err(err) => {
            // Update health
            generation_health.store(false, Ordering::SeqCst);
            let _ = client.clear_cache(Some(batch_id)).await;
            send_errors(err, entries);
            metrics::counter!("tgi_batch_inference_failure", "method" => "prefill").increment(1);
            None
        }
    }
}

#[instrument(skip_all)]
async fn decode(
    client: &mut ShardedClient,
    batches: Vec<CachedBatch>,
    entries: &mut IntMap<u64, Entry>,
    generation_health: &Arc<AtomicBool>,
) -> Option<CachedBatch> {
    let start_time = Instant::now();
    let batch_ids: Vec<u64> = batches.iter().map(|b| b.id).collect();
    metrics::counter!("tgi_batch_inference_count", "method" => "decode").increment(1);

    match client.decode(batches).await {
        Ok((generations, next_batch, timings)) => {
            // Update health
            generation_health.store(true, Ordering::SeqCst);

            let start_filtering_time = Instant::now();
            // Send generated tokens and filter stopped entries
            filter_send_generations(generations, entries);

            // Filter next batch and remove requests that were stopped
            let next_batch = filter_batch(client, next_batch, entries).await;

            if let Some(concat_duration) = timings.concat {
                metrics::histogram!("tgi_batch_concat_duration", "method" => "decode")
                    .record(concat_duration.as_secs_f64());
            }
            metrics::histogram!("tgi_batch_forward_duration", "method" => "decode")
                .record(timings.forward.as_secs_f64());
            metrics::histogram!("tgi_batch_decode_duration", "method" => "decode")
                .record(timings.decode.as_secs_f64());
            metrics::histogram!("tgi_batch_filter_duration", "method" => "decode")
                .record(start_filtering_time.elapsed().as_secs_f64());
            metrics::histogram!("tgi_batch_inference_duration", "method" => "decode")
                .record(start_time.elapsed().as_secs_f64());
            metrics::counter!("tgi_batch_inference_success", "method" => "decode").increment(1);
            next_batch
        }
        // If we have an error, we discard the whole batch
        Err(err) => {
            generation_health.store(false, Ordering::SeqCst);
            for id in batch_ids {
                let _ = client.clear_cache(Some(id)).await;
            }
            send_errors(err, entries);
            metrics::counter!("tgi_batch_inference_failure", "method" => "decode").increment(1);
            None
        }
    }
}

/// Filter a `batch` and remove all requests not present in `entries`
#[instrument(skip_all)]
async fn filter_batch(
    client: &mut ShardedClient,
    next_batch: Option<CachedBatch>,
    entries: &IntMap<u64, Entry>,
) -> Option<CachedBatch> {
    let mut batch = next_batch?;

    // No need to filter
    if batch.size as usize == entries.len() {
        return Some(batch);
    }

    let id = batch.id;

    // Retain only requests that are still in entries
    batch.request_ids.retain(|id| entries.contains_key(id));

    if batch.request_ids.is_empty() {
        // All requests have been filtered out
        // Next batch is now empty
        // Clear it from the Python shards cache
        // We unwrap here as we need to panic since we cannot recover if this method fails
        client.clear_cache(Some(id)).await.unwrap();
        None
    } else {
        // Filter Python shard cache
        // We unwrap here as we need to panic since we cannot recover if this method fails
        client.filter_batch(id, batch.request_ids).await.unwrap()
    }
}

/// Send one or multiple `InferStreamResponse` to Infer for all `entries`
/// and filter entries
#[instrument(skip_all)]
fn filter_send_generations(generations: Vec<Generation>, entries: &mut IntMap<u64, Entry>) {
    generations.into_iter().for_each(|generation| {
        let id = generation.request_id;
        // Get entry
        // We can `expect` here as the request id should always be in the entries
        let entry = entries
            .get(&id)
            .expect("ID not found in entries. This is a bug.");

        // Create and enter a span to link this function back to the entry
        let _span = info_span!(parent: entry.temp_span.as_ref().expect("batch_span is None. This is a bug."), "send_generation", generation = ?generation).entered();
        // Send generation responses back to the infer task
        // If we receive an error from the mpsc channel, it means that the client dropped the
        // request and we need to stop generating, hence the unwrap_or(true)
        let stopped = send_responses(generation, entry).map_err(|err| {
            tracing::error!("Entry response channel error.");
            metrics::counter!("tgi_request_failure", "err" => "dropped").increment(1);
            err
        }).unwrap_or(true);
        if stopped {
            entries.remove(&id).expect("ID not found in entries. This is a bug.");
        }
    });
}

/// Send responses through the `entry` response channel
fn send_responses(
    generation: Generation,
    entry: &Entry,
) -> Result<bool, Box<SendError<Result<InferStreamResponse, InferError>>>> {
    // Return directly if the channel is disconnected
    if entry.response_tx.is_closed() {
        metrics::counter!("tgi_request_failure", "err" => "dropped").increment(1);
        return Ok(true);
    }

    let mut stopped = false;

    if let Some(prefill_tokens) = generation.prefill_tokens {
        // Create Token objects
        // We do that here instead of in the Python code as Rust for loops are faster
        let prefill_tokens = prefill_tokens
            .ids
            .into_iter()
            .zip(prefill_tokens.logprobs)
            .zip(prefill_tokens.texts)
            .map(|((id, logprob), text)| PrefillToken { id, text, logprob })
            .collect();

        // Send message
        entry
            .response_tx
            .send(Ok(InferStreamResponse::Prefill(prefill_tokens)))?;
    }

    // Create last Token
    let tokens_ = generation.tokens.expect("Non empty tokens in generation");
    let n = tokens_.ids.len();
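    // With speculative decoding, a single decode pass can return several
    // tokens; every token after the first is recorded as "skipped".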
    metrics::histogram!("tgi_request_skipped_tokens").record((n - 1) as f64);
    let mut iterator = tokens_
        .ids
        .into_iter()
        .zip(tokens_.logprobs)
        .zip(tokens_.texts)
        .zip(tokens_.is_special)
        .enumerate()
        .peekable();
    while let Some((i, (((id, logprob), text), special))) = iterator.next() {
        let token = Token {
            id,
            text,
            logprob,
            special,
        };
        let top_tokens = if let Some(top_tokens_) = generation.top_tokens.get(i) {
            top_tokens_
                .ids
                .iter()
                .zip(top_tokens_.logprobs.iter())
                .zip(top_tokens_.texts.iter())
                .zip(top_tokens_.is_special.iter())
                .map(|(((&id, &logprob), text), &special)| Token {
                    id,
                    text: text.to_string(),
                    logprob,
                    special,
                })
                .collect()
        } else {
            vec![]
        };
        match (&generation.generated_text, iterator.peek()) {
            (Some(generated_text), None) => {
                // Generation has ended
                stopped = true;
                // Send message
                entry.response_tx.send(Ok(InferStreamResponse::End {
                    token,
                    top_tokens,
                    generated_text: GeneratedText::from(generated_text.clone()),
                    queued: entry.queue_time,
                    start: entry.batch_time.unwrap(),
                }))?;
            }
            _ => {
                // Send message
                entry
                    .response_tx
                    .send(Ok(InferStreamResponse::Intermediate { token, top_tokens }))?;
            }
        }
    }

    Ok(stopped)
}

/// Send errors to Infer for all `entries`
#[instrument(skip_all)]
fn send_errors(error: ClientError, entries: &mut IntMap<u64, Entry>) {
    entries.drain().for_each(|(_, entry)| {
        // Create and enter a span to link this function back to the entry
        let _send_error_span = info_span!(parent: entry.temp_span.as_ref().expect("batch_span is None. This is a bug."), "send_error").entered();
        let err = InferError::GenerationError(error.to_string());
        metrics::counter!("tgi_request_failure", "err" => "generation").increment(1);
        tracing::error!("{err}");

        // unwrap_or is valid here as we don't care if the receiver is gone.
        entry
            .response_tx
            .send(Err(err))
            .unwrap_or(());
    });
}

impl From<text_generation_client::v2::GeneratedText> for GeneratedText {
    fn from(value: text_generation_client::v2::GeneratedText) -> Self {
        let v2_finish_reason =
            text_generation_client::v2::FinishReason::try_from(value.finish_reason).unwrap();
        let finish_reason = match v2_finish_reason {
            text_generation_client::v2::FinishReason::Length => FinishReason::Length,
            text_generation_client::v2::FinishReason::EosToken => FinishReason::EndOfSequenceToken,
            text_generation_client::v2::FinishReason::StopSequence => FinishReason::StopSequence,
        };

        Self {
            text: value.text,
            generated_tokens: value.generated_tokens,
            finish_reason,
            seed: value.seed,
        }
    }
}

// tests
#[cfg(test)]
mod tests {
    use crate::infer::raise_exception;
    use crate::{ChatTemplateInputs, TextMessage};
    use minijinja::Environment;

    #[test]
    fn test_chat_template() {
        let env = Environment::new();

        let source = r#"
        {% for message in messages %}
            {% if message['role'] == 'system' %}
                {% if message['content']%}
                    {{'### System:\n' + message['content']+'\n\n'}}
                {% endif %}
            {% elif message['role'] == 'user' %}
                {{'### User:\n' + message['content']+'\n\n'}}
            {% elif message['role'] == 'assistant' %}
                {{'### Assistant:\n'  + message['content']}}
            {% endif %}
            {% if loop.last and add_generation_prompt %}
                {{ '### Assistant:\n' }}
            {% endif %}
        {% endfor %}"#;

        // trim all the whitespace
        let source = source
            .lines()
            .map(|line| line.trim())
            .collect::<Vec<&str>>()
            .join("");

        let tmpl = env.template_from_str(&source);

        let chat_template_inputs = ChatTemplateInputs {
            messages: vec![
                TextMessage {
                    role: "user".to_string(),
                    content: "Hi!".to_string(),
                },
                TextMessage {
                    role: "assistant".to_string(),
                    content: "Hello how can I help?".to_string(),
                },
                TextMessage {
                    role: "user".to_string(),
                    content: "What is Deep Learning?".to_string(),
                },
                TextMessage {
                    role: "assistant".to_string(),
                    content: "magic!".to_string(),
                },
            ],
            bos_token: Some("[BOS]"),
            eos_token: Some("[EOS]"),
            add_generation_prompt: true,
            ..Default::default()
        };

        let result = tmpl.unwrap().render(chat_template_inputs).unwrap();

        assert_eq!(
            result,
            "### User:\nHi!\n\n### Assistant:\nHello how can I help?### User:\nWhat is Deep Learning?\n\n### Assistant:\nmagic!### Assistant:\n"
        );
    }

    #[test]
    fn test_chat_template_invalid_with_raise() {
        let mut env = Environment::new();
        env.add_function("raise_exception", raise_exception);

        let source = r#"
        {{ bos_token }}
        {% for message in messages %}
        {% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}
        {{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}
        {% endif %}
        {% if message['role'] == 'user' %}
        {{ '[INST] ' + message['content'] + ' [/INST]' }}
        {% elif message['role'] == 'assistant' %}
        {{ message['content'] + eos_token}}
        {% else %}
        {{ raise_exception('Only user and assistant roles are supported!') }}
        {% endif %}
        {% endfor %}"#;

        // trim all the whitespace
        let source = source
            .lines()
            .map(|line| line.trim())
            .collect::<Vec<&str>>()
            .join("");

        let tmpl = env.template_from_str(&source);

        let chat_template_inputs = ChatTemplateInputs {
            messages: vec![
                TextMessage {
                    role: "user".to_string(),
                    content: "Hi!".to_string(),
                },
                TextMessage {
                    role: "user".to_string(),
                    content: "Hi again!".to_string(),
                },
                TextMessage {
                    role: "assistant".to_string(),
                    content: "Hello how can I help?".to_string(),
                },
                TextMessage {
                    role: "user".to_string(),
                    content: "What is Deep Learning?".to_string(),
                },
                TextMessage {
                    role: "assistant".to_string(),
                    content: "magic!".to_string(),
                },
            ],
            bos_token: Some("[BOS]"),
            eos_token: Some("[EOS]"),
            add_generation_prompt: true,
            ..Default::default()
        };

        let result = tmpl.unwrap().render(chat_template_inputs);

        match result {
            Ok(_) => panic!("Should have failed"),
            Err(e) => {
                assert_eq!(
                    e.detail().unwrap(),
                    "Conversation roles must alternate user/assistant/user/assistant/..."
                );
            }
        }
    }

    #[test]
    fn test_chat_template_valid_with_raise() {
        let mut env = Environment::new();
        env.add_function("raise_exception", raise_exception);

        let source = r#"
        {{ bos_token }}
        {% for message in messages %}
        {% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}
        {{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}
        {% endif %}
        {% if message['role'] == 'user' %}
        {{ '[INST] ' + message['content'] + ' [/INST]' }}
        {% elif message['role'] == 'assistant' %}
        {{ message['content'] + eos_token}}
        {% else %}
        {{ raise_exception('Only user and assistant roles are supported!') }}
        {% endif %}
        {% endfor %}"#;

        // trim all the whitespace
        let source = source
            .lines()
            .map(|line| line.trim())
            .collect::<Vec<&str>>()
            .join("");

        let tmpl = env.template_from_str(&source);

        let chat_template_inputs = ChatTemplateInputs {
            messages: vec![
                TextMessage {
                    role: "user".to_string(),
                    content: "Hi!".to_string(),
                },
                TextMessage {
                    role: "assistant".to_string(),
                    content: "Hello how can I help?".to_string(),
                },
                TextMessage {
                    role: "user".to_string(),
                    content: "What is Deep Learning?".to_string(),
                },
                TextMessage {
                    role: "assistant".to_string(),
                    content: "magic!".to_string(),
                },
            ],
            bos_token: Some("[BOS]"),
            eos_token: Some("[EOS]"),
            add_generation_prompt: true,
            ..Default::default()
        };

        let result = tmpl.unwrap().render(chat_template_inputs).unwrap();
        assert_eq!(result, "[BOS][INST] Hi! [/INST]Hello how can I help?[EOS][INST] What is Deep Learning? [/INST]magic![EOS]");
    }

    #[test]
    fn test_chat_template_valid_with_add_generation_prompt() {
        let mut env = Environment::new();
        env.add_function("raise_exception", raise_exception);

        let source = r#"
        {% for message in messages %}
        {{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}
        {% endfor %}
        {% if add_generation_prompt %}
            {{ '<|im_start|>assistant\n' }}
        {% endif %}"#;

        // trim all the whitespace
        let source = source
            .lines()
            .map(|line| line.trim())
            .collect::<Vec<&str>>()
            .join("");

        let tmpl = env.template_from_str(&source);

        let chat_template_inputs = ChatTemplateInputs {
            messages: vec![
                TextMessage {
                    role: "user".to_string(),
                    content: "Hi!".to_string(),
                },
                TextMessage {
                    role: "assistant".to_string(),
                    content: "Hello how can I help?".to_string(),
                },
                TextMessage {
                    role: "user".to_string(),
                    content: "What is Deep Learning?".to_string(),
                },
                TextMessage {
                    role: "assistant".to_string(),
                    content: "magic!".to_string(),
                },
            ],
            bos_token: Some("[BOS]"),
            eos_token: Some("[EOS]"),
            add_generation_prompt: true,
            ..Default::default()
        };

        let result = tmpl.unwrap().render(chat_template_inputs).unwrap();
        assert_eq!(result, "<|im_start|>user\nHi!<|im_end|>\n<|im_start|>assistant\nHello how can I help?<|im_end|>\n<|im_start|>user\nWhat is Deep Learning?<|im_end|>\n<|im_start|>assistant\nmagic!<|im_end|>\n<|im_start|>assistant\n");
    }

    struct ChatTemplateTestItem {
        name: &'static str,
        chat_template: &'static str,
        input: ChatTemplateInputs<'static>,
        target: &'static str,
    }

    #[test]
    fn test_many_chat_templates() {
        let example_chat = vec![
            TextMessage {
                role: "user".to_string(),
                content: "Hello, how are you?".to_string(),
            },
            TextMessage {
                role: "assistant".to_string(),
                content: "I'm doing great. How can I help you today?".to_string(),
            },
            TextMessage {
                role: "user".to_string(),
                content: "I'd like to show off how chat templating works!".to_string(),
            },
        ];

        let example_chat_with_system = [TextMessage {
            role: "system".to_string(),
            content: "You are a friendly chatbot who always responds in the style of a pirate"
                .to_string(),
        }]
        .iter()
        .chain(&example_chat)
        .cloned()
        .collect::<Vec<_>>();

        let test_default_templates = vec![
            ChatTemplateTestItem {
                name: "_base",
                chat_template: "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\\n' }}{% endif %}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some(""),
                    eos_token: Some(""),
                    ..Default::default()
                },
                target: "<|im_start|>user\nHello, how are you?<|im_end|>\n<|im_start|>assistant\nI'm doing great. How can I help you today?<|im_end|>\n<|im_start|>user\nI'd like to show off how chat templating works!<|im_end|>\n",
            },
            ChatTemplateTestItem {
                name: "blenderbot",
                chat_template: "{% for message in messages %}{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}{{ message['content'] }}{% if not loop.last %}{{ '  ' }}{% endif %}{% endfor %}{{ eos_token }}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some(""),
                    eos_token: Some("</s>"),
                    ..Default::default()
                },
                target: " Hello, how are you?  I'm doing great. How can I help you today?   I'd like to show off how chat templating works!</s>",
            },
            ChatTemplateTestItem {
                name: "blenderbot_small",
                chat_template: "{% for message in messages %}{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}{{ message['content'] }}{% if not loop.last %}{{ '  ' }}{% endif %}{% endfor %}{{ eos_token }}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some(""),
                    eos_token: Some("</s>"),
                    ..Default::default()
                },
                target: " Hello, how are you?  I'm doing great. How can I help you today?   I'd like to show off how chat templating works!</s>",
            },
            ChatTemplateTestItem {
                name: "bloom",
                chat_template: "{% for message in messages %}{{ message.content }}{{ eos_token }}{% endfor %}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some(""),
                    eos_token: Some("</s>"),
                    ..Default::default()
                },
                target: "Hello, how are you?</s>I'm doing great. How can I help you today?</s>I'd like to show off how chat templating works!</s>",
            },
            ChatTemplateTestItem {
                name: "gpt_neox",
                chat_template: "{% for message in messages %}{{ message.content }}{{ eos_token }}{% endfor %}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some(""),
                    eos_token: Some("<|endoftext|>"),
                    ..Default::default()
                },
                target: "Hello, how are you?<|endoftext|>I'm doing great. How can I help you today?<|endoftext|>I'd like to show off how chat templating works!<|endoftext|>",
            },
            ChatTemplateTestItem {
                name: "gpt2",
                chat_template: "{% for message in messages %}{{ message.content }}{{ eos_token }}{% endfor %}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some(""),
                    eos_token: Some("<|endoftext|>"),
                    ..Default::default()
                },
                target: "Hello, how are you?<|endoftext|>I'm doing great. How can I help you today?<|endoftext|>I'd like to show off how chat templating works!<|endoftext|>",
            },
            ChatTemplateTestItem {
                name: "llama",
                // NOTE: the `.strip()` has been replaced with `| trim` in the following template
                chat_template: "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif USE_DEFAULT_PROMPT == true and not '<<SYS>>' in messages[0]['content'] %}{% set loop_messages = messages %}{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token +'[INST] ' + content | trim + ' [/INST]' }}{% elif message['role'] == 'system' %}{{ '<<SYS>>\\n' + content | trim + '\\n<</SYS>>\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content | trim + ' ' + eos_token }}{% endif %}{% endfor %}",
                input: ChatTemplateInputs {
                    messages: example_chat_with_system.clone(),
                    add_generation_prompt: true,
                    bos_token: Some("<s>"),
                    eos_token: Some("</s>"),
                    ..Default::default()
                },
                target: "<s>[INST] <<SYS>>\nYou are a friendly chatbot who always responds in the style of a pirate\n<</SYS>>\n\nHello, how are you? [/INST] I'm doing great. How can I help you today? </s><s>[INST] I'd like to show off how chat templating works! [/INST]",
            },
            ChatTemplateTestItem {
                name: "whisper",
                chat_template: "{% for message in messages %}{{ message.content }}{{ eos_token }}{% endfor %}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: true,
                    bos_token: Some(""),
                    eos_token: Some("<|endoftext|>"),
                    ..Default::default()
                },
                target: "Hello, how are you?<|endoftext|>I'm doing great. How can I help you today?<|endoftext|>I'd like to show off how chat templating works!<|endoftext|>",
            },
        ];

        #[allow(unused_variables)] // name is unused
        for ChatTemplateTestItem {
            name,
            chat_template,
            input,
            target,
        } in test_default_templates
        {
            let mut env = Environment::new();
            env.add_function("raise_exception", raise_exception);
            let tmpl = env.template_from_str(chat_template);
            let result = tmpl.unwrap().render(input).unwrap();
            assert_eq!(result, target);
        }

        let test_custom_templates = vec![
            ChatTemplateTestItem {
                name: "HuggingFaceH4/zephyr-7b-beta (add_generation_prompt=false)",
                chat_template: "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\\n'  + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
                input: ChatTemplateInputs {
                    messages: example_chat_with_system.clone(),
                    add_generation_prompt: false,
                    bos_token: Some(""),
                    eos_token: Some("</s>"),
                    ..Default::default()
                },
                target: "<|system|>\nYou are a friendly chatbot who always responds in the style of a pirate</s><|user|>\nHello, how are you?</s><|assistant|>\nI'm doing great. How can I help you today?</s><|user|>\nI'd like to show off how chat templating works!</s>",
            },
            ChatTemplateTestItem {
                name: "HuggingFaceH4/zephyr-7b-beta (add_generation_prompt=true)",
                chat_template: "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\\n'  + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
                input: ChatTemplateInputs {
                    messages: vec![
                        TextMessage {
                            role: "system".to_string(),
                            content: "You are a friendly chatbot who always responds in the style of a pirate".to_string(),
                        },
                        TextMessage {
                            role: "user".to_string(),
                            content: "How many helicopters can a human eat in one sitting?".to_string(),
                        },
                    ],
                    add_generation_prompt: true,
                    bos_token: Some(""),
                    eos_token: Some("</s>"),
                    ..Default::default()
                },
                target: "<|system|>\nYou are a friendly chatbot who always responds in the style of a pirate</s><|user|>\nHow many helicopters can a human eat in one sitting?</s><|assistant|>",
            },
            ChatTemplateTestItem {
                name: "HuggingFaceH4/zephyr-7b-gemma-v0.1",
                chat_template: "{% if messages[0]['role'] == 'user' or messages[0]['role'] == 'system' %}{{ bos_token }}{% endif %}{% for message in messages %}{{ '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% elif messages[-1]['role'] == 'assistant' %}{{ eos_token }}{% endif %}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some("<bos>"),
                    eos_token: Some("<eos>"),
                    ..Default::default()
                },
                target: "<bos><|im_start|>user\nHello, how are you?<|im_end|>\n<|im_start|>assistant\nI'm doing great. How can I help you today?<|im_end|>\n<|im_start|>user\nI'd like to show off how chat templating works!<|im_end|>\n",
            },
            ChatTemplateTestItem {
                name: "mistralai/Mistral-7B-Instruct-v0.1",
                chat_template: "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some("<s>"),
                    eos_token: Some("</s>"),
                    ..Default::default()
                },
                target: "<s>[INST] Hello, how are you? [/INST]I'm doing great. How can I help you today?</s> [INST] I'd like to show off how chat templating works! [/INST]",
            },
            ChatTemplateTestItem {
                name: "mistralai/Mixtral-8x7B-Instruct-v0.1",
                chat_template: "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some("<s>"),
                    eos_token: Some("</s>"),
                    ..Default::default()
                },
                target: "<s>[INST] Hello, how are you? [/INST]I'm doing great. How can I help you today?</s>[INST] I'd like to show off how chat templating works! [/INST]",
            },
            ChatTemplateTestItem {
                name: "cognitivecomputations/dolphin-2.5-mixtral-8x7b",
                chat_template: "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\\n' }}{% endif %}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some("<s>"),
                    eos_token: Some("</s>"),
                    ..Default::default()
                },
                target: "<|im_start|>user\nHello, how are you?<|im_end|>\n<|im_start|>assistant\nI'm doing great. How can I help you today?<|im_end|>\n<|im_start|>user\nI'd like to show off how chat templating works!<|im_end|>\n",
            },
            ChatTemplateTestItem {
                name: "openchat/openchat-3.5-0106",
                // `.title()` has been replaced with `| upper` in the following template
                chat_template: "{{ bos_token }}{% for message in messages %}{{ 'GPT4 Correct ' + (message['role'] | title) + ': ' + message['content'] + '<|end_of_turn|>'}}{% endfor %}{% if add_generation_prompt %}{{ 'GPT4 Correct Assistant:' }}{% endif %}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some("<s>"),
                    eos_token: Some("</s>"),
                    ..Default::default()
                },
                target: "<s>GPT4 Correct User: Hello, how are you?<|end_of_turn|>GPT4 Correct Assistant: I'm doing great. How can I help you today?<|end_of_turn|>GPT4 Correct User: I'd like to show off how chat templating works!<|end_of_turn|>",
            },
            ChatTemplateTestItem {
                name: "upstage/SOLAR-10.7B-Instruct-v1.0",
                chat_template: "{% for message in messages %}{{ message.content }}{{ eos_token }}{% endfor %}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some("<s>"),
                    eos_token: Some("</s>"),
                    ..Default::default()
                },
                target: "Hello, how are you?</s>I'm doing great. How can I help you today?</s>I'd like to show off how chat templating works!</s>",
            },
            ChatTemplateTestItem {
                name: "codellama/CodeLlama-70b-Instruct-hf",
                // NOTE: `.strip()` has been replaced with `| trim` in the following template
                chat_template: "{% if messages[0]['role'] == 'system' %}{% set user_index = 1 %}{% else %}{% set user_index = 0 %}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != ((loop.index0 + user_index) % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{{ '<s>' }}{% endif %}{% set content = 'Source: ' + message['role'] + '\\n\\n ' + message['content'] | trim %}{{ content + ' <step> ' }}{% endfor %}{{'Source: assistant\\nDestination: user\\n\\n '}}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some("<s>"),
                    eos_token: Some("</s>"),
                    ..Default::default()
                },
                target: "<s>Source: user\n\n Hello, how are you? <step> Source: assistant\n\n I'm doing great. How can I help you today? <step> Source: user\n\n I'd like to show off how chat templating works! <step> Source: assistant\nDestination: user\n\n ",
            },
            ChatTemplateTestItem {
                name: "Deci/DeciLM-7B-instruct",
                chat_template: "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '### User:\\n' + message['content'] }}\n{% elif message['role'] == 'system' %}\n{{ '### System:\\n' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ '### Assistant:\\n'  + message['content'] }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '### Assistant:' }}\n{% endif %}\n{% endfor %}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some("<s>"),
                    eos_token: Some("</s>"),
                    ..Default::default()
                },
                target: "### User:\nHello, how are you?### Assistant:\nI'm doing great. How can I help you today?### User:\nI'd like to show off how chat templating works!",
            },
            ChatTemplateTestItem {
                name: "Qwen/Qwen1.5-72B-Chat",
                chat_template: "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\\nYou are a helpful assistant<|im_end|>\\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\\n' + message['content']}}{% if (loop.last and add_generation_prompt) or not loop.last %}{{ '<|im_end|>' + '\\n'}}{% endif %}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{ '<|im_start|>assistant\\n' }}{% endif %}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some("<s>"),
                    eos_token: Some("</s>"),
                    ..Default::default()
                },
                target: "<|im_start|>system\nYou are a helpful assistant<|im_end|>\n<|im_start|>user\nHello, how are you?<|im_end|>\n<|im_start|>assistant\nI'm doing great. How can I help you today?<|im_end|>\n<|im_start|>user\nI'd like to show off how chat templating works!",
            },
            ChatTemplateTestItem {
                name: "deepseek-ai/deepseek-llm-7b-chat",
                chat_template: "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\\n\\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some("<|begin▁of▁sentence|>"),
                    eos_token: Some("<|end▁of▁sentence|>"),
                    ..Default::default()
                },
                target: "<|begin▁of▁sentence|>User: Hello, how are you?\n\nAssistant: I'm doing great. How can I help you today?<|end▁of▁sentence|>User: I'd like to show off how chat templating works!\n\n",
            },
            ChatTemplateTestItem {
                name: "h2oai/h2o-danube-1.8b-chat",
                chat_template: "{% for message in messages %}{% if message['role'] == 'user' %}{{ '<|prompt|>' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ '<|system|>' + message['content'] + eos_token }}{% elif message['role'] == 'assistant' %}{{ '<|answer|>'  + message['content'] + eos_token }}{% endif %}{% if loop.last and add_generation_prompt %}{{ '<|answer|>' }}{% endif %}{% endfor %}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some("<s>"),
                    eos_token: Some("</s>"),
                    ..Default::default()
                },
                target: "<|prompt|>Hello, how are you?</s><|answer|>I'm doing great. How can I help you today?</s><|prompt|>I'd like to show off how chat templating works!</s>",
            },
            ChatTemplateTestItem {
                name: "internlm/internlm2-chat-7b",
                chat_template: "{% if messages[0]['role'] == 'user' or messages[0]['role'] == 'system' %}{{ bos_token }}{% endif %}{% for message in messages %}{{ '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\\n' }}{% elif messages[-1]['role'] == 'assistant' %}{{ eos_token }}{% endif %}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some("<s>"),
                    eos_token: Some("</s>"),
                    ..Default::default()
                },
                target: "<s><|im_start|>user\nHello, how are you?<|im_end|>\n<|im_start|>assistant\nI'm doing great. How can I help you today?<|im_end|>\n<|im_start|>user\nI'd like to show off how chat templating works!<|im_end|>\n",
            },
            ChatTemplateTestItem {
                name: "TheBloke/deepseek-coder-33B-instruct-AWQ",
                chat_template: "{%- set found_item = false -%}\n{%- for message in messages -%}\n    {%- if message['role'] == 'system' -%}\n        {%- set found_item = true -%}\n    {%- endif -%}\n{%- endfor -%}\n{%- if not found_item -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer.\\n'}}\n{%- endif %}\n{%- for message in messages %}\n    {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n    {%- else %}\n        {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n        {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{{'### Response:\\n'}}\n",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some("<|begin▁of▁sentence|>"),
                    eos_token: Some("<|EOT|>"),
                    ..Default::default()
                },
                target: "You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer.\n### Instruction:\nHello, how are you?\n### Response:\nI'm doing great. How can I help you today?\n<|EOT|>\n### Instruction:\nI'd like to show off how chat templating works!\n### Response:\n",
            },
            ChatTemplateTestItem {
                name: "ericzzz/falcon-rw-1b-chat",
                // `.strip()` has been replaced with `| trim` in the following template
                chat_template: "{% for message in messages %}{% if loop.index > 1 and loop.previtem['role'] != 'assistant' %}{{ ' ' }}{% endif %}{% if message['role'] == 'system' %}{{ '[SYS] ' + message['content'] | trim }}{% elif message['role'] == 'user' %}{{ '[INST] ' + message['content'] | trim }}{% elif message['role'] == 'assistant' %}{{ '[RESP] '  + message['content'] + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ ' [RESP] ' }}{% endif %}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some("<|endoftext|>"),
                    eos_token: Some("<|endoftext|>"),
                    ..Default::default()
                },
                target: "[INST] Hello, how are you? [RESP] I'm doing great. How can I help you today?<|endoftext|>[INST] I'd like to show off how chat templating works!",
            },
            ChatTemplateTestItem {
                name: "abacusai/Smaug-34B-v0.1",
                chat_template: "{%- for idx in range(0, messages|length) -%}\n{%- if messages[idx]['role'] == 'user' -%}\n{%- if idx > 1 -%}\n{{- bos_token + '[INST] ' + messages[idx]['content'] + ' [/INST]' -}}\n{%- else -%}\n{{- messages[idx]['content'] + ' [/INST]' -}}\n{%- endif -%}\n{% elif messages[idx]['role'] == 'system' %}\n{{- '[INST] <<SYS>>\\n' + messages[idx]['content'] + '\\n<</SYS>>\\n\\n' -}}\n{%- elif messages[idx]['role'] == 'assistant' -%}\n{{- ' '  + messages[idx]['content'] + ' ' + eos_token -}}\n{% endif %}\n{% endfor %}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some("<s>"),
                    eos_token: Some("</s>"),
                    ..Default::default()
                },
                target: "Hello, how are you? [/INST] I'm doing great. How can I help you today? </s><s>[INST] I'd like to show off how chat templating works! [/INST]",
            },
            ChatTemplateTestItem {
                name: "maywell/Synatra-Mixtral-8x7B",
                chat_template: "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n{% for message in messages %}{% if message['role'] == 'user' %}### Instruction:\n{{ message['content']|trim -}}{% if not loop.last %}{% endif %}\n{% elif message['role'] == 'assistant' %}### Response:\n{{ message['content']|trim -}}{% if not loop.last %}{% endif %}\n{% elif message['role'] == 'system' %}{{ message['content']|trim -}}{% if not loop.last %}{% endif %}\n{% endif %}\n{% endfor %}\n{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}\n### Response:\n{% endif %}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some("<s>"),
                    eos_token: Some("</s>"),
                    ..Default::default()
                },
                target: "Below is an instruction that describes a task. Write a response that appropriately completes the request.### Instruction:Hello, how are you?### Response:I'm doing great. How can I help you today?### Instruction:I'd like to show off how chat templating works!",
            },
            ChatTemplateTestItem {
                name: "deepseek-ai/deepseek-coder-33b-instruct",
                chat_template: "{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n    {%- if message['role'] == 'system' -%}\n        {%- set ns.found = true -%}\n    {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n    {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n    {%- else %}\n        {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n        {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}",
                input: ChatTemplateInputs {
                    messages: example_chat.clone(),
                    add_generation_prompt: false,
                    bos_token: Some("<|begin▁of▁sentence|>"),
                    eos_token: Some("</EOT>"),
                    ..Default::default()
                },
                target: "<|begin▁of▁sentence|>You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\n### Instruction:\nHello, how are you?\n### Response:\nI'm doing great. How can I help you today?\n<|EOT|>\n### Instruction:\nI'd like to show off how chat templating works!\n",
            },
            // NOT INCLUDED
            // - meetkai/functionary-medium-v2.2
            // - fireworks-ai/firefunction-v1
            // https://github
            ChatTemplateTestItem {
                name: "maywell/PiVoT-MoE",
                chat_template: "{{ (messages|selectattr('role', 'equalto', 'system')|list|last).content|trim if (messages|selectattr('role', 'equalto', 'system')|list) else '' }}{% for message in messages %}{% if message['role'] == 'system' %}{{ message['content']|trim }}{% elif message['role'] == 'user' %}### Instruction: {{ message['content']|trim }}{% elif message['role'] == 'assistant' %}### Response: {{ message['content']|trim }}{% elif message['role'] == 'user_context' %}### Input: {{ message['content']|trim }}{% endif %}{% if not loop.last %}\n{% endif %}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}### Response:{% endif %}",
                input: ChatTemplateInputs {
                    messages: example_chat_with_system.clone(),
                    add_generation_prompt: false,
                    bos_token: Some("<s>"),
                    eos_token: Some("</s>"),
                    ..Default::default()
                },
                target: "You are a friendly chatbot who always responds in the style of a pirateYou are a friendly chatbot who always responds in the style of a pirate### Instruction: Hello, how are you?### Response: I'm doing great. How can I help you today?### Instruction: I'd like to show off how chat templating works!",
            },
        ];

        for ChatTemplateTestItem {
            name,
            chat_template,
            input,
            target,
        } in test_custom_templates
        {
            let mut env = Environment::new();
            env.add_function("raise_exception", raise_exception);
            // collapse the template to a single line, trimming the whitespace around each line
            let chat_template = chat_template
                .lines()
                .map(|line| line.trim())
                .collect::<Vec<&str>>()
                .join("");

            let tmpl = env
                .template_from_str(&chat_template)
                .unwrap_or_else(|e| panic!("failed to parse chat template for {name}: {e}"));
            let result = tmpl
                .render(input)
                .unwrap_or_else(|e| panic!("failed to render chat template for {name}: {e}"));
            assert_eq!(result, target, "chat template output mismatch for {name}");
        }
    }
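
    // A minimal sketch of the failure path, using only items already in scope in this
    // module (`Environment`, `raise_exception`, `ChatTemplateInputs`): a template that
    // calls `raise_exception` should surface the message as a render error rather than
    // panic. The template string and error text below are illustrative, not taken from
    // any model repository.
    #[test]
    fn test_chat_template_raise_exception_surfaces_as_render_error() {
        let mut env = Environment::new();
        env.add_function("raise_exception", raise_exception);
        let tmpl = env
            .template_from_str("{{ raise_exception('unsupported role') }}")
            .unwrap();
        let result = tmpl.render(ChatTemplateInputs::default());
        let err = result.expect_err("raise_exception should abort rendering");
        assert!(err.to_string().contains("unsupported role"));
    }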
}