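//! Integration tests for chat template rendering in `sglang_router_rs`:
//! applying Jinja-style templates to serialized `ChatMessage` values, passing
//! special tokens through `template_kwargs`, and detecting a template's
//! expected content format.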
use sglang_router_rs::protocols::spec;
use sglang_router_rs::tokenizer::chat_template::{
    detect_chat_template_content_format, ChatTemplateContentFormat, ChatTemplateParams,
    ChatTemplateProcessor,
};

#[test]
fn test_simple_chat_template() {
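    // A minimal template that wraps each message in <|role|>...<|end|> markers
    // and optionally appends an assistant generation prompt.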
    let template = r#"
{%- for message in messages %}
<|{{ message.role }}|>{{ message.content }}<|end|>
{% endfor -%}
{%- if add_generation_prompt %}
<|assistant|>
{%- endif %}
"#;

    let processor = ChatTemplateProcessor::new(template.to_string());

    let messages = [spec::ChatMessage::User {
        role: "user".to_string(),
        content: spec::UserMessageContent::Text("Test".to_string()),
        name: None,
    }];

    // Convert to JSON values like the router does
    let message_values: Vec<serde_json::Value> = messages
        .iter()
        .map(|msg| serde_json::to_value(msg).unwrap())
        .collect();

    let params = ChatTemplateParams {
        add_generation_prompt: true,
        ..Default::default()
    };
    let result = processor
        .apply_chat_template(&message_values, params)
        .unwrap();
    assert!(result.contains("<|user|>Test<|end|>"));
    assert!(result.contains("<|assistant|>"));
}

#[test]
fn test_chat_template_with_tokens() {
    // Template that uses template kwargs for tokens
    let template = r#"
{%- if bos_token -%}{{ bos_token }}{%- endif -%}
{%- for message in messages -%}
{{ message.role }}: {{ message.content }}{%- if eos_token -%}{{ eos_token }}{%- endif -%}
{% endfor -%}
"#;

    let processor = ChatTemplateProcessor::new(template.to_string());

    let messages = [spec::ChatMessage::User {
        role: "user".to_string(),
        content: spec::UserMessageContent::Text("Test".to_string()),
        name: None,
    }];

    // Convert to JSON values like the router does
    let message_values: Vec<serde_json::Value> = messages
        .iter()
        .map(|msg| serde_json::to_value(msg).unwrap())
        .collect();

    // Use template_kwargs to pass tokens
    let mut template_kwargs = std::collections::HashMap::new();
    template_kwargs.insert(
        "bos_token".to_string(),
        serde_json::Value::String("<s>".to_string()),
    );
    template_kwargs.insert(
        "eos_token".to_string(),
        serde_json::Value::String("</s>".to_string()),
    );

    let params = ChatTemplateParams {
        template_kwargs: Some(&template_kwargs),
        ..Default::default()
    };

    let result = processor
        .apply_chat_template(&message_values, params)
        .unwrap();
    assert!(result.contains("<s>"));
    assert!(result.contains("</s>"));
}

#[test]
fn test_llama_style_template() {
    // Test a Llama-style chat template
    let template = r#"
{%- if messages[0]['role'] == 'system' -%}
    {%- set system_message = messages[0]['content'] -%}
    {%- set messages = messages[1:] -%}
{%- else -%}
    {%- set system_message = '' -%}
{%- endif -%}

{{- bos_token if bos_token else '<|begin_of_text|>' }}
{%- if system_message %}
{{- '<|start_header_id|>system<|end_header_id|>\n\n' + system_message + '<|eot_id|>' }}
{%- endif %}

{%- for message in messages %}
    {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] + '<|eot_id|>' }}
{%- endfor %}

{%- if add_generation_prompt %}
    {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif %}
"#;

    let processor = ChatTemplateProcessor::new(template.to_string());

    let messages = vec![
        spec::ChatMessage::System {
            role: "system".to_string(),
            content: "You are a helpful assistant".to_string(),
            name: None,
        },
        spec::ChatMessage::User {
            role: "user".to_string(),
            content: spec::UserMessageContent::Text("What is 2+2?".to_string()),
            name: None,
        },
    ];

    // Convert to JSON values
    let json_messages: Vec<serde_json::Value> = messages
        .iter()
        .map(|msg| serde_json::to_value(msg).unwrap())
        .collect();

    // Use template_kwargs to pass the token
    let mut template_kwargs = std::collections::HashMap::new();
    template_kwargs.insert(
        "bos_token".to_string(),
        serde_json::Value::String("<|begin_of_text|>".to_string()),
    );

    let params = ChatTemplateParams {
        add_generation_prompt: true,
        template_kwargs: Some(&template_kwargs),
        ..Default::default()
    };
    let result = processor
        .apply_chat_template(&json_messages, params)
        .unwrap();

    // Check that the result contains expected markers
    assert!(result.contains("<|begin_of_text|>"));
    assert!(result.contains("<|start_header_id|>system<|end_header_id|>"));
    assert!(result.contains("You are a helpful assistant"));
    assert!(result.contains("<|start_header_id|>user<|end_header_id|>"));
    assert!(result.contains("What is 2+2?"));
    assert!(result.contains("<|start_header_id|>assistant<|end_header_id|>"));
}

#[test]
fn test_chatml_template() {
    // Test a ChatML-style template
    let template = r#"
{%- for message in messages %}
    {{- '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>\n' }}
{%- endfor %}
{%- if add_generation_prompt %}
    {{- '<|im_start|>assistant\n' }}
{%- endif %}
"#;

    let processor = ChatTemplateProcessor::new(template.to_string());

    let messages = vec![
        spec::ChatMessage::User {
            role: "user".to_string(),
            content: spec::UserMessageContent::Text("Hello".to_string()),
            name: None,
        },
        spec::ChatMessage::Assistant {
            role: "assistant".to_string(),
            content: Some("Hi there!".to_string()),
            name: None,
            tool_calls: None,
            function_call: None,
            reasoning_content: None,
        },
        spec::ChatMessage::User {
            role: "user".to_string(),
            content: spec::UserMessageContent::Text("How are you?".to_string()),
            name: None,
        },
    ];

    // Convert to JSON values
    let json_messages: Vec<serde_json::Value> = messages
        .iter()
        .map(|msg| serde_json::to_value(msg).unwrap())
        .collect();

    let result = processor
        .apply_chat_template(
            &json_messages,
            ChatTemplateParams {
                add_generation_prompt: true,
                ..Default::default()
            },
        )
        .unwrap();

    // Check ChatML format
    assert!(result.contains("<|im_start|>user\nHello<|im_end|>"));
    assert!(result.contains("<|im_start|>assistant\nHi there!<|im_end|>"));
    assert!(result.contains("<|im_start|>user\nHow are you?<|im_end|>"));
    assert!(result.ends_with("<|im_start|>assistant\n"));
}

#[test]
fn test_template_without_generation_prompt() {
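    // The same template is rendered twice below: once with default params and
    // once with add_generation_prompt enabled, to exercise the conditional branch.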
    let template = r#"
{%- for message in messages -%}
{{ message.role }}: {{ message.content }}
{% endfor -%}
{%- if add_generation_prompt -%}
assistant:
{%- endif -%}
"#;

    let processor = ChatTemplateProcessor::new(template.to_string());

    let messages = [spec::ChatMessage::User {
        role: "user".to_string(),
        content: spec::UserMessageContent::Text("Test".to_string()),
        name: None,
    }];

    // Convert to JSON values
    let json_messages: Vec<serde_json::Value> = messages
        .iter()
        .map(|msg| serde_json::to_value(msg).unwrap())
        .collect();

    // Test without generation prompt
    let result = processor
        .apply_chat_template(&json_messages, ChatTemplateParams::default())
        .unwrap();
    assert_eq!(result.trim(), "user: Test");

    // Test with generation prompt
    let result_with_prompt = processor
        .apply_chat_template(
            &json_messages,
            ChatTemplateParams {
                add_generation_prompt: true,
                ..Default::default()
            },
        )
        .unwrap();
    assert!(result_with_prompt.contains("assistant:"));
}

#[test]
fn test_empty_messages_template() {
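    // An empty message list should render to an empty string.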
    let template = r#"{% for msg in messages %}{{ msg.role }}: {{ msg.content }}\n{% endfor %}"#;

    let processor = ChatTemplateProcessor::new(template.to_string());

    let messages: Vec<serde_json::Value> = vec![];
    let result = processor
        .apply_chat_template(&messages, ChatTemplateParams::default())
        .unwrap();
    assert_eq!(result, "");
}

#[test]
fn test_content_format_detection() {
    // Test string format detection
    let string_template = r#"
{%- for message in messages -%}
{{ message.role }}: {{ message.content }}
{%- endfor -%}
"#;
    assert_eq!(
        detect_chat_template_content_format(string_template),
        ChatTemplateContentFormat::String
    );

    // Test OpenAI format detection
    let openai_template = r#"
{%- for message in messages -%}
  {%- for content in message.content -%}
    {{ content.type }}: {{ content.text }}
  {%- endfor -%}
{%- endfor -%}
"#;
    assert_eq!(
        detect_chat_template_content_format(openai_template),
        ChatTemplateContentFormat::OpenAI
    );
}

#[test]
fn test_template_with_multimodal_content() {
    // Test that multimodal messages work correctly when serialized to JSON
    let template = r#"
{%- for message in messages %}
{{ message.role }}:
{%- if message.content is string %}
{{ message.content }}
{%- else %}
{%- for part in message.content %}
  {%- if part.type == "text" %}
{{ part.text }}
  {%- elif part.type == "image_url" %}
[IMAGE]
  {%- endif %}
{%- endfor %}
{%- endif %}
{% endfor %}
"#;

    let processor = ChatTemplateProcessor::new(template.to_string());

    let messages = [spec::ChatMessage::User {
        role: "user".to_string(),
        content: spec::UserMessageContent::Parts(vec![
            spec::ContentPart::Text {
                text: "Look at this:".to_string(),
            },
            spec::ContentPart::ImageUrl {
                image_url: spec::ImageUrl {
                    url: "https://example.com/image.jpg".to_string(),
                    detail: None,
                },
            },
        ]),
        name: None,
    }];

    // Convert to JSON values
    let json_messages: Vec<serde_json::Value> = messages
        .iter()
        .map(|msg| serde_json::to_value(msg).unwrap())
        .collect();

    let result = processor
        .apply_chat_template(&json_messages, ChatTemplateParams::default())
        .unwrap();

    // Should contain both text and image parts
    assert!(result.contains("user:"));
    assert!(result.contains("Look at this:"));
    assert!(result.contains("[IMAGE]"));
}