assert_eq!(result,"<|im_start|>user\nHi!<|im_end|>\n<|im_start|>assistant\nHello how can I help?<|im_end|>\n<|im_start|>user\nWhat is Deep Learning?<|im_end|>\n<|im_start|>assistant\nmagic!<|im_end|>\n<|im_start|>assistant\n");
}
struct ChatTemplateTestItem {
name: &'static str,
chat_template: &'static str,
input: ChatTemplateInputs<'static>,
target: &'static str,
}
#[test]
fn test_many_chat_templates() {
let example_chat = vec![
Message{
role:"user".to_string(),
content:Some("Hello, how are you?".to_string()),
name:None,
tool_calls:None,
},
Message{
role:"assistant".to_string(),
content:Some("I'm doing great. How can I help you today?".to_string()),
name:None,
tool_calls:None,
},
Message{
role:"user".to_string(),
content:Some("I'd like to show off how chat templating works!".to_string()),
name:None,
tool_calls:None,
},
];
let example_chat_with_system = vec![Message {
role:"system".to_string(),
content:Some(
"You are a friendly chatbot who always responds in the style of a pirate"
.to_string(),
),
name:None,
tool_calls:None,
}]
.iter()
.chain(&example_chat)
.cloned()
.collect::<Vec<_>>();
let test_default_templates = vec![
ChatTemplateTestItem{
name:"_base",
chat_template:"{% for message in messages %}{{'<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\\n' }}{% endif %}",
input:ChatTemplateInputs{
messages:example_chat.clone(),
add_generation_prompt:false,
bos_token:Some(""),
eos_token:Some(""),
..Default::default()
},
target:"<|im_start|>user\nHello, how are you?<|im_end|>\n<|im_start|>assistant\nI'm doing great. How can I help you today?<|im_end|>\n<|im_start|>user\nI'd like to show off how chat templating works!<|im_end|>\n",
},
ChatTemplateTestItem{
name:"blenderbot",
chat_template:"{% for message in messages %}{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}{{ message['content'] }}{% if not loop.last %}{{ ' ' }}{% endif %}{% endfor %}{{ eos_token }}",
input:ChatTemplateInputs{
messages:example_chat.clone(),
add_generation_prompt:false,
bos_token:Some(""),
eos_token:Some("</s>"),
..Default::default()
},
target:" Hello, how are you? I'm doing great. How can I help you today? I'd like to show off how chat templating works!</s>",
},
ChatTemplateTestItem{
name:"blenderbot_small",
chat_template:"{% for message in messages %}{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}{{ message['content'] }}{% if not loop.last %}{{ ' ' }}{% endif %}{% endfor %}{{ eos_token }}",
input:ChatTemplateInputs{
messages:example_chat.clone(),
add_generation_prompt:false,
bos_token:Some(""),
eos_token:Some("</s>"),
..Default::default()
},
target:" Hello, how are you? I'm doing great. How can I help you today? I'd like to show off how chat templating works!</s>",
},
ChatTemplateTestItem{
name:"bloom",
chat_template:"{% for message in messages %}{{ message.content }}{{ eos_token }}{% endfor %}",
input:ChatTemplateInputs{
messages:example_chat.clone(),
add_generation_prompt:false,
bos_token:Some(""),
eos_token:Some("</s>"),
..Default::default()
},
target:"Hello, how are you?</s>I'm doing great. How can I help you today?</s>I'd like to show off how chat templating works!</s>",
},
ChatTemplateTestItem{
name:"gpt_neox",
chat_template:"{% for message in messages %}{{ message.content }}{{ eos_token }}{% endfor %}",
input:ChatTemplateInputs{
messages:example_chat.clone(),
add_generation_prompt:false,
bos_token:Some(""),
eos_token:Some("<|endoftext|>"),
..Default::default()
},
target:"Hello, how are you?<|endoftext|>I'm doing great. How can I help you today?<|endoftext|>I'd like to show off how chat templating works!<|endoftext|>",
},
ChatTemplateTestItem{
name:"gpt2",
chat_template:"{% for message in messages %}{{ message.content }}{{ eos_token }}{% endfor %}",
input:ChatTemplateInputs{
messages:example_chat.clone(),
add_generation_prompt:false,
bos_token:Some(""),
eos_token:Some("<|endoftext|>"),
..Default::default()
},
target:"Hello, how are you?<|endoftext|>I'm doing great. How can I help you today?<|endoftext|>I'd like to show off how chat templating works!<|endoftext|>",
},
ChatTemplateTestItem{
name:"llama",
// NOTE: the `.strip()` has been replaced with `| trim` in the following template
chat_template:"{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif USE_DEFAULT_PROMPT == true and not '<<SYS>>' in messages[0]['content'] %}{% set loop_messages = messages %}{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token +'[INST] ' + content | trim + ' [/INST]' }}{% elif message['role'] == 'system' %}{{ '<<SYS>>\\n' + content | trim + '\\n<</SYS>>\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content | trim + ' ' + eos_token }}{% endif %}{% endfor %}",
input:ChatTemplateInputs{
messages:example_chat_with_system.clone(),
add_generation_prompt:true,
bos_token:Some("<s>"),
eos_token:Some("</s>"),
..Default::default()
},
target:"<s>[INST] <<SYS>>\nYou are a friendly chatbot who always responds in the style of a pirate\n<</SYS>>\n\nHello, how are you? [/INST] I'm doing great. How can I help you today? </s><s>[INST] I'd like to show off how chat templating works! [/INST]",
},
ChatTemplateTestItem{
name:"whisper",
chat_template:"{% for message in messages %}{{ message.content }}{{ eos_token }}{% endfor %}",
input:ChatTemplateInputs{
messages:example_chat.clone(),
add_generation_prompt:true,
bos_token:Some(""),
eos_token:Some("<|endoftext|>"),
..Default::default()
},
target:"Hello, how are you?<|endoftext|>I'm doing great. How can I help you today?<|endoftext|>I'd like to show off how chat templating works!<|endoftext|>",
target:"<|system|>\nYou are a friendly chatbot who always responds in the style of a pirate</s><|user|>\nHello, how are you?</s><|assistant|>\nI'm doing great. How can I help you today?</s><|user|>\nI'd like to show off how chat templating works!</s>",
content:Some("You are a friendly chatbot who always responds in the style of a pirate".to_string()),
name:None,
tool_calls:None,
},
Message{
role:"user".to_string(),
content:Some("How many helicopters can a human eat in one sitting?".to_string()),
name:None,
tool_calls:None,
},
],
add_generation_prompt:true,
bos_token:Some(""),
eos_token:Some("</s>"),
..Default::default()
},
target:"<|system|>\nYou are a friendly chatbot who always responds in the style of a pirate</s><|user|>\nHow many helicopters can a human eat in one sitting?</s><|assistant|>",
},
ChatTemplateTestItem{
name:"HuggingFaceH4/zephyr-7b-gemma-v0.1",
chat_template:"{% if messages[0]['role'] == 'user' or messages[0]['role'] == 'system' %}{{ bos_token }}{% endif %}{% for message in messages %}{{ '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% elif messages[-1]['role'] == 'assistant' %}{{ eos_token }}{% endif %}",
input:ChatTemplateInputs{
messages:example_chat.clone(),
add_generation_prompt:false,
bos_token:Some("<bos>"),
eos_token:Some("<eos>"),
..Default::default()
},
target:"<bos><|im_start|>user\nHello, how are you?<|im_end|>\n<|im_start|>assistant\nI'm doing great. How can I help you today?<|im_end|>\n<|im_start|>user\nI'd like to show off how chat templating works!<|im_end|>\n",
},
ChatTemplateTestItem{
name:"mistralai/Mistral-7B-Instruct-v0.1",
chat_template:"{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
input:ChatTemplateInputs{
messages:example_chat.clone(),
add_generation_prompt:false,
bos_token:Some("<s>"),
eos_token:Some("</s>"),
..Default::default()
},
target:"<s>[INST] Hello, how are you? [/INST]I'm doing great. How can I help you today?</s> [INST] I'd like to show off how chat templating works! [/INST]",
},
ChatTemplateTestItem{
name:"mistralai/Mixtral-8x7B-Instruct-v0.1",
chat_template:"{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
input:ChatTemplateInputs{
messages:example_chat.clone(),
add_generation_prompt:false,
bos_token:Some("<s>"),
eos_token:Some("</s>"),
..Default::default()
},
target:"<s>[INST] Hello, how are you? [/INST]I'm doing great. How can I help you today?</s>[INST] I'd like to show off how chat templating works! [/INST]",
chat_template:"{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\\n' }}{% endif %}",
input:ChatTemplateInputs{
messages:example_chat.clone(),
add_generation_prompt:false,
bos_token:Some("<s>"),
eos_token:Some("</s>"),
..Default::default()
},
target:"<|im_start|>user\nHello, how are you?<|im_end|>\n<|im_start|>assistant\nI'm doing great. How can I help you today?<|im_end|>\n<|im_start|>user\nI'd like to show off how chat templating works!<|im_end|>\n",
},
ChatTemplateTestItem{
name:"openchat/openchat-3.5-0106",
// `.title()` has been replaced with `| upper` in the following template
target:"<s>GPT4 Correct User: Hello, how are you?<|end_of_turn|>GPT4 Correct Assistant: I'm doing great. How can I help you today?<|end_of_turn|>GPT4 Correct User: I'd like to show off how chat templating works!<|end_of_turn|>",
},
ChatTemplateTestItem{
name:"upstage/SOLAR-10.7B-Instruct-v1.0",
chat_template:"{% for message in messages %}{{ message.content }}{{ eos_token }}{% endfor %}",
input:ChatTemplateInputs{
messages:example_chat.clone(),
add_generation_prompt:false,
bos_token:Some("<s>"),
eos_token:Some("</s>"),
..Default::default()
},
target:"Hello, how are you?</s>I'm doing great. How can I help you today?</s>I'd like to show off how chat templating works!</s>",
},
ChatTemplateTestItem{
name:"codellama/CodeLlama-70b-Instruct-hf",
// NOTE: `.strip()` has been replaced with `| trim` in the following template
chat_template:"{% if messages[0]['role'] == 'system' %}{% set user_index = 1 %}{% else %}{% set user_index = 0 %}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != ((loop.index0 + user_index) % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{{ '<s>' }}{% endif %}{% set content = 'Source: ' + message['role'] + '\\n\\n ' + message['content'] | trim %}{{ content + ' <step> ' }}{% endfor %}{{'Source: assistant\\nDestination: user\\n\\n '}}",
input:ChatTemplateInputs{
messages:example_chat.clone(),
add_generation_prompt:false,
bos_token:Some("<s>"),
eos_token:Some("</s>"),
..Default::default()
},
target:"<s>Source: user\n\n Hello, how are you? <step> Source: assistant\n\n I'm doing great. How can I help you today? <step> Source: user\n\n I'd like to show off how chat templating works! <step> Source: assistant\nDestination: user\n\n ",
},
ChatTemplateTestItem{
name:"Deci/DeciLM-7B-instruct",
chat_template:"{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '### User:\\n' + message['content'] }}\n{% elif message['role'] == 'system' %}\n{{ '### System:\\n' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ '### Assistant:\\n' + message['content'] }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '### Assistant:' }}\n{% endif %}\n{% endfor %}",
input:ChatTemplateInputs{
messages:example_chat.clone(),
add_generation_prompt:false,
bos_token:Some("<s>"),
eos_token:Some("</s>"),
..Default::default()
},
target:"### User:\nHello, how are you?### Assistant:\nI'm doing great. How can I help you today?### User:\nI'd like to show off how chat templating works!",
},
ChatTemplateTestItem{
name:"Qwen/Qwen1.5-72B-Chat",
chat_template:"{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\\nYou are a helpful assistant<|im_end|>\\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\\n' + message['content']}}{% if (loop.last and add_generation_prompt) or not loop.last %}{{ '<|im_end|>' + '\\n'}}{% endif %}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{ '<|im_start|>assistant\\n' }}{% endif %}",
input:ChatTemplateInputs{
messages:example_chat.clone(),
add_generation_prompt:false,
bos_token:Some("<s>"),
eos_token:Some("</s>"),
..Default::default()
},
target:"<|im_start|>system\nYou are a helpful assistant<|im_end|>\n<|im_start|>user\nHello, how are you?<|im_end|>\n<|im_start|>assistant\nI'm doing great. How can I help you today?<|im_end|>\n<|im_start|>user\nI'd like to show off how chat templating works!",
},
ChatTemplateTestItem{
name:"deepseek-ai/deepseek-llm-7b-chat",
chat_template:"{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\\n\\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}",
input:ChatTemplateInputs{
messages:example_chat.clone(),
add_generation_prompt:false,
bos_token:Some("<|begin▁of▁sentence|>"),
eos_token:Some("<|end▁of▁sentence|>"),
..Default::default()
},
target:"<|begin▁of▁sentence|>User: Hello, how are you?\n\nAssistant: I'm doing great. How can I help you today?<|end▁of▁sentence|>User: I'd like to show off how chat templating works!\n\n",
target:"<|prompt|>Hello, how are you?</s><|answer|>I'm doing great. How can I help you today?</s><|prompt|>I'd like to show off how chat templating works!</s>",
},
ChatTemplateTestItem{
name:"internlm/internlm2-chat-7b",
chat_template:"{% if messages[0]['role'] == 'user' or messages[0]['role'] == 'system' %}{{ bos_token }}{% endif %}{% for message in messages %}{{ '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\\n' }}{% elif messages[-1]['role'] == 'assistant' %}{{ eos_token }}{% endif %}",
input:ChatTemplateInputs{
messages:example_chat.clone(),
add_generation_prompt:false,
bos_token:Some("<s>"),
eos_token:Some("</s>"),
..Default::default()
},
target:"<s><|im_start|>user\nHello, how are you?<|im_end|>\n<|im_start|>assistant\nI'm doing great. How can I help you today?<|im_end|>\n<|im_start|>user\nI'd like to show off how chat templating works!<|im_end|>\n",
},
ChatTemplateTestItem{
name:"TheBloke/deepseek-coder-33B-instruct-AWQ",
chat_template:"{%- set found_item = false -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set found_item = true -%}\n {%- endif -%}\n{%- endfor -%}\n{%- if not found_item -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer.\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{{'### Response:\\n'}}\n",
input:ChatTemplateInputs{
messages:example_chat.clone(),
add_generation_prompt:false,
bos_token:Some("<|begin▁of▁sentence|>"),
eos_token:Some("<|EOT|>"),
..Default::default()
},
target:"You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer.\n### Instruction:\nHello, how are you?\n### Response:\nI'm doing great. How can I help you today?\n<|EOT|>\n### Instruction:\nI'd like to show off how chat templating works!\n### Response:\n",
},
ChatTemplateTestItem{
name:"ericzzz/falcon-rw-1b-chat",
// `.strip()` has been replaced with `| trim` in the following template
chat_template:"{% for message in messages %}{% if loop.index > 1 and loop.previtem['role'] != 'assistant' %}{{ ' ' }}{% endif %}{% if message['role'] == 'system' %}{{ '[SYS] ' + message['content'] | trim }}{% elif message['role'] == 'user' %}{{ '[INST] ' + message['content'] | trim }}{% elif message['role'] == 'assistant' %}{{ '[RESP] ' + message['content'] + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ ' [RESP] ' }}{% endif %}",
input:ChatTemplateInputs{
messages:example_chat.clone(),
add_generation_prompt:false,
bos_token:Some("<|endoftext|>"),
eos_token:Some("<|endoftext|>"),
..Default::default()
},
target:"[INST] Hello, how are you? [RESP] I'm doing great. How can I help you today?<|endoftext|>[INST] I'd like to show off how chat templating works!",
target:"Hello, how are you? [/INST] I'm doing great. How can I help you today? </s><s>[INST] I'd like to show off how chat templating works! [/INST]",
},
ChatTemplateTestItem{
name:"maywell/Synatra-Mixtral-8x7B",
chat_template:"Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n{% for message in messages %}{% if message['role'] == 'user' %}### Instruction:\n{{ message['content']|trim -}}{% if not loop.last %}{% endif %}\n{% elif message['role'] == 'assistant' %}### Response:\n{{ message['content']|trim -}}{% if not loop.last %}{% endif %}\n{% elif message['role'] == 'system' %}{{ message['content']|trim -}}{% if not loop.last %}{% endif %}\n{% endif %}\n{% endfor %}\n{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}\n### Response:\n{% endif %}",
input:ChatTemplateInputs{
messages:example_chat.clone(),
add_generation_prompt:false,
bos_token:Some("<s>"),
eos_token:Some("</s>"),
..Default::default()
},
target:"Below is an instruction that describes a task. Write a response that appropriately completes the request.### Instruction:Hello, how are you?### Response:I'm doing great. How can I help you today?### Instruction:I'd like to show off how chat templating works!",
},
ChatTemplateTestItem{
name:"deepseek-ai/deepseek-coder-33b-instruct",
chat_template:"{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}",
input:ChatTemplateInputs{
messages:example_chat.clone(),
add_generation_prompt:false,
bos_token:Some("<|begin▁of▁sentence|>"),
eos_token:Some("</EOT>"),
..Default::default()
},
target:"<|begin▁of▁sentence|>You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\n### Instruction:\nHello, how are you?\n### Response:\nI'm doing great. How can I help you today?\n<|EOT|>\n### Instruction:\nI'd like to show off how chat templating works!\n",
},
// NOT INCLUDED
// - meetkai/functionary-medium-v2.2
// - fireworks-ai/firefunction-v1
// https://github
ChatTemplateTestItem{
name:"maywell/PiVoT-MoE",
chat_template:"{{ (messages|selectattr('role', 'equalto', 'system')|list|last).content|trim if (messages|selectattr('role', 'equalto', 'system')|list) else '' }}{% for message in messages %}{% if message['role'] == 'system' %}{{ message['content']|trim }}{% elif message['role'] == 'user' %}### Instruction: {{ message['content']|trim }}{% elif message['role'] == 'assistant' %}### Response: {{ message['content']|trim }}{% elif message['role'] == 'user_context' %}### Input: {{ message['content']|trim }}{% endif %}{% if not loop.last %}\n{% endif %}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}### Response:{% endif %}",
input:ChatTemplateInputs{
messages:example_chat_with_system.clone(),
add_generation_prompt:false,
bos_token:Some("<s>"),
eos_token:Some("</s>"),
..Default::default()
},
target:"You are a friendly chatbot who always responds in the style of a pirateYou are a friendly chatbot who always responds in the style of a pirate### Instruction: Hello, how are you?### Response: I'm doing great. How can I help you today?### Instruction: I'd like to show off how chat templating works!",
key=['x_size']  # the two above configs will be evaluated anytime
                # the value of x_size changes
)
@triton.jit
def kernel(x_ptr, x_size, **META):
    BLOCK_SIZE = META['BLOCK_SIZE']
:note: When all the configurations are evaluated, the kernel will run multiple times.
This means that whatever value the kernel updates will be updated multiple times.
To avoid this undesired behavior, you can use the `reset_to_zero` argument, which
resets the value of the provided tensor to zero before running any configuration.
:param configs: a list of :code:`triton.Config` objects
:type configs: list[triton.Config]
:param key: a list of argument names whose change in value will trigger the evaluation of all provided configs.
:type key: list[str]
:param prune_configs_by: a dict of functions used to prune configs, with fields:
'perf_model': a performance model used to predict the running time of different configs; returns the running time
'top_k': the number of configs to benchmark
'early_config_prune' (optional): a function used to do early pruning (e.g. by num_stages). It takes configs: List[Config] as input and returns the pruned configs.
:param reset_to_zero: a list of argument names whose value will be reset to zero before evaluating any configs.
:type reset_to_zero: list[str]
"""
def decorator(fn):
    return Autotuner(
        fn,
        fn.arg_names,
        configs,
        key,
        reset_to_zero,
        prune_configs_by,
        nearest_power_of_two,
    )

return decorator
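
# A minimal usage sketch for the autotune decorator documented above, assuming Triton-style
# Config objects. The kernel itself, its block sizes, and the `y_ptr` buffer passed to
# `reset_to_zero` are illustrative only and not part of the original file.
import triton
import triton.language as tl

@autotune(
    configs=[
        triton.Config({"BLOCK_SIZE": 128}, num_warps=4),
        triton.Config({"BLOCK_SIZE": 1024}, num_warps=8),
    ],
    key=["x_size"],  # re-benchmark the configs whenever x_size changes
    reset_to_zero=["y_ptr"],  # y is accumulated into, so zero it between benchmark runs
)
@triton.jit
def add_into_kernel(x_ptr, y_ptr, x_size, BLOCK_SIZE: tl.constexpr):
    pid = tl.program_id(axis=0)
    offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < x_size
    x = tl.load(x_ptr + offsets, mask=mask)
    y = tl.load(y_ptr + offsets, mask=mask)
    tl.store(y_ptr + offsets, x + y, mask=mask)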
def matmul248_kernel_config_pruner(configs, nargs):
    """
    The main purpose of this function is to shrink BLOCK_SIZE_* when the corresponding dimension is smaller.
    """
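    # Rough sketch of the pruning strategy the docstring describes, not the original body:
    # shrink each BLOCK_SIZE_* down to the (power-of-two rounded) runtime dimension, then drop
    # configs that become duplicates. Assumes Triton-style Config objects and a `nargs` dict
    # carrying 'M', 'N', 'K'; the exact heuristics here are illustrative.
    import math

    import triton

    m = max(2 ** int(math.ceil(math.log2(nargs["M"]))), 16)
    n = max(2 ** int(math.ceil(math.log2(nargs["N"]))), 16)
    k = max(2 ** int(math.ceil(math.log2(nargs["K"]))), 16)

    seen = set()
    for config in configs:
        # Never benchmark a block size larger than the rounded dimension itself.
        block_m = min(m, config.kwargs["BLOCK_SIZE_M"])
        block_n = min(n, config.kwargs["BLOCK_SIZE_N"])
        block_k = min(k, config.kwargs["BLOCK_SIZE_K"])
        group_m = config.kwargs["GROUP_SIZE_M"]
        signature = (block_m, block_n, block_k, group_m, config.num_stages, config.num_warps)
        if signature in seen:
            continue  # identical after shrinking, no point benchmarking it twice
        seen.add(signature)
        yield triton.Config(
            {"BLOCK_SIZE_M": block_m, "BLOCK_SIZE_N": block_n, "BLOCK_SIZE_K": block_k, "GROUP_SIZE_M": group_m},
            num_stages=config.num_stages,
            num_warps=config.num_warps,
        )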
f"The passed weight is not `gptq` compatible, loader needs to be updated."
)
if use_exllama:
    linear = ExllamaQuantLinear(
        qweight, qzeros, scales, g_idx, bias, bits, groupsize
    )
else:
    linear = QuantLinear(
        qweight,
        qzeros,
        scales,
        g_idx,
        bias,
        bits,
        groupsize,
    )
elifquantize=="awq":
try:
qweight,qzeros,scales,_,bits,groupsize,_=weight
exceptException:
raiseNotImplementedError(
f"The passed weight is not `awq` compatible, loader needs to be updated."
)
ifIS_ROCM_SYSTEM:
raiseNotImplementedError(
"AWQ GEMM kernel can't be used on ROCm systems, please use `--quantize gptq` instead "
"to use Exllama/GPTQ kernels for AWQ inference."
)
ifnotHAS_AWQ:
raiseNotImplementedError(
"You do not seem to have awq installed, either install it (cd server && make install-awq), or try using GPTQ `---quantize gptq` a conversion AWQ->GPTQ will happen on the fly"
)
linear=WQLinear(
w_bit=bits,
group_size=groupsize,
qweight=qweight,
qzeros=qzeros,
scales=scales,
bias=biasisnotNone,
)
else:
raiseNotImplementedError(f"Quantization `{quantize}` is not implemented yet.")
# We use the vLLM RMSNorm kernel that can be compiled for ROCm, instead of the Flash Attention ones that cannot.
if residual is not None:
    hidden_states += residual
residual = hidden_states
out = torch.empty_like(hidden_states)
layernorm_ops.rms_norm(
    out,
    hidden_states,
    self.weight.data,
    self.variance_epsilon,
)
return out, residual
else:
    raise ValueError(
        "Your system does not seem to be supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction."
    )

# NOTE: On ROCm systems, we use a ROPE implementation adapted from vLLM, which launches a single kernel for both query/key, contrary to the flash-attn implementation used on NVIDIA systems.
# When compiling the flash-attn rotary kernel on ROCm, hipcc appears unable to unroll loops, resulting in even slower inference than eager mode: https://github.com/pytorch/pytorch/issues/113773
# For ROCm, we always use float cos/sin to avoid a cast.
# For NVIDIA, for some reason, the flash-attn rotary kernel requires cos/sin and query/key to be of the same dtype: https://github.com/Dao-AILab/flash-attention/blob/017716451d446e464dde9aca3a3c1ed2209caaa9/csrc/rotary/rotary.cpp#L26
# But it later casts cos/sin to float anyway: https://github.com/Dao-AILab/flash-attention/blob/017716451d446e464dde9aca3a3c1ed2209caaa9/csrc/rotary/rotary_cuda.cu#L29, which looks suboptimal.
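
# A minimal sketch (not the original TGI code) of the cos/sin dtype handling described above:
# float32 on ROCm for the vLLM-style kernel, and matching the query dtype on NVIDIA where the
# flash-attn rotary kernel requires it. The helper name and the IS_ROCM_SYSTEM check below are
# assumptions for illustration.
import torch

IS_ROCM_SYSTEM = torch.version.hip is not None  # assumed equivalent of the flag used earlier in this file

def prepare_rotary_cos_sin(query: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor):
    """Return cos/sin in the dtype expected by the rotary kernel that will be launched."""
    if IS_ROCM_SYSTEM:
        # vLLM-style kernel: single launch for query and key, cos/sin kept in float32 to avoid a cast.
        return cos.float(), sin.float()
    # flash-attn rotary kernel: cos/sin must share the query/key dtype at the call boundary
    # (the kernel casts them back to float internally, which is why this looks suboptimal).
    return cos.to(query.dtype), sin.to(query.dtype)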