"""Return the prompt that is concatenated with other elements in the
...
...
@@ -103,31 +136,40 @@ class BaseModel:
returnself.get_prompt(messages)
# chat history processing in derived classes
@property
def sampling_param(self):
    """Bundle this template's decoding knobs into a ``SamplingParam``.

    Returns:
        SamplingParam: built from the ``top_p``, ``top_k``,
            ``temperature`` and ``repetition_penalty`` attributes stored
            on the instance. NOTE(review): ``SamplingParam`` is a
            project-declared type defined elsewhere in this module —
            presumably a simple value container; confirm at its
            definition.
    """
    # Defect fixed: the extracted source had all inter-token whitespace
    # stripped (``defsampling_param``, ``returnSamplingParam(...)``),
    # which is a syntax error. Tokens restored, behavior unchanged.
    return SamplingParam(top_p=self.top_p,
                         top_k=self.top_k,
                         temperature=self.temperature,
                         repetition_penalty=self.repetition_penalty)
@classmethod
def match(cls, model_path: str) -> Optional[str]:
    """Return the model_name that was registered to MODELS.

    Base implementation: matches nothing, so unrecognized model paths
    fall through to ``None``. Subclasses override this to claim paths
    (e.g. by substring tests on ``model_path``).

    Args:
        model_path (str): the model path used for matching.

    Returns:
        Optional[str]: the registered model name, or ``None`` when this
            template does not apply.
    """
    # Defect fixed: inter-token whitespace was stripped in the extracted
    # source (``defmatch(cls,model_path:str)``, ``returnNone``), making
    # the block a syntax error. Tokens restored, behavior unchanged.
    return None
@MODELS.register_module(name='wizardlM')
@MODELS.register_module(name='vicuna')
classVicuna(BaseModel):
"""Chat template of vicuna model."""
def__init__(
self,
system="""A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. """,# noqa: E501
meta_instruction="""A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.""",# noqa: E501
"""Return the model_name that was registered to MODELS.
Args:
model_path (str): the model path used for matching.
"""
if'vicuna'inmodel_path.lower():
return'vicuna'
if'wizardlm'inmodel_path.lower():
return'wizardlm'
@MODELS.register_module(name='internlm-chat')
@MODELS.register_module(name='internlm-chat-7b')
classInternLMChat7B(BaseModel):
@MODELS.register_module(name='internlm')
classInternLMChat7B(BaseChatTemplate):
"""Chat template of InternLM model."""
def__init__(
...
...
@@ -179,67 +274,36 @@ class InternLMChat7B(BaseModel):
- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.
- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such as English and 中文.
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",# noqa: E501
@@ -659,147 +703,222 @@ class UltraChat(BaseModel):
def__init__(
self,
system="""User: A one-turn chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, very detailed, and polite answers to the user's questions.</s>""",# noqa: E501
eos='</s>',
system='User: ',
meta_instruction="""A one-turn chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, very detailed, and polite answers to the user's questions.""",# noqa: E501
"""Return the model_name that was registered to MODELS.
fire.Fire(main)
Args:
model_path (str): the model path used for matching.
"""
path=model_path.lower()
if'deepseek'inpathand'chat'inpath:
return'deepseek'
@MODELS.register_module(name=['yi-vl'])
classYiVL(BaseChatTemplate):
def__init__(
self,
meta_instruction="""This is a chat between an inquisitive human and an AI assistant. Assume the role of the AI assistant. Read all the images carefully, and respond to the human's questions with informative, helpful, detailed and polite answers. 这是一个好奇的人类和一个人工智能助手之间的对话。假设你扮演这个AI助手的角色。仔细阅读所有的图像,并对人类的问题做出信息丰富、有帮助、详细的和礼貌的回答。\n\n""",# noqa: E501
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."""# noqa: E501
classLlama2Adapter(BasicAdapterFast):
"""Adapter for llama2.
Llama2 use the following template and the first user prompt
should contain a system prompt.
User can specify the system prompt using a <<SYS>> tag otherwise
the default system prompt is prepended to user's input.