Unverified commit aa2dd2b5, authored by Fengzhe Zhou, committed by GitHub

[Format] Add config lints (#892)

parent 3dbba119
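
Every hunk in this commit makes the same mechanical change: string literals in the touched model configs are switched from double quotes to single quotes so the files pass the newly added lint. For orientation, a complete config in the post-lint style looks roughly like the sketch below. It is assembled from the vicuna-13b hunk further down; fields hidden behind the truncated regions of the diff (batch size, runner settings, and so on) are omitted rather than guessed.

# Sketch only: a minimal OpenCompass model config in the single-quote style
# enforced by this commit, reconstructed from the vicuna-13b-v1.5-16k hunk below.
from opencompass.models import VLLM

_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='USER: '),
        dict(role='BOT', begin=' ASSISTANT:', end='</s>', generate=True),
    ],
)

models = [
    dict(
        type=VLLM,
        abbr='vicuna-13b-v1.5-16k-vllm',
        path='lmsys/vicuna-13b-v1.5-16k',
        meta_template=_meta_template,
        max_out_len=100,
        max_seq_len=2048,
    ),
]
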
@@ -4,8 +4,8 @@ from opencompass.models import VLLM
 _meta_template = dict(
     begin='<s>',
     round=[
-        dict(role="HUMAN", begin='Human: ', end='\n'),
-        dict(role="BOT", begin="Assistant: ", end='</s>', generate=True),
+        dict(role='HUMAN', begin='Human: ', end='\n'),
+        dict(role='BOT', begin='Assistant: ', end='</s>', generate=True),
     ],
 )
...
@@ -4,7 +4,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='qwen1.5-moe-a2-7b-hf',
-        path="Qwen/Qwen1.5-MoE-A2.7B",
+        path='Qwen/Qwen1.5-MoE-A2.7B',
         tokenizer_path='Qwen/Qwen1.5-MoE-A2.7B',
         model_kwargs=dict(
             device_map='auto',
...
@@ -2,8 +2,8 @@ from opencompass.models import HuggingFaceCausalLM
 _meta_template = dict(
     round=[
-        dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>\n'),
-        dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>\n', generate=True),
+        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
+        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
     ],
 )
@@ -11,7 +11,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='qwen1.5-moe-a2-7b-chat-hf',
-        path="Qwen/Qwen1.5-MoE-A2.7B-Chat",
+        path='Qwen/Qwen1.5-MoE-A2.7B-Chat',
         model_kwargs=dict(
             device_map='auto',
             trust_remote_code=True
...
@@ -3,8 +3,8 @@ from opencompass.models import ModelScopeCausalLM
 _meta_template = dict(
     round=[
-        dict(role="HUMAN", begin='\n<|im_start|>user\n', end='<|im_end|>'),
-        dict(role="BOT", begin="\n<|im_start|>assistant\n", end='<|im_end|>', generate=True),
+        dict(role='HUMAN', begin='\n<|im_start|>user\n', end='<|im_end|>'),
+        dict(role='BOT', begin='\n<|im_start|>assistant\n', end='<|im_end|>', generate=True),
     ],
 )
@@ -12,7 +12,7 @@ models = [
     dict(
         type=ModelScopeCausalLM,
         abbr='qwen-7b-chat-ms',
-        path="qwen/Qwen-7B-Chat",
+        path='qwen/Qwen-7B-Chat',
         tokenizer_path='qwen/Qwen-7B-Chat',
         tokenizer_kwargs=dict(
             padding_side='left',
...
@@ -3,8 +3,8 @@ from opencompass.models import VLLM
 _meta_template = dict(
     round=[
-        dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>\n'),
-        dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>\n', generate=True),
+        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
+        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
     ],
 )
@@ -12,7 +12,7 @@ models = [
     dict(
         type=VLLM,
         abbr='qwen1.5-14b-chat-vllm',
-        path="Qwen/Qwen1.5-14B-Chat",
+        path='Qwen/Qwen1.5-14B-Chat',
         model_kwargs=dict(tensor_parallel_size=2),
         meta_template=_meta_template,
         max_out_len=100,
...
@@ -5,7 +5,7 @@ models = [
     dict(
         type=VLLM,
         abbr='qwen1.5-72b-vllm',
-        path="Qwen/Qwen1.5-72B",
+        path='Qwen/Qwen1.5-72B',
         model_kwargs=dict(tensor_parallel_size=4),
         max_out_len=100,
         max_seq_len=2048,
...
@@ -3,8 +3,8 @@ from opencompass.models import VLLM
 _meta_template = dict(
     round=[
-        dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>\n'),
-        dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>\n', generate=True),
+        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
+        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
     ],
 )
@@ -12,7 +12,7 @@ models = [
     dict(
         type=VLLM,
         abbr='qwen1.5-72b-chat-vllm',
-        path="Qwen/Qwen1.5-72B-Chat",
+        path='Qwen/Qwen1.5-72B-Chat',
         model_kwargs=dict(tensor_parallel_size=4),
         meta_template=_meta_template,
         max_out_len=100,
...
@@ -3,8 +3,8 @@ from opencompass.models import VLLM
 _meta_template = dict(
     round=[
-        dict(role="HUMAN", begin='\n<|im_start|>user\n', end='<|im_end|>'),
-        dict(role="BOT", begin="\n<|im_start|>assistant\n", end='<|im_end|>', generate=True),
+        dict(role='HUMAN', begin='\n<|im_start|>user\n', end='<|im_end|>'),
+        dict(role='BOT', begin='\n<|im_start|>assistant\n', end='<|im_end|>', generate=True),
     ],
 )
@@ -12,7 +12,7 @@ models = [
     dict(
         type=VLLM,
         abbr='qwen-14b-chat-vllm',
-        path="Qwen/Qwen-14B-Chat",
+        path='Qwen/Qwen-14B-Chat',
         model_kwargs=dict(tensor_parallel_size=4),
         meta_template=_meta_template,
         max_out_len=100,
...
@@ -3,8 +3,8 @@ from opencompass.models import VLLM
 _meta_template = dict(
     round=[
-        dict(role="HUMAN", begin='\n<|im_start|>user\n', end='<|im_end|>'),
-        dict(role="BOT", begin="\n<|im_start|>assistant\n", end='<|im_end|>', generate=True),
+        dict(role='HUMAN', begin='\n<|im_start|>user\n', end='<|im_end|>'),
+        dict(role='BOT', begin='\n<|im_start|>assistant\n', end='<|im_end|>', generate=True),
     ],
 )
@@ -12,7 +12,7 @@ models = [
     dict(
         type=VLLM,
         abbr='qwen-72b-chat-vllm',
-        path="Qwen/Qwen-72B-Chat",
+        path='Qwen/Qwen-72B-Chat',
         model_kwargs=dict(tensor_parallel_size=4),
         meta_template=_meta_template,
         max_out_len=100,
...
@@ -4,7 +4,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='rwkv-5-3b',
-        path="RWKV/rwkv-5-world-3b",
+        path='RWKV/rwkv-5-world-3b',
         tokenizer_path='RWKV/rwkv-5-world-3b',
         model_kwargs=dict(
             device_map='auto',
...
@@ -12,7 +12,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='tigerbot-13b-chat-v1-hf',
-        path="TigerResearch/tigerbot-13b-chat-v1",
+        path='TigerResearch/tigerbot-13b-chat-v1',
        tokenizer_path='TigerResearch/tigerbot-13b-chat-v1',
         tokenizer_kwargs=dict(
             padding_side='left',
...
@@ -12,7 +12,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='tigerbot-13b-chat-v2-hf',
-        path="TigerResearch/tigerbot-13b-chat",
+        path='TigerResearch/tigerbot-13b-chat',
         tokenizer_path='TigerResearch/tigerbot-13b-chat',
         tokenizer_kwargs=dict(
             padding_side='left',
...
@@ -12,7 +12,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='tigerbot-70b-chat-v2-hf',
-        path="TigerResearch/tigerbot-70b-chat-v2",
+        path='TigerResearch/tigerbot-70b-chat-v2',
         tokenizer_path='TigerResearch/tigerbot-70b-chat-v2',
         tokenizer_kwargs=dict(
             padding_side='left',
...
@@ -12,7 +12,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='tigerbot-70b-chat-v3-hf',
-        path="TigerResearch/tigerbot-70b-chat-v3",
+        path='TigerResearch/tigerbot-70b-chat-v3',
         tokenizer_path='TigerResearch/tigerbot-70b-chat-v3',
         model_kwargs=dict(
             trust_remote_code=True,
...
@@ -12,7 +12,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='tigerbot-7b-chat-v3-hf',
-        path="TigerResearch/tigerbot-7b-chat",
+        path='TigerResearch/tigerbot-7b-chat',
         tokenizer_path='TigerResearch/tigerbot-7b-chat',
         tokenizer_kwargs=dict(
             padding_side='left',
...
@@ -12,7 +12,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='tigerbot-sft-7b-hf',
-        path="TigerResearch/tigerbot-7b-sft",
+        path='TigerResearch/tigerbot-7b-sft',
         tokenizer_path='TigerResearch/tigerbot-7b-sft',
         tokenizer_kwargs=dict(
             padding_side='left',
...
@@ -2,8 +2,8 @@ from opencompass.models import VLLM
 _meta_template = dict(
     round=[
-        dict(role="HUMAN", begin='USER: '),
-        dict(role="BOT", begin=" ASSISTANT:", end='</s>', generate=True),
+        dict(role='HUMAN', begin='USER: '),
+        dict(role='BOT', begin=' ASSISTANT:', end='</s>', generate=True),
     ],
 )
@@ -11,7 +11,7 @@ models = [
     dict(
         type=VLLM,
         abbr='vicuna-13b-v1.5-16k-vllm',
-        path="lmsys/vicuna-13b-v1.5-16k",
+        path='lmsys/vicuna-13b-v1.5-16k',
         meta_template=_meta_template,
         max_out_len=100,
         max_seq_len=2048,
...
@@ -2,8 +2,8 @@ from opencompass.models import VLLM
 _meta_template = dict(
     round=[
-        dict(role="HUMAN", begin='USER: '),
-        dict(role="BOT", begin=" ASSISTANT:", end='</s>', generate=True),
+        dict(role='HUMAN', begin='USER: '),
+        dict(role='BOT', begin=' ASSISTANT:', end='</s>', generate=True),
     ],
 )
@@ -11,7 +11,7 @@ models = [
     dict(
         type=VLLM,
         abbr='vicuna-7b-v1.5-16k-vllm',
-        path="lmsys/vicuna-7b-v1.5-16k",
+        path='lmsys/vicuna-7b-v1.5-16k',
         meta_template=_meta_template,
         max_out_len=100,
         max_seq_len=2048,
...
@@ -5,7 +5,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='WizardCoder-15B-V1.0',
-        path="WizardLM/WizardCoder-15B-V1.0",
+        path='WizardLM/WizardCoder-15B-V1.0',
         tokenizer_path='WizardLM/WizardCoder-15B-V1.0',
         tokenizer_kwargs=dict(
             padding_side='left',
...
@@ -5,7 +5,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='WizardCoder-1B-V1.0',
-        path="WizardLM/WizardCoder-1B-V1.0",
+        path='WizardLM/WizardCoder-1B-V1.0',
         tokenizer_path='WizardLM/WizardCoder-1B-V1.0',
         tokenizer_kwargs=dict(
             padding_side='left',
...
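
None of the hunks show the lint configuration itself, only its effect on the configs. As a rough illustration of what a quote-style check does (a hypothetical standalone script, not the tooling this PR actually wires up), the following flags plain double-quoted string literals that could legally be single-quoted:

# Hypothetical helper, for illustration only: report plain double-quoted
# strings that contain no quote characters and could use single quotes instead.
import sys
import tokenize


def find_double_quoted(path):
    offenders = []
    with open(path, 'rb') as f:
        for tok in tokenize.tokenize(f.readline):
            if (tok.type == tokenize.STRING
                    and tok.string.startswith('"')
                    and not tok.string.startswith('"""')
                    and '"' not in tok.string[1:-1]
                    and "'" not in tok.string):
                offenders.append((tok.start[0], tok.string))
    return offenders


if __name__ == '__main__':
    for fname in sys.argv[1:]:
        for lineno, literal in find_double_quoted(fname):
            print(f'{fname}:{lineno}: prefer single quotes: {literal}')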